From bdaa306aafe44d7caa47bd54084248868b0b4e27 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 16:43:22 -0700 Subject: [PATCH 001/438] Add an on ramp option --- .../exp_configs/non_rl/i210_subnetwork.py | 74 ++++--- flow/controllers/routing_controllers.py | 21 ++ flow/envs/base.py | 14 +- flow/networks/i210_subnetwork.py | 196 ++++++++++-------- 4 files changed, 184 insertions(+), 121 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index dd85c56cf..c3db70f04 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -4,6 +4,7 @@ import numpy as np from flow.controllers.car_following_models import IDMController +from flow.controllers.routing_controllers import I210Router from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -15,18 +16,35 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# create the base vehicle type that will be used for inflows -vehicles = VehicleParams() -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 - }), -) +ON_RAMP = True + +if ON_RAMP: + vehicles = VehicleParams() + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + routing_controller=(I210Router, {}) + ) + +else: + # create the base vehicle type that will be used for inflows + vehicles = VehicleParams() + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + ) inflow = InFlows() # main highway @@ -37,18 +55,19 @@ departLane="random", departSpeed=23) # on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321, -# departLane="random", -# departSpeed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421, -# departLane="random", -# departSpeed=20) +if ON_RAMP: + inflow.add( + veh_type="human", + edge="27414345", + vehs_per_hour=321, + departLane="random", + departSpeed=20) + inflow.add( + veh_type="human", + edge="27414342#0", + vehs_per_hour=421, + departLane="random", + departSpeed=20) NET_TEMPLATE = os.path.join( config.PROJECT_PATH, @@ -71,20 +90,21 @@ sim=SumoParams( sim_step=0.5, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=4500, + horizon=7200, ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, - template=NET_TEMPLATE + template=NET_TEMPLATE, + additional_params={"use_on_ramp": ON_RAMP} ), # vehicles to be placed in the network at the start of a rollout (see diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py index e6ccdde78..c880b5bbf 100755 --- a/flow/controllers/routing_controllers.py +++ b/flow/controllers/routing_controllers.py @@ -124,3 +124,24 @@ def choose_route(self, env): new_route = super().choose_route(env) return new_route + 
+class I210Router(ContinuousRouter):
+    """Assists in choosing routes in select cases for the I-210 subnetwork.
+
+    Extension to the Continuous Router.
+
+    Usage
+    -----
+    See base class for usage example.
+    """
+
+    def choose_route(self, env):
+        """See parent class."""
+        edge = env.k.vehicle.get_edge(self.veh_id)
+        lane = env.k.vehicle.get_lane(self.veh_id)
+
+        # vehicles on this edge in lanes 3, 4, and 5 will not be able to reach
+        # the off-ramp in time, so keep them on the main highway route
+        if edge == "119257908#1-AddedOffRampEdge" and lane in [5, 4, 3]:
+            new_route = env.available_routes["119257908#1-AddedOffRampEdge"][0][0]
+        else:
+            new_route = super().choose_route(env)
+
+        return new_route
diff --git a/flow/envs/base.py b/flow/envs/base.py
index 1abb8a3c9..60eab6ebe 100644
--- a/flow/envs/base.py
+++ b/flow/envs/base.py
@@ -396,8 +396,18 @@ def step(self, rl_actions):
         # test if the environment should terminate due to a collision or the
         # time horizon being met
         done = (self.time_counter >= self.env_params.sims_per_step *
-                (self.env_params.warmup_steps + self.env_params.horizon)
-                or crash)
+                (self.env_params.warmup_steps + self.env_params.horizon))
+        if crash:
+            print(
+                "**********************************************************\n"
+                "**********************************************************\n"
+                "**********************************************************\n"
+                "WARNING: There was a crash. \n"
+                "**********************************************************\n"
+                "**********************************************************\n"
+                "**********************************************************"
+            )
+
         # compute the info for each agent
         infos = {}
diff --git a/flow/networks/i210_subnetwork.py b/flow/networks/i210_subnetwork.py
index d8e05efb5..febb39b00 100644
--- a/flow/networks/i210_subnetwork.py
+++ b/flow/networks/i210_subnetwork.py
@@ -45,97 +45,109 @@ def specify_routes(self, net_params):
 
         Routes for vehicles moving through the I-210 subnetwork.
""" - rts = { - # Main highway - "119257914": [ - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) - # (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 17 / 8378) - ], - # "119257908#0": [ - # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOnRampEdge": [ - # (["119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1": [ - # (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOffRampEdge": [ - # (["119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#2": [ - # (["119257908#2", "119257908#3"], 1), - # ], - # "119257908#3": [ - # (["119257908#3"], 1), - # ], - # - # # On-ramp - # "27414345": [ - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 9 / 321), - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 9 / 321), - # ], - # "27414342#0": [ - # (["27414342#0", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 20 / 421), - # (["27414342#0", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 20 / 421), - # ], - # "27414342#1-AddedOnRampEdge": [ - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 0.5), - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - # ], - # - # # Off-ramp - # "173381935": [ - # (["173381935"], 1), - # ], - } + if net_params.additional_params["use_on_ramp"]: + rts = { + # Main highway + "119257914": [ + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + 17 / 8378) + ], + "119257908#0": [ + (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", + # "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), 
+ ], + "119257908#1-AddedOnRampEdge": [ + (["119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1-AddedOnRampEdge", "119257908#1", + # "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1": [ + (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1-AddedOffRampEdge": [ + (["119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#2": [ + (["119257908#2", "119257908#3"], 1), + ], + "119257908#3": [ + (["119257908#3"], 1), + ], + + # On-ramp + "27414345": [ + (["27414345", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1 - 9 / 321), + (["27414345", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "173381935"], + 9 / 321), + ], + "27414342#0": [ + (["27414342#0", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1 - 20 / 421), + (["27414342#0", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "173381935"], + 20 / 421), + ], + "27414342#1-AddedOnRampEdge": [ + (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 0.5), + (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + 0.5), + ], + + # Off-ramp + "173381935": [ + (["173381935"], 1), + ], + } + + else: + rts = { + # Main highway + "119257914": [ + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1), + ], + } return rts From 2eac0da8ecb3dbdbc45fd6efcca2718c9207fc12 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 18:25:53 -0700 Subject: [PATCH 002/438] The acceleration noise is now scaled by the sqrt of the sim step as suggested by Benni --- flow/controllers/base_controller.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 41780826b..4004b1c4d 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -107,7 +107,7 @@ def get_action(self, env): # add noise to the accelerations, if requested if self.accel_noise > 0: - accel += np.random.normal(0, self.accel_noise) + accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) # run the failsafes, if requested if self.fail_safe == 'instantaneous': From 4c49ab74022a613a713f93a4ff7828821dec8cad Mon Sep 17 00:00:00 2001 From: Kathy Jang Date: Thu, 19 Mar 2020 10:32:47 -0700 Subject: [PATCH 003/438] Fixed issue 840 (#841) --- flow/utils/rllib.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py index b5abc9a23..80193c22b 100644 --- a/flow/utils/rllib.py +++ b/flow/utils/rllib.py @@ -6,6 +6,7 @@ import json from copy import deepcopy import os +import sys import flow.envs from flow.core.params 
import SumoLaneChangeParams, SumoCarFollowingParams, \
@@ -207,6 +208,9 @@ def get_rllib_config(path):
 
 def get_rllib_pkl(path):
     """Return the data from the specified rllib configuration file."""
+    dirname = os.path.dirname(__file__)
+    filename = os.path.join(dirname, '../../examples/')
+    sys.path.append(filename)
     config_path = os.path.join(path, "params.pkl")
     if not os.path.exists(config_path):
         config_path = os.path.join(path, "../params.pkl")

From 35f5b5f1b96e6ca9db6175b0fd9ccdcfb30bff6b Mon Sep 17 00:00:00 2001
From: Kathy Jang
Date: Thu, 19 Mar 2020 11:18:07 -0700
Subject: [PATCH 004/438] Added code to output json. Added code to resolve
 macOS matplotlib import error

---
 examples/simulate.py                 | 9 +++++++++
 flow/visualize/time_space_diagram.py | 7 ++++++-
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/examples/simulate.py b/examples/simulate.py
index 60767b6b7..4ec46b974 100644
--- a/examples/simulate.py
+++ b/examples/simulate.py
@@ -5,7 +5,10 @@
 """
 import argparse
 import sys
+import json
+import os
 from flow.core.experiment import Experiment
+from flow.utils.rllib import FlowParamsEncoder
 
 
 def parse_args(args):
@@ -70,6 +73,12 @@ def parse_args(args):
     if flags.gen_emission:
         flow_params['sim'].emission_path = "./data"
 
+    # Dump the flow params to a json file
+    json_filename = flow_params['exp_tag']
+    with open(os.path.join(flow_params['sim'].emission_path, json_filename) + '.json', 'w') as outfile:
+        json.dump(flow_params, outfile,
+                  cls=FlowParamsEncoder, sort_keys=True, indent=4)
+
     # Create the experiment object.
     exp = Experiment(flow_params, callables)

diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py
index d8dad01e9..a08ecdf0f 100644
--- a/flow/visualize/time_space_diagram.py
+++ b/flow/visualize/time_space_diagram.py
@@ -21,7 +21,12 @@
 import argparse
 import csv
 
-from matplotlib import pyplot as plt
+try:
+    from matplotlib import pyplot as plt
+except ImportError:
+    import matplotlib
+    matplotlib.use('TkAgg')
+    from matplotlib import pyplot as plt
 from matplotlib.collections import LineCollection
 import matplotlib.colors as colors
 import numpy as np

From b8d12126b09bbf2552b27c5ed35887ec078dad97 Mon Sep 17 00:00:00 2001
From: Eugene Vinitsky
Date: Thu, 19 Mar 2020 11:32:12 -0700
Subject: [PATCH 005/438] Increased inflows to 10800 to match density in
 Benni's ring

---
 examples/exp_configs/non_rl/i210_subnetwork.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py
index c3db70f04..4f19b89b5 100644
--- a/examples/exp_configs/non_rl/i210_subnetwork.py
+++ b/examples/exp_configs/non_rl/i210_subnetwork.py
@@ -16,7 +16,7 @@
 from flow.envs import TestEnv
 from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION
 
-ON_RAMP = True
+ON_RAMP = False
 
 if ON_RAMP:
     vehicles = VehicleParams()
@@ -51,7 +51,7 @@
 inflow.add(
     veh_type="human",
     edge="119257914",
-    vehs_per_hour=8378,
+    vehs_per_hour=10800,
     departLane="random",
     departSpeed=23)
 # on ramp

From 661564baeaec5f1be107a65b4ba3a4f6ea727c8c Mon Sep 17 00:00:00 2001
From: Eugene Vinitsky
Date: Thu, 19 Mar 2020 12:10:07 -0700
Subject: [PATCH 006/438] Upgrade the network to not have keepclear value on
 the junctions

---
 .../exp_configs/templates/sumo/test2.net.xml | 78 ++++++++++++++-----
 1 file changed, 57 insertions(+), 21 deletions(-)

diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml
index 00e3edcd5..16170b917
100644
--- a/examples/exp_configs/templates/sumo/test2.net.xml
+++ b/examples/exp_configs/templates/sumo/test2.net.xml
@@ -1,5 +1,41 @@
 [XML element content stripped during extraction; per the hunk header, 36 lines are added near the top of the network template]
@@ -4680,24 +4716,24 @@
 [XML element content stripped during extraction; 18 elements are rewritten here. Per the commit message, the keepclear values are removed from the junctions]
@@ -4801,10 +4837,10 @@
 [XML element content stripped during extraction; a few more junction elements are rewritten in the same way]

From 5869c581ca884af61902cea9e6acfe52a7b15e80 Mon Sep 17 00:00:00 2001
From: Eugene Vinitsky
Date: Thu, 19 Mar 2020 12:41:31 -0700
Subject: [PATCH 007/438] Convert inflows to pick out the best lane to travel
 in instead of a random lane

---
 examples/exp_configs/non_rl/i210_subnetwork.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py
index 4f19b89b5..335461270 100644
--- a/examples/exp_configs/non_rl/i210_subnetwork.py
+++ b/examples/exp_configs/non_rl/i210_subnetwork.py
@@ -52,8 +52,8 @@
     veh_type="human",
     edge="119257914",
     vehs_per_hour=10800,
-    departLane="random",
-    departSpeed=23)
+    departLane="best",
+    departSpeed=23.0)
 # on ramp
 if ON_RAMP:

From d6ed510694f97cfa2a76539b481ec2c81a920144 Mon Sep 17 00:00:00 2001
From: zpymyyn
Date: Mon, 23 Mar 2020 22:09:09 +0200
Subject: [PATCH 008/438] specify python version for pip install (#859)

To avoid error caused by multiple versions of python and pip
---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index f9508507e..70b894da2 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@ class build_ext(_build_ext.build_ext):
     def run(self):
         """Install traci wheels."""
         subprocess.check_call(
-            ['pip', 'install',
+            ['python3', '-m', 'pip', 'install',
              'https://akreidieh.s3.amazonaws.com/sumo/flow-0.4.0/'
              'sumotools-0.4.0-py3-none-any.whl'])

From 0166e419330ba56c16df84a413bedb78e65be2b4 Mon Sep 17 00:00:00 2001
From: Aboudy Kreidieh
Date: Tue, 24 Mar 2020 13:38:06 -0700
Subject: [PATCH 009/438] support for h-baselines (#874)

* started adding support for h-baselines
* some cleanup
* some cleanup
* pydocstyle
* added test to parse_args
* working support for multiagent envs
* added tests for train_h_baselines
* maybe a bug fix
* maybe a bug fix
* one more try
* helping out coveralls
* got rid of broken test
* pep8
---
 .coveragerc                       |   1 +
 .travis.yml                       |  12 +-
 docs/source/flow_setup.rst        |  91 ++++++---
 examples/README.md                | 164 +++++++++++++---
 examples/train.py                 | 300 ++++++++++++++++++++++--------
 tests/fast_tests/test_examples.py |  63 +++++++
 tests/fast_tests/test_util.py     |  20 --
 7 files changed, 496 insertions(+), 155 deletions(-)

diff --git a/.coveragerc b/.coveragerc
index 70674c543..3505cadcb 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -20,3 +20,4 @@ exclude_lines =
     raise NotImplementedError
     @ray.remote
     def policy_mapping_fn*
+    def main(args)*
diff --git a/.travis.yml b/.travis.yml
index 30f3174a4..297281bc7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -54,6 +54,17 @@ before_install:
   - export AIMSUN_SITEPACKAGES="/home/travis/miniconda/envs/aimsun_flow"
   - export AIMSUN_NEXT_PATH="/home/user/Aimsun_Next_XXX"
 
+  # Install stable-baselines
+  - pip install stable_baselines==2.7.0
+
+  # Install h-baselines
+  - pushd $HOME
+  - git clone https://github.com/AboudyKreidieh/h-baselines.git
+  - pushd h-baselines
+  - pip install -e .
+  - popd
+  - popd
+
   - ls ../
 
 
 install:
   - pip install flake8
   - pip install -e .
   - pip install coveralls
   - pip install jupyter
-  - pip install stable_baselines==2.7.0
 
 script:
   - nose2 --with-coverage
diff --git a/docs/source/flow_setup.rst b/docs/source/flow_setup.rst
index 606a9d6d4..60734b7b1 100644
--- a/docs/source/flow_setup.rst
+++ b/docs/source/flow_setup.rst
@@ -2,7 +2,7 @@
 .. contents:: Table of contents
 
 Local Installation of Flow
-==================
+==========================
 
 To get Flow running, you need three things: Flow,
@@ -108,7 +108,9 @@ Note that, if the above commands did not work, you may need to run
 ``source ~/.bashrc`` or open a new terminal to update your $PATH variable.
 
 *Troubleshooting*:
-If you are a Mac user and the above command gives you the error ``FXApp:openDisplay: unable to open display :0.0``, make sure to open the application XQuartz.
+If you are a Mac user and the above command gives you the error
+``FXApp:openDisplay: unable to open display :0.0``, make sure to open the
+application XQuartz.
 
 Testing your SUMO and Flow installation
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -151,7 +153,10 @@ during the execution of various tasks. The path should look something like:
     export AIMSUN_NEXT_PATH="/home/user/Aimsun_Next_X_Y_Z/" # Linux
     export AIMSUN_NEXT_PATH="/Applications/Aimsun Next.app/Contents/MacOS/" # OS X
 
-`Note for Mac users:` when you download Aimsun, you will get a folder named "Programming". You need to rename it to "programming" (all lowercase) and to move it inside the "Aimsun Next.app/Contents/MacOS/" directory so that the python API can work.
+`Note for Mac users:` when you download Aimsun, you will get a folder named
+"Programming". You need to rename it to "programming" (all lowercase) and to
+move it inside the "Aimsun Next.app/Contents/MacOS/" directory so that the
+python API can work.
 
 In addition, being that Aimsun's python API is written to support Python 2.7.4,
 we will need to create a Python 2.7.4 conda environment that Aimsun can refer
@@ -198,8 +203,11 @@ to activate the `flow` env. Type:
     source activate flow
     python examples/simulate.py ring --aimsun
 
-*Troubleshootig for Ubuntu users with Aimsun 8.4*: when you run the above example, you may get a subprocess.Popen error ``OSError: [Errno 8] Exec format error:``.
-To fix this, go to the `Aimsun Next` main directory, open the `Aimsun_Next` binary with a text editor and add the shebang to the first line of the script ``#!/bin/sh``.
+*Troubleshooting for Ubuntu users with Aimsun 8.4*: when you run the above
+example, you may get a subprocess.Popen error ``OSError: [Errno 8] Exec format error:``.
+To fix this, go to the `Aimsun Next` main directory, open the `Aimsun_Next`
+binary with a text editor and add the shebang to the first line of the script
+``#!/bin/sh``.
 
 (Optional) Install Ray RLlib
 ----------------------------
@@ -211,10 +219,11 @@ RLlib is one such library.
 
 First visit and install the required packages.
 
-If you are not intending to develop RL algorithms or customize rllib you don't need to do anything,
-Ray was installed when you created the conda environment.
+If you are not intending to develop RL algorithms or customize rllib you don't
+need to do anything, Ray was installed when you created the conda environment.
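+
+As a quick sanity check that the bundled Ray installation works (a minimal
+check, assuming the ``flow`` conda environment is active), you can run:
+
+::
+
+    python -c "import ray; ray.init(); print(ray.__version__)"
+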
-If you are intending to modify Ray, the installation process for this library is as follows: +If you are intending to modify Ray, the installation process for this library +is as follows: :: @@ -249,6 +258,34 @@ In order to test run an Flow experiment in RLlib, try the following command: If it does not fail, this means that you have Flow properly configured with RLlib. + +Visualizing with Tensorboard +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To visualize the training progress: + +:: + + tensorboard --logdir=~/ray_results + +If tensorboard is not installed, you can install with pip: + +:: + + pip install tensorboard + +For information on how to deploy a cluster, refer to the +`Ray instructions `_. +The basic workflow is running the following locally, ssh-ing into the host +machine, and starting jobs from there. + +:: + + pip install boto3 + ray create-or-update scripts/ray_autoscale.yaml + ray teardown scripts/ray_autoscale.yaml + + (Optional) Install Stable Baselines ----------------------------------- @@ -267,33 +304,29 @@ You can test your installation by running python examples/train.py singleagent_ring --rl_trainer Stable-Baselines +(Optional) Install h-baselines +------------------------------ -(Optional) Visualizing with Tensorboard ---------------------------------------- - -To visualize the training progress: +h-baselines is another variant of stable-baselines that support the use of +single-agent, multiagent, and hierarchical policies. To install h-baselines, +run the following commands: :: - tensorboard --logdir=~/ray_results - -If tensorboard is not installed, you can install with pip: + git clone https://github.com/AboudyKreidieh/h-baselines.git + cd h-baselines + source activate flow # if using a Flow environment + pip install -e . -:: - pip install tensorboard +Testing your h-baselines installation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -For information on how to deploy a cluster, refer to the `Ray instructions `_. -The basic workflow is running the following locally, ssh-ing into the host machine, and starting -jobs from there. +You can test your installation by running :: - pip install boto3 - ray create-or-update scripts/ray_autoscale.yaml - ray teardown scripts/ray_autoscale.yaml - - + python examples/train.py singleagent_ring --rl_trainer h-baselines (Optional) Direct install of SUMO from GitHub @@ -358,16 +391,22 @@ If you have Ubuntu 14.04+, run the following command Virtual installation of Flow (using docker containers) -================================ +====================================================== To install a containerized Flow stack, run: + :: + docker run -d -p 5901:5901 -p 6901:6901 fywu85/flow-desktop:latest To access the docker container, go to the following URL and enter the default password `password`: + :: + http://localhost:6901/vnc.html To use the Jupyter Notebook inside the container, run: + :: + jupyter notebook --ip=127.0.0.1 diff --git a/examples/README.md b/examples/README.md index f25f488c5..a9d681131 100644 --- a/examples/README.md +++ b/examples/README.md @@ -10,8 +10,8 @@ within the Flow framework on a variety of traffic problems. These examples are python files that may be executed either from terminal or via a text editor. For example, in order to execute the non-RL Ring example we run: -```shell -python simulate.py ring +```shell script +python simulate.py "ring" ``` The examples are categorized into the following 3 sections: @@ -24,58 +24,168 @@ micro-simulator sumo and traffic macro-simulator Aimsun. 
To execute these examples, run -```shell +```shell script python simulate.py EXP_CONFIG ``` -where `EXP_CONFIG` is the name of the experiment configuration file, as located in -`exp_configs/non_rl.` +where `EXP_CONFIG` is the name of the experiment configuration file, as located +in `exp_configs/non_rl.` There are several *optional* arguments that can be added to the above command: -```shell +```shell script python simulate.py EXP_CONFIG --num_runs n --no_render --aimsun --gen_emission ``` -where `--num_runs` indicates the number of simulations to run (default of `n` is 1), `--no_render` indicates whether to deactivate the simulation GUI during runtime (by default simulation GUI is active), `--aimsun` indicates whether to run the simulation using the simulator Aimsun (the default simulator is SUMO), and `--gen_emission` indicates whether to generate an emission file from the simulation. +where `--num_runs` indicates the number of simulations to run (default of `n` +is 1), `--no_render` indicates whether to deactivate the simulation GUI during +runtime (by default simulation GUI is active), `--aimsun` indicates whether to +run the simulation using the simulator Aimsun (the default simulator is SUMO), +and `--gen_emission` indicates whether to generate an emission file from the +simulation. -## RL examples based on RLlib +## RL examples -These examples are similar networks as those mentioned in *non-RL examples*, but in the -presence of autonomous vehicle (AV) or traffic light agents -being trained through RL algorithms provided by *RLlib*. +### RLlib + +These examples are similar networks as those mentioned in *non-RL examples*, +but in the presence of autonomous vehicle (AV) or traffic light agents being +trained through RL algorithms provided by *RLlib*. To execute these examples, run -```shell - python train.py EXP_CONFIG - (or python train.py EXP_CONFIG --rl_trainer RLlib) +```shell script +python train.py EXP_CONFIG --rl_trainer "rllib" ``` -where `EXP_CONFIG` is the name of the experiment configuration file, as located in -`exp_configs/rl/singleagent` or `exp_configs/rl/multiagent.` +where `EXP_CONFIG` is the name of the experiment configuration file, as located +in `exp_configs/rl/singleagent` or `exp_configs/rl/multiagent.` -## RL examples based on stable_baselines +### stable-baselines These examples provide similar networks as those -mentioned in *non-RL examples*, but in the presence of autonomous vehicle (AV) or traffic -light agents being trained through RL algorithms provided by OpenAI *stable -baselines*. +mentioned in *non-RL examples*, but in the presence of autonomous vehicle (AV) +or traffic light agents being trained through RL algorithms provided by OpenAI +*stable-baselines*. To execute these examples, run -```shell - python train.py EXP_CONFIG --rl_trainer Stable-Baselines +```shell script +python train.py EXP_CONFIG --rl_trainer "stable-baselines" ``` -where `EXP_CONFIG` is the name of the experiment configuration file, as located in -`exp_configs/rl/singleagent.` +where `EXP_CONFIG` is the name of the experiment configuration file, as located +in `exp_configs/rl/singleagent.` Note that, currently, multiagent experiments are only supported through RLlib. 
 There are several *optional* arguments that can be added to the above command:
 
-```shell
- python train.py EXP_CONFIG --rl_trainer Stable-Baselines --num_cpus n1 --num_steps n2 --rollout_size r
+```shell script
+python train.py EXP_CONFIG --rl_trainer "stable-baselines" --num_cpus n1 --num_steps n2 --rollout_size r
 ```
-where `--num_cpus` indicates the number of CPUs to use (default of `n1` is 1), `--num_steps` indicates the total steps to perform the learning (default of `n2` is 5000), and `--rollout_size` indicates the number of steps in a training batch (default of `r` is 1000)
+where `--num_cpus` indicates the number of CPUs to use (default of `n1` is 1),
+`--num_steps` indicates the total steps to perform the learning (default of
+`n2` is 5000), and `--rollout_size` indicates the number of steps in a training
+batch (default of `r` is 1000)
+
+### h-baselines
+
+A third RL algorithms package supported by the `train.py` script is
+[h-baselines](https://github.com/AboudyKreidieh/h-baselines). In order to use
+the algorithms supported by this package, begin by installing h-baselines by
+following the setup instructions located
+[here](https://flow.readthedocs.io/en/latest/flow_setup.html#optional-install-h-baselines).
+A policy can be trained using one of the exp_configs as follows:
+
+```shell script
+python examples/train.py singleagent_ring --rl_trainer h-baselines
+```
+
+**Logging:**
+
+The above script executes a training operation and begins logging training and
+testing data under the path: *training_data/singleagent_ring/*.
+
+To visualize the statistics of various tensorflow operations in tensorboard,
+type:
+
+```shell script
+tensorboard --logdir /examples/training_data/singleagent_ring/
+```
+
+Moreover, as training progresses, per-iteration and cumulative statistics are
+printed as a table on your terminal. These statistics are stored under the csv
+files *train.csv* and *eval.csv* (if also using an evaluation environment)
+within the same directory.
+
+**Hyperparameters:**
+
+When using h-baselines, multiple new command-line arguments can be passed to
+adjust the choice of algorithm and variable hyperparameters of the algorithms.
+These new arguments are as follows (an example command combining several of
+them is shown after the list):
+
+* `--alg` (*str*): The algorithm to use. Must be one of [TD3, SAC]. Defaults to
+  'TD3'.
+* `--evaluate` (*store_true*): whether to add an evaluation environment. The
+  evaluation environment is similar to the training environment, but with
+  `env_params.evaluate` set to True.
+* `--n_training` (*int*): Number of training operations to perform. Each
+  training operation is performed on a new seed. Defaults to 1.
+* `--total_steps` (*int*): Total number of timesteps used during training.
+  Defaults to 1000000.
+* `--seed` (*int*): Sets the seed for numpy, tensorflow, and random. Defaults
+  to 1.
+* `--log_interval` (*int*): the number of training steps before logging
+  training results. Defaults to 2000.
+* `--eval_interval` (*int*): number of simulation steps in the training
+  environment before an evaluation is performed. Only relevant if `--evaluate`
+  is called. Defaults to 50000.
+* `--save_interval` (*int*): number of simulation steps in the training
+  environment before the model is saved. Defaults to 50000.
+* `--initial_exploration_steps` (*int*): number of timesteps that the policy is
+  run before training to initialize the replay buffer with samples. Defaults to
+  10000.
+* `--nb_train_steps` (*int*): the number of training steps. Defaults to 1.
+* `--nb_rollout_steps` (*int*): the number of rollout steps. Defaults to 1.
+* `--nb_eval_episodes` (*int*): the number of evaluation episodes. Only
+  relevant if `--evaluate` is called. Defaults to 50.
+* `--reward_scale` (*float*): the value the reward should be scaled by.
+  Defaults to 1.
+* `--buffer_size` (*int*): the max number of transitions to store. Defaults to
+  200000.
+* `--batch_size` (*int*): the size of the batch for learning the policy.
+  Defaults to 128.
+* `--actor_lr` (*float*): the actor learning rate. Defaults to 3e-4.
+* `--critic_lr` (*float*): the critic learning rate. Defaults to 3e-4.
+* `--tau` (*float*): the soft update coefficient (keep old values, between 0
+  and 1). Defaults to 0.005.
+* `--gamma` (*float*): the discount rate. Defaults to 0.99.
+* `--layer_norm` (*store_true*): enable layer normalisation.
+* `--use_huber` (*store_true*): specifies whether to use the huber distance
+  function as the loss for the critic. If set to False, the mean-squared error
+  metric is used instead.
+* `--actor_update_freq` (*int*): number of training steps per actor policy
+  update step. The critic policy is updated every training step. Only used when
+  the algorithm is set to "TD3". Defaults to 2.
+* `--noise` (*float*): scaling term to the range of the action space, that is
+  subsequently used as the standard deviation of Gaussian noise added to the
+  action if `apply_noise` is set to True in `get_action`. Only used when the
+  algorithm is set to "TD3". Defaults to 0.1.
+* `--target_policy_noise` (*float*): standard deviation term to the noise from
+  the output of the target actor policy. See TD3 paper for more. Only used when
+  the algorithm is set to "TD3". Defaults to 0.2.
+* `--target_noise_clip` (*float*): clipping term for the noise injected in the
+  target actor policy. Only used when the algorithm is set to "TD3". Defaults
+  to 0.5.
+* `--target_entropy` (*float*): target entropy used when learning the entropy
+  coefficient. If set to None, a heuristic value is used. Only used when the
+  algorithm is set to "SAC". Defaults to None.
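+
+As an illustrative example (with hypothetical values, combining several of the
+flags documented above), the following trains a SAC policy on the single-agent
+ring for two million steps with a larger batch and an evaluation environment:
+
+```shell script
+python examples/train.py singleagent_ring --rl_trainer h-baselines \
+    --alg SAC --total_steps 2000000 --batch_size 256 --evaluate
+```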
+ +Additionally, the following arguments can be passed when training a multiagent +policy: + +* `--shared` (*store_true*): whether to use a shared policy for all agents +* `--maddpg` (*store_true*): whether to use an algorithm-specific variant of + the MADDPG algorithm + ## Simulated Examples diff --git a/examples/train.py b/examples/train.py index a159c13ee..a1288e2f0 100644 --- a/examples/train.py +++ b/examples/train.py @@ -6,12 +6,12 @@ Usage python train.py EXP_CONFIG """ - import argparse import json import os import sys from time import strftime +from copy import deepcopy from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv from stable_baselines import PPO2 @@ -20,16 +20,15 @@ from ray import tune from ray.tune import run_experiments from ray.tune.registry import register_env -from flow.utils.registry import make_create_env try: from ray.rllib.agents.agent import get_agent_class except ImportError: from ray.rllib.agents.registry import get_agent_class -from copy import deepcopy from flow.core.util import ensure_dir from flow.utils.registry import env_constructor from flow.utils.rllib import FlowParamsEncoder, get_flow_params +from flow.utils.registry import make_create_env def parse_args(args): @@ -72,11 +71,16 @@ def parse_args(args): return parser.parse_known_args(args)[0] -def run_model_stablebaseline(flow_params, num_cpus=1, rollout_size=50, num_steps=50): +def run_model_stablebaseline(flow_params, + num_cpus=1, + rollout_size=50, + num_steps=50): """Run the model for num_steps if provided. Parameters ---------- + flow_params : dict + flow-specific parameters num_cpus : int number of CPUs used during training rollout_size : int @@ -163,7 +167,8 @@ def setup_exps_rllib(flow_params, print("policy_graphs", policy_graphs) config['multiagent'].update({'policies': policy_graphs}) if policy_mapping_fn is not None: - config['multiagent'].update({'policy_mapping_fn': tune.function(policy_mapping_fn)}) + config['multiagent'].update( + {'policy_mapping_fn': tune.function(policy_mapping_fn)}) if policies_to_train is not None: config['multiagent'].update({'policies_to_train': policies_to_train}) @@ -174,89 +179,222 @@ def setup_exps_rllib(flow_params, return alg_run, gym_name, config -if __name__ == "__main__": - flags = parse_args(sys.argv[1:]) +def train_rllib(submodule, flags): + """Train policies using the PPO algorithm in RLlib.""" + flow_params = submodule.flow_params + n_cpus = submodule.N_CPUS + n_rollouts = submodule.N_ROLLOUTS + policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) + policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) + policies_to_train = getattr(submodule, "policies_to_train", None) + + alg_run, gym_name, config = setup_exps_rllib( + flow_params, n_cpus, n_rollouts, + policy_graphs, policy_mapping_fn, policies_to_train) + + ray.init(num_cpus=n_cpus + 1, object_store_memory=200 * 1024 * 1024) + exp_config = { + "run": alg_run, + "env": gym_name, + "config": { + **config + }, + "checkpoint_freq": 20, + "checkpoint_at_end": True, + "max_failures": 999, + "stop": { + "training_iteration": flags.num_steps, + }, + } + + if flags.checkpoint_path is not None: + exp_config['restore'] = flags.checkpoint_path + run_experiments({flow_params["exp_tag"]: exp_config}) + + +def train_h_baselines(flow_params, args, multiagent): + """Train policies using SAC and TD3 with h-baselines.""" + from hbaselines.algorithms import OffPolicyRLAlgorithm + from hbaselines.utils.train import parse_options, get_hyperparameters + from 
hbaselines.envs.mixed_autonomy.envs import FlowEnv + + flow_params = deepcopy(flow_params) + + # Get the command-line arguments that are relevant here + args = parse_options(description="", example_usage="", args=args) + + # the base directory that the logged data will be stored in + base_dir = "training_data" + + # Create the training environment. + env = FlowEnv( + flow_params, + multiagent=multiagent, + shared=args.shared, + maddpg=args.maddpg, + render=args.render, + version=0 + ) + + # Create the evaluation environment. + if args.evaluate: + eval_flow_params = deepcopy(flow_params) + eval_flow_params['env'].evaluate = True + eval_env = FlowEnv( + eval_flow_params, + multiagent=multiagent, + shared=args.shared, + maddpg=args.maddpg, + render=args.render_eval, + version=1 + ) + else: + eval_env = None + + for i in range(args.n_training): + # value of the next seed + seed = args.seed + i + + # The time when the current experiment started. + now = strftime("%Y-%m-%d-%H:%M:%S") + + # Create a save directory folder (if it doesn't exist). + dir_name = os.path.join(base_dir, '{}/{}'.format(args.env_name, now)) + ensure_dir(dir_name) + + # Get the policy class. + if args.alg == "TD3": + if multiagent: + from hbaselines.multi_fcnet.td3 import MultiFeedForwardPolicy + policy = MultiFeedForwardPolicy + else: + from hbaselines.fcnet.td3 import FeedForwardPolicy + policy = FeedForwardPolicy + elif args.alg == "SAC": + if multiagent: + from hbaselines.multi_fcnet.sac import MultiFeedForwardPolicy + policy = MultiFeedForwardPolicy + else: + from hbaselines.fcnet.sac import FeedForwardPolicy + policy = FeedForwardPolicy + else: + raise ValueError("Unknown algorithm: {}".format(args.alg)) + + # Get the hyperparameters. + hp = get_hyperparameters(args, policy) + + # Add the seed for logging purposes. + params_with_extra = hp.copy() + params_with_extra['seed'] = seed + params_with_extra['env_name'] = args.env_name + params_with_extra['policy_name'] = policy.__name__ + params_with_extra['algorithm'] = args.alg + params_with_extra['date/time'] = now + + # Add the hyperparameters to the folder. + with open(os.path.join(dir_name, 'hyperparameters.json'), 'w') as f: + json.dump(params_with_extra, f, sort_keys=True, indent=4) + + # Create the algorithm object. + alg = OffPolicyRLAlgorithm( + policy=policy, + env=env, + eval_env=eval_env, + **hp + ) - # import relevant information from the exp_config script - module = __import__("exp_configs.rl.singleagent", fromlist=[flags.exp_config]) - module_ma = __import__("exp_configs.rl.multiagent", fromlist=[flags.exp_config]) + # Perform training. + alg.learn( + total_timesteps=args.total_steps, + log_dir=dir_name, + log_interval=args.log_interval, + eval_interval=args.eval_interval, + save_interval=args.save_interval, + initial_exploration_steps=args.initial_exploration_steps, + seed=seed, + ) + + +def train_stable_baselines(submodule, flags): + """Train policies using the PPO algorithm in stable-baselines.""" + flow_params = submodule.flow_params + # Path to the saved files + exp_tag = flow_params['exp_tag'] + result_name = '{}/{}'.format(exp_tag, strftime("%Y-%m-%d-%H:%M:%S")) + + # Perform training. + print('Beginning training.') + model = run_model_stablebaseline( + flow_params, flags.num_cpus, flags.rollout_size, flags.num_steps) + + # Save the model to a desired folder and then delete it to demonstrate + # loading. 
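+    # (note: nothing is actually deleted from disk here; the `model` variable
+    # is simply re-assigned below when the saved file is loaded back)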
+ print('Saving the trained model!') + path = os.path.realpath(os.path.expanduser('~/baseline_results')) + ensure_dir(path) + save_path = os.path.join(path, result_name) + model.save(save_path) + + # dump the flow params + with open(os.path.join(path, result_name) + '.json', 'w') as outfile: + json.dump(flow_params, outfile, + cls=FlowParamsEncoder, sort_keys=True, indent=4) + + # Replay the result by loading the model + print('Loading the trained model and testing it out!') + model = PPO2.load(save_path) + flow_params = get_flow_params(os.path.join(path, result_name) + '.json') + flow_params['sim'].render = True + env = env_constructor(params=flow_params, version=0)() + # The algorithms require a vectorized environment to run + eval_env = DummyVecEnv([lambda: env]) + obs = eval_env.reset() + reward = 0 + for _ in range(flow_params['env'].horizon): + action, _states = model.predict(obs) + obs, rewards, dones, info = eval_env.step(action) + reward += rewards + print('the final reward is {}'.format(reward)) + + +def main(args): + """Perform the training operations.""" + # Parse script-level arguments (not including package arguments). + flags = parse_args(args) + + # Import relevant information from the exp_config script. + module = __import__( + "exp_configs.rl.singleagent", fromlist=[flags.exp_config]) + module_ma = __import__( + "exp_configs.rl.multiagent", fromlist=[flags.exp_config]) + + # Import the sub-module containing the specified exp_config and determine + # whether the environment is single agent or multi-agent. if hasattr(module, flags.exp_config): submodule = getattr(module, flags.exp_config) + multiagent = False elif hasattr(module_ma, flags.exp_config): submodule = getattr(module_ma, flags.exp_config) - assert flags.rl_trainer.lower() == "RLlib".lower(), \ + assert flags.rl_trainer.lower() in ["rllib", "h-baselines"], \ "Currently, multiagent experiments are only supported through "\ - "RLlib. Try running this experiment using RLlib: 'python train.py EXP_CONFIG'" + "RLlib. Try running this experiment using RLlib: " \ + "'python train.py EXP_CONFIG'" + multiagent = True else: - assert False, "Unable to find experiment config!" + raise ValueError("Unable to find experiment config.") + + # Perform the training operation. 
if flags.rl_trainer.lower() == "rllib": + train_rllib(submodule, flags) + elif flags.rl_trainer.lower() == "stable-baselines": + train_stable_baselines(submodule, flags) + elif flags.rl_trainer.lower() == "h-baselines": flow_params = submodule.flow_params - n_cpus = submodule.N_CPUS - n_rollouts = submodule.N_ROLLOUTS - policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) - policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) - policies_to_train = getattr(submodule, "policies_to_train", None) - - alg_run, gym_name, config = setup_exps_rllib( - flow_params, n_cpus, n_rollouts, - policy_graphs, policy_mapping_fn, policies_to_train) - - ray.init(num_cpus=n_cpus + 1, object_store_memory=200 * 1024 * 1024) - exp_config = { - "run": alg_run, - "env": gym_name, - "config": { - **config - }, - "checkpoint_freq": 20, - "checkpoint_at_end": True, - "max_failures": 999, - "stop": { - "training_iteration": flags.num_steps, - }, - } - - if flags.checkpoint_path is not None: - exp_config['restore'] = flags.checkpoint_path - trials = run_experiments({flow_params["exp_tag"]: exp_config}) - - elif flags.rl_trainer == "Stable-Baselines": - flow_params = submodule.flow_params - # Path to the saved files - exp_tag = flow_params['exp_tag'] - result_name = '{}/{}'.format(exp_tag, strftime("%Y-%m-%d-%H:%M:%S")) - - # Perform training. - print('Beginning training.') - model = run_model_stablebaseline(flow_params, flags.num_cpus, flags.rollout_size, flags.num_steps) - - # Save the model to a desired folder and then delete it to demonstrate - # loading. - print('Saving the trained model!') - path = os.path.realpath(os.path.expanduser('~/baseline_results')) - ensure_dir(path) - save_path = os.path.join(path, result_name) - model.save(save_path) - - # dump the flow params - with open(os.path.join(path, result_name) + '.json', 'w') as outfile: - json.dump(flow_params, outfile, - cls=FlowParamsEncoder, sort_keys=True, indent=4) - - # Replay the result by loading the model - print('Loading the trained model and testing it out!') - model = PPO2.load(save_path) - flow_params = get_flow_params(os.path.join(path, result_name) + '.json') - flow_params['sim'].render = True - env_constructor = env_constructor(params=flow_params, version=0)() - # The algorithms require a vectorized environment to run - eval_env = DummyVecEnv([lambda: env_constructor]) - obs = eval_env.reset() - reward = 0 - for _ in range(flow_params['env'].horizon): - action, _states = model.predict(obs) - obs, rewards, dones, info = eval_env.step(action) - reward += rewards - print('the final reward is {}'.format(reward)) + train_h_baselines(flow_params, args, multiagent) else: - assert False, "rl_trainer should be either 'RLlib' or 'Stable-Baselines'!" 
+        raise ValueError("rl_trainer should be either 'rllib', 'h-baselines', "
+                         "or 'stable-baselines'.")
+
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py
index 5c36d8760..a05fed68e 100644
--- a/tests/fast_tests/test_examples.py
+++ b/tests/fast_tests/test_examples.py
@@ -26,8 +26,10 @@
     flow_params as multiagent_traffic_light_grid
 from examples.exp_configs.rl.multiagent.multiagent_highway import flow_params as multiagent_highway
 
+from examples.train import parse_args as parse_train_args
 from examples.train import run_model_stablebaseline as run_stable_baselines_model
 from examples.train import setup_exps_rllib as setup_rllib_exps
+from examples.train import train_h_baselines
 
 from examples.exp_configs.non_rl.bay_bridge import flow_params as non_rl_bay_bridge
 from examples.exp_configs.non_rl.bay_bridge_toll import flow_params as non_rl_bay_bridge_toll
@@ -121,6 +123,42 @@ def run_simulation(flow_params):
     exp.run(1)
 
 
+class TestTrain(unittest.TestCase):
+
+    def test_parse_args(self):
+        """Tests the parse_args method in train.py."""
+        # test the default case
+        args = parse_train_args(["exp_config"])
+
+        self.assertDictEqual(vars(args), {
+            'exp_config': 'exp_config',
+            'rl_trainer': 'rllib',
+            'num_cpus': 1,
+            'num_steps': 5000,
+            'rollout_size': 1000,
+            'checkpoint_path': None
+        })
+
+        # test the case when optional args are specified
+        args = parse_train_args([
+            "exp_config",
+            "--rl_trainer", "h-baselines",
+            "--num_cpus", "2",
+            "--num_steps", "3",
+            "--rollout_size", "4",
+            "--checkpoint_path", "5",
+        ])
+
+        self.assertDictEqual(vars(args), {
+            'checkpoint_path': '5',
+            'exp_config': 'exp_config',
+            'num_cpus': 2,
+            'num_steps': 3,
+            'rl_trainer': 'h-baselines',
+            'rollout_size': 4
+        })
+
+
 class TestStableBaselineExamples(unittest.TestCase):
     """Tests the example scripts in examples/exp_configs/rl/singleagent for
     stable_baselines.
@@ -148,6 +186,31 @@ def test_singleagent_bottleneck(self):
         self.run_exp(singleagent_bottleneck)
 
 
+class TestHBaselineExamples(unittest.TestCase):
+    """Tests the functionality of the h-baselines features in train.py.
+
+    This is done by running a set of experiments for 10 time-steps and
+    confirming that it runs.
+    """
+    @staticmethod
+    def run_exp(flow_params, multiagent):
+        train_h_baselines(
+            flow_params=flow_params,
+            args=[
+                flow_params["env_name"].__name__,
+                "--initial_exploration_steps", "1",
+                "--total_steps", "10"
+            ],
+            multiagent=multiagent,
+        )
+
+    def test_singleagent_ring(self):
+        self.run_exp(singleagent_ring.copy(), multiagent=False)
+
+    def test_multiagent_ring(self):
+        self.run_exp(multiagent_ring.copy(), multiagent=True)
+
+
 class TestRllibExamples(unittest.TestCase):
     """Tests the example scripts in examples/exp_configs/rl/singleagent and
     examples/exp_configs/rl/multiagent for RLlib.
diff --git a/tests/fast_tests/test_util.py b/tests/fast_tests/test_util.py index 458fadcf4..67386cc77 100644 --- a/tests/fast_tests/test_util.py +++ b/tests/fast_tests/test_util.py @@ -14,7 +14,6 @@ from flow.core.util import emission_to_csv from flow.envs import MergePOEnv from flow.networks import MergeNetwork -from flow.utils.flow_warnings import deprecated_attribute from flow.utils.registry import make_create_env from flow.utils.rllib import FlowParamsEncoder, get_flow_params @@ -60,25 +59,6 @@ def test_emission_to_csv(self): self.assertEqual(len(dict1), 104) -class TestWarnings(unittest.TestCase): - """Tests warning functions located in flow.utils.warnings""" - - def test_deprecated_attribute(self): - # dummy class - class Foo(object): - pass - - # dummy attribute name - dep_from = "bar_deprecated" - dep_to = "bar_new" - - # check the deprecation warning is printing what is expected - self.assertWarnsRegex( - PendingDeprecationWarning, - "The attribute bar_deprecated in Foo is deprecated, use bar_new " - "instead.", deprecated_attribute, Foo(), dep_from, dep_to) - - class TestRegistry(unittest.TestCase): """Tests the methods located in flow/utils/registry.py""" From 903bb729ccd6f4ad174ceac639a6665ded59d131 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Tue, 24 Mar 2020 22:49:17 -0700 Subject: [PATCH 010/438] Add 1 lane highway network for Benni --- examples/exp_configs/non_rl/highway.py | 40 +++++++------------ .../exp_configs/non_rl/i210_subnetwork.py | 2 +- flow/networks/highway.py | 2 +- 3 files changed, 16 insertions(+), 28 deletions(-) diff --git a/examples/exp_configs/non_rl/highway.py b/examples/exp_configs/non_rl/highway.py index e7505f2d7..1905e2f7f 100644 --- a/examples/exp_configs/non_rl/highway.py +++ b/examples/exp_configs/non_rl/highway.py @@ -5,25 +5,19 @@ from flow.core.params import VehicleParams, InFlows from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS -from flow.envs import LaneChangeAccelEnv +from flow.envs import TestEnv vehicles = VehicleParams() vehicles.add( - veh_id="human", - acceleration_controller=(IDMController, {}), - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), - num_vehicles=20) -vehicles.add( - veh_id="human2", - acceleration_controller=(IDMController, {}), - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), - num_vehicles=20) + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + ) env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) @@ -31,13 +25,7 @@ inflow.add( veh_type="human", edge="highway_0", - probability=0.25, - departLane="free", - departSpeed=20) -inflow.add( - veh_type="human2", - edge="highway_0", - probability=0.25, + vehs_per_hour=10800 / 5.0, departLane="free", departSpeed=20) @@ -47,7 +35,7 @@ exp_tag='highway', # name of the flow environment the experiment is running on - env_name=LaneChangeAccelEnv, + env_name=TestEnv, # name of the network class the experiment is running on network=HighwayNetwork, @@ -58,12 +46,12 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( render=True, - lateral_resolution=1.0, + sim_step=0.5 ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=1500, + horizon=4000, additional_params=ADDITIONAL_ENV_PARAMS.copy(), ), diff --git 
a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 335461270..ceb625907 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -5,7 +5,7 @@ from flow.controllers.car_following_models import IDMController from flow.controllers.routing_controllers import I210Router -from flow.core.params import SumoParams +from flow.core.params import SumoParams, SumoCarFollowingParams from flow.core.params import EnvParams from flow.core.params import NetParams from flow.core.params import SumoLaneChangeParams diff --git a/flow/networks/highway.py b/flow/networks/highway.py index c63292067..595b0f286 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -9,7 +9,7 @@ # length of the highway "length": 1000, # number of lanes - "lanes": 4, + "lanes": 1, # speed limit for all edges "speed_limit": 30, # number of edges to divide the highway into From 5603f035581c6db19c3c7f6fcc8ab1378fbd215b Mon Sep 17 00:00:00 2001 From: Aboudy Kreidieh Date: Wed, 25 Mar 2020 10:24:39 -0700 Subject: [PATCH 011/438] bug fix for num_rl_vehicles during reset (#884) --- flow/core/kernel/vehicle/traci.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 657b89a94..50cd106c9 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -311,7 +311,6 @@ def _add_departed(self, veh_id, veh_type): if accel_controller[0] == RLController: if veh_id not in self.__rl_ids: self.__rl_ids.append(veh_id) - self.num_rl_vehicles += 1 else: if veh_id not in self.__human_ids: self.__human_ids.append(veh_id) @@ -362,6 +361,7 @@ def _add_departed(self, veh_id, veh_type): # make sure that the order of rl_ids is kept sorted self.__rl_ids.sort() + self.num_rl_vehicles = len(self.__rl_ids) # get the subscription results from the new vehicle new_obs = self.kernel_api.vehicle.getSubscriptionResults(veh_id) From 91144cae9ddb0651603ab32b8652ca11cf7f9579 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 26 Mar 2020 12:46:39 -0700 Subject: [PATCH 012/438] Add current dev version of multiagent I210 --- environment.yml | 4 +- .../rl/multiagent/multiagent_i210.py | 38 +++-- examples/train.py | 146 +++++++++++++----- requirements.txt | 4 +- 4 files changed, 132 insertions(+), 60 deletions(-) diff --git a/environment.yml b/environment.yml index f57c8d33d..480ea7eba 100644 --- a/environment.yml +++ b/environment.yml @@ -21,9 +21,11 @@ dependencies: - matplotlib==3.0.0 - dill - lz4 - - ray==0.7.3 + - ray==0.8.0 - setproctitle - psutil - opencv-python - boto3==1.4.8 - redis~=2.10.6 + - tabulate + - pytz diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 94f709ff4..872568cab 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -8,8 +8,9 @@ from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy from ray.tune.registry import register_env +from flow.controllers import RLController +from flow.controllers.car_following_models import IDMController import flow.config as config -from flow.controllers.rlcontroller import RLController from flow.core.params import EnvParams from flow.core.params import NetParams from flow.core.params import InitialConfig @@ -23,14 +24,8 @@ # SET UP PARAMETERS FOR THE SIMULATION -# number of training iterations -N_TRAINING_ITERATIONS = 200 
-# number of rollouts per training iteration -N_ROLLOUTS = 2 # number of steps per rollout -HORIZON = 500 -# number of parallel workers -N_CPUS = 1 +HORIZON = 4000 # percentage of autonomous vehicles compared to human vehicles on highway PENETRATION_RATE = 10 @@ -38,10 +33,12 @@ # SET UP PARAMETERS FOR THE ENVIRONMENT additional_env_params = ADDITIONAL_ENV_PARAMS.copy() additional_env_params.update({ - 'max_accel': 1, - 'max_decel': 1, + 'max_accel': 2.6, + 'max_decel': 4.5, # configure the observation space. Look at the I210MultiEnv class for more info. 'lead_obs': True, + # whether to add in a reward for the speed of nearby vehicles + "local_reward": True }) # CREATE VEHICLE TYPES AND INFLOWS @@ -50,9 +47,8 @@ vehicles.add( "human", num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ) + lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic"), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.6}), ) vehicles.add( "av", @@ -68,11 +64,11 @@ inflow.add( veh_type="human", edge="119257914", - vehs_per_hour=8378 * pen_rate, + vehs_per_hour=int(10800 * (1 - pen_rate)), # probability=1.0, departLane="random", departSpeed=20) -# on ramp +# # on ramp # inflow.add( # veh_type="human", # edge="27414345", @@ -91,7 +87,7 @@ inflow.add( veh_type="av", edge="119257914", - vehs_per_hour=int(8378 * pen_rate), + vehs_per_hour=int(10800 * pen_rate), # probability=1.0, departLane="random", departSpeed=20) @@ -128,16 +124,18 @@ # simulation-related parameters sim=SumoParams( - sim_step=0.8, + sim_step=0.5, render=False, - color_by_speed=True, - restart_instance=True + color_by_speed=False, + restart_instance=True, + use_ballistic=True ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, sims_per_step=1, + warmup_steps=0, additional_params=additional_env_params, ), @@ -171,7 +169,7 @@ obs_space = test_env.observation_space act_space = test_env.action_space -POLICY_GRAPHS = {'av': (PPOTFPolicy, obs_space, act_space, {})} +POLICY_GRAPHS = {'av': (None, obs_space, act_space, {})} POLICIES_TO_TRAIN = ['av'] diff --git a/examples/train.py b/examples/train.py index a1288e2f0..1f2cd6300 100644 --- a/examples/train.py +++ b/examples/train.py @@ -7,12 +7,16 @@ python train.py EXP_CONFIG """ import argparse +from datetime import datetime import json import os import sys from time import strftime from copy import deepcopy +import numpy as np +import pytz + from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv from stable_baselines import PPO2 @@ -26,14 +30,15 @@ from ray.rllib.agents.registry import get_agent_class from flow.core.util import ensure_dir +from flow.core.rewards import energy_consumption from flow.utils.registry import env_constructor from flow.utils.rllib import FlowParamsEncoder, get_flow_params from flow.utils.registry import make_create_env + def parse_args(args): """Parse training options user can specify in command line. - Returns ------- argparse.Namespace @@ -54,16 +59,35 @@ def parse_args(args): parser.add_argument( '--rl_trainer', type=str, default="rllib", help='the RL trainer to use. either rllib or Stable-Baselines') - + parser.add_argument( + '--algorithm', type=str, default="PPO", + help='RL algorithm to use. Options are PPO, TD3, MATD3 (MADDPG w/ TD3) right now.' 
+ ) + parser.add_argument('--exp_title', type=str, default='test', + help='Informative experiment title to help distinguish results') parser.add_argument( '--num_cpus', type=int, default=1, help='How many CPUs to use') parser.add_argument( '--num_steps', type=int, default=5000, - help='How many total steps to perform learning over') + help='How many total steps to perform learning over. Relevant for stable-baselines') + parser.add_argument( + '--grid_search', action='store_true', default=False, + help='Whether to grid search over hyperparams') + parser.add_argument( + '--num_iterations', type=int, default=200, + help='How many iterations are in a training run.') + parser.add_argument( + '--num_rollouts', type=int, default=1, + help='How many rollouts are in a training batch') parser.add_argument( '--rollout_size', type=int, default=1000, help='How many steps are in a training batch.') + parser.add_argument('--use_s3', action='store_true', help='If true, upload results to s3') + parser.add_argument('--local_mode', action='store_true', default=False, + help='If true only 1 CPU will be used') + parser.add_argument('--render', action='store_true', default=False, + help='If true, we render the display') parser.add_argument( '--checkpoint_path', type=str, default=None, help='Directory with checkpoint to restore training from.') @@ -110,11 +134,12 @@ def run_model_stablebaseline(flow_params, def setup_exps_rllib(flow_params, n_cpus, n_rollouts, + flags, policy_graphs=None, policy_mapping_fn=None, - policies_to_train=None): + policies_to_train=None, + ): """Return the relevant components of an RLlib experiment. - Parameters ---------- flow_params : dict @@ -123,13 +148,14 @@ def setup_exps_rllib(flow_params, number of CPUs to run the experiment over n_rollouts : int number of rollouts per training iteration + flags: + custom arguments policy_graphs : dict, optional TODO policy_mapping_fn : function, optional TODO policies_to_train : list of str, optional TODO - Returns ------- str @@ -141,20 +167,59 @@ def setup_exps_rllib(flow_params, """ horizon = flow_params['env'].horizon - alg_run = "PPO" - - agent_cls = get_agent_class(alg_run) - config = deepcopy(agent_cls._default_config) - - config["num_workers"] = n_cpus - config["train_batch_size"] = horizon * n_rollouts - config["gamma"] = 0.999 # discount rate - config["model"].update({"fcnet_hiddens": [32, 32, 32]}) - config["use_gae"] = True - config["lambda"] = 0.97 - config["kl_target"] = 0.02 - config["num_sgd_iter"] = 10 - config["horizon"] = horizon + alg_run = flags.algorithm.upper() + + if alg_run == "PPO": + agent_cls = get_agent_class(alg_run) + config = deepcopy(agent_cls._default_config) + + config["num_workers"] = n_cpus + config["horizon"] = horizon + config["model"].update({"fcnet_hiddens": [32, 32, 32]}) + config["train_batch_size"] = horizon * n_rollouts + config["gamma"] = 0.999 # discount rate + config["use_gae"] = True + config["lambda"] = 0.97 + config["kl_target"] = 0.02 + config["num_sgd_iter"] = 10 + elif alg_run == "TD3": + agent_cls = get_agent_class(alg_run) + config = deepcopy(agent_cls._default_config) + + config["num_workers"] = n_cpus + config["horizon"] = horizon + config["buffer_size"] = 20000 # reduced to test if this is the source of memory problems + if flags.grid_search: + config["prioritized_replay"] = tune.grid_search(['True', 'False']) + config["actor_lr"] = tune.grid_search([1e-3, 1e-4]) + config["critic_lr"] = tune.grid_search([1e-3, 1e-4]) + config["n_step"] = tune.grid_search([1, 10]) + else: + sys.exit("We 
only support PPO and TD3 right now.") + + # define some standard and useful callbacks + def on_episode_start(info): + episode = info["episode"] + episode.user_data["avg_speed"] = [] + episode.user_data["avg_energy"] = [] + + def on_episode_step(info): + episode = info["episode"] + env = info["env"].get_unwrapped()[0] + speed = np.mean([speed for speed in env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]) + if not np.isnan(speed): + episode.user_data["avg_speed"].append(speed) + episode.user_data["avg_energy"].append(energy_consumption(env)) + + def on_episode_end(info): + episode = info["episode"] + avg_speed = np.mean(episode.user_data["avg_speed"]) + episode.custom_metrics["avg_speed"] = avg_speed + episode.custom_metrics["avg_energy_per_veh"] = np.mean(episode.user_data["avg_energy"]) + + config["callbacks"] = {"on_episode_start": tune.function(on_episode_start), + "on_episode_step": tune.function(on_episode_step), + "on_episode_end": tune.function(on_episode_end)} # save the flow params for replay flow_json = json.dumps( @@ -167,8 +232,7 @@ def setup_exps_rllib(flow_params, print("policy_graphs", policy_graphs) config['multiagent'].update({'policies': policy_graphs}) if policy_mapping_fn is not None: - config['multiagent'].update( - {'policy_mapping_fn': tune.function(policy_mapping_fn)}) + config['multiagent'].update({'policy_mapping_fn': tune.function(policy_mapping_fn)}) if policies_to_train is not None: config['multiagent'].update({'policies_to_train': policies_to_train}) @@ -182,34 +246,40 @@ def setup_exps_rllib(flow_params, def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" flow_params = submodule.flow_params - n_cpus = submodule.N_CPUS - n_rollouts = submodule.N_ROLLOUTS + flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) policies_to_train = getattr(submodule, "policies_to_train", None) alg_run, gym_name, config = setup_exps_rllib( - flow_params, n_cpus, n_rollouts, + flow_params, flags.num_cpus, flags.num_rollouts, flags, policy_graphs, policy_mapping_fn, policies_to_train) - ray.init(num_cpus=n_cpus + 1, object_store_memory=200 * 1024 * 1024) - exp_config = { - "run": alg_run, - "env": gym_name, - "config": { - **config - }, + config['num_workers'] = flags.num_cpus + config['env'] = gym_name + + if flags.local_mode: + ray.init(local_mode=True) + else: + ray.init() + exp_dict = { + "run_or_experiment": alg_run, + "name": gym_name, + "config": config, "checkpoint_freq": 20, "checkpoint_at_end": True, - "max_failures": 999, + "max_failures": 0, "stop": { - "training_iteration": flags.num_steps, + "training_iteration": flags.num_iterations, }, } - - if flags.checkpoint_path is not None: - exp_config['restore'] = flags.checkpoint_path - run_experiments({flow_params["exp_tag"]: exp_config}) + date = datetime.now(tz=pytz.utc) + date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") + s3_string = "s3://i210.experiments/i210/" \ + + date + '/' + flags.exp_title + if flags.use_s3: + exp_dict['upload_dir'] = s3_string + tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) def train_h_baselines(flow_params, args, multiagent): diff --git a/requirements.txt b/requirements.txt index 546cb4e26..4569dfca5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ pyglet==1.3.2 matplotlib==3.1.0 imutils==0.5.1 numpydoc -ray==0.7.3 +ray==0.8.0 opencv-python dill lz4 @@ -25,3 +25,5 @@ 
boto3==1.4.8 redis~=2.10.6 pandas==0.24.2 plotly==2.4.0 +tabulate +pytz \ No newline at end of file From eec0a02b430238c3a7ce4f05de0f04fb362d6e2b Mon Sep 17 00:00:00 2001 From: Kanaad Parvate Date: Sun, 29 Mar 2020 16:20:28 -0700 Subject: [PATCH 013/438] flake and pydocstyle --- examples/exp_configs/rl/multiagent/multiagent_i210.py | 1 - examples/train.py | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 872568cab..1779adf69 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -5,7 +5,6 @@ """ import os -from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy from ray.tune.registry import register_env from flow.controllers import RLController diff --git a/examples/train.py b/examples/train.py index 1f2cd6300..ec99d4ee1 100644 --- a/examples/train.py +++ b/examples/train.py @@ -22,7 +22,6 @@ import ray from ray import tune -from ray.tune import run_experiments from ray.tune.registry import register_env try: from ray.rllib.agents.agent import get_agent_class @@ -36,9 +35,9 @@ from flow.utils.registry import make_create_env - def parse_args(args): """Parse training options user can specify in command line. + Returns ------- argparse.Namespace @@ -140,6 +139,7 @@ def setup_exps_rllib(flow_params, policies_to_train=None, ): """Return the relevant components of an RLlib experiment. + Parameters ---------- flow_params : dict @@ -188,7 +188,7 @@ def setup_exps_rllib(flow_params, config["num_workers"] = n_cpus config["horizon"] = horizon - config["buffer_size"] = 20000 # reduced to test if this is the source of memory problems + config["buffer_size"] = 20000 # reduced to test if this is the source of memory problems if flags.grid_search: config["prioritized_replay"] = tune.grid_search(['True', 'False']) config["actor_lr"] = tune.grid_search([1e-3, 1e-4]) From 27f325b9fd9031f027b32732d195b808167ee980 Mon Sep 17 00:00:00 2001 From: Kanaad Parvate Date: Sun, 29 Mar 2020 16:21:59 -0700 Subject: [PATCH 014/438] missed a flake8 --- examples/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index ec99d4ee1..4e6f97bbe 100644 --- a/examples/train.py +++ b/examples/train.py @@ -139,7 +139,7 @@ def setup_exps_rllib(flow_params, policies_to_train=None, ): """Return the relevant components of an RLlib experiment. 
- + Parameters ---------- flow_params : dict From ba2ff131f64db344b8d928ff290266653680213f Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Mon, 30 Mar 2020 11:56:15 -0700 Subject: [PATCH 015/438] Add an option for a local reward that just computes speed of the AV and its follower (#891) --- .../exp_configs/non_rl/i210_subnetwork.py | 2 +- flow/envs/multiagent/i210.py | 72 +++++++++++-------- 2 files changed, 44 insertions(+), 30 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index dd85c56cf..d993ae93a 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -101,7 +101,7 @@ edge_id = "119257908#1-AddedOnRampEdge" custom_callables = { "avg_merge_speed": lambda env: np.nan_to_num(np.mean( - env.k.vehicle.get_speed(env.k.vehicle.get_ids_by_edge(edge_id)))), + env.k.vehicle.get_speed(env.k.vehicle.get_ids()))), "avg_outflow": lambda env: np.nan_to_num( env.k.vehicle.get_outflow_rate(120)), # we multiply by 5 to account for the vehicle length and by 1000 to convert diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index 409aeb14f..4082eb415 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -16,6 +16,8 @@ "max_decel": 1, # whether we use an obs space that contains adjacent lane info or just the lead obs "lead_obs": True, + # whether the reward should come from local vehicles instead of global rewards + "local_reward": True } @@ -137,35 +139,47 @@ def compute_reward(self, rl_actions, **kwargs): return {} rewards = {} - for rl_id in self.k.vehicle.get_rl_ids(): - if self.env_params.evaluate: - # reward is speed of vehicle if we are in evaluation mode - reward = self.k.vehicle.get_speed(rl_id) - elif kwargs['fail']: - # reward is 0 if a collision occurred - reward = 0 - else: - # reward high system-level velocities - cost1 = average_velocity(self, fail=kwargs['fail']) - - # penalize small time headways - cost2 = 0 - t_min = 1 # smallest acceptable time headway - - lead_id = self.k.vehicle.get_leader(rl_id) - if lead_id not in ["", None] \ - and self.k.vehicle.get_speed(rl_id) > 0: - t_headway = max( - self.k.vehicle.get_headway(rl_id) / - self.k.vehicle.get_speed(rl_id), 0) - cost2 += min((t_headway - t_min) / t_min, 0) - - # weights for cost1, cost2, and cost3, respectively - eta1, eta2 = 1.00, 0.10 - - reward = max(eta1 * cost1 + eta2 * cost2, 0) - - rewards[rl_id] = reward + if self.env_params.additional_params["local_reward"]: + for rl_id in self.k.vehicle.get_rl_ids(): + rewards[rl_id] = 0 + speeds = [] + follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id)) + speeds.extend([speed for speed in follow_speed if speed >= 0]) + if self.k.vehicle.get_speed(rl_id) >= 0: + speeds.append(self.k.vehicle.get_speed(rl_id)) + if len(speeds) > 0: + # rescale so the q function can estimate it quickly + rewards[rl_id] = np.mean(speeds) / 500.0 + else: + for rl_id in self.k.vehicle.get_rl_ids(): + if self.env_params.evaluate: + # reward is speed of vehicle if we are in evaluation mode + reward = self.k.vehicle.get_speed(rl_id) + elif kwargs['fail']: + # reward is 0 if a collision occurred + reward = 0 + else: + # reward high system-level velocities + cost1 = average_velocity(self, fail=kwargs['fail']) + + # penalize small time headways + cost2 = 0 + t_min = 1 # smallest acceptable time headway + + lead_id = self.k.vehicle.get_leader(rl_id) + if lead_id not in ["", None] \ + and 
self.k.vehicle.get_speed(rl_id) > 0: + t_headway = max( + self.k.vehicle.get_headway(rl_id) / + self.k.vehicle.get_speed(rl_id), 0) + cost2 += min((t_headway - t_min) / t_min, 0) + + # weights for cost1, cost2, and cost3, respectively + eta1, eta2 = 1.00, 0.10 + + reward = max(eta1 * cost1 + eta2 * cost2, 0) + + rewards[rl_id] = reward return rewards def additional_command(self): From 861c31e21d042471d9ea3be54d9ab0145d4321ff Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Tue, 31 Mar 2020 11:49:58 -0700 Subject: [PATCH 016/438] Noise fix (#894) * Add an option for a local reward that just computes speed of the AV and its follower * Set the noise scaling to match Bennis suggestions --- examples/exp_configs/rl/multiagent/multiagent_i210.py | 2 +- flow/controllers/base_controller.py | 2 +- flow/envs/multiagent/i210.py | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 1779adf69..7710b4a4d 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -47,7 +47,7 @@ "human", num_vehicles=0, lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic"), - acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.6}), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), ) vehicles.add( "av", diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 41780826b..4004b1c4d 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -107,7 +107,7 @@ def get_action(self, env): # add noise to the accelerations, if requested if self.accel_noise > 0: - accel += np.random.normal(0, self.accel_noise) + accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) # run the failsafes, if requested if self.fail_safe == 'instantaneous': diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index 4082eb415..6368e7a2d 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -144,7 +144,8 @@ def compute_reward(self, rl_actions, **kwargs): rewards[rl_id] = 0 speeds = [] follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id)) - speeds.extend([speed for speed in follow_speed if speed >= 0]) + if follow_speed >= 0: + speeds.append(follow_speed) if self.k.vehicle.get_speed(rl_id) >= 0: speeds.append(self.k.vehicle.get_speed(rl_id)) if len(speeds) > 0: From d04f1440c7f3e953ac8cdddcc591b75f2800a13b Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Tue, 31 Mar 2020 13:43:29 -0700 Subject: [PATCH 017/438] Python upgrade (#895) - Upgrade python - Clean up AMI --- environment.yml | 23 +++++++++++------------ examples/train.py | 18 +++++++++++++----- requirements.txt | 7 ++++--- scripts/ray_autoscale.yaml | 4 +++- 4 files changed, 31 insertions(+), 21 deletions(-) diff --git a/environment.yml b/environment.yml index 480ea7eba..162bed533 100644 --- a/environment.yml +++ b/environment.yml @@ -1,18 +1,17 @@ name: flow dependencies: - - python==3.6.8 - - scipy==1.1.0 - - lxml==4.4.1 - - six==1.11.0 - - path.py - - python-dateutil==2.7.3 - - pip>=18.0 - - tensorflow==1.9.0 - - cloudpickle==1.2.1 - - setuptools==41.0.0 - - plotly==2.4.0 + - python==3.7.3 - pip: + - scipy==1.1.0 + - lxml==4.4.1 + - six==1.11.0 + - path.py + - python-dateutil==2.7.3 + - pip>=18.0 + - tensorflow==1.14.0 + - setuptools==41.0.0 + - plotly==2.4.0 - gym==0.14.0 - 
pyprind==2.11.2 - nose2==0.8.0 @@ -25,7 +24,7 @@ dependencies: - setproctitle - psutil - opencv-python - - boto3==1.4.8 + - boto3==1.10.45 - redis~=2.10.6 - tabulate - pytz diff --git a/examples/train.py b/examples/train.py index 4e6f97bbe..8150bc883 100644 --- a/examples/train.py +++ b/examples/train.py @@ -17,8 +17,11 @@ import numpy as np import pytz -from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv -from stable_baselines import PPO2 +try: + from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv + from stable_baselines import PPO2 +except ImportError: + print("Stable-baselines not installed") import ray from ray import tune @@ -54,6 +57,10 @@ def parse_args(args): help='Name of the experiment configuration file, as located in ' 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') + parser.add_argument( + 'exp_title', type=str, + help='Title to give the run.') + # optional input parameters parser.add_argument( '--rl_trainer', type=str, default="rllib", @@ -62,8 +69,6 @@ def parse_args(args): '--algorithm', type=str, default="PPO", help='RL algorithm to use. Options are PPO, TD3, MATD3 (MADDPG w/ TD3) right now.' ) - parser.add_argument('--exp_title', type=str, default='test', - help='Informative experiment title to help distinguish results') parser.add_argument( '--num_cpus', type=int, default=1, help='How many CPUs to use') @@ -76,6 +81,9 @@ def parse_args(args): parser.add_argument( '--num_iterations', type=int, default=200, help='How many iterations are in a training run.') + parser.add_argument( + '--checkpoint_freq', type=int, default=20, + help='How often to checkpoint.') parser.add_argument( '--num_rollouts', type=int, default=1, help='How many rollouts are in a training batch') @@ -266,7 +274,7 @@ def train_rllib(submodule, flags): "run_or_experiment": alg_run, "name": gym_name, "config": config, - "checkpoint_freq": 20, + "checkpoint_freq": flags.checkpoint_freq, "checkpoint_at_end": True, "max_failures": 0, "stop": { diff --git a/requirements.txt b/requirements.txt index 4569dfca5..191ecc740 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,6 @@ path.py joblib==0.10.3 python-dateutil==2.7.3 cached_property -cloudpickle==1.2.0 pyglet==1.3.2 matplotlib==3.1.0 imutils==0.5.1 @@ -21,9 +20,11 @@ lz4 setproctitle psutil opencv-python -boto3==1.4.8 +boto3==1.10.45 redis~=2.10.6 pandas==0.24.2 plotly==2.4.0 tabulate -pytz \ No newline at end of file +tensorflow==1.14.0 +awscli==1.16.309 +pytz diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml index 5bf2a9c4a..ea84bbee0 100644 --- a/scripts/ray_autoscale.yaml +++ b/scripts/ray_autoscale.yaml @@ -67,9 +67,11 @@ worker_nodes: # Additional options in the boto docs. setup_commands: - - cd flow && git fetch && git checkout origin/master + - cd flow && git fetch && git checkout origin/i210_dev head_setup_commands: + - pip install ray==0.8.0 + - pip install tabulate - pip install boto3==1.10.45 # 1.4.8 adds InstanceMarketOptions - pip install awscli==1.16.309 - pip install pytz From 1100d8d2223014ea3919bbf553f7498d9c69bd60 Mon Sep 17 00:00:00 2001 From: Kanaad Parvate Date: Sun, 5 Apr 2020 22:41:18 -0700 Subject: [PATCH 018/438] I210 Replay Script (#886) Replay script for the i210 env. 
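An aside on the noise change in PATCH 016 above: scaling the per-step Gaussian by sqrt(sim_step) is, we assume, intended as the usual Euler-Maruyama-style discretization of white acceleration noise, so that the noise variance injected per second of simulated time is independent of the step size. A quick self-contained check of that property:

import numpy as np

def injected_variance_per_second(dt, sigma=0.5, seconds=1000.0):
    # each draw mimics base_controller.py: sqrt(dt) * N(0, sigma)
    draws = np.sqrt(dt) * np.random.normal(0.0, sigma, int(seconds / dt))
    # a single draw has variance sigma**2 * dt, and there are 1/dt draws
    # per second, so the variance injected per second is sigma**2 for any dt
    return draws.var() / dt

print(injected_variance_per_second(0.5))  # ~0.25
print(injected_variance_per_second(0.1))  # ~0.25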
--- .../rl/multiagent/multiagent_i210.py | 45 ++- flow/utils/rllib.py | 7 + flow/visualize/i210_replay.py | 378 ++++++++++++++++++ flow/visualize/plot_custom_callables.py | 74 ++++ flow/visualize/transfer/util.py | 138 +++++++ 5 files changed, 627 insertions(+), 15 deletions(-) create mode 100644 flow/visualize/i210_replay.py create mode 100644 flow/visualize/plot_custom_callables.py create mode 100644 flow/visualize/transfer/util.py diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 7710b4a4d..327282e28 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -4,6 +4,7 @@ highway with ramps network. """ import os +import numpy as np from ray.tune.registry import register_env @@ -17,6 +18,7 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams +from flow.core.rewards import energy_consumption from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS from flow.utils.registry import make_create_env @@ -26,6 +28,10 @@ # number of steps per rollout HORIZON = 4000 +VEH_PER_HOUR_BASE_119257914 = 10800 +VEH_PER_HOUR_BASE_27414345 = 321 +VEH_PER_HOUR_BASE_27414342 = 421 + # percentage of autonomous vehicles compared to human vehicles on highway PENETRATION_RATE = 10 @@ -63,46 +69,46 @@ inflow.add( veh_type="human", edge="119257914", - vehs_per_hour=int(10800 * (1 - pen_rate)), + vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * (1 - pen_rate)), # probability=1.0, - departLane="random", + depart_lane="random", departSpeed=20) # # on ramp # inflow.add( # veh_type="human", # edge="27414345", # vehs_per_hour=321 * pen_rate, -# departLane="random", -# departSpeed=20) +# depart_lane="random", +# depart_speed=20) # inflow.add( # veh_type="human", # edge="27414342#0", # vehs_per_hour=421 * pen_rate, -# departLane="random", -# departSpeed=20) +# depart_lane="random", +# depart_speed=20) # Now add the AVs # main highway inflow.add( veh_type="av", edge="119257914", - vehs_per_hour=int(10800 * pen_rate), + vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * pen_rate), # probability=1.0, - departLane="random", - departSpeed=20) + depart_lane="random", + depart_speed=20) # # on ramp # inflow.add( # veh_type="av", # edge="27414345", -# vehs_per_hour=int(321 * pen_rate), -# departLane="random", -# departSpeed=20) +# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414345 * pen_rate), +# depart_lane="random", +# depart_speed=20) # inflow.add( # veh_type="av", # edge="27414342#0", -# vehs_per_hour=int(421 * pen_rate), -# departLane="random", -# departSpeed=20) +# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414342 * pen_rate), +# depart_lane="random", +# depart_speed=20) NET_TEMPLATE = os.path.join( config.PROJECT_PATH, @@ -176,3 +182,12 @@ def policy_mapping_fn(_): """Map a policy in RLlib.""" return 'av' + + +custom_callables = { + "avg_speed": lambda env: np.mean([speed for speed in + env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]), + "avg_outflow": lambda env: np.nan_to_num( + env.k.vehicle.get_outflow_rate(120)), + "avg_energy": lambda env: -1*energy_consumption(env, 0.1) +} diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py index 80193c22b..7d777d769 100644 --- a/flow/utils/rllib.py +++ b/flow/utils/rllib.py @@ -146,6 +146,13 @@ def get_flow_params(config): if flow_params["net"]["inflows"]: 
net.inflows.__dict__ = flow_params["net"]["inflows"].copy() + if len(net.template) > 0: + dirname = os.getcwd() + filename = os.path.join(dirname, '../../examples') + split = net.template.split('examples')[1][1:] + path = os.path.abspath(os.path.join(filename, split)) + net.template = path + env = EnvParams() env.__dict__ = flow_params["env"].copy() diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py new file mode 100644 index 000000000..502d646aa --- /dev/null +++ b/flow/visualize/i210_replay.py @@ -0,0 +1,378 @@ +"""Transfer and replay for i210 environment.""" +import argparse +from collections import defaultdict +from copy import deepcopy +import numpy as np +import json +import os +import time + +import ray +try: + from ray.rllib.agents.agent import get_agent_class +except ImportError: + from ray.rllib.agents.registry import get_agent_class +from ray.tune.registry import register_env + +from flow.core.util import emission_to_csv +from flow.utils.registry import make_create_env +from flow.utils.rllib import get_flow_params +from flow.utils.rllib import get_rllib_config +from flow.utils.rllib import get_rllib_pkl +from flow.utils.rllib import FlowParamsEncoder + + +from flow.visualize.transfer.util import inflows_range + +from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS +from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables + +EXAMPLE_USAGE = """ +example usage: + python i210_replay.py -r /ray_results/experiment_dir/result_dir -c 1 + python i210_replay.py --controller idm + python i210_replay.py --controller idm --run_transfer + +Here the arguments are: +1 - the path to the simulation results +2 - the number of the checkpoint +""" + + +def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=None, result_dir=None): + """Replay or run transfer test (defined by transfer_fn) by modif. 
+ + Arguments: + --------- + args {[Namespace]} -- [args from argparser] + flow_params {[flow_params object, pulled from ]} -- [description] + transfer_fn {[type]} -- [description] + + Keyword Arguments: + ----------------- + rllib_config {[type]} -- [description] (default: {None}) + result_dir {[type]} -- [description] (default: {None}) + """ + assert bool(args.controller) ^ bool(rllib_config), \ + "Need to specify either controller or rllib_config, but not both" + + if args.run_transfer: + flow_params = transfer_test.flow_params_modifier_fn(flow_params) + + if args.controller: + test_params = {} + if args.controller == 'idm': + from flow.controllers.car_following_models import IDMController + controller = IDMController + test_params.update({'v0': 1, 'T': 1, 'a': 0.2, 'b': 0.2}) # An example of really obvious changes + elif args.controller == 'default_human': + controller = flow_params['veh'].type_parameters['human']['acceleration_controller'][0] + test_params.update(flow_params['veh'].type_parameters['human']['acceleration_controller'][1]) + elif args.controller == 'follower_stopper': + from flow.controllers.velocity_controllers import FollowerStopper + controller = FollowerStopper + test_params.update({'v_des': 15}) + elif args.controller == 'sumo': + from flow.controllers.car_following_models import SimCarFollowingController + controller = SimCarFollowingController + + flow_params['veh'].type_parameters['av']['acceleration_controller'] = (controller, test_params) + + for veh_param in flow_params['veh'].initial: + if veh_param['veh_id'] == 'av': + veh_param['acceleration_controller'] = (controller, test_params) + + sim_params = flow_params['sim'] + sim_params.num_clients = 1 + + sim_params.restart_instance = True + dir_path = os.path.dirname(os.path.realpath(__file__)) + emission_path = '{0}/test_time_rollout/'.format(dir_path) + sim_params.emission_path = emission_path if args.gen_emission else None + + # pick your rendering mode + if args.render_mode == 'sumo_web3d': + sim_params.num_clients = 2 + sim_params.render = False + elif args.render_mode == 'drgb': + sim_params.render = 'drgb' + sim_params.pxpm = 4 + elif args.render_mode == 'sumo_gui': + sim_params.render = False # will be set to True below + elif args.render_mode == 'no_render': + sim_params.render = False + if args.save_render: + if args.render_mode != 'sumo_gui': + sim_params.render = 'drgb' + sim_params.pxpm = 4 + sim_params.save_render = True + + # Start the environment with the gui turned on and a path for the + # emission file + env_params = flow_params['env'] + env_params.restart_instance = False + if args.evaluate: + env_params.evaluate = True + + # lower the horizon if testing + if args.horizon: + env_params.horizon = args.horizon + + # Create and register a gym+rllib env + create_env, env_name = make_create_env(params=flow_params, version=0) + env = create_env(env_name) + + if args.render_mode == 'sumo_gui': + env.sim_params.render = True # set to True after initializing agent and env + + # if restart_instance, don't restart here because env.reset will restart later + if not sim_params.restart_instance: + env.restart_simulation(sim_params=sim_params, render=sim_params.render) + + if rllib_config: + # check if we have a multiagent environment but in a + # backwards compatible way + if rllib_config.get('multiagent', {}).get('policies', None): + multiagent = True + pkl = get_rllib_pkl(result_dir) + rllib_config['multiagent'] = pkl['multiagent'] + else: + multiagent = False + raise NotImplementedError + + # Run on 
only one cpu for rendering purposes + rllib_config['num_workers'] = 0 + + # lower the horizon if testing + if args.horizon: + rllib_config['horizon'] = args.horizon + + assert 'run' in rllib_config['env_config'], "Was this trained with the latest version of Flow?" + # Determine agent and checkpoint + config_run = rllib_config['env_config']['run'] + + rllib_flow_params = get_flow_params(rllib_config) + agent_create_env, agent_env_name = make_create_env(params=rllib_flow_params, version=0) + register_env(agent_env_name, agent_create_env) + agent_cls = get_agent_class(config_run) + + # create the agent that will be used to compute the actions + agent = agent_cls(env=agent_env_name, config=rllib_config) + checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num + checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num + agent.restore(checkpoint) + + if multiagent: + # map the agent id to its policy + policy_map_fn = rllib_config['multiagent']['policy_mapping_fn'] + + if rllib_config['model']['use_lstm']: + use_lstm = True + if multiagent: + # map the agent id to its policy + size = rllib_config['model']['lstm_cell_size'] + lstm_state = defaultdict(lambda: [np.zeros(size, np.float32), + np.zeros(size, np.float32)]) + else: + lstm_state = [ + np.zeros(rllib_config['model']['lstm_cell_size'], np.float32), + np.zeros(rllib_config['model']['lstm_cell_size'], np.float32) + ] + else: + use_lstm = False + + # used to store + info_dict = { + "velocities": [], + "outflows": [], + } + info_dict.update({ + key: [] for key in custom_callables.keys() + }) + + for i in range(args.num_rollouts): + vel = [] + custom_vals = {key: [] for key in custom_callables.keys()} + state = env.reset() + for _ in range(env_params.horizon): + + if rllib_config: + if multiagent: + action = {} + for agent_id in state.keys(): + if use_lstm: + action[agent_id], lstm_state[agent_id], _ = \ + agent.compute_action( + state[agent_id], state=lstm_state[agent_id], + policy_id=policy_map_fn(agent_id)) + else: + action[agent_id] = agent.compute_action( + state[agent_id], policy_id=policy_map_fn(agent_id)) + else: + if use_lstm: + raise NotImplementedError + else: + action = agent.compute_action(state) + else: + action = None + + state, reward, done, _ = env.step(action) + + # Compute the velocity speeds and cumulative returns. + veh_ids = env.k.vehicle.get_ids() + vel.append(np.mean(env.k.vehicle.get_speed(veh_ids))) + + # Compute the results for the custom callables. + for (key, lambda_func) in custom_callables.items(): + custom_vals[key].append(lambda_func(env)) + + if type(done) is dict and done['__all__']: + break + elif type(done) is not dict and done: + break + + # Store the information from the run in info_dict. + outflow = env.k.vehicle.get_outflow_rate(int(500)) + info_dict["velocities"].append(np.mean(vel)) + info_dict["outflows"].append(outflow) + for key in custom_vals.keys(): + info_dict[key].append(np.mean(custom_vals[key])) + + print('======== Summary of results ========') + if args.run_transfer: + print("Transfer test: {}".format(transfer_test.transfer_str)) + print("====================================") + + # Print the averages/std for all variables in the info_dict. 
+ for key in info_dict.keys(): + print("Average, std {}: {}, {}".format( + key, np.mean(info_dict[key]), np.std(info_dict[key]))) + + # terminate the environment + env.unwrapped.terminate() + + if output_dir: + if args.run_transfer: + exp_name = "{}-replay".format(transfer_test.transfer_str) + else: + exp_name = "i210_replay" + replay_out = os.path.join(output_dir, '{}-info.npy'.format(exp_name)) + np.save(replay_out, info_dict) + # if prompted, convert the emission file into a csv file + if args.gen_emission: + emission_filename = '{0}-emission.xml'.format(env.network.name) + time.sleep(0.1) + + emission_path = \ + '{0}/test_time_rollout/{1}'.format(dir_path, emission_filename) + + output_path = os.path.join(output_dir, '{}-emission.csv'.format(exp_name)) + # convert the emission file into a csv file + emission_to_csv(emission_path, output_path=output_path) + + # print the location of the emission csv file + print("\nGenerated emission file at " + output_path) + + # delete the .xml version of the emission file + os.remove(emission_path) + + # Create the flow_params object + with open(os.path.join(output_dir, exp_name) + '.json', 'w') as outfile: + json.dump(flow_params, outfile, + cls=FlowParamsEncoder, sort_keys=True, indent=4) + + return info_dict + + +def create_parser(): + """Create the parser to capture CLI arguments.""" + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description='[Flow] Evaluates a reinforcement learning agent ' + 'given a checkpoint.', + epilog=EXAMPLE_USAGE) + + parser.add_argument( + '--rllib_result_dir', '-r', required=False, type=str, help='Directory containing results') + parser.add_argument('--checkpoint_num', '-c', required=False, type=str, help='Checkpoint number.') + + parser.add_argument( + '--num_rollouts', + type=int, + default=1, + help='The number of rollouts to visualize.') + parser.add_argument( + '--gen_emission', + action='store_true', + help='Specifies whether to generate an emission file from the ' + 'simulation') + parser.add_argument( + '--evaluate', + action='store_true', + help='Specifies whether to use the \'evaluate\' reward ' + 'for the environment.') + parser.add_argument( + '--render_mode', '-rm', + type=str, + default=None, + help='Pick the render mode. Options include sumo_web3d, ' + 'rgbd and sumo_gui') + parser.add_argument( + '--save_render', + action='store_true', + help='Saves a rendered video to a file. NOTE: Overrides render_mode ' + 'with pyglet rendering.') + parser.add_argument( + '--horizon', + type=int, + help='Specifies the horizon.') + parser.add_argument( + '--local', + action='store_true', + help='Adjusts run settings to be compatible with limited ' + 'memory capacity' + ) + parser.add_argument( + '--controller', + type=str, + help='Which custom controller to use. 
Defaults to IDM' + ) + parser.add_argument( + '--run_transfer', + action='store_true', + help='Runs transfer tests if true' + ) + parser.add_argument( + '--output_dir', + type=str, + help='Directory to save results.', + default=None + ) + return parser + + +if __name__ == '__main__': + parser = create_parser() + args = parser.parse_args() + + rllib_config = None + rllib_result_dir = None + if args.rllib_result_dir is not None: + rllib_result_dir = args.rllib_result_dir if args.rllib_result_dir[-1] != '/' \ + else args.rllib_result_dir[:-1] + + rllib_config = get_rllib_config(rllib_result_dir) + + flow_params = deepcopy(I210_MA_DEFAULT_FLOW_PARAMS) + + if args.local: + ray.init(num_cpus=1, object_store_memory=200 * 1024 * 1024) + else: + ray.init(num_cpus=1) + + if args.run_transfer: + for transfer_test in inflows_range(penetration_rates=[0.05, 0.1, 0.2], flow_rate_coefs=[0.8, 1.0, 1.2]): + replay(args, flow_params, output_dir=args.output_dir, transfer_test=transfer_test, + rllib_config=rllib_config, result_dir=rllib_result_dir) + else: + replay(args, flow_params, output_dir=args.output_dir, rllib_config=rllib_config, result_dir=rllib_result_dir) diff --git a/flow/visualize/plot_custom_callables.py b/flow/visualize/plot_custom_callables.py new file mode 100644 index 000000000..8df0e7f62 --- /dev/null +++ b/flow/visualize/plot_custom_callables.py @@ -0,0 +1,74 @@ +"""Generate charts from with .npy files containing custom callables through replay.""" + +import argparse +from datetime import datetime +import errno +import numpy as np +import matplotlib.pyplot as plt +import os +import pytz +import sys + + +def parse_flags(args): + """Parse training options user can specify in command line. + + Returns + ------- + argparse.Namespace + the output parser object + """ + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description="Parse argument used when running a Flow simulation.", + epilog="python train.py EXP_CONFIG") + parser.add_argument("target_folder", type=str, + help='Folder containing results') + parser.add_argument("--output_folder", type=str, required=False, default=None, + help='Folder to save charts to.') + parser.add_argument("--show_images", action='store_true', + help='Whether to display charts.') + return parser.parse_args(args) + + +if __name__ == "__main__": + flags = parse_flags(sys.argv[1:]) + + date = datetime.now(tz=pytz.utc) + date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") + + if flags.output_folder: + if not os.path.exists(flags.output_folder): + try: + os.makedirs(flags.output_folder) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + + info_dicts = [] + custom_callable_names = set() + exp_names = [] + for (dirpath, dir_names, file_names) in os.walk(flags.target_folder): + for file_name in file_names: + if file_name[-4:] == ".npy": + exp_name = os.path.basename(os.path.dirname(dirpath)) + info_dict = np.load(os.path.join(dirpath, file_name), allow_pickle=True).item() + + info_dicts.append(info_dict) + exp_names.append(exp_name) + custom_callable_names.update(info_dict.keys()) + + for name in custom_callable_names: + y_vals = [np.mean(info_dict[name]) for info_dict in info_dicts] + y_stds = [np.std(info_dict[name]) for info_dict in info_dicts] + x_pos = np.arange(len(exp_names)) + + plt.bar(x_pos, y_vals, align='center', alpha=0.5) + plt.xticks(x_pos, [exp_name for exp_name in exp_names], rotation=60) + plt.ylabel('Experiment') + plt.title('I210 Replay Result: {}'.format(name)) + 
plt.tight_layout() + if flags.output_folder: + plt.savefig(os.path.join(flags.output_folder, '{}-plot.png'.format(name))) + + plt.show() diff --git a/flow/visualize/transfer/util.py b/flow/visualize/transfer/util.py new file mode 100644 index 000000000..50b503956 --- /dev/null +++ b/flow/visualize/transfer/util.py @@ -0,0 +1,138 @@ +"""Definitions of transfer classes.""" +from copy import deepcopy + +from flow.core.params import InFlows +from examples.exp_configs.rl.multiagent.multiagent_i210 import VEH_PER_HOUR_BASE_119257914, \ + VEH_PER_HOUR_BASE_27414345, VEH_PER_HOUR_BASE_27414342 + + +def make_inflows(penetration_rate=0.1, flow_rate_coef=1.0, departSpeed=20, on_ramp=False): + """Generate inflows object from parameters. Uses default inflows from multiagent_i210. + + Keyword Arguments: + ----------------- + penetration_rate {float} -- [AV Penetration Rate] (default: {0.1}) + flow_rate_coef {float} -- [Scale flow rate by] (default: {1.0}) + departSpeed {int} -- [Initial speed of all flows] (default: {20}) + + Returns + ------- + [Inflows] -- [Inflows parameter object] + + """ + inflow = InFlows() + # main highway + assert penetration_rate < 1.0, "your penetration rate is over 100%" + assert penetration_rate > 0.0, "your penetration rate should be above zero" + + inflow_119257914 = dict(veh_type="human", + edge="119257914", + vehs_per_hour=VEH_PER_HOUR_BASE_119257914 * penetration_rate * flow_rate_coef, + # probability=1.0, + departLane="random", + departSpeed=departSpeed) + inflow_119257914_av = dict(veh_type="av", + edge="119257914", + vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * penetration_rate * flow_rate_coef), + # probability=1.0, + departLane="random", + departSpeed=departSpeed) + if on_ramp: + inflow_27414345 = dict(veh_type="human", + edge="27414345", + vehs_per_hour=VEH_PER_HOUR_BASE_27414345 * penetration_rate * flow_rate_coef, + departLane="random", + departSpeed=departSpeed) + + inflow_27414342 = dict(veh_type="human", + edge="27414342#0", + vehs_per_hour=VEH_PER_HOUR_BASE_27414342 * penetration_rate * flow_rate_coef, + departLane="random", + departSpeed=departSpeed) + + all_inflow_defs = (inflow_119257914, inflow_27414345, inflow_27414342, inflow_119257914_av) + else: + all_inflow_defs = (inflow_119257914, inflow_119257914_av) + + for inflow_def in all_inflow_defs: + inflow.add(**inflow_def) + + return inflow + + +class BaseTransfer: + """Base Transfer class.""" + + def __init__(self): + self.transfer_str = "Base" + pass + + def flow_params_modifier_fn(self, flow_params, clone_params=True): + """Return modified flow_params. + + Arguments: + --------- + flow_params {[flow_params_dictionary]} -- [flow_params] + """ + if clone_params: + flow_params = deepcopy(flow_params) + + return flow_params + + def env_modifier_fn(self, env): + """Modify the env before rollouts are run. 
+
+        Arguments:
+        ---------
+            env {[I210MultiEnv]} -- [Env to modify]
+        """
+        pass
+
+
+class InflowTransfer(BaseTransfer):
+    """Modifies the inflow of the i210 env."""
+
+    def __init__(self, penetration_rate=0.1, flow_rate_coef=1.0, departSpeed=20):
+        super(InflowTransfer, self).__init__()
+        self.penetration_rate = penetration_rate
+        self.flow_rate_coef = flow_rate_coef
+        self.departSpeed = departSpeed
+
+        self.transfer_str = "{:0.2f}_pen_{:0.2f}_flow_rate_coef_{:0.2f}_depspeed".format(
+            penetration_rate, flow_rate_coef, departSpeed)
+
+    def flow_params_modifier_fn(self, flow_params, clone_params=True):
+        """See parent class."""
+        if clone_params:
+            flow_params = deepcopy(flow_params)
+
+        flow_params['net'].inflows = make_inflows(self.penetration_rate, self.flow_rate_coef, self.departSpeed)
+
+        return flow_params
+
+
+def inflows_range(penetration_rates=0.1, flow_rate_coefs=1.0, departSpeeds=20.0):
+    """Generate inflow objects given penetration rates, flow rates, and depart speeds.
+
+    Keyword Arguments:
+    -----------------
+        penetration_rates {float | list of floats} -- [single, or multiple penetration rates] (default: {0.1})
+        flow_rate_coefs {float | list of floats} -- [single, or multiple flow rate coefficients] (default: {1.0})
+        departSpeeds {float | list of floats} -- [single, or multiple depart speeds] (default: {20.0})
+
+    Yields
+    ------
+    [InflowTransfer] -- [Transfer object]
+    """
+    if not hasattr(penetration_rates, '__iter__'):
+        penetration_rates = [penetration_rates]
+    if not hasattr(flow_rate_coefs, '__iter__'):
+        flow_rate_coefs = [flow_rate_coefs]
+    if not hasattr(departSpeeds, '__iter__'):
+        departSpeeds = [departSpeeds]
+
+    for departSpeed in departSpeeds:
+        for penetration_rate in penetration_rates:
+            for flow_rate_coef in flow_rate_coefs:
+                yield InflowTransfer(penetration_rate=penetration_rate, flow_rate_coef=flow_rate_coef,
+                                     departSpeed=departSpeed)

From 4ce53319e4768cae17f866f3cfc9686db9080ae7 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Mon, 6 Apr 2020 15:28:57 -0700
Subject: [PATCH 019/438] data pipeline implemented

---
 examples/data_pipeline.py           | 179 ++++++++++++++++++++++++++++
 examples/query.py                   |   8 ++
 examples/run_query.py               |  34 ++++++
 examples/simulate.py                |  10 +-
 flow/controllers/base_controller.py |  12 ++
 flow/core/experiment.py             |  27 ++++-
 flow/core/kernel/vehicle/base.py    |  16 +++
 flow/core/kernel/vehicle/traci.py   |  15 +++
 8 files changed, 299 insertions(+), 2 deletions(-)
 create mode 100644 examples/data_pipeline.py
 create mode 100644 examples/query.py
 create mode 100644 examples/run_query.py
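For orientation, generate_trajectory_table in the new examples/data_pipeline.py below consumes an extra_info dict of parallel lists that Experiment.run (patched later in this commit) fills with one entry per vehicle per step. A hypothetical two-vehicle, single-step illustration of its shape (keys match the patch; values are invented):

extra_info = {
    "time": [0.5, 0.5],
    "id": ["human_0", "human_1"],
    "headway": [24.1, 30.7],
    "acceleration": [0.12, -0.30],
    "leader_id": ["human_1", None],
    "follower_id": [None, "human_0"],
    "leader_rel_speed": [1.3, 0.0],
    "accel_without_noise": [0.10, -0.28],
    "road_grade": [0, 0],
}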
diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py
new file mode 100644
index 000000000..5fdc30cf2
--- /dev/null
+++ b/examples/data_pipeline.py
@@ -0,0 +1,179 @@
+import pandas as pd
+import boto3
+from botocore.exceptions import ClientError
+from examples.query import QueryStrings
+from time import time
+
+
+def generate_trajectory_table(data_path, extra_info, partition_name):
+    """Generate the desired trajectory_table output from a standard SUMO emission file.
+
+    Parameters
+    ----------
+    data_path : str
+        path to the standard SUMO emission file
+    extra_info: dict
+        extra information needed in the trajectory table, collected from flow
+    partition_name: str
+        the name of the partition this output is written to
+    Returns
+    -------
+    output_file_path: str
+        the local path of the outputted csv file
+    """
+    raw_output = pd.read_csv(data_path, index_col=["time", "id"])
+    required_cols = {"time", "id", "speed", "x", "y"}
+    raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1)
+
+    extra_info = pd.DataFrame.from_dict(extra_info)
+    extra_info.set_index(["time", "id"])
+    raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"])
+
+    # add the partition column
+    raw_output['partition'] = partition_name
+
+    output_file_path = data_path[:-4]+"_trajectory.csv"
+    raw_output.to_csv(output_file_path, index=False)
+    return output_file_path
+
+
+def upload_to_s3(bucket_name, bucket_key, file_path):
+    """Upload a file to an S3 bucket.
+
+    Parameters
+    ----------
+    bucket_name : str
+        the bucket to upload to
+    bucket_key: str
+        the key within the bucket for the file
+    file_path: str
+        the path of the file to be uploaded
+    """
+    s3 = boto3.resource("s3")
+    s3.Bucket(bucket_name).upload_file(file_path, bucket_key)
+    return
+
+
+class AthenaQuery:
+
+    def __init__(self):
+        self.MAX_WAIT = 60
+        self.client = boto3.client("athena")
+        self.existing_partitions = self.get_existing_partitions()
+
+    def get_existing_partitions(self):
+        """Return the existing partitions in the S3 bucket."""
+
+        response = self.client.start_query_execution(
+            QueryString='SHOW PARTITIONS trajectory_table',
+            QueryExecutionContext={
+                'Database': 'simulation'
+            },
+            WorkGroup='primary'
+        )
+        if self.wait_for_execution(response['QueryExecutionId']):
+            raise RuntimeError("get current partitions timed out")
+        response = self.client.get_query_results(
+            QueryExecutionId=response['QueryExecutionId'],
+            MaxResults=1000
+        )
+        return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']]
+
+    def check_status(self, execution_id):
+        """Return the status of the execution with the given id.
+
+        Parameters
+        ----------
+        execution_id : string
+            id of the execution to check
+        Returns
+        -------
+        status: str
+            QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED
+        """
+
+        response = self.client.get_query_execution(
+            QueryExecutionId=execution_id
+        )
+        return response['QueryExecution']['Status']['State']
+
+    def wait_for_execution(self, execution_id):
+        """Wait for the execution to finish or time out.
+
+        Parameters
+        ----------
+        execution_id : str
+            id of the execution this is waiting for
+        Returns
+        -------
+        time_out: bool
+            True on time-out, False on success
+        Raises
+        ------
+        RuntimeError: if the execution fails or gets canceled
+        """
+        start = time()
+        while time() - start < self.MAX_WAIT:
+            state = self.check_status(execution_id)
+            if state == 'FAILED' or state == 'CANCELLED':
+                raise RuntimeError("execution failed or was canceled")
+            elif state == 'SUCCEEDED':
+                return False
+        return True
+
+    def update_partition(self, partition):
+        """Load the given partition into the trajectory_table on Athena.
+
+        Parameters
+        ----------
+        partition : str
+            the new partition that needs to be loaded
+        """
+        response = self.client.start_query_execution(
+            QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition),
+            QueryExecutionContext={
+                'Database': 'simulation'
+            },
+            WorkGroup='primary'
+        )
+        if self.wait_for_execution(response['QueryExecutionId']):
+            raise RuntimeError("update partition timed out")
+        self.existing_partitions.append(partition)
+        return
+
+    def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"):
+        """Start the execution of a query; does not wait for it to finish.
+
+        Parameters
+        ----------
+        query_name : str
+            name of the query in the QueryStrings enum that will be run
+        result_location: str, optional
+            location on the S3 bucket where the result will be stored
+        partition: str, optional
+            name of the partition to run this query on
+        Returns
+        -------
+        execution_id: str
+            the execution id of the execution started by this method
+        Raises
+        ------
+        ValueError: if the query does not exist in the QueryStrings enum
+        """
+        if query_name not in QueryStrings.__members__:
+            raise ValueError("query does not exist: please add it to query.py")
+
+        if partition not in self.existing_partitions:
+            self.update_partition(partition)
+
+        response = self.client.start_query_execution(
+            QueryString=QueryStrings[query_name].value.format(partition=partition),
+            QueryExecutionContext={
+                'Database': 'simulation'
+            },
+            ResultConfiguration={
+                'OutputLocation': result_location,
+            },
+            WorkGroup='primary'
+        )
+        return response['QueryExecutionId']
\ No newline at end of file
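A usage sketch for the AthenaQuery class above, mirroring what the run_query.py script in this patch does; the bucket and defaults are the ones hard-coded in this commit:

from examples.data_pipeline import AthenaQuery

engine = AthenaQuery()
execution_id = engine.run_query(
    "SAMPLE",  # any member name of the QueryStrings enum
    result_location="s3://brent.experiments/query-result/",
    partition="default")
# run_query returns immediately; wait_for_execution returns True on
# time-out and False once the query has succeeded
timed_out = engine.wait_for_execution(execution_id)
print(execution_id, engine.check_status(execution_id))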
diff --git a/examples/query.py b/examples/query.py
new file mode 100644
index 000000000..3fbbe69e1
--- /dev/null
+++ b/examples/query.py
@@ -0,0 +1,8 @@
+from enum import Enum
+
+tags = {}
+
+
+class QueryStrings(Enum):
+    SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;"
+    UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');"
\ No newline at end of file
diff --git a/examples/run_query.py b/examples/run_query.py
new file mode 100644
index 000000000..7b4a5af7d
--- /dev/null
+++ b/examples/run_query.py
@@ -0,0 +1,34 @@
+import argparse
+import sys
+from examples.data_pipeline import AthenaQuery
+from examples.query import QueryStrings
+
+parser = argparse.ArgumentParser(prog="run_query", description="runs a query on AWS Athena and stores the result "
+                                                               "in an S3 location")
+parser.add_argument("--run", type=str, nargs="+")
+parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/")
+parser.add_argument("--partition", type=str, nargs='?', default="default")
+parser.add_argument("--list_partitions", action="store_true")
+parser.add_argument("--check_status", type=str, nargs='+')
+parser.add_argument("--list_queries", action="store_true")
+
+
+if __name__ == "__main__":
+    args = parser.parse_args()
+    queryEngine = AthenaQuery()
+
+    if args.run:
+        execution_ids = []
+        for query_name in args.run:
+            execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition))
+        print(execution_ids)
+    if args.list_partitions:
+        print(queryEngine.existing_partitions)
+    if args.check_status:
+        status = dict()
+        for execution_id in args.check_status:
+            status[execution_id] = queryEngine.check_status(execution_id)
+        print(status)
+    if args.list_queries:
+        for q in QueryStrings:
+            print(q)
diff --git a/examples/simulate.py b/examples/simulate.py
index 60767b6b7..6ad0048eb 100644
--- a/examples/simulate.py
+++ b/examples/simulate.py
@@ -45,6 +45,12 @@ def parse_args(args):
         action='store_true',
         help='Specifies whether to generate an emission file from the '
              'simulation.')
+    parser.add_argument(
+        '--to_aws',
+        type=str, nargs='?', default=None, const="default",
+        help='Specifies the name of the S3 partition in which to store '
+             'the output file. Passing a non-None value for this argument '
+             'automatically sets gen_emission to True.')
 
     return parser.parse_known_args(args)[0]
 
@@ -52,6 +58,8 @@ def parse_args(args):
 if __name__ == "__main__":
     flags = parse_args(sys.argv[1:])
 
+    flags.gen_emission = flags.gen_emission or flags.to_aws
+
     # Get the flow_params object.
     module = __import__("exp_configs.non_rl", fromlist=[flags.exp_config])
     flow_params = getattr(module, flags.exp_config).flow_params
@@ -74,4 +82,4 @@
     exp = Experiment(flow_params, callables)
 
     # Run for the specified number of rollouts.
-    exp.run(flags.num_runs, convert_to_csv=flags.gen_emission)
+    exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws)
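The --to_aws value added above doubles as a Hive-style partition name: the uploader in flow/core/experiment.py (below) writes the csv under a partition_name=<value> prefix, and the UPDATE_PARTITION query registers that same value with Athena. A small sketch of the key construction, assuming the Athena table's location is configured over this prefix:

def trajectory_bucket_key(partition_name, file_name):
    # mirrors the key built in Experiment.run later in this patch
    return "trajectory-output/partition_name={}/{}".format(
        partition_name, file_name)

# trajectory_bucket_key("default", "run_trajectory.csv")
# -> 'trajectory-output/partition_name=default/run_trajectory.csv'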
diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py
index 4004b1c4d..6e6734764 100755
--- a/flow/controllers/base_controller.py
+++ b/flow/controllers/base_controller.py
@@ -88,6 +88,9 @@ def get_action(self, env):
         float
             the modified form of the acceleration
         """
+        # reset this vehicle's stored accel_without_noise to None
+        env.k.vehicle.update_accel_without_noise(self.veh_id, None)
+
         # this is to avoid abrupt decelerations when a vehicle has just entered
         # a network and its data is still not subscribed
         if len(env.k.vehicle.get_edge(self.veh_id)) == 0:
@@ -105,6 +108,15 @@ def get_action(self, env):
         if accel is None:
             return None
 
+        # store the noise-free acceleration for this vehicle, running the
+        # fail safes on it first if requested
+        accel_without_noise = accel
+        if self.fail_safe == 'instantaneous':
+            accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise)
+        elif self.fail_safe == 'safe_velocity':
+            accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise)
+        env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise)
+
         # add noise to the accelerations, if requested
         if self.accel_noise > 0:
             accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise)
diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index 69a78cb0e..a81f3b130 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -1,6 +1,7 @@
 """Contains an experiment class for running simulations."""
 from flow.core.util import emission_to_csv
 from flow.utils.registry import make_create_env
+from examples.data_pipeline import generate_trajectory_table, upload_to_s3
 import datetime
 import logging
 import time
@@ -85,7 +86,7 @@ def __init__(self, flow_params, custom_callables=None):
 
         logging.info("Initializing environment.")
 
-    def run(self, num_runs, rl_actions=None, convert_to_csv=False):
+    def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None):
         """Run the given network for a set number of runs.
 
         Parameters
         ----------
@@ -98,6 +99,10 @@
         convert_to_csv : bool
             Specifies whether to convert the emission file created by sumo
             into a csv file
+        partition_name: str
+            Specifies the S3 partition in which to store the output file;
+            it will be used later for queries. If None, the output will not
+            be uploaded to S3.
Returns ------- @@ -136,6 +141,8 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] + extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], + "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} for i in range(num_runs): ret = 0 @@ -153,6 +160,18 @@ def rl_actions(*_): vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) @@ -195,4 +214,10 @@ def rl_actions(*_): # Delete the .xml version of the emission file. os.remove(emission_path) + output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + output_file.split('/')[-1], output_file) + return info_dict diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index d9fc773cd..72951a5d6 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -663,3 +663,19 @@ def get_max_speed(self, veh_id, error): float """ raise NotImplementedError + + ########################################################################### + # Methods for Datapipeline # + ########################################################################### + + def get_accel(self, veh_id): + """ see traci class """ + raise NotImplementedError + + def update_accel_without_noise(self, veh_id, accel_without_noise): + """ see traci class """ + raise NotImplementedError + + def get_accel_without_noise(self, veh_id): + """ see traci class """ + raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 657b89a94..051797b10 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -109,6 +109,7 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] + self.__vehicles[veh_id]["accel_without_noise"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1094,3 +1095,17 @@ def get_max_speed(self, veh_id, error=-1001): def set_max_speed(self, veh_id, max_speed): """See parent class.""" self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) + + # add for data pipeline + def get_accel(self, veh_id): + return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + + def update_accel_without_noise(self, veh_id, accel_without_noise): + 
self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + + def get_accel_without_noise(self, veh_id): + return self.__vehicles[veh_id]["accel_without_noise"] + + def get_road_grade(self, veh_id): + # TODO + return 0 From 5e3e88742197c0587423b35c0ec9a9457ad75cf0 Mon Sep 17 00:00:00 2001 From: Kathy Jang Date: Thu, 9 Apr 2020 11:50:44 -0700 Subject: [PATCH 020/438] style --- examples/simulate.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/simulate.py b/examples/simulate.py index 4ec46b974..848f030a4 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -76,8 +76,8 @@ def parse_args(args): # Create the flow_params object json_filename = flow_params['exp_tag'] with open(os.path.join(flow_params['sim'].emission_path, json_filename) + '.json', 'w') as outfile: - json.dump(flow_params, outfile, - cls=FlowParamsEncoder, sort_keys=True, indent=4) + json.dump(flow_params, outfile, + cls=FlowParamsEncoder, sort_keys=True, indent=4) # Create the experiment object. exp = Experiment(flow_params, callables) From adcc61787729ec7a60af1bb5e294f1df2eeca825 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 10 Apr 2020 19:54:30 -0700 Subject: [PATCH 021/438] multiple runs issue solved, testing added --- examples/data_pipeline.py | 55 +++++++- examples/datapipeline_test.py | 33 +++++ examples/query.py | 13 +- examples/run_query.py | 6 +- flow/core/experiment.py | 224 +----------------------------- flow/core/kernel/vehicle/base.py | 4 + flow/core/kernel/vehicle/traci.py | 3 + 7 files changed, 107 insertions(+), 231 deletions(-) create mode 100644 examples/datapipeline_test.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 5fdc30cf2..9d56548c2 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,7 +1,8 @@ import pandas as pd +import numpy as np import boto3 from botocore.exceptions import ClientError -from examples.query import QueryStrings +from examples.query import QueryStrings, testing_functions from time import time @@ -30,13 +31,22 @@ def generate_trajectory_table(data_path, extra_info, partition_name): raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) # add the partition column - raw_output['partition'] = partition_name - + # raw_output['partition'] = partition_name + raw_output = raw_output.sort_values(by=["time", "id"]) output_file_path = data_path[:-4]+"_trajectory.csv" raw_output.to_csv(output_file_path, index=False) return output_file_path +def generate_trajectory_from_flow(data_path, extra_info, partition_name): + extra_info = pd.DataFrame.from_dict(extra_info) + # extra_info["partition"] = partition_name + extra_info.to_csv(data_path, index=False) + upload_only_file_path = data_path[:-4] + "_upload" + ".csv" + extra_info.to_csv(upload_only_file_path, index=False, header=False) + return upload_only_file_path + + def upload_to_s3(bucket_name, bucket_key, file_path): """ upload a file to S3 bucket @@ -176,4 +186,41 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re }, WorkGroup='primary' ) - return response['QueryExecutionId'] \ No newline at end of file + return response['QueryExecutionId'] + +########################################################################### +# Helpers for testing the SQL Queries # +########################################################################### + + +def test_sql_query(query_name): + if query_name not in testing_functions: + raise ValueError("no tests supported for 
this query") + + # Run the respective sql query + queryEngine = AthenaQuery() + execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test", + partition="test") + if queryEngine.wait_for_execution(execution_id): + raise RuntimeError("execution timed out") + + # get the Athena query result from S3 + s3 = boto3.resource("s3") + s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv", + "data/athena_result.csv") + athena_result = pd.read_csv("data/athena_result.csv") + athena_result = athena_result.sort_values(by=["time", "id"]) + + # get the python expected result + expected_result = pd.read_csv("data/test_data.csv") + expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand") + expected_result.columns = ["time", "id", "power"] + expected_result = expected_result.sort_values(by=["time", "id"]) + + difference = athena_result["power"] - expected_result["power"] + print("average difference is: " + str(np.mean(difference))) + print("std of difference is: " + str(np.std(difference))) + print("average ratio of difference to expected is: " + + str(np.mean(np.divide(difference, expected_result["power"])))) + difference = pd.DataFrame(difference) + difference.to_csv("./difference.csv") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py new file mode 100644 index 000000000..564060d3b --- /dev/null +++ b/examples/datapipeline_test.py @@ -0,0 +1,33 @@ +import math + +# Vehicle Mass +M = 1200 +# Gravity +g = 9.81 +# Density of Air +ro_air = 1.225 +# Rolling resistance coefficient +C_r = .005 +# Aerodynamic drag coefficient +C_a = 0.3 +# Vehicle Cross sectional Area +A = 2.6 +# Road grade +theta = 0 + + +def heavyside(inp): + return 0 if inp <= 0 else 1 + + +def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) + accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) + rolling_friction = M * g * C_r * mu + air_drag = .5 * ro_air * A * C_a * mu**3 + power = accel_and_slope + rolling_friction + air_drag + return power + + +def apply_energy_one(row): + return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py index 3fbbe69e1..6354cec3b 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,8 +1,17 @@ from enum import Enum +from examples.datapipeline_test import apply_energy_one -tags = {} +tags = {"energy": ["ENERGY_ONE"]} + +testing_functions = {"ENERGY_ONE": apply_energy_one} class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file + UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" + ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py index 7b4a5af7d..ea8839b09 100644 --- a/examples/run_query.py 
+++ b/examples/run_query.py @@ -1,6 +1,5 @@ import argparse -import sys -from examples.data_pipeline import AthenaQuery +from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" @@ -11,6 +10,7 @@ parser.add_argument("--list_partitions", action="store_true") parser.add_argument("--check_status", type=str, nargs='+') parser.add_argument("--list_queries", action="store_true") +parser.add_argument("--test_query", nargs=1) if __name__ == "__main__": @@ -32,3 +32,5 @@ if args.list_queries: for q in QueryStrings: print(q) + if args.test_query: + test_sql_query(args.test_query[0]) \ No newline at end of file diff --git a/flow/core/experiment.py b/flow/core/experiment.py index a81f3b130..63c92e798 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,223 +1 @@ -"""Contains an experiment class for running simulations.""" -from flow.core.util import emission_to_csv -from flow.utils.registry import make_create_env -from examples.data_pipeline import generate_trajectory_table, upload_to_s3 -import datetime -import logging -import time -import os -import numpy as np - - -class Experiment: - """ - Class for systematically running simulations in any supported simulator. - - This class acts as a runner for a network and environment. In order to use - it to run an network and environment in the absence of a method specifying - the actions of RL agents in the network, type the following: - - >>> from flow.envs import Env - >>> flow_params = dict(...) # see the examples in exp_config - >>> exp = Experiment(flow_params) # for some experiment configuration - >>> exp.run(num_runs=1) - - If you wish to specify the actions of RL agents in the network, this may be - done as follows: - - >>> rl_actions = lambda state: 0 # replace with something appropriate - >>> exp.run(num_runs=1, rl_actions=rl_actions) - - Finally, if you would like to like to plot and visualize your results, this - class can generate csv files from emission files produced by sumo. These - files will contain the speeds, positions, edges, etc... of every vehicle - in the network at every time step. - - In order to ensure that the simulator constructs an emission file, set the - ``emission_path`` attribute in ``SimParams`` to some path. - - >>> from flow.core.params import SimParams - >>> flow_params['sim'] = SimParams(emission_path="./data") - - Once you have included this in your environment, run your Experiment object - as follows: - - >>> exp.run(num_runs=1, convert_to_csv=True) - - After the experiment is complete, look at the "./data" directory. There - will be two files, one with the suffix .xml and another with the suffix - .csv. The latter should be easily interpretable from any csv reader (e.g. - Excel), and can be parsed using tools such as numpy and pandas. - - Attributes - ---------- - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we want - to extract from the environment. The lambda will be called at each step - to extract information from the env and it will be stored in a dict - keyed by the str. - env : flow.envs.Env - the environment object the simulator will run - """ - - def __init__(self, flow_params, custom_callables=None): - """Instantiate the Experiment class. 
- - Parameters - ---------- - flow_params : dict - flow-specific parameters - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we - want to extract from the environment. The lambda will be called at - each step to extract information from the env and it will be stored - in a dict keyed by the str. - """ - self.custom_callables = custom_callables or {} - - # Get the env name and a creator for the environment. - create_env, _ = make_create_env(flow_params) - - # Create the environment. - self.env = create_env() - - logging.info(" Starting experiment {} at {}".format( - self.env.network.name, str(datetime.datetime.utcnow()))) - - logging.info("Initializing environment.") - - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): - """Run the given network for a set number of runs. - - Parameters - ---------- - num_runs : int - number of runs the experiment should perform - rl_actions : method, optional - maps states to actions to be performed by the RL agents (if - there are any) - convert_to_csv : bool - Specifies whether to convert the emission file created by sumo - into a csv file - partition_name: str - Specifies the S3 partition you want to store the output file, - will be used to later for query. If NONE, won't upload output - to S3. - - Returns - ------- - info_dict : dict < str, Any > - contains returns, average speed per step - """ - num_steps = self.env.env_params.horizon - - # raise an error if convert_to_csv is set to True but no emission - # file will be generated, to avoid getting an error at the end of the - # simulation - if convert_to_csv and self.env.sim_params.emission_path is None: - raise ValueError( - 'The experiment was run with convert_to_csv set ' - 'to True, but no emission file will be generated. If you wish ' - 'to generate an emission file, you should set the parameter ' - 'emission_path in the simulation parameters (SumoParams or ' - 'AimsunParams) to the path of the folder where emissions ' - 'output should be generated. If you do not wish to generate ' - 'emissions, set the convert_to_csv parameter to False.') - - # used to store - info_dict = { - "returns": [], - "velocities": [], - "outflows": [], - } - info_dict.update({ - key: [] for key in self.custom_callables.keys() - }) - - if rl_actions is None: - def rl_actions(*_): - return None - - # time profiling information - t = time.time() - times = [] - extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], - "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} - - for i in range(num_runs): - ret = 0 - vel = [] - custom_vals = {key: [] for key in self.custom_callables.keys()} - state = self.env.reset() - for j in range(num_steps): - t0 = time.time() - state, reward, done, _ = self.env.step(rl_actions(state)) - t1 = time.time() - times.append(1 / (t1 - t0)) - - # Compute the velocity speeds and cumulative returns. 
- veh_ids = self.env.k.vehicle.get_ids() - vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) - ret += reward - - # collect additional information for the data pipeline - for vid in veh_ids: - extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) - extra_info["id"].append(vid) - extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) - extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) - extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) - extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) - extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) - extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) - extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - - # Compute the results for the custom callables. - for (key, lambda_func) in self.custom_callables.items(): - custom_vals[key].append(lambda_func(self.env)) - - if done: - break - - # Store the information from the run in info_dict. - outflow = self.env.k.vehicle.get_outflow_rate(int(500)) - info_dict["returns"].append(ret) - info_dict["velocities"].append(np.mean(vel)) - info_dict["outflows"].append(outflow) - for key in custom_vals.keys(): - info_dict[key].append(np.mean(custom_vals[key])) - - print("Round {0}, return: {1}".format(i, ret)) - - # Print the averages/std for all variables in the info_dict. - for key in info_dict.keys(): - print("Average, std {}: {}, {}".format( - key, np.mean(info_dict[key]), np.std(info_dict[key]))) - - print("Total time:", time.time() - t) - print("steps/second:", np.mean(times)) - self.env.terminate() - - if convert_to_csv and self.env.simulator == "traci": - # wait a short period of time to ensure the xml file is readable - time.sleep(0.1) - - # collect the location of the emission file - dir_path = self.env.sim_params.emission_path - emission_filename = \ - "{0}-emission.xml".format(self.env.network.name) - emission_path = os.path.join(dir_path, emission_filename) - - # convert the emission file into a csv - emission_to_csv(emission_path) - - # Delete the .xml version of the emission file. - os.remove(emission_path) - - output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) - - if partition_name: - upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + output_file.split('/')[-1], output_file) - - return info_dict +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) 
# see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. 
If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = self.env.network.name for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. 
for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. os.remove(emission_path) trajectory_table_path = emission_path[:-4] + "_trajectory.csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1], upload_file_path) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 72951a5d6..2a5cf4596 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -676,6 +676,10 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): """ see traci class """ raise NotImplementedError + def get_2D_position(self, veh_id, error=-1001): + """ see traci class """ + raise NotImplementedError + def get_accel_without_noise(self, veh_id): """ see traci class """ raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 051797b10..f5ea8e2eb 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1106,6 +1106,9 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): return self.__vehicles[veh_id]["accel_without_noise"] + def get_2D_position(self, veh_id, error=-1001): + return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) + def get_road_grade(self, veh_id): # TODO return 0 From 47057758ba3cf84dd125ac102fd0bae6681ac91a Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 15 Apr 2020 20:24:51 -0700 Subject: [PATCH 022/438] Evinitsky/straight road pr (#909) Add a straight road training environment. Swap out the reward for a desired velocity squared reward. --- examples/README.md | 6 +- examples/exp_configs/non_rl/straight_road.py | 131 ++++++++++++++ .../rl/multiagent/multiagent_straight_road.py | 162 ++++++++++++++++++ examples/train.py | 11 +- flow/core/kernel/vehicle/traci.py | 2 + flow/core/params.py | 7 +- flow/envs/multiagent/__init__.py | 5 +- flow/envs/multiagent/base.py | 10 +- flow/envs/multiagent/i210.py | 53 ++++-- flow/networks/highway.py | 3 +- flow/visualize/time_space_diagram.py | 79 ++++++++- requirements.txt | 2 + scripts/ray_autoscale.yaml | 4 +- 13 files changed, 443 insertions(+), 32 deletions(-) create mode 100644 examples/exp_configs/non_rl/straight_road.py create mode 100644 examples/exp_configs/rl/multiagent/multiagent_straight_road.py diff --git a/examples/README.md b/examples/README.md index a9d681131..8156831fe 100644 --- a/examples/README.md +++ b/examples/README.md @@ -53,11 +53,11 @@ trained through RL algorithms provided by *RLlib*. 
To execute these examples, run ```shell script -python train.py EXP_CONFIG --rl_trainer "rllib" +python train.py EXP_CONFIG --rl_trainer "rllib" --algorithm <ALGORITHM> ``` where `EXP_CONFIG` is the name of the experiment configuration file, as located -in `exp_configs/rl/singleagent` or `exp_configs/rl/multiagent.` - +in `exp_configs/rl/singleagent` or `exp_configs/rl/multiagent.` Here `<ALGORITHM>` +should be the name of your desired algorithm. Currently we support PPO and TD3. ### stable-baselines diff --git a/examples/exp_configs/non_rl/straight_road.py b/examples/exp_configs/non_rl/straight_road.py new file mode 100644 index 000000000..c557ce836 --- /dev/null +++ b/examples/exp_configs/non_rl/straight_road.py @@ -0,0 +1,131 @@ +"""Multi-agent highway with ramps example. + +Trains a non-constant number of agents, all sharing the same policy, on the +highway with ramps network. +""" +import numpy as np + +from flow.controllers import IDMController +from flow.controllers.velocity_controllers import FollowerStopper +from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ + VehicleParams, SumoParams, SumoLaneChangeParams +from flow.networks import HighwayNetwork +from flow.envs import TestEnv +from flow.networks.highway import ADDITIONAL_NET_PARAMS + + +# SET UP PARAMETERS FOR THE SIMULATION + +# number of steps per rollout +HORIZON = 2000 + +# inflow rate on the highway in vehicles per hour +HIGHWAY_INFLOW_RATE = 10800 / 5 +# percentage of autonomous vehicles compared to human vehicles on highway +PENETRATION_RATE = 0.0 + + +# SET UP PARAMETERS FOR THE NETWORK + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params.update({ + # length of the highway + "length": 2000, + # number of lanes + "lanes": 1, + # speed limit for all edges + "speed_limit": 30, + # number of edges to divide the highway into + "num_edges": 2 +}) + +# CREATE VEHICLE TYPES AND INFLOWS + +vehicles = VehicleParams() +inflows = InFlows() + +# human vehicles +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), +) + +if PENETRATION_RATE > 0.0: + vehicles.add( + "av", + num_vehicles=0, + acceleration_controller=(FollowerStopper, {"v_des": 18.0}), + ) + +# add human vehicles on the highway +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23", + name="idm_highway_inflow") + +if PENETRATION_RATE > 0.0: + inflows.add( + veh_type="av", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23", + name="av_highway_inflow") + +# SET UP FLOW PARAMETERS + +flow_params = dict( + # name of the experiment + exp_tag='multiagent_highway', + + # name of the flow environment the experiment is running on + env_name=TestEnv, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + restart_instance=False + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's
documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) + +custom_callables = { + "avg_speed": lambda env: np.nan_to_num(np.mean( + env.k.vehicle.get_speed(env.k.vehicle.get_ids_by_edge(['highway_0', 'highway_1'])))), +} diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py new file mode 100644 index 000000000..9ed38656f --- /dev/null +++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py @@ -0,0 +1,162 @@ +"""Multi-agent highway with ramps example. + +Trains a non-constant number of agents, all sharing the same policy, on the +highway with ramps network. +""" +from flow.controllers import RLController, IDMController +from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ + VehicleParams, SumoParams, SumoLaneChangeParams +from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS +from flow.networks import HighwayNetwork +from flow.envs.multiagent import MultiStraightRoad +from flow.networks.highway import ADDITIONAL_NET_PARAMS +from flow.utils.registry import make_create_env +from ray.tune.registry import register_env + + +# SET UP PARAMETERS FOR THE SIMULATION + +# number of steps per rollout +HORIZON = 2000 + +# inflow rate on the highway in vehicles per hour +HIGHWAY_INFLOW_RATE = 10800 / 5 +# percentage of autonomous vehicles compared to human vehicles on highway +PENETRATION_RATE = 10 + + +# SET UP PARAMETERS FOR THE NETWORK + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params.update({ + # length of the highway + "length": 2000, + # number of lanes + "lanes": 1, + # speed limit for all edges + "speed_limit": 30, + # number of edges to divide the highway into + "num_edges": 2 +}) + + +# SET UP PARAMETERS FOR THE ENVIRONMENT + +additional_env_params = ADDITIONAL_ENV_PARAMS.copy() +additional_env_params.update({ + 'max_accel': 2.6, + 'max_decel': 4.5, + 'target_velocity': 18, + 'local_reward': True, + 'lead_obs': True +}) + + +# CREATE VEHICLE TYPES AND INFLOWS + +vehicles = VehicleParams() +inflows = InFlows() + +# human vehicles +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), +) + +# autonomous vehicles +vehicles.add( + veh_id='rl', + acceleration_controller=(RLController, {})) + +# add human vehicles on the highway +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23.0", + name="idm_highway_inflow") + +# add autonomous vehicles on the highway +# they will stay on the highway, i.e. 
they won't exit through the off-ramps +inflows.add( + veh_type="rl", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23.0", + name="rl_highway_inflow") + +# SET UP FLOW PARAMETERS + +flow_params = dict( + # name of the experiment + exp_tag='multiagent_highway', + + # name of the flow environment the experiment is running on + env_name=MultiStraightRoad, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, # do not put more than one + additional_params=additional_env_params, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + use_ballistic=True, + restart_instance=False + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) + + +# SET UP RLLIB MULTI-AGENT FEATURES + +create_env, env_name = make_create_env(params=flow_params, version=0) + +# register as rllib env +register_env(env_name, create_env) + +# multiagent configuration +test_env = create_env() +obs_space = test_env.observation_space +act_space = test_env.action_space + + +POLICY_GRAPHS = {'av': (None, obs_space, act_space, {})} + +POLICIES_TO_TRAIN = ['av'] + + +def policy_mapping_fn(_): + """Map a policy in RLlib.""" + return 'av' diff --git a/examples/train.py b/examples/train.py index 8150bc883..d688f2b9a 100644 --- a/examples/train.py +++ b/examples/train.py @@ -25,6 +25,7 @@ import ray from ray import tune +from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper from ray.tune.registry import register_env try: from ray.rllib.agents.agent import get_agent_class @@ -203,7 +204,7 @@ def setup_exps_rllib(flow_params, config["critic_lr"] = tune.grid_search([1e-3, 1e-4]) config["n_step"] = tune.grid_search([1, 10]) else: - sys.exit("We only support PPO and TD3 right now.") + sys.exit("We only support PPO, TD3, right now.") # define some standard and useful callbacks def on_episode_start(info): @@ -214,6 +215,8 @@ def on_episode_start(info): def on_episode_step(info): episode = info["episode"] env = info["env"].get_unwrapped()[0] + if isinstance(env, _GroupAgentsWrapper): + env = env.env speed = np.mean([speed for speed in env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]) if not np.isnan(speed): episode.user_data["avg_speed"].append(speed) @@ -246,7 +249,6 @@ def on_episode_end(info): create_env, gym_name = make_create_env(params=flow_params) - # Register as rllib env register_env(gym_name, create_env) return alg_run, gym_name, config @@ -266,6 +268,10 @@ def train_rllib(submodule, flags): config['num_workers'] = flags.num_cpus config['env'] = gym_name + # create a custom string that makes looking at the experiment names easier + def trial_str_creator(trial): + return "{}_{}".format(trial.trainable_name, trial.experiment_tag) + if flags.local_mode: ray.init(local_mode=True) else: @@ 
-276,6 +282,7 @@ def train_rllib(submodule, flags): "config": config, "checkpoint_freq": flags.checkpoint_freq, "checkpoint_at_end": True, + 'trial_name_creator': trial_str_creator, "max_failures": 0, "stop": { "training_iteration": flags.num_iterations, diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 50cd106c9..22dcc8837 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -990,6 +990,8 @@ def choose_routes(self, veh_ids, route_choices): def get_x_by_id(self, veh_id): """See parent class.""" + if isinstance(veh_id, (list, np.ndarray)): + return [self.get_x_by_id(vehID) for vehID in veh_id] if self.get_edge(veh_id) == '': # occurs when a vehicle crashes is teleported for some other reason return 0. diff --git a/flow/core/params.py b/flow/core/params.py index 5a7467580..afead7017 100755 --- a/flow/core/params.py +++ b/flow/core/params.py @@ -637,6 +637,9 @@ class EnvParams: specifies whether to clip actions from the policy by their range when they are inputted to the reward function. Note that the actions are still clipped before they are provided to `apply_rl_actions`. + done_at_exit : bool, optional + If true, done is returned as True when the vehicle exits. This is only + applied to multi-agent environments. """ def __init__(self, @@ -645,7 +648,8 @@ def __init__(self, warmup_steps=0, sims_per_step=1, evaluate=False, - clip_actions=True): + clip_actions=True, + done_at_exit=True): """Instantiate EnvParams.""" self.additional_params = \ additional_params if additional_params is not None else {} @@ -654,6 +658,7 @@ def __init__(self, self.sims_per_step = sims_per_step self.evaluate = evaluate self.clip_actions = clip_actions + self.done_at_exit = done_at_exit def get_additional_param(self, key): """Return a variable from additional_params.""" diff --git a/flow/envs/multiagent/__init__.py b/flow/envs/multiagent/__init__.py index f7889591d..818d6662b 100644 --- a/flow/envs/multiagent/__init__.py +++ b/flow/envs/multiagent/__init__.py @@ -10,7 +10,7 @@ from flow.envs.multiagent.traffic_light_grid import MultiTrafficLightGridPOEnv from flow.envs.multiagent.highway import MultiAgentHighwayPOEnv from flow.envs.multiagent.merge import MultiAgentMergePOEnv -from flow.envs.multiagent.i210 import I210MultiEnv +from flow.envs.multiagent.i210 import I210MultiEnv, MultiStraightRoad __all__ = [ 'MultiEnv', @@ -21,5 +21,6 @@ 'MultiAgentAccelPOEnv', 'MultiAgentWaveAttenuationPOEnv', 'MultiAgentMergePOEnv', - 'I210MultiEnv' + 'I210MultiEnv', + 'MultiStraightRoad', ] diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index ec95474c6..dfc7c72ad 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -122,10 +122,11 @@ def step(self, rl_actions): else: reward = self.compute_reward(rl_actions, fail=crash) - for rl_id in self.k.vehicle.get_arrived_rl_ids(): - done[rl_id] = True - reward[rl_id] = 0 - states[rl_id] = np.zeros(self.observation_space.shape[0]) + if self.env_params.done_at_exit: + for rl_id in self.k.vehicle.get_arrived_rl_ids(): + done[rl_id] = True + reward[rl_id] = 0 + states[rl_id] = -1 * np.ones(self.observation_space.shape[0]) return states, reward, done, infos @@ -154,6 +155,7 @@ def reset(self, new_inflow_rate=None): self.sim_params.render = True # got to restart the simulation to make it actually display anything self.restart_simulation(self.sim_params) + self.should_render = False # warn about not using restart_instance when using inflows if 
len(self.net_params.inflows.get()) > 0 and \ diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index 6368e7a2d..f931b3bec 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -8,6 +8,8 @@ # largest number of lanes on any given edge in the network MAX_LANES = 6 +SPEED_SCALE = 50 +HEADWAY_SCALE = 1000 ADDITIONAL_ENV_PARAMS = { # maximum acceleration for autonomous vehicles, in m/s^2 @@ -61,6 +63,7 @@ class I210MultiEnv(MultiEnv): def __init__(self, env_params, sim_params, network, simulator='traci'): super().__init__(env_params, sim_params, network, simulator) self.lead_obs = env_params.additional_params.get("lead_obs") + self.max_lanes = MAX_LANES @property def observation_space(self): @@ -76,8 +79,8 @@ def observation_space(self): # speed, dist to ego vehicle, binary value which is 1 if the vehicle is # an AV else: - leading_obs = 3 * MAX_LANES - follow_obs = 3 * MAX_LANES + leading_obs = 3 * self.max_lanes + follow_obs = 3 * self.max_lanes # speed and lane self_obs = 2 @@ -119,11 +122,16 @@ def get_state(self): veh_info = {} for rl_id in self.k.vehicle.get_rl_ids(): speed = self.k.vehicle.get_speed(rl_id) - headway = self.k.vehicle.get_headway(rl_id) - lead_speed = self.k.vehicle.get_speed(self.k.vehicle.get_leader(rl_id)) - if lead_speed == -1001: - lead_speed = 0 - veh_info.update({rl_id: np.array([speed / 50.0, headway / 1000.0, lead_speed / 50.0])}) + lead_id = self.k.vehicle.get_leader(rl_id) + if lead_id in ["", None]: + # in case leader is not visible + lead_speed = SPEED_SCALE + headway = HEADWAY_SCALE + else: + lead_speed = self.k.vehicle.get_speed(lead_id) + headway = self.k.vehicle.get_headway(rl_id) + self.leader.append(lead_id) + veh_info.update({rl_id: np.array([speed / SPEED_SCALE, headway /HEADWAY_SCALE, lead_speed / SPEED_SCALE])}) else: veh_info = {rl_id: np.concatenate((self.state_util(rl_id), self.veh_statistics(rl_id))) @@ -131,8 +139,6 @@ def get_state(self): return veh_info def compute_reward(self, rl_actions, **kwargs): - # TODO(@evinitsky) we need something way better than this. Something that adds - # in notions of local reward """See class definition.""" # in the warmup steps if rl_actions is None: @@ -140,6 +146,7 @@ def compute_reward(self, rl_actions, **kwargs): rewards = {} if self.env_params.additional_params["local_reward"]: + des_speed = self.env_params.additional_params["target_velocity"] for rl_id in self.k.vehicle.get_rl_ids(): rewards[rl_id] = 0 speeds = [] @@ -150,7 +157,8 @@ def compute_reward(self, rl_actions, **kwargs): speeds.append(self.k.vehicle.get_speed(rl_id)) if len(speeds) > 0: # rescale so the q function can estimate it quickly - rewards[rl_id] = np.mean(speeds) / 500.0 + rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed))**2 + for speed in speeds]) / (des_speed**2) else: for rl_id in self.k.vehicle.get_rl_ids(): if self.env_params.evaluate: @@ -194,10 +202,6 @@ def additional_command(self): lead_id = self.k.vehicle.get_leader(rl_id) if lead_id: self.k.vehicle.set_observed(lead_id) - # follower - follow_id = self.k.vehicle.get_follower(rl_id) - if follow_id: - self.k.vehicle.set_observed(follow_id) def state_util(self, rl_id): """Return an array of headway, tailway, leader speed, follower speed. 
@@ -238,3 +242,24 @@ def veh_statistics(self, rl_id): speed = self.k.vehicle.get_speed(rl_id) / 100.0 lane = (self.k.vehicle.get_lane(rl_id) + 1) / 10.0 return np.array([speed, lane]) + + +class MultiStraightRoad(I210MultiEnv): + """Partially observable multi-agent environment for a straight road. Look at superclass for more information.""" + + def __init__(self, env_params, sim_params, network, simulator): + super().__init__(env_params, sim_params, network, simulator) + self.max_lanes = 1 + + def _apply_rl_actions(self, rl_actions): + """See class definition.""" + # in the warmup steps, rl_actions is None + if rl_actions: + rl_ids = [] + accels = [] + for rl_id, actions in rl_actions.items(): + accels.append(actions[0]) + rl_ids.append(rl_id) + + # prevent the AV from blocking the entrance + self.k.vehicle.apply_acceleration(rl_ids, accels) diff --git a/flow/networks/highway.py b/flow/networks/highway.py index c63292067..e1234053c 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -128,7 +128,8 @@ def specify_routes(self, net_params): def specify_edge_starts(self): """See parent class.""" - edgestarts = [("highway_{}".format(i), 0) + length = self.net_params.additional_params["length"] + edgestarts = [("highway_{}".format(i), (length / self.num_edges) * i) for i in range(self.num_edges)] return edgestarts diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index a08ecdf0f..9ac6938d4 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -17,7 +17,7 @@ python time_space_diagram.py .csv .json """ from flow.utils.rllib import get_flow_params -from flow.networks import RingNetwork, FigureEightNetwork, MergeNetwork, I210SubNetwork +from flow.networks import RingNetwork, FigureEightNetwork, MergeNetwork, I210SubNetwork, HighwayNetwork import argparse import csv @@ -37,7 +37,8 @@ RingNetwork, FigureEightNetwork, MergeNetwork, - I210SubNetwork + I210SubNetwork, + HighwayNetwork ] @@ -129,12 +130,14 @@ def get_time_space_data(data, params): assert params['network'] in ACCEPTABLE_NETWORKS, \ 'Network must be one of: ' + ', '.join(ACCEPTABLE_NETWORKS) + # switcher used to compute the positions based on the type of network # switcher used to compute the positions based on the type of network switcher = { RingNetwork: _ring_road, MergeNetwork: _merge, FigureEightNetwork: _figure_eight, - I210SubNetwork: _i210_subnetwork + I210SubNetwork: _i210_subnetwork, + HighwayNetwork: _highway, } # Collect a list of all the unique times. @@ -226,6 +229,74 @@ def _merge(data, params, all_time): return pos, speed, all_time +def _highway(data, params, all_time): + r"""Generate position and speed data for the highway subnetwork. + + Parameters + ---------- + data : dict of dict + Key = "veh_id": name of the vehicle \n Elements: + * "time": time step at every sample + * "edge": edge ID at every sample + * "pos": relative position at every sample + * "vel": speed at every sample + params : dict + flow-specific parameters + all_time : array_like + a (n_steps,) vector representing the unique time steps in the + simulation + Returns + ------- + as_array + n_steps x n_veh matrix specifying the absolute position of every + vehicle at every time step. Set to zero if the vehicle is not present + in the network at that time step. + as_array + n_steps x n_veh matrix specifying the speed of every vehicle at every + time step. Set to zero if the vehicle is not present in the network at + that time step. 
+ """ + length = params['net'].additional_params['length'] + num_edges = params['net'].additional_params['num_edges'] + edge_len = length / num_edges + edge_starts = {} + for i in range(num_edges): + edge_starts.update({"highway_{}".format(i): i * edge_len, ":edge_{}_0".format(i): i * edge_len}) + + # compute the absolute position + for veh_id in data.keys(): + data[veh_id]['abs_pos'] = _get_abs_pos_1_edge(data[veh_id]['edge'], + data[veh_id]['pos'], + edge_starts) + + # track only vehicles that were around during this time period + # create the output variables + pos = np.zeros((all_time.shape[0], len(data.keys()))) + speed = np.zeros((all_time.shape[0], len(data.keys()))) + observed_row_list = [] + for i, veh_id in enumerate(sorted(data.keys())): + for spd, abs_pos, ti, edge, lane in zip(data[veh_id]['vel'], + data[veh_id]['abs_pos'], + data[veh_id]['time'], + data[veh_id]['edge'], + data[veh_id]['lane']): + # avoid vehicles not on the relevant edges. Also only check the second to + # last lane + if edge not in edge_starts.keys() or ti not in all_time: + continue + else: + if i not in observed_row_list: + observed_row_list.append(i) + ind = np.where(ti == all_time)[0] + pos[ind, i] = abs_pos + speed[ind, i] = spd + + pos = pos[:, observed_row_list] + speed = speed[:, observed_row_list] + + return pos, speed, all_time + + def _ring_road(data, params, all_time): r"""Generate position and speed data for the ring road. @@ -585,7 +656,7 @@ def _get_abs_pos_1_edge(edges, rel_pos, edge_starts): for indx_car in range(pos.shape[1]): unique_car_pos = pos[:, indx_car] - if flow_params['network'] == I210SubNetwork: + if flow_params['network'] == I210SubNetwork or flow_params['network'] == HighwayNetwork: indices = np.where(pos[:, indx_car] != 0)[0] unique_car_speed = speed[indices, indx_car] points = np.array([time[indices], pos[indices, indx_car]]).T.reshape(-1, 1, 2) diff --git a/requirements.txt b/requirements.txt index 191ecc740..c069a6cb6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,4 +27,6 @@ plotly==2.4.0 tabulate tensorflow==1.14.0 awscli==1.16.309 +torch==1.4.0 pytz +tensorboardX diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml index ea84bbee0..9fcf97cb5 100644 --- a/scripts/ray_autoscale.yaml +++ b/scripts/ray_autoscale.yaml @@ -32,7 +32,7 @@ auth: # By default Ray creates a new private keypair, but you can also use your own. # If you do so, make sure to also set "KeyName" in the head and worker node # configurations below. -# ssh_private_key: /path/to/your/key.pem +# ssh_private_key: # Provider-specific config for the head node, e.g. instance type. By default # Ray will auto-configure unspecified fields such as SubnetId and KeyName. @@ -74,6 +74,8 @@ head_setup_commands: - pip install tabulate - pip install boto3==1.10.45 # 1.4.8 adds InstanceMarketOptions - pip install awscli==1.16.309 + - pip install stable-baselines + - pip install torch==1.4.0 - pip install pytz # Custom commands that will be run on worker nodes after common setup. 
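
For intuition about the desired-velocity reward swapped in by the patch above: each agent's reward in `compute_reward` averages the term (des_speed - |speed - des_speed|)^2 / des_speed^2 over itself and the vehicles around it, which equals 1 when a vehicle drives exactly at the target speed and decays quadratically on either side. A minimal standalone sketch of just that term (numpy only; the function name is illustrative, not part of Flow's API):

```python
import numpy as np


def desired_speed_reward(speeds, des_speed):
    """Rescaled speed-tracking term from I210MultiEnv.compute_reward:
    peaks at 1.0 when every vehicle travels at des_speed and decays
    quadratically as speeds deviate from it."""
    speeds = np.asarray(speeds)
    return float(np.mean((des_speed - np.abs(speeds - des_speed)) ** 2) / des_speed ** 2)


print(desired_speed_reward([18.0, 18.0], des_speed=18.0))  # 1.0
print(desired_speed_reward([13.0, 23.0], des_speed=18.0))  # (13/18)^2, roughly 0.52
```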
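The `_highway` helper added to `time_space_diagram.py` above and the corrected `specify_edge_starts` in `flow/networks/highway.py` rely on the same convention: edge `highway_i` (and its internal junction edge) begins at absolute position `i * (length / num_edges)`. A small sketch of that mapping (plain Python; the helper names are illustrative):

```python
def build_edge_starts(length, num_edges):
    """Absolute start position of each highway edge and its internal
    junction edge, as assumed by the _highway helper above."""
    edge_len = length / num_edges
    starts = {}
    for i in range(num_edges):
        starts["highway_{}".format(i)] = i * edge_len
        starts[":edge_{}_0".format(i)] = i * edge_len
    return starts


def abs_pos(edge, rel_pos, edge_starts):
    # absolute position = start of the edge + position along the edge
    return edge_starts[edge] + rel_pos


starts = build_edge_starts(length=2000, num_edges=2)
print(abs_pos("highway_1", 250.0, starts))  # 1250.0
```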
From 95e63a8be6156826cc74b73838fbf519230ae775 Mon Sep 17 00:00:00 2001 From: chendiw <31671291+chendiw@users.noreply.github.com> Date: Tue, 21 Apr 2020 15:14:31 -0700 Subject: [PATCH 023/438] moved imports under functions in train.py (#903) * deleting unworking params from SumoChangeLaneParams * deleted unworking params, sublane working in highway : * moved imports inside functions * Apply suggestions from code review * bug fixes * bug fix Co-authored-by: Aboudy Kreidieh --- examples/train.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/examples/train.py b/examples/train.py index a1288e2f0..652d0efa5 100644 --- a/examples/train.py +++ b/examples/train.py @@ -13,18 +13,6 @@ from time import strftime from copy import deepcopy -from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv -from stable_baselines import PPO2 - -import ray -from ray import tune -from ray.tune import run_experiments -from ray.tune.registry import register_env -try: - from ray.rllib.agents.agent import get_agent_class -except ImportError: - from ray.rllib.agents.registry import get_agent_class - from flow.core.util import ensure_dir from flow.utils.registry import env_constructor from flow.utils.rllib import FlowParamsEncoder, get_flow_params @@ -94,6 +82,9 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ + from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv + from stable_baselines import PPO2 + if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -139,6 +130,13 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ + from ray import tune + from ray.tune.registry import register_env + try: + from ray.rllib.agents.agent import get_agent_class + except ImportError: + from ray.rllib.agents.registry import get_agent_class + horizon = flow_params['env'].horizon alg_run = "PPO" @@ -181,6 +179,9 @@ def setup_exps_rllib(flow_params, def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" + import ray + from ray.tune import run_experiments + flow_params = submodule.flow_params n_cpus = submodule.N_CPUS n_rollouts = submodule.N_ROLLOUTS @@ -216,7 +217,7 @@ def train_h_baselines(flow_params, args, multiagent): """Train policies using SAC and TD3 with h-baselines.""" from hbaselines.algorithms import OffPolicyRLAlgorithm from hbaselines.utils.train import parse_options, get_hyperparameters - from hbaselines.envs.mixed_autonomy.envs import FlowEnv + from hbaselines.envs.mixed_autonomy import FlowEnv flow_params = deepcopy(flow_params) @@ -317,6 +318,9 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" + from stable_baselines.common.vec_env import DummyVecEnv + from stable_baselines import PPO2 + flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] From 1759b027dbf24354e050bce3c6c6705092c2d6ec Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 22 Apr 2020 05:22:01 -0700 Subject: [PATCH 024/438] added more support for lambda function --- examples/data_pipeline.py | 28 ++++++++++++++++++++++++++-- examples/lambda_function.py | 26 ++++++++++++++++++++++++++ examples/simulate.py | 8 +++++++- flow/core/experiment.py | 2 +- 4 files changed, 60 insertions(+), 4 deletions(-) create mode 100644 
examples/lambda_function.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 9d56548c2..28d3b5e73 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -39,6 +39,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based only on flow output + + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file that should be used for + upload to s3 only, it does not the human readable column names and + will be deleted after uploading to s3. A copy of this file with all + the column name will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -47,7 +65,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): return upload_only_file_path -def upload_to_s3(bucket_name, bucket_key, file_path): +def upload_to_s3(bucket_name, bucket_key, file_path, only_query): """ upload a file to S3 bucket Parameters @@ -58,9 +76,15 @@ def upload_to_s3(bucket_name, bucket_key, file_path): the key within the bucket for the file file_path: str the path of the file to be uploaded + only_query: str + specify which query should be run on this file by lambda: + if empty: run none of them + if "all": run all available analysis query + if a string of list of queries: run only those mentioned in the list """ s3 = boto3.resource("s3") - s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + s3.Bucket(bucket_name).upload_file(file_path, bucket_key, + ExtraArgs={"Metadata": {"run-query": only_query}}) return diff --git a/examples/lambda_function.py b/examples/lambda_function.py new file mode 100644 index 000000000..01ce1512a --- /dev/null +++ b/examples/lambda_function.py @@ -0,0 +1,26 @@ +import boto3 +from urllib.parse import unquote_plus +from examples.data_pipeline import AthenaQuery +from examples.query import tags + +s3 = boto3.client('s3') +queryEngine = AthenaQuery() + + +def lambda_handler(event, context): + for record in event['Records']: + bucket = record['s3']['bucket']['name'] + key = unquote_plus(record['s3']['object']['key']) + partition = key.split('/')[-2].split('=')[-1] + response = s3.head_object(Bucket=bucket, Key=key) + run_query = response["Metadata"]["run-query"] + + if bucket == 'brent.experiments' and 'trajectory-output/' in key: + if run_query == "all": + query_list = tags["analysis"] + elif not run_query: + break + else: + query_list = run_query.split("\', \'") + for query_name in query_list: + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file diff --git a/examples/simulate.py b/examples/simulate.py index 6ad0048eb..40e04d4f6 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -51,6 +51,12 @@ def parse_args(args): help='Specifies the name of the partition to store the output' 'file on S3. 
Putting not None value for this argument' 'automatically set gen_emission to True.') + parser.add_argument( + '--only_query', + nargs='*', default="[\'all\']", + help='specify which query should be run by lambda' + 'for detail, see upload_to_s3 in data_pipeline.py' + ) return parser.parse_known_args(args)[0] @@ -82,4 +88,4 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. - exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, only_query=flags.only_query) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 63c92e798..12391f9ae 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. 
""" self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = self.env.network.name for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. 
veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. os.remove(emission_path) trajectory_table_path = emission_path[:-4] + "_trajectory.csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1], upload_file_path) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) 
# see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. 
only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. 
for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file From e84952580b1c7aeb3809593313121169872790d2 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 02:54:33 -0700 Subject: [PATCH 025/438] fix windoes line ending issue with experiment.py --- flow/core/experiment.py | 240 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 239 insertions(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 12391f9ae..80d607e7d 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1,239 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. 
The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" +from flow.core.util import emission_to_csv +from flow.utils.registry import make_create_env +from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 +import datetime +import logging +import time +import os +import numpy as np +import uuid + + +class Experiment: + """ + Class for systematically running simulations in any supported simulator. + + This class acts as a runner for a network and environment. In order to use + it to run an network and environment in the absence of a method specifying + the actions of RL agents in the network, type the following: + + >>> from flow.envs import Env + >>> flow_params = dict(...) # see the examples in exp_config + >>> exp = Experiment(flow_params) # for some experiment configuration + >>> exp.run(num_runs=1) + + If you wish to specify the actions of RL agents in the network, this may be + done as follows: + + >>> rl_actions = lambda state: 0 # replace with something appropriate + >>> exp.run(num_runs=1, rl_actions=rl_actions) + + Finally, if you would like to like to plot and visualize your results, this + class can generate csv files from emission files produced by sumo. These + files will contain the speeds, positions, edges, etc... of every vehicle + in the network at every time step. + + In order to ensure that the simulator constructs an emission file, set the + ``emission_path`` attribute in ``SimParams`` to some path. + + >>> from flow.core.params import SimParams + >>> flow_params['sim'] = SimParams(emission_path="./data") + + Once you have included this in your environment, run your Experiment object + as follows: + + >>> exp.run(num_runs=1, convert_to_csv=True) + + After the experiment is complete, look at the "./data" directory. There + will be two files, one with the suffix .xml and another with the suffix + .csv. The latter should be easily interpretable from any csv reader (e.g. + Excel), and can be parsed using tools such as numpy and pandas. + + Attributes + ---------- + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we want + to extract from the environment. The lambda will be called at each step + to extract information from the env and it will be stored in a dict + keyed by the str. + env : flow.envs.Env + the environment object the simulator will run + """ + + def __init__(self, flow_params, custom_callables=None): + """Instantiate the Experiment class. + + Parameters + ---------- + flow_params : dict + flow-specific parameters + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we + want to extract from the environment. The lambda will be called at + each step to extract information from the env and it will be stored + in a dict keyed by the str. + """ + self.custom_callables = custom_callables or {} + + # Get the env name and a creator for the environment. 
+ create_env, _ = make_create_env(flow_params) + + # Create the environment. + self.env = create_env() + + logging.info(" Starting experiment {} at {}".format( + self.env.network.name, str(datetime.datetime.utcnow()))) + + logging.info("Initializing environment.") + + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): + """Run the given network for a set number of runs. + + Parameters + ---------- + num_runs : int + number of runs the experiment should perform + rl_actions : method, optional + maps states to actions to be performed by the RL agents (if + there are any) + convert_to_csv : bool + Specifies whether to convert the emission file created by sumo + into a csv file + partition_name: str + Specifies the S3 partition you want to store the output file, + will be used to later for query. If NONE, won't upload output + to S3. + only_query: str + Specifies whether queries should be automatically run the + simulation data when it gets uploaded to s3 + + Returns + ------- + info_dict : dict < str, Any > + contains returns, average speed per step + """ + num_steps = self.env.env_params.horizon + + # raise an error if convert_to_csv is set to True but no emission + # file will be generated, to avoid getting an error at the end of the + # simulation + if convert_to_csv and self.env.sim_params.emission_path is None: + raise ValueError( + 'The experiment was run with convert_to_csv set ' + 'to True, but no emission file will be generated. If you wish ' + 'to generate an emission file, you should set the parameter ' + 'emission_path in the simulation parameters (SumoParams or ' + 'AimsunParams) to the path of the folder where emissions ' + 'output should be generated. If you do not wish to generate ' + 'emissions, set the convert_to_csv parameter to False.') + + # used to store + info_dict = { + "returns": [], + "velocities": [], + "outflows": [], + } + info_dict.update({ + key: [] for key in self.custom_callables.keys() + }) + + if rl_actions is None: + def rl_actions(*_): + return None + + # time profiling information + t = time.time() + times = [] + extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], + "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], + "road_grade": [], "source_id": []} + source_id = uuid.uuid4().hex + + for i in range(num_runs): + ret = 0 + vel = [] + custom_vals = {key: [] for key in self.custom_callables.keys()} + state = self.env.reset() + for j in range(num_steps): + t0 = time.time() + state, reward, done, _ = self.env.step(rl_actions(state)) + t1 = time.time() + times.append(1 / (t1 - t0)) + + # Compute the velocity speeds and cumulative returns. 
+ veh_ids = self.env.k.vehicle.get_ids() + vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) + ret += reward + + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( + self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + position = self.env.k.vehicle.get_2D_position(vid) + extra_info["x"].append(position[0]) + extra_info["y"].append(position[1]) + extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) + extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) + + # Compute the results for the custom callables. + for (key, lambda_func) in self.custom_callables.items(): + custom_vals[key].append(lambda_func(self.env)) + + if done: + break + + # Store the information from the run in info_dict. + outflow = self.env.k.vehicle.get_outflow_rate(int(500)) + info_dict["returns"].append(ret) + info_dict["velocities"].append(np.mean(vel)) + info_dict["outflows"].append(outflow) + for key in custom_vals.keys(): + info_dict[key].append(np.mean(custom_vals[key])) + + print("Round {0}, return: {1}".format(i, ret)) + + # Print the averages/std for all variables in the info_dict. + for key in info_dict.keys(): + print("Average, std {}: {}, {}".format( + key, np.mean(info_dict[key]), np.std(info_dict[key]))) + + print("Total time:", time.time() - t) + print("steps/second:", np.mean(times)) + self.env.terminate() + + if convert_to_csv and self.env.simulator == "traci": + # wait a short period of time to ensure the xml file is readable + time.sleep(0.1) + + # collect the location of the emission file + dir_path = self.env.sim_params.emission_path + emission_filename = \ + "{0}-emission.xml".format(self.env.network.name) + emission_path = os.path.join(dir_path, emission_filename) + + # convert the emission file into a csv + emission_to_csv(emission_path) + + # Delete the .xml version of the emission file. 
+ os.remove(emission_path) + + trajectory_table_path = './data/' + source_id + ".csv" + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) + + # delete the S3-only version of the trajectory file + os.remove(upload_file_path) + + return info_dict From 517499ee2d832deb266a0b86e8785ca105a63547 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:35:54 -0700 Subject: [PATCH 026/438] fix style issue --- examples/data_pipeline.py | 113 ++++++++++++++++-------- examples/datapipeline_test.py | 4 + examples/lambda_function.py | 10 +++ examples/query.py | 11 ++- examples/run_query.py | 1 + flow/controllers/routing_controllers.py | 1 + flow/core/kernel/vehicle/base.py | 12 ++- flow/core/kernel/vehicle/traci.py | 5 ++ 8 files changed, 111 insertions(+), 46 deletions(-) diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 28d3b5e73..03b0f87e5 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,3 +1,4 @@ +"""contains class and helper functions for the data pipeline.""" import pandas as pd import numpy as np import boto3 @@ -7,21 +8,21 @@ def generate_trajectory_table(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based on standard SUMO emission + """Generate desired output for the trajectory_table based on standard SUMO emission. - Parameters - ---------- - data_path : str - path to the standard SUMO emission - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file - """ + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ raw_output = pd.read_csv(data_path, index_col=["time", "id"]) required_cols = {"time", "id", "speed", "x", "y"} raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) @@ -39,24 +40,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based only on flow output - - Parameters - ---------- - data_path : str - output file path - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file that should be used for - upload to s3 only, it does not the human readable column names and - will be deleted after uploading to s3. A copy of this file with all - the column name will remain in the ./data folder - """ + """Generate desired output for the trajectory_table based only on flow output. 
+
+    Parameters
+    ----------
+    data_path : str
+        output file path
+    extra_info: dict
+        extra information needed in the trajectory table, collected from flow
+    partition_name: str
+        the name of the partition to put this output to
+    Returns
+    -------
+    output_file_path: str
+        the local path of the outputted csv file that should be used for
+        upload to s3 only; it does not contain the human-readable column
+        names and will be deleted after uploading to s3. A copy of this file
+        with all the column names will remain in the ./data folder
+    """
     extra_info = pd.DataFrame.from_dict(extra_info)
     # extra_info["partition"] = partition_name
     extra_info.to_csv(data_path, index=False)
@@ -66,7 +67,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name):
 
 
 def upload_to_s3(bucket_name, bucket_key, file_path, only_query):
-    """ upload a file to S3 bucket
+    """Upload a file to S3 bucket.
 
     Parameters
     ----------
@@ -89,15 +90,40 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query):
 
 
 class AthenaQuery:
+    """
+    Class used to run queries.
+
+    Acts as a query engine, maintaining an open session with AWS Athena.
+
+    Attributes
+    ----------
+    MAX_WAIT: int
+        maximum number of seconds to wait before declaring a time-out
+    client: boto3.client
+        the athena client that is used to run the query
+    existing_partitions: list
+        a list of partitions already recorded in Athena's data catalog;
+        this is obtained through a query at the initialization of this class
+        instance.
+    """

    def __init__(self):
+        """Initialize AthenaQuery instance.
+
+        Initialize a client session with AWS Athena and
+        query Athena to obtain existing_partitions.
+        """
        self.MAX_WAIT = 60
        self.client = boto3.client("athena")
        self.existing_partitions = self.get_existing_partitions()

    def get_existing_partitions(self):
-        """prints the existing partitions in the S3 bucket"""
+        """Return the existing partitions in the S3 bucket.

+        Returns
+        -------
+        partitions: a list of existing partitions on S3 bucket
+        """
        response = self.client.start_query_execution(
            QueryString='SHOW PARTITIONS trajectory_table',
            QueryExecutionContext={
@@ -114,7 +140,7 @@
        return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']]

    def check_status(self, execution_id):
-        """ Return the status of the execution with given id
+        """Return the status of the execution with given id.

        Parameters
        ----------
@@ -125,14 +151,13 @@
            status: str
                QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED
        """
-
        response = self.client.get_query_execution(
            QueryExecutionId=execution_id
        )
        return response['QueryExecution']['Status']['State']

    def wait_for_execution(self, execution_id):
-        """ wait for the execution to finish or time-out
+        """Wait for the execution to finish or time-out.

        Parameters
        ----------
@@ -156,7 +181,7 @@
        return True

    def update_partition(self, partition):
-        """ load the given partition to the trajectory_table on Athena
+        """Load the given partition to the trajectory_table on Athena.

        Parameters
        ----------
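Taken together, the methods above form a small session-oriented client. A hypothetical usage sketch, assuming AWS credentials are configured; the bucket, partition, and result-location names are placeholders, not values from the patch:

    # sketch only: how the AthenaQuery methods above compose
    engine = AthenaQuery()                # opens a boto3 Athena session
    if "example_run" not in engine.existing_partitions:
        engine.update_partition("example_run")
    engine.run_query("POWER_DEMAND_MODEL",
                     result_location="s3://some-bucket/query-result/",
                     partition="example_run")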
@@ -176,7 +201,7 @@
        return

    def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"):
-        """ start the execution of a query, does not wait for it to finish
+        """Start the execution of a query, does not wait for it to finish.

        Parameters
        ----------
@@ -218,6 +243,16 @@
 
 
 def test_sql_query(query_name):
+    """Run the given query and check its output against the expected result.
+
+    Parameters
+    ----------
+    query_name : str
+        name of the query in QueryStrings enum that will be tested
+    Raises
+    ------
+    RuntimeError: if timeout
+    """
     if query_name not in testing_functions:
         raise ValueError("no tests supported for this query")
 
diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py
index 564060d3b..ae0ea382f 100644
--- a/examples/datapipeline_test.py
+++ b/examples/datapipeline_test.py
@@ -1,3 +1,4 @@
+"""Functions that calculate the expected result for testing."""
 import math
 
 # Vehicle Mass
@@ -17,10 +18,12 @@
 
 
 def heavyside(inp):
+    """Return 1 if input is positive."""
     return 0 if inp <= 0 else 1
 
 
 def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a):
+    """Calculate the expected power for POWER_DEMAND_MODEL query."""
     acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration)
     accel_and_slope = M * mu * (acceleration + g * math.sin(theta))
     rolling_friction = M * g * C_r * mu
@@ -30,4 +33,5 @@
 
 
 def apply_energy_one(row):
+    """Apply the power calculation to a row of the dataframe."""
     return [row[0], row[1], calculate_power(row[4], row[6])]
\ No newline at end of file
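The calculate_power helper above and the POWER_DEMAND_MODEL query string in the query.py diff below encode the same vehicle power model. Restated once in plain Python with the constants the SQL hard-codes; the sample speed and acceleration are made up for illustration:

    # sketch only: the power-demand model shared by calculate_power and the SQL
    import math

    M, g, C_r = 1200, 9.81, 0.005      # mass (kg), gravity, rolling resistance
    rho_air, A, C_a = 1.225, 2.6, 0.3  # air density, frontal area, drag coeff.

    def power_demand(speed, accel, road_grade=0.0):
        """P = M*v*(0.8 + 0.2*[a>0]*a + g*sin(theta)) + M*g*C_r*v + 0.5*rho*A*C_a*v^3."""
        accel_term = 0.8 + (1 - 0.8) * (1 if accel > 0 else 0) * accel
        return (M * speed * (accel_term + g * math.sin(road_grade))
                + M * g * C_r * speed
                + 0.5 * rho_air * A * C_a * speed ** 3)

    # e.g. 20 m/s with a mild 0.5 m/s^2 acceleration on flat ground
    print(round(power_demand(20.0, 0.5)))  # ~26599 W under this model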
diff --git a/examples/lambda_function.py b/examples/lambda_function.py
index 01ce1512a..4f7937c85 100644
--- a/examples/lambda_function.py
+++ b/examples/lambda_function.py
@@ -1,3 +1,4 @@
+"""Lambda function on AWS Lambda."""
 import boto3
 from urllib.parse import unquote_plus
 from examples.data_pipeline import AthenaQuery
@@ -8,6 +9,15 @@
 
 
 def lambda_handler(event, context):
+    """Invoked by AWS Lambda when triggered by an event.
+
+    Parameters
+    ----------
+    event : dict < str, dict >
+        an S3 event
+    context:
+        not used
+    """
     for record in event['Records']:
         bucket = record['s3']['bucket']['name']
         key = unquote_plus(record['s3']['object']['key'])
diff --git a/examples/query.py b/examples/query.py
index 6354cec3b..0f0ee13b4 100644
--- a/examples/query.py
+++ b/examples/query.py
@@ -1,15 +1,20 @@
+"""stores all the pre-defined query strings."""
 from enum import Enum
 
 from examples.datapipeline_test import apply_energy_one
 
-tags = {"energy": ["ENERGY_ONE"]}
+# tags for different queries
+tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]}
 
-testing_functions = {"ENERGY_ONE": apply_energy_one}
+# specify the function to calculate the expected result of each query
+testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one}
 
 
 class QueryStrings(Enum):
+    """An enumeration of all the pre-defined query strings."""
+
     SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;"
     UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');"
-    ENERGY_ONE = "SELECT id, time, 1200 * speed * " \
+    POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \
                  "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \
                  "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \
                  "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \
                  "FROM trajectory_table " \
                  "WHERE partition_name=\'{partition}\'"
diff --git a/examples/run_query.py b/examples/run_query.py
index ea8839b09..64baa6656 100644
--- a/examples/run_query.py
+++ b/examples/run_query.py
@@ -1,3 +1,4 @@
+"""runner script for invoking query manually."""
 import argparse
 
 from examples.data_pipeline import AthenaQuery, test_sql_query
 from examples.query import QueryStrings
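The --only_query flag from simulate.py, the "run-query" S3 metadata written by upload_to_s3, and the tags dict above form one small protocol that is easy to lose track of across files. A sketch of the round trip, mirroring the serialization these diffs use; the values are illustrative:

    # sketch only: how --only_query travels from simulate.py to the lambda
    only_query = "['all']"                # simulate.py's default for the flag
    metadata = str(only_query)[2:-2]      # "all", as experiment.py encodes it

    tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]}
    if metadata == "all":
        query_list = tags["analysis"]     # run every analysis query
    elif not metadata:
        query_list = []                   # empty metadata: run nothing
    else:
        query_list = metadata.split("', '")  # explicit list of query names
    print(query_list)                     # ['POWER_DEMAND_MODEL']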
diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py
index c880b5bbf..24f8af3f3 100755
--- a/flow/controllers/routing_controllers.py
+++ b/flow/controllers/routing_controllers.py
@@ -127,6 +127,7 @@
 class I210Router(ContinuousRouter):
     """Assists in choosing routes in select cases for the Bay Bridge network.
+
     Extension to the Continuous Router.
 
     Usage
     -----
diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py
index 2a5cf4596..a3972e86a 100644
--- a/flow/core/kernel/vehicle/base.py
+++ b/flow/core/kernel/vehicle/base.py
@@ -669,17 +669,21 @@ def get_max_speed(self, veh_id, error):
     ###########################################################################
 
     def get_accel(self, veh_id):
-        """ see traci class """
+        """Return the acceleration of vehicle with veh_id."""
         raise NotImplementedError
 
     def update_accel_without_noise(self, veh_id, accel_without_noise):
-        """ see traci class """
+        """Update stored acceleration without noise of vehicle with veh_id."""
         raise NotImplementedError
 
     def get_2D_position(self, veh_id, error=-1001):
-        """ see traci class """
+        """Return (x, y) position of vehicle with veh_id."""
         raise NotImplementedError
 
     def get_accel_without_noise(self, veh_id):
-        """ see traci class """
+        """Return the acceleration without noise of vehicle with veh_id."""
+        raise NotImplementedError
+
+    def get_road_grade(self, veh_id):
+        """Return the road-grade of the vehicle with veh_id."""
         raise NotImplementedError
 
diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py
index f5ea8e2eb..9a7e3b485 100644
--- a/flow/core/kernel/vehicle/traci.py
+++ b/flow/core/kernel/vehicle/traci.py
@@ -1098,17 +1098,22 @@ def set_max_speed(self, veh_id, max_speed):
 
     # add for data pipeline
     def get_accel(self, veh_id):
+        """See parent class."""
         return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step
 
     def update_accel_without_noise(self, veh_id, accel_without_noise):
+        """See parent class."""
         self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise
 
     def get_accel_without_noise(self, veh_id):
+        """See parent class."""
         return self.__vehicles[veh_id]["accel_without_noise"]
 
     def get_2D_position(self, veh_id, error=-1001):
+        """See parent class."""
         return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error)
 
     def get_road_grade(self, veh_id):
+        """See parent class."""
         # TODO
         return 0
 
From c429bf267f6ec18ecf1c9647ea637a490438ee36 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Thu, 23 Apr 2020 12:38:47 -0700
Subject: [PATCH 027/438] reorganized file locations

---
 {examples => flow/data_pipeline}/data_pipeline.py     | 0
 {examples => flow/data_pipeline}/datapipeline_test.py | 0
 {examples => flow/data_pipeline}/lambda_function.py   | 0
 {examples => flow/data_pipeline}/query.py             | 0
 {examples => flow/data_pipeline}/run_query.py         | 0
 5 files changed, 0 insertions(+), 0 deletions(-)
 rename {examples => flow/data_pipeline}/data_pipeline.py (100%)
 rename {examples => flow/data_pipeline}/datapipeline_test.py (100%)
 rename {examples => flow/data_pipeline}/lambda_function.py (100%)
 rename {examples => flow/data_pipeline}/query.py (100%)
 rename {examples => flow/data_pipeline}/run_query.py (100%)
diff --git a/examples/data_pipeline.py b/flow/data_pipeline/data_pipeline.py
similarity index 100%
rename from examples/data_pipeline.py
rename to flow/data_pipeline/data_pipeline.py
diff --git a/examples/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py
similarity index 100%
rename from examples/datapipeline_test.py
rename to flow/data_pipeline/datapipeline_test.py
diff --git a/examples/lambda_function.py b/flow/data_pipeline/lambda_function.py
similarity index 100%
rename from examples/lambda_function.py
rename to flow/data_pipeline/lambda_function.py
diff --git a/examples/query.py b/flow/data_pipeline/query.py
similarity index 100%
rename from examples/query.py
rename to
flow/data_pipeline/query.py diff --git a/examples/run_query.py b/flow/data_pipeline/run_query.py similarity index 100% rename from examples/run_query.py rename to flow/data_pipeline/run_query.py From 2177ef6e66af579530a003e961fb5302852bbb33 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:58:44 -0700 Subject: [PATCH 028/438] fix some more style issues --- examples/simulate.py | 3 ++- flow/controllers/base_controller.py | 10 +++++----- flow/controllers/routing_controllers.py | 1 + flow/core/experiment.py | 3 ++- flow/core/kernel/vehicle/base.py | 2 +- flow/core/kernel/vehicle/traci.py | 2 +- flow/data_pipeline/__init__.py | 1 + flow/data_pipeline/data_pipeline.py | 3 +-- flow/data_pipeline/datapipeline_test.py | 2 +- flow/data_pipeline/lambda_function.py | 2 +- flow/data_pipeline/query.py | 12 ++++++------ flow/data_pipeline/run_query.py | 8 ++++---- flow/envs/base.py | 1 - 13 files changed, 26 insertions(+), 24 deletions(-) create mode 100644 flow/data_pipeline/__init__.py diff --git a/examples/simulate.py b/examples/simulate.py index 40e04d4f6..b90360760 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -88,4 +88,5 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. - exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, only_query=flags.only_query) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, + only_query=flags.only_query) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 6e6734764..7adcdf310 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -110,18 +110,18 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested - accel_without_noice = accel + accel_without_noise = accel if self.fail_safe == 'instantaneous': - accel_without_noice = self.get_safe_action_instantaneous(env, accel_without_noice) + accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) elif self.fail_safe == 'safe_velocity': - accel_without_noice = self.get_safe_velocity_action(env, accel_without_noice) - env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noice) + accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) - # run the failsafes, if requested + # run the fail-safes, if requested if self.fail_safe == 'instantaneous': accel = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py index 24f8af3f3..18d6c1842 100755 --- a/flow/controllers/routing_controllers.py +++ b/flow/controllers/routing_controllers.py @@ -125,6 +125,7 @@ def choose_route(self, env): return new_route + class I210Router(ContinuousRouter): """Assists in choosing routes in select cases for the Bay Bridge network. 
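The base_controller.py hunk above is more than a spelling fix of accel_without_noice: it pins down the order of operations, in which the noise-free acceleration is recorded for the data pipeline before Gaussian noise (and then the fail-safes) is applied to the command actually issued. A condensed sketch of that ordering, with Flow's env plumbing and fail-safes elided:

    # sketch only: ordering mirrors get_action after the rename above
    import numpy as np

    def apply_noise(accel, sim_step=0.5, accel_noise=0.5):
        """Return (applied_accel, accel_without_noise)."""
        accel_without_noise = accel   # recorded via update_accel_without_noise
        if accel_noise > 0:
            # noise scales with sqrt(sim_step), so shrinking the step does not
            # inflate the per-second noise injected into the trajectory
            accel = accel + np.sqrt(sim_step) * np.random.normal(0, accel_noise)
        return accel, accel_without_noise

    applied, clean = apply_noise(0.3)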
diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 80d607e7d..aa5028836 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -231,7 +231,8 @@ def rl_actions(*_): if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index a3972e86a..79b1a897a 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -676,7 +676,7 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError - def get_2D_position(self, veh_id, error=-1001): + def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 9a7e3b485..d830a49e9 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1109,7 +1109,7 @@ def get_accel_without_noise(self, veh_id): """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] - def get_2D_position(self, veh_id, error=-1001): + def get_2d_position(self, veh_id, error=-1001): """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) diff --git a/flow/data_pipeline/__init__.py b/flow/data_pipeline/__init__.py new file mode 100644 index 000000000..622e09b06 --- /dev/null +++ b/flow/data_pipeline/__init__.py @@ -0,0 +1 @@ +"""Empty init file to ensure that data_pipeline is recognized as a package""" diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 03b0f87e5..afbc09f92 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -2,8 +2,7 @@ import pandas as pd import numpy as np import boto3 -from botocore.exceptions import ClientError -from examples.query import QueryStrings, testing_functions +from flow.data_pipeline.query import QueryStrings, testing_functions from time import time diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py index ae0ea382f..0e1a50518 100644 --- a/flow/data_pipeline/datapipeline_test.py +++ b/flow/data_pipeline/datapipeline_test.py @@ -34,4 +34,4 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): """Apply the power calculation to a row of the dataframe.""" - return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file + return [row[0], row[1], calculate_power(row[4], row[6])] diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 4f7937c85..afef55a4b 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) diff --git 
a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 0f0ee13b4..af1b51ce7 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -1,6 +1,6 @@ """stores all the pre-defined query strings.""" from enum import Enum -from examples.datapipeline_test import apply_energy_one +from flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} @@ -15,8 +15,8 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ - "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" \ No newline at end of file + "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index 64baa6656..f065a726e 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -1,10 +1,10 @@ """runner script for invoking query manually.""" import argparse -from examples.data_pipeline import AthenaQuery, test_sql_query -from examples.query import QueryStrings +from flow.data_pipeline.data_pipeline import AthenaQuery, test_sql_query +from flow.data_pipeline.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" - "a S3 location") + "a S3 location") parser.add_argument("--run", type=str, nargs="+") parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") parser.add_argument("--partition", type=str, nargs='?', default="default") @@ -34,4 +34,4 @@ for q in QueryStrings: print(q) if args.test_query: - test_sql_query(args.test_query[0]) \ No newline at end of file + test_sql_query(args.test_query[0]) diff --git a/flow/envs/base.py b/flow/envs/base.py index 60eab6ebe..625861afe 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -408,7 +408,6 @@ def step(self, rl_actions): "**********************************************************" ) - # compute the info for each agent infos = {} From 4b8346470714678ed2a50883a32ce60c79681ac6 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 13:02:33 -0700 Subject: [PATCH 029/438] fix one more style issue --- flow/data_pipeline/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/__init__.py b/flow/data_pipeline/__init__.py index 622e09b06..d9d6a6573 100644 --- a/flow/data_pipeline/__init__.py +++ b/flow/data_pipeline/__init__.py @@ -1 +1 @@ -"""Empty init file to ensure that data_pipeline is recognized as a package""" +"""Empty init file to ensure that data_pipeline is recognized as a package.""" From eb67d2804574f20c42f64c974e8df4e8f722532a Mon Sep 17 00:00:00 2001 From: Kathy Jang Date: Tue, 28 Apr 2020 12:57:47 -0700 Subject: [PATCH 030/438] 
New AMI with ray 0.8.0, tensorflow 2.1.0, h-baselines, stable-baselines (#916)
---
 scripts/ray_autoscale.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml
index 9fcf97cb5..d0c9cccbb 100644
--- a/scripts/ray_autoscale.yaml
+++ b/scripts/ray_autoscale.yaml
@@ -40,7 +40,7 @@ auth:
 # http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances
 head_node:
     InstanceType: c4.4xlarge
-    ImageId: ami-09544298704576518 # Flow AMI (Ubuntu)
+    ImageId: ami-0b489700e7f810707 # Flow AMI (Ubuntu)
     InstanceMarketOptions:
         MarketType: spot
     #Additional options can be found in the boto docs, e.g.
@@ -55,7 +55,7 @@ head_node:
 # http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances
 worker_nodes:
     InstanceType: c4.4xlarge
-    ImageId: ami-09544298704576518 # Flow AMI (Ubuntu)
+    ImageId: ami-0b489700e7f810707 # Flow AMI (Ubuntu)

    #Run workers on spot by default. Comment this out to use on-demand.
    InstanceMarketOptions:

From 86761a69dc3445d9850db9f0f0a63ff8b917d725 Mon Sep 17 00:00:00 2001
From: Aboudy Kreidieh
Date: Sat, 2 May 2020 02:51:06 -0700
Subject: [PATCH 031/438] Bando / ghost edge (#917)

* added bando model

* added ghost edge to the highway network

* added highway-single example

* bug fixes

* more tests
---
 examples/exp_configs/non_rl/highway_single.py | 110 ++++++++++++++++++
 flow/controllers/__init__.py | 5 +-
 flow/controllers/car_following_models.py | 83 +++++++++++++
 flow/networks/highway.py | 81 +++++++++++--
 tests/fast_tests/test_controllers.py | 58 ++++++++-
 tests/fast_tests/test_examples.py | 5 +
 tests/fast_tests/test_scenarios.py | 61 +++++++++-
 tests/fast_tests/test_vehicles.py | 16 ++-
 tests/setup_scripts.py | 4 +-
 9 files changed, 406 insertions(+), 17 deletions(-)
 create mode 100644 examples/exp_configs/non_rl/highway_single.py

diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py
new file mode 100644
index 000000000..46b18c0e9
--- /dev/null
+++ b/examples/exp_configs/non_rl/highway_single.py
@@ -0,0 +1,110 @@
+"""Single-lane highway example with a downstream ghost edge.
+
+Runs human-driven vehicles, governed by the Bando follow-the-leader model,
+on a straight highway network.
+"""
+from flow.controllers import BandoFTLController
+from flow.core.params import EnvParams
+from flow.core.params import NetParams
+from flow.core.params import InitialConfig
+from flow.core.params import InFlows
+from flow.core.params import VehicleParams
+from flow.core.params import SumoParams
+from flow.core.params import SumoLaneChangeParams
+from flow.networks import HighwayNetwork
+from flow.envs import TestEnv
+from flow.networks.highway import ADDITIONAL_NET_PARAMS
+
+TRAFFIC_SPEED = 11
+END_SPEED = 16
+TRAFFIC_FLOW = 2056
+HORIZON = 3600
+INCLUDE_NOISE = False
+
+additional_net_params = ADDITIONAL_NET_PARAMS.copy()
+additional_net_params.update({
+    # length of the highway
+    "length": 2500,
+    # number of lanes
+    "lanes": 1,
+    # speed limit for all edges
+    "speed_limit": 30,
+    # number of edges to divide the highway into
+    "num_edges": 2,
+    # whether to include a ghost edge of length 500m. This edge is provided a
+    # different speed limit.
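+    # (editorial note, added for clarity: the ghost edge acts as a slower
+    # downstream boundary; its geometry is added to flow/networks/highway.py
+    # in this same patch)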
+ "use_ghost_edge": True, + # speed limit for the ghost edge + "ghost_speed_limit": END_SPEED +}) + +vehicles = VehicleParams() +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(BandoFTLController, { + 'alpha': .5, + 'beta': 20.0, + 'h_st': 12.0, + 'h_go': 50.0, + 'v_max': 30.0, + 'noise': 1.0 if INCLUDE_NOISE else 0.0, + }), +) + +inflows = InFlows() +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=TRAFFIC_FLOW, + depart_lane="free", + depart_speed=TRAFFIC_SPEED, + name="idm_highway_inflow") + +# SET UP FLOW PARAMETERS + +flow_params = dict( + # name of the experiment + exp_tag='highway-single', + + # name of the flow environment the experiment is running on + env_name=TestEnv, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + restart_instance=False + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) diff --git a/flow/controllers/__init__.py b/flow/controllers/__init__.py index 6cb20077a..4dfcf05b7 100755 --- a/flow/controllers/__init__.py +++ b/flow/controllers/__init__.py @@ -14,7 +14,8 @@ from flow.controllers.base_controller import BaseController from flow.controllers.car_following_models import CFMController, \ BCMController, OVMController, LinearOVM, IDMController, \ - SimCarFollowingController, LACController, GippsController + SimCarFollowingController, LACController, GippsController, \ + BandoFTLController from flow.controllers.velocity_controllers import FollowerStopper, \ PISaturation, NonLocalFollowerStopper @@ -35,5 +36,5 @@ "IDMController", "SimCarFollowingController", "FollowerStopper", "PISaturation", "StaticLaneChanger", "SimLaneChangeController", "ContinuousRouter", "GridRouter", "BayBridgeRouter", "LACController", - "GippsController", "NonLocalFollowerStopper" + "GippsController", "NonLocalFollowerStopper", "BandoFTLController" ] diff --git a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py index f86c546e8..42c9b2a9b 100755 --- a/flow/controllers/car_following_models.py +++ b/flow/controllers/car_following_models.py @@ -580,3 +580,86 @@ def get_accel(self, env): v_next = min(v_acc, v_safe, self.v_desired) return (v_next-v)/env.sim_step + + +class BandoFTLController(BaseController): + """Bando follow-the-leader controller. + + Usage + ----- + See BaseController for usage example. 
+
+    Attributes
+    ----------
+    veh_id : str
+        Vehicle ID for SUMO identification
+    car_following_params : flow.core.params.SumoCarFollowingParams
+        see parent class
+    alpha : float
+        gain on desired velocity to current velocity difference
+        (default: 0.5)
+    beta : float
+        gain on lead car velocity and self velocity difference
+        (default: 20)
+    h_st : float
+        headway for stopping (default: 2)
+    h_go : float
+        headway for full speed (default: 10)
+    v_max : float
+        max velocity (default: 32)
+    want_max_accel : bool
+        whether to return the maximum acceleration when no lead vehicle is
+        present (default: False)
+    time_delay : float
+        time delay (default: 0)
+    noise : float
+        std dev of normal perturbation to the acceleration (default: 0)
+    fail_safe : str
+        type of flow-imposed failsafe the vehicle should possess, defaults
+        to no failsafe (None)
+    """
+
+    def __init__(self,
+                 veh_id,
+                 car_following_params,
+                 alpha=.5,
+                 beta=20,
+                 h_st=2,
+                 h_go=10,
+                 v_max=32,
+                 want_max_accel=False,
+                 time_delay=0,
+                 noise=0,
+                 fail_safe=None):
+        """Instantiate a Bando controller."""
+        BaseController.__init__(
+            self,
+            veh_id,
+            car_following_params,
+            delay=time_delay,
+            fail_safe=fail_safe,
+            noise=noise,
+        )
+        self.veh_id = veh_id
+        self.v_max = v_max
+        self.alpha = alpha
+        self.beta = beta
+        self.h_st = h_st
+        self.h_go = h_go
+        self.want_max_accel = want_max_accel
+
+    def get_accel(self, env):
+        """See parent class."""
+        lead_id = env.k.vehicle.get_leader(self.veh_id)
+        if not lead_id:  # no car ahead
+            if self.want_max_accel:
+                return self.max_accel
+
+        v_l = env.k.vehicle.get_speed(lead_id)
+        v = env.k.vehicle.get_speed(self.veh_id)
+        s = env.k.vehicle.get_headway(self.veh_id)
+        return self.accel_func(v, v_l, s)
+
+    def accel_func(self, v, v_l, s):
+        """Compute the acceleration function."""
+        v_h = self.v_max * ((np.tanh(s/self.h_st-2)+np.tanh(2))/(1+np.tanh(2)))
+        s_dot = v_l - v
+        u = self.alpha * (v_h - v) + self.beta * s_dot/(s**2)
+        return u
diff --git a/flow/networks/highway.py b/flow/networks/highway.py
index c63292067..7e9c18ad5 100644
--- a/flow/networks/highway.py
+++ b/flow/networks/highway.py
@@ -13,7 +13,12 @@
     # speed limit for all edges
     "speed_limit": 30,
     # number of edges to divide the highway into
-    "num_edges": 1
+    "num_edges": 1,
+    # whether to include a ghost edge of length 500m. This edge is provided a
+    # different speed limit.
+    "use_ghost_edge": False,
+    # speed limit for the ghost edge
+    "ghost_speed_limit": 25,
 }
 
 
@@ -29,6 +34,9 @@ class HighwayNetwork(Network):
     * **lanes** : number of lanes in the highway
     * **speed_limit** : max speed limit of the highway
     * **num_edges** : number of edges to divide the highway into
+    * **use_ghost_edge** : whether to include a ghost edge of length 500m. This
+      edge is provided a different speed limit.
+ * **ghost_speed_limit** : speed limit for the ghost edge Usage ----- @@ -62,9 +70,7 @@ def __init__(self, if p not in net_params.additional_params: raise KeyError('Network parameter "{}" not supplied'.format(p)) - self.length = net_params.additional_params["length"] - self.lanes = net_params.additional_params["lanes"] - self.num_edges = net_params.additional_params.get("num_edges", 1) + self.end_length = 500 super().__init__(name, vehicles, net_params, initial_config, traffic_lights) @@ -83,6 +89,13 @@ def specify_nodes(self, net_params): "y": 0 }] + if self.net_params.additional_params["use_ghost_edge"]: + nodes += [{ + "id": "edge_{}".format(num_edges + 1), + "x": length + self.end_length, + "y": 0 + }] + return nodes def specify_edges(self, net_params): @@ -101,12 +114,22 @@ def specify_edges(self, net_params): "length": segment_length }] + if self.net_params.additional_params["use_ghost_edge"]: + edges += [{ + "id": "highway_end", + "type": "highway_end", + "from": "edge_{}".format(num_edges), + "to": "edge_{}".format(num_edges + 1), + "length": self.end_length + }] + return edges def specify_types(self, net_params): """See parent class.""" lanes = net_params.additional_params["lanes"] speed_limit = net_params.additional_params["speed_limit"] + end_speed_limit = net_params.additional_params["ghost_speed_limit"] types = [{ "id": "highwayType", @@ -114,6 +137,13 @@ def specify_types(self, net_params): "speed": speed_limit }] + if self.net_params.additional_params["use_ghost_edge"]: + types += [{ + "id": "highway_end", + "numLanes": lanes, + "speed": end_speed_limit + }] + return types def specify_routes(self, net_params): @@ -123,14 +153,51 @@ def specify_routes(self, net_params): for i in range(num_edges): rts["highway_{}".format(i)] = ["highway_{}".format(j) for j in range(i, num_edges)] + if self.net_params.additional_params["use_ghost_edge"]: + rts["highway_{}".format(i)].append("highway_end") return rts def specify_edge_starts(self): """See parent class.""" - edgestarts = [("highway_{}".format(i), 0) - for i in range(self.num_edges)] - return edgestarts + junction_length = 0.1 + length = self.net_params.additional_params["length"] + num_edges = self.net_params.additional_params.get("num_edges", 1) + + # Add the main edges. + edge_starts = [ + ("highway_{}".format(i), + i * (length / num_edges + junction_length)) + for i in range(num_edges) + ] + + if self.net_params.additional_params["use_ghost_edge"]: + edge_starts += [ + ("highway_end", length + num_edges * junction_length) + ] + + return edge_starts + + def specify_internal_edge_starts(self): + """See parent class.""" + junction_length = 0.1 + length = self.net_params.additional_params["length"] + num_edges = self.net_params.additional_params.get("num_edges", 1) + + # Add the junctions. 
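+        # Each internal (junction) edge ":edge_i" is modeled as
+        # junction_length = 0.1 m long, so the start position of each one is
+        # offset by the junction lengths accumulated before it.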
+ edge_starts = [ + (":edge_{}".format(i + 1), + (i + 1) * length / num_edges + i * junction_length) + for i in range(num_edges - 1) + ] + + if self.net_params.additional_params["use_ghost_edge"]: + edge_starts += [ + (":edge_{}".format(num_edges), + length + (num_edges - 1) * junction_length) + ] + + return edge_starts @staticmethod def gen_custom_start_pos(cls, net_params, initial_config, num_vehicles): diff --git a/tests/fast_tests/test_controllers.py b/tests/fast_tests/test_controllers.py index 76146dbe6..58967cef8 100644 --- a/tests/fast_tests/test_controllers.py +++ b/tests/fast_tests/test_controllers.py @@ -8,7 +8,7 @@ from flow.controllers.routing_controllers import ContinuousRouter from flow.controllers.car_following_models import IDMController, \ OVMController, BCMController, LinearOVM, CFMController, LACController, \ - GippsController + GippsController, BandoFTLController from flow.controllers import FollowerStopper, PISaturation, NonLocalFollowerStopper from tests.setup_scripts import ring_road_exp_setup import os @@ -709,7 +709,7 @@ def test_get_action(self): np.testing.assert_array_almost_equal(requested_accel, expected_accel) -class TestGippsontroller(unittest.TestCase): +class TestGippsController(unittest.TestCase): """ Tests that the Gipps Controller returning mathematically accurate values. """ @@ -765,5 +765,59 @@ def test_get_action(self): np.testing.assert_array_almost_equal(requested_accel, expected_accel) +class TestBandoFTLController(unittest.TestCase): + """ + Tests that the Bando Controller returning mathematically accurate values. + """ + + def setUp(self): + # add a few vehicles to the network using the requested model + # also make sure that the input params are what is expected + contr_params = { + "alpha": .5, + "beta": 20, + "h_st": 2, + "h_go": 10, + "v_max": 32, + "want_max_accel": False, + } + + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(BandoFTLController, contr_params), + routing_controller=(ContinuousRouter, {}), + car_following_params=SumoCarFollowingParams( + accel=15, decel=5), + num_vehicles=5) + + # create the environment and network classes for a ring road + self.env, _, _ = ring_road_exp_setup(vehicles=vehicles) + + def tearDown(self): + # terminate the traci instance + self.env.terminate() + + # free data used by the class + self.env = None + + def test_get_action(self): + self.env.reset() + ids = self.env.k.vehicle.get_ids() + + test_headways = [2, 4, 6, 8, 10] + for i, veh_id in enumerate(ids): + self.env.k.vehicle.set_headway(veh_id, test_headways[i]) + + requested_accel = [ + self.env.k.vehicle.get_acc_controller(veh_id).get_action(self.env) + for veh_id in ids + ] + + expected_accel = [1.649129, 7.853475, 14.057821, 15.70695, 15.959713] + + np.testing.assert_array_almost_equal(requested_accel, expected_accel) + + if __name__ == '__main__': unittest.main() diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index a05fed68e..336c17bf8 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -42,6 +42,7 @@ from examples.exp_configs.non_rl.minicity import flow_params as non_rl_minicity from examples.exp_configs.non_rl.ring import flow_params as non_rl_ring from examples.exp_configs.non_rl.i210_subnetwork import flow_params as non_rl_i210 +from examples.exp_configs.non_rl.highway_single import flow_params as non_rl_highway_single os.environ['TEST_FLAG'] = 'True' os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' @@ -110,6 +111,10 @@ def 
test_i210(self): """Verify that examples/exp_configs/non_rl/i210_subnetwork.py is working.""" self.run_simulation(non_rl_i210) + def test_highway_single(self): + """Verify that examples/exp_configs/non_rl/highway_single.py is working.""" + self.run_simulation(non_rl_highway_single) + @staticmethod def run_simulation(flow_params): # make the horizon small and set render to False diff --git a/tests/fast_tests/test_scenarios.py b/tests/fast_tests/test_scenarios.py index f9dd47c04..d72a50b17 100644 --- a/tests/fast_tests/test_scenarios.py +++ b/tests/fast_tests/test_scenarios.py @@ -5,6 +5,7 @@ from flow.networks import BottleneckNetwork, FigureEightNetwork, \ TrafficLightGridNetwork, HighwayNetwork, RingNetwork, MergeNetwork, \ MiniCityNetwork, MultiRingNetwork +from tests.setup_scripts import highway_exp_setup __all__ = [ "MultiRingNetwork", "MiniCityNetwork" @@ -94,11 +95,69 @@ def test_additional_net_params(self): "length": 1000, "lanes": 4, "speed_limit": 30, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25 } ) ) + def test_ghost_edge(self): + """Validate the functionality of the ghost edge feature.""" + # =================================================================== # + # Without a ghost edge # + # =================================================================== # + + # create the network + env, _, _ = highway_exp_setup( + net_params=NetParams(additional_params={ + "length": 1000, + "lanes": 4, + "speed_limit": 30, + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25 + }) + ) + env.reset() + + # check the network length + self.assertEqual(env.k.network.length(), 1000) + + # check the edge list + self.assertEqual(env.k.network.get_edge_list(), ["highway_0"]) + + # check the speed limits of the edges + self.assertEqual(env.k.network.speed_limit("highway_0"), 30) + + # =================================================================== # + # With a ghost edge # + # =================================================================== # + + # create the network + env, _, _ = highway_exp_setup( + net_params=NetParams(additional_params={ + "length": 1000, + "lanes": 4, + "speed_limit": 30, + "num_edges": 1, + "use_ghost_edge": True, + "ghost_speed_limit": 25 + }) + ) + env.reset() + + # check the network length + self.assertEqual(env.k.network.length(), 1500.1) + + # check the edge list + self.assertEqual(env.k.network.get_edge_list(), + ["highway_0", "highway_end"]) + + # check the speed limits of the edges + self.assertEqual(env.k.network.speed_limit("highway_0"), 30) + self.assertEqual(env.k.network.speed_limit("highway_end"), 25) + class TestRingNetwork(unittest.TestCase): diff --git a/tests/fast_tests/test_vehicles.py b/tests/fast_tests/test_vehicles.py index 485a6a072..b791bba64 100644 --- a/tests/fast_tests/test_vehicles.py +++ b/tests/fast_tests/test_vehicles.py @@ -258,7 +258,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -330,7 +332,9 @@ def test_no_junctions_highway(self): "lanes": 4, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -398,7 +402,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, 
"resolution": 40, - "num_edges": 3 + "num_edges": 3, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -465,7 +471,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 3 + "num_edges": 3, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() diff --git a/tests/setup_scripts.py b/tests/setup_scripts.py index 08d5b2c1e..ac88d2e42 100644 --- a/tests/setup_scripts.py +++ b/tests/setup_scripts.py @@ -343,7 +343,9 @@ def highway_exp_setup(sim_params=None, "lanes": 1, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) From a26c9b7fe7d75f7b13cab40bf99f85c8e7d930b9 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Sun, 3 May 2020 23:47:51 -0700 Subject: [PATCH 032/438] Benchmark fix (#919) * Add the appropriate reward to the grid benchmark back * Put the bottleneck in a congested regime * Bump bottleneck inflows to put it in the congested regime --- flow/benchmarks/README.md | 6 +++--- flow/benchmarks/bottleneck0.py | 2 +- flow/benchmarks/bottleneck1.py | 2 +- flow/benchmarks/bottleneck2.py | 2 +- flow/benchmarks/grid0.py | 4 ++-- flow/benchmarks/grid1.py | 4 ++-- flow/envs/__init__.py | 3 ++- flow/envs/traffic_light_grid.py | 11 +++++++++++ 8 files changed, 23 insertions(+), 11 deletions(-) diff --git a/flow/benchmarks/README.md b/flow/benchmarks/README.md index 963ad5b70..bbcba9414 100644 --- a/flow/benchmarks/README.md +++ b/flow/benchmarks/README.md @@ -38,12 +38,12 @@ inflow = 300 veh/hour/lane S=(915,), A=(25,), T=400. this problem is to learn to avoid the *capacity drop* that is characteristic to bottleneck structures in transportation networks, and maximize the total outflow in a mixed-autonomy setting. -- `flow.benchmarks.bottleneck0` 4 lanes, inflow = 1900 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck0` 4 lanes, inflow = 2500 veh/hour, 10% CAV penetration, no vehicles are allowed to lane change, S=(141,), A=(20,), T=1000. -- `flow.benchmarks.bottleneck1` 4 lanes, inflow = 1900 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck1` 4 lanes, inflow = 2500 veh/hour, 10% CAV penetration, the human drivers follow the standard lane changing model in the simulator, S=(141,), A=(20,), T=1000. -- `flow.benchmarks.bottleneck2` 8 lanes, inflow = 3800 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck2` 8 lanes, inflow = 5000 veh/hour, 10% CAV penetration, no vehicles are allowed to lane change, S=(281,), A=(40,), T=1000. 
## Training on Custom Algorithms diff --git a/flow/benchmarks/bottleneck0.py b/flow/benchmarks/bottleneck0.py index b0e86844c..b07947ad7 100644 --- a/flow/benchmarks/bottleneck0.py +++ b/flow/benchmarks/bottleneck0.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/bottleneck1.py b/flow/benchmarks/bottleneck1.py index 26ae6527a..9c8d9c192 100644 --- a/flow/benchmarks/bottleneck1.py +++ b/flow/benchmarks/bottleneck1.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/bottleneck2.py b/flow/benchmarks/bottleneck2.py index 5052b3b88..4651d448b 100644 --- a/flow/benchmarks/bottleneck2.py +++ b/flow/benchmarks/bottleneck2.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/grid0.py b/flow/benchmarks/grid0.py index 1655c3b3c..5c4ee5349 100644 --- a/flow/benchmarks/grid0.py +++ b/flow/benchmarks/grid0.py @@ -4,7 +4,7 @@ - **Observation Dimension**: (339, ) - **Horizon**: 400 steps """ -from flow.envs import TrafficLightGridPOEnv +from flow.envs import TrafficLightGridBenchmarkEnv from flow.networks import TrafficLightGridNetwork from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ InFlows, SumoCarFollowingParams @@ -68,7 +68,7 @@ exp_tag="grid_0", # name of the flow environment the experiment is running on - env_name=TrafficLightGridPOEnv, + env_name=TrafficLightGridBenchmarkEnv, # name of the network class the experiment is running on network=TrafficLightGridNetwork, diff --git a/flow/benchmarks/grid1.py b/flow/benchmarks/grid1.py index ec2a27454..83055adfd 100644 --- a/flow/benchmarks/grid1.py +++ b/flow/benchmarks/grid1.py @@ -4,7 +4,7 @@ - **Observation Dimension**: (915, ) - **Horizon**: 400 steps """ -from flow.envs import TrafficLightGridPOEnv +from flow.envs import TrafficLightGridBenchmarkEnv from flow.networks import TrafficLightGridNetwork from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ InFlows, SumoCarFollowingParams @@ -68,7 +68,7 @@ exp_tag="grid_1", # name of the flow environment the experiment is running on - env_name=TrafficLightGridPOEnv, + env_name=TrafficLightGridBenchmarkEnv, # name of the network class the experiment is running on network=TrafficLightGridNetwork, diff --git a/flow/envs/__init__.py b/flow/envs/__init__.py index 5befe6a33..611ed3d9a 100755 --- a/flow/envs/__init__.py +++ b/flow/envs/__init__.py @@ -4,7 +4,7 @@ from flow.envs.bottleneck import BottleneckAccelEnv, BottleneckEnv, \ BottleneckDesiredVelocityEnv from flow.envs.traffic_light_grid import TrafficLightGridEnv, \ - TrafficLightGridPOEnv, TrafficLightGridTestEnv + TrafficLightGridPOEnv, TrafficLightGridTestEnv, TrafficLightGridBenchmarkEnv from flow.envs.ring.lane_change_accel import LaneChangeAccelEnv, \ LaneChangeAccelPOEnv from flow.envs.ring.accel import AccelEnv @@ -33,6 +33,7 @@ 'WaveAttenuationPOEnv', 'TrafficLightGridEnv', 'TrafficLightGridPOEnv', + 'TrafficLightGridBenchmarkEnv', 'BottleneckDesiredVelocityEnv', 'TestEnv', 'BayBridgeEnv', diff --git a/flow/envs/traffic_light_grid.py b/flow/envs/traffic_light_grid.py index 53391a329..8be0cb8a5 100644 --- a/flow/envs/traffic_light_grid.py +++ b/flow/envs/traffic_light_grid.py @@ -731,6 +731,17 @@ def 
additional_command(self):
         [self.k.vehicle.set_observed(veh_id) for veh_id in self.observed_ids]
 
 
+class TrafficLightGridBenchmarkEnv(TrafficLightGridPOEnv):
+    """Class used for the benchmarks in `Benchmarks for reinforcement learning in mixed-autonomy traffic`."""
+
+    def compute_reward(self, rl_actions, **kwargs):
+        """See class definition."""
+        if self.env_params.evaluate:
+            return - rewards.min_delay_unscaled(self)
+        else:
+            return rewards.desired_velocity(self)
+
+
 class TrafficLightGridTestEnv(TrafficLightGridEnv):
     """
     Class for use in testing.

From d15f19b0d5ca4529374d62553f4dd677c39f5008 Mon Sep 17 00:00:00 2001
From: Kanaad Parvate
Date: Tue, 5 May 2020 11:14:57 -0700
Subject: [PATCH 033/438] fix train multiagent_i210 (#915)

---
 flow/envs/multiagent/i210.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py
index f931b3bec..09705ccf5 100644
--- a/flow/envs/multiagent/i210.py
+++ b/flow/envs/multiagent/i210.py
@@ -19,7 +19,8 @@
     # whether we use an obs space that contains adjacent lane info or just the lead obs
     "lead_obs": True,
     # whether the reward should come from local vehicles instead of global rewards
-    "local_reward": True
+    "local_reward": True,
+    "target_velocity": 25
 }
 
 
@@ -64,6 +65,7 @@ def __init__(self, env_params, sim_params, network, simulator='traci'):
         super().__init__(env_params, sim_params, network, simulator)
         self.lead_obs = env_params.additional_params.get("lead_obs")
         self.max_lanes = MAX_LANES
+        self.leader = []
 
     @property
     def observation_space(self):

From 50be2d074027fb465fc4a9103b3cc09fb1123ede Mon Sep 17 00:00:00 2001
From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com>
Date: Thu, 7 May 2020 23:51:53 -0700
Subject: [PATCH 034/438] get not departed vehicles (#922)

* added function to kernel/vehicle to get number of not departed vehicles

* fixed over indentation of the docstring

* indentation edit

* pep8

Co-authored-by: AboudyKreidieh
---
 flow/core/kernel/simulation/traci.py | 10 +++++---
 flow/core/kernel/vehicle/base.py | 7 ++++++
 flow/core/kernel/vehicle/traci.py | 37 ++++++++++++++++++++++------
 3 files changed, 43 insertions(+), 11 deletions(-)

diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py
index 0ee29ada6..35b3c2612 100644
--- a/flow/core/kernel/simulation/traci.py
+++ b/flow/core/kernel/simulation/traci.py
@@ -46,9 +46,13 @@ def pass_api(self, kernel_api):
         # subscribe some simulation parameters needed to check for entering,
         # exiting, and colliding vehicles
         self.kernel_api.simulation.subscribe([
-            tc.VAR_DEPARTED_VEHICLES_IDS, tc.VAR_ARRIVED_VEHICLES_IDS,
-            tc.VAR_TELEPORT_STARTING_VEHICLES_IDS, tc.VAR_TIME_STEP,
-            tc.VAR_DELTA_T
+            tc.VAR_DEPARTED_VEHICLES_IDS,
+            tc.VAR_ARRIVED_VEHICLES_IDS,
+            tc.VAR_TELEPORT_STARTING_VEHICLES_IDS,
+            tc.VAR_TIME_STEP,
+            tc.VAR_DELTA_T,
+            tc.VAR_LOADED_VEHICLES_NUMBER,
+            tc.VAR_DEPARTED_VEHICLES_NUMBER
         ])
 
     def simulation_step(self):
diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py
index d9fc773cd..c68d68c3a 100644
--- a/flow/core/kernel/vehicle/base.py
+++ b/flow/core/kernel/vehicle/base.py
@@ -290,6 +290,13 @@ def get_departed_ids(self):
         """Return the ids of vehicles that departed in the last time step."""
         raise NotImplementedError
 
+    def get_num_not_departed(self):
+        """Return the number of vehicles not departed in the last time step.
+
+        This includes vehicles that were loaded but not departed.
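+        (In the TraCI implementation, this counter is accumulated each
+        simulation step from the subscribed loaded/departed vehicle counts,
+        and is reset whenever the simulation is reset.)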
+ """ + raise NotImplementedError + def get_speed(self, veh_id, error=-1001): """Return the speed of the specified vehicle. diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 50cd106c9..41b5093b2 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -22,7 +22,8 @@ STEPS = 10 rdelta = 255 / STEPS # smoothly go from red to green as the speed increases -color_bins = [[int(255 - rdelta * i), int(rdelta * i), 0] for i in range(STEPS + 1)] +color_bins = [[int(255 - rdelta * i), int(rdelta * i), 0] for i in + range(STEPS + 1)] class TraCIVehicle(KernelVehicle): @@ -56,6 +57,8 @@ def __init__(self, self.num_vehicles = 0 # number of rl vehicles in the network self.num_rl_vehicles = 0 + # number of vehicles loaded but not departed vehicles + self.num_not_departed = 0 # contains the parameters associated with each type of vehicle self.type_parameters = {} @@ -101,6 +104,7 @@ def initialize(self, vehicles): self.minGap = vehicles.minGap self.num_vehicles = 0 self.num_rl_vehicles = 0 + self.num_not_departed = 0 self.__vehicles.clear() for typ in vehicles.initial: @@ -183,11 +187,12 @@ def update(self, reset): self._departed_ids.clear() self._arrived_ids.clear() self._arrived_rl_ids.clear() + self.num_not_departed = 0 # add vehicles from a network template, if applicable if hasattr(self.master_kernel.network.network, "template_vehicles"): - for veh_id in self.master_kernel.network.network.\ + for veh_id in self.master_kernel.network.network. \ template_vehicles: vals = deepcopy(self.master_kernel.network.network. template_vehicles[veh_id]) @@ -212,6 +217,10 @@ def update(self, reset): self._departed_ids.append(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]) self._arrived_ids.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]) + # update the number of not departed vehicles + self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ + sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: try: @@ -321,8 +330,12 @@ def _add_departed(self, veh_id, veh_type): # subscribe the new vehicle self.kernel_api.vehicle.subscribe(veh_id, [ - tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION, tc.VAR_ROAD_ID, - tc.VAR_SPEED, tc.VAR_EDGES, tc.VAR_POSITION, tc.VAR_ANGLE, + tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION, + tc.VAR_ROAD_ID, + tc.VAR_SPEED, + tc.VAR_EDGES, + tc.VAR_POSITION, + tc.VAR_ANGLE, tc.VAR_SPEED_WITHOUT_TRACI ]) self.kernel_api.vehicle.subscribeLeader(veh_id, 2000) @@ -523,6 +536,10 @@ def get_departed_ids(self): else: return 0 + def get_num_not_departed(self): + """See parent class.""" + return self.num_not_departed + def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): @@ -1007,7 +1024,8 @@ def update_vehicle_colors(self): for veh_id in self.get_rl_ids(): try: # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: # color rl vehicles red self.set_color(veh_id=veh_id, color=RED) except (FatalTraCIError, TraCIException) as e: @@ -1018,7 +1036,8 @@ def update_vehicle_colors(self): try: color = CYAN if veh_id in self.get_observed_ids() else WHITE # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. 
- if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color) except (FatalTraCIError, TraCIException) as e: print('Error when updating human vehicle colors:', e) @@ -1028,7 +1047,8 @@ def update_vehicle_colors(self): if 'av' in veh_id: color = RED # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color) except (FatalTraCIError, TraCIException) as e: print('Error when updating human vehicle colors:', e) @@ -1041,7 +1061,8 @@ def update_vehicle_colors(self): veh_speed = self.get_speed(veh_id) bin_index = np.digitize(veh_speed, speed_ranges) # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color_bins[bin_index]) # clear the list of observed vehicles From ba2e214c6d263c2e8b107f8edde1666f3cf282d9 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Fri, 8 May 2020 15:43:03 -0700 Subject: [PATCH 035/438] Add option to reroute exiting vehicles back into the network (#918) Add option to reroute exiting vehicles back into the network --- .../rl/multiagent/multiagent_i210.py | 14 +- .../rl/multiagent/multiagent_straight_road.py | 12 +- .../singleagent/singleagent_straight_road.py | 164 +++++++++++++ flow/envs/__init__.py | 2 + flow/envs/base.py | 13 + flow/envs/multiagent/base.py | 9 + flow/envs/multiagent/i210.py | 105 +++++--- flow/envs/straightroad_env.py | 231 ++++++++++++++++++ flow/utils/rllib.py | 2 +- flow/visualize/visualizer_rllib.py | 4 +- 10 files changed, 510 insertions(+), 46 deletions(-) create mode 100644 examples/exp_configs/rl/singleagent/singleagent_straight_road.py create mode 100644 flow/envs/straightroad_env.py diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 327282e28..b74f64027 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -26,7 +26,7 @@ # SET UP PARAMETERS FOR THE SIMULATION # number of steps per rollout -HORIZON = 4000 +HORIZON = 2000 VEH_PER_HOUR_BASE_119257914 = 10800 VEH_PER_HOUR_BASE_27414345 = 321 @@ -43,7 +43,10 @@ # configure the observation space. Look at the I210MultiEnv class for more info. 
 'lead_obs': True,
     # whether to add in a reward for the speed of nearby vehicles
-    "local_reward": True
+    "local_reward": True,
+    # whether to reroute vehicles once they have exited
+    "reroute_on_exit": True,
+    'target_velocity': 18,
 })

 # CREATE VEHICLE TYPES AND INFLOWS
@@ -114,6 +117,10 @@
     config.PROJECT_PATH,
     "examples/exp_configs/templates/sumo/test2.net.xml")
 
+warmup_steps = 0
+if additional_env_params['reroute_on_exit']:
+    warmup_steps = 400
+
 flow_params = dict(
     # name of the experiment
     exp_tag='I_210_subnetwork',
@@ -140,8 +147,9 @@
     env=EnvParams(
         horizon=HORIZON,
         sims_per_step=1,
-        warmup_steps=0,
+        warmup_steps=warmup_steps,
         additional_params=additional_env_params,
+        done_at_exit=False
     ),
 
     # network-related parameters (see flow.core.params.NetParams and the
diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py
index 9ed38656f..a15471539 100644
--- a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py
+++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py
@@ -48,7 +48,9 @@
     'max_decel': 4.5,
     'target_velocity': 18,
     'local_reward': True,
-    'lead_obs': True
+    'lead_obs': True,
+    # whether to reroute vehicles once they have exited
+    "reroute_on_exit": True
 })
 
 
@@ -69,6 +71,7 @@
 
 # autonomous vehicles
 vehicles.add(
+    color='red',
     veh_id='rl',
     acceleration_controller=(RLController, {}))
 
@@ -92,6 +95,9 @@
     name="rl_highway_inflow")
 
 # SET UP FLOW PARAMETERS
+warmup_steps = 0
+if additional_env_params['reroute_on_exit']:
+    warmup_steps = 400
 
 flow_params = dict(
     # name of the experiment
@@ -109,7 +115,7 @@
     # environment related parameters (see flow.core.params.EnvParams)
     env=EnvParams(
         horizon=HORIZON,
-        warmup_steps=0,
+        warmup_steps=warmup_steps,
         sims_per_step=1,  # do not put more than one
         additional_params=additional_env_params,
     ),
@@ -119,7 +125,7 @@
         sim_step=0.5,
         render=False,
         use_ballistic=True,
-        restart_instance=False
+        restart_instance=True
     ),
 
     # network-related parameters (see flow.core.params.NetParams and the
diff --git a/examples/exp_configs/rl/singleagent/singleagent_straight_road.py b/examples/exp_configs/rl/singleagent/singleagent_straight_road.py
new file mode 100644
index 000000000..265d34d42
--- /dev/null
+++ b/examples/exp_configs/rl/singleagent/singleagent_straight_road.py
@@ -0,0 +1,164 @@
+"""Single-agent straight road example.
+
+Trains a centralized policy that controls up to a fixed number of
+autonomous vehicles on the straight highway network.
+""" +from flow.controllers import RLController, IDMController +from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ + VehicleParams, SumoParams, SumoLaneChangeParams +from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS +from flow.networks import HighwayNetwork +from flow.envs import SingleStraightRoad +from flow.networks.highway import ADDITIONAL_NET_PARAMS +from flow.utils.registry import make_create_env +from ray.tune.registry import register_env + + +# SET UP PARAMETERS FOR THE SIMULATION + +# number of steps per rollout +HORIZON = 2000 + +# inflow rate on the highway in vehicles per hour +HIGHWAY_INFLOW_RATE = 10800 / 5 +# percentage of autonomous vehicles compared to human vehicles on highway +PENETRATION_RATE = 10 + + +# SET UP PARAMETERS FOR THE NETWORK + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params.update({ + # length of the highway + "length": 2000, + # number of lanes + "lanes": 1, + # speed limit for all edges + "speed_limit": 30, + # number of edges to divide the highway into + "num_edges": 2 +}) + + +# SET UP PARAMETERS FOR THE ENVIRONMENT + +additional_env_params = ADDITIONAL_ENV_PARAMS.copy() +additional_env_params.update({ + 'max_accel': 2.6, + 'max_decel': 4.5, + 'target_velocity': 18.0, + 'local_reward': True, + 'lead_obs': True, + "terminate_on_wave": False, + # the environment is not allowed to terminate below this horizon length + 'wave_termination_horizon': 1000, + # the speed below which we consider a wave to have occured + 'wave_termination_speed': 10.0, + # whether the vehicle continues to acquire reward after it exits the system. This causes it to have incentive + # to leave the network in a good state after it leaves + 'reward_after_exit': True +}) + + +# CREATE VEHICLE TYPES AND INFLOWS + +vehicles = VehicleParams() +inflows = InFlows() + +# human vehicles +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), +) + +# autonomous vehicles +vehicles.add( + veh_id='rl', + acceleration_controller=(RLController, {})) + +# add human vehicles on the highway +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23.0", + name="idm_highway_inflow") + +# add autonomous vehicles on the highway +# they will stay on the highway, i.e. 
they won't exit through the off-ramps +inflows.add( + veh_type="rl", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23.0", + name="rl_highway_inflow") + +# SET UP FLOW PARAMETERS +done_at_exit = True +if additional_env_params['reward_after_exit']: + done_at_exit = False + +flow_params = dict( + # name of the experiment + exp_tag='singleagent_highway', + + # name of the flow environment the experiment is running on + env_name=SingleStraightRoad, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, # do not put more than one + done_at_exit=done_at_exit, + additional_params=additional_env_params, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + use_ballistic=True, + restart_instance=True + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) + + +# SET UP RLLIB MULTI-AGENT FEATURES + +create_env, env_name = make_create_env(params=flow_params, version=0) + +# register as rllib env +register_env(env_name, create_env) + +# multiagent configuration +test_env = create_env() +obs_space = test_env.observation_space +act_space = test_env.action_space diff --git a/flow/envs/__init__.py b/flow/envs/__init__.py index 5befe6a33..6f4351cc0 100755 --- a/flow/envs/__init__.py +++ b/flow/envs/__init__.py @@ -11,6 +11,7 @@ from flow.envs.ring.wave_attenuation import WaveAttenuationEnv, \ WaveAttenuationPOEnv from flow.envs.merge import MergePOEnv +from flow.envs.straightroad_env import SingleStraightRoad from flow.envs.test import TestEnv # deprecated classes whose names have changed @@ -36,6 +37,7 @@ 'BottleneckDesiredVelocityEnv', 'TestEnv', 'BayBridgeEnv', + 'SingleStraightRoad', # deprecated classes 'BottleNeckAccelEnv', 'DesiredVelocityEnv', diff --git a/flow/envs/base.py b/flow/envs/base.py index 1abb8a3c9..adc959b9a 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -148,6 +148,10 @@ def __init__(self, self.state = None self.obs_var_labels = [] + # track IDs that have ever been observed in the system + self.observed_ids = set() + self.observed_rl_ids = set() + # simulation step size self.sim_step = sim_params.sim_step @@ -322,6 +326,11 @@ def step(self, rl_actions): contains other diagnostic information from the previous action """ for _ in range(self.env_params.sims_per_step): + # This tracks vehicles that have appeared during warmup steps + if self.time_counter <= self.env_params.sims_per_step * self.env_params.warmup_steps: + self.observed_ids.update(self.k.vehicle.get_ids()) + self.observed_rl_ids.update(self.k.vehicle.get_rl_ids()) + self.time_counter += 1 self.step_counter += 1 @@ -430,6 +439,10 @@ def reset(self): # reset the time counter self.time_counter = 0 + # reset the observed ids + self.observed_ids = set() + self.observed_rl_ids = set() + # Now that 
we've passed the possibly fake init steps some rl libraries # do, we can feel free to actually render things if self.should_render: diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index dfc7c72ad..126107b00 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -49,6 +49,10 @@ def step(self, rl_actions): contains other diagnostic information from the previous action """ for _ in range(self.env_params.sims_per_step): + if self.time_counter <= self.env_params.sims_per_step * self.env_params.warmup_steps: + self.observed_ids.update(self.k.vehicle.get_ids()) + self.observed_rl_ids.update(self.k.vehicle.get_rl_ids()) + self.time_counter += 1 self.step_counter += 1 @@ -103,6 +107,7 @@ def step(self, rl_actions): # stop collecting new simulation steps if there is a collision if crash: + print('A CRASH! A CRASH!!!!!! AAAAAAAAAH!!!!!') break states = self.get_state() @@ -149,6 +154,10 @@ def reset(self, new_inflow_rate=None): # reset the time counter self.time_counter = 0 + # reset the observed ids + self.observed_ids = set() + self.observed_rl_ids = set() + # Now that we've passed the possibly fake init steps some rl libraries # do, we can feel free to actually render things if self.should_render: diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index 09705ccf5..a6e39cdec 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -3,7 +3,6 @@ from gym.spaces import Box import numpy as np -from flow.core.rewards import average_velocity from flow.envs.multiagent.base import MultiEnv # largest number of lanes on any given edge in the network @@ -64,7 +63,11 @@ class I210MultiEnv(MultiEnv): def __init__(self, env_params, sim_params, network, simulator='traci'): super().__init__(env_params, sim_params, network, simulator) self.lead_obs = env_params.additional_params.get("lead_obs") + self.reroute_on_exit = env_params.additional_params.get("reroute_on_exit") self.max_lanes = MAX_LANES + self.num_enter_lanes = 5 + self.entrance_edge = "119257914" + self.exit_edge = "119257908#3" self.leader = [] @property @@ -132,8 +135,8 @@ def get_state(self): else: lead_speed = self.k.vehicle.get_speed(lead_id) headway = self.k.vehicle.get_headway(rl_id) - self.leader.append(lead_id) - veh_info.update({rl_id: np.array([speed / SPEED_SCALE, headway /HEADWAY_SCALE, lead_speed / SPEED_SCALE])}) + veh_info.update({rl_id: np.array([speed / SPEED_SCALE, headway / HEADWAY_SCALE, + lead_speed / SPEED_SCALE])}) else: veh_info = {rl_id: np.concatenate((self.state_util(rl_id), self.veh_statistics(rl_id))) @@ -158,46 +161,25 @@ def compute_reward(self, rl_actions, **kwargs): if self.k.vehicle.get_speed(rl_id) >= 0: speeds.append(self.k.vehicle.get_speed(rl_id)) if len(speeds) > 0: - # rescale so the q function can estimate it quickly - rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed))**2 - for speed in speeds]) / (des_speed**2) + # rescale so the critic can estimate it quickly + rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 + for speed in speeds]) / (des_speed ** 2) else: - for rl_id in self.k.vehicle.get_rl_ids(): - if self.env_params.evaluate: - # reward is speed of vehicle if we are in evaluation mode - reward = self.k.vehicle.get_speed(rl_id) - elif kwargs['fail']: - # reward is 0 if a collision occurred - reward = 0 - else: - # reward high system-level velocities - cost1 = average_velocity(self, fail=kwargs['fail']) - - # penalize small time headways - cost2 = 0 - t_min = 1 # smallest 
acceptable time headway - - lead_id = self.k.vehicle.get_leader(rl_id) - if lead_id not in ["", None] \ - and self.k.vehicle.get_speed(rl_id) > 0: - t_headway = max( - self.k.vehicle.get_headway(rl_id) / - self.k.vehicle.get_speed(rl_id), 0) - cost2 += min((t_headway - t_min) / t_min, 0) - - # weights for cost1, cost2, and cost3, respectively - eta1, eta2 = 1.00, 0.10 - - reward = max(eta1 * cost1 + eta2 * cost2, 0) - - rewards[rl_id] = reward + speeds = self.k.vehicle.get_speed(self.k.vehicle.get_ids()) + des_speed = self.env_params.additional_params["target_velocity"] + # rescale so the critic can estimate it quickly + reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 + for speed in speeds]) / (des_speed ** 2)) + rewards = {rl_id: reward for rl_id in self.k.vehicle.get_rl_ids()} return rewards def additional_command(self): """See parent class. - Define which vehicles are observed for visualization purposes. + Define which vehicles are observed for visualization purposes. Additionally, optionally reroute vehicles + back once they have exited. """ + super().additional_command() # specify observed vehicles for rl_id in self.k.vehicle.get_rl_ids(): # leader @@ -205,6 +187,41 @@ def additional_command(self): if lead_id: self.k.vehicle.set_observed(lead_id) + if self.reroute_on_exit and self.time_counter >= self.env_params.sims_per_step * self.env_params.warmup_steps \ + and not self.env_params.evaluate: + veh_ids = self.k.vehicle.get_ids() + edges = self.k.vehicle.get_edge(veh_ids) + for veh_id, edge in zip(veh_ids, edges): + if edge == "": + continue + if edge[0] == ":": # center edge + continue + # on the exit edge, near the end, and is the vehicle furthest along + if edge == self.exit_edge and \ + (self.k.vehicle.get_position(veh_id) > self.k.network.edge_length(self.exit_edge) - 100) \ + and self.k.vehicle.get_leader(veh_id) is None: + type_id = self.k.vehicle.get_type(veh_id) + # remove the vehicle + self.k.vehicle.remove(veh_id) + lane = np.random.randint(low=0, high=self.num_enter_lanes) + # reintroduce it at the start of the network + # TODO(@evinitsky) select the lane and speed a bit more cleanly + # Note, the position is 10 so you are not overlapping with the inflow car that is being removed. + # this allows the vehicle to be immediately inserted. + self.k.vehicle.add( + veh_id=veh_id, + edge=self.entrance_edge, + type_id=str(type_id), + lane=str(lane), + pos="10.0", + speed="23.0") + + departed_ids = self.k.vehicle.get_departed_ids() + if len(departed_ids) > 0: + for veh_id in departed_ids: + if veh_id not in self.observed_ids: + self.k.vehicle.remove(veh_id) + def state_util(self, rl_id): """Return an array of headway, tailway, leader speed, follower speed. @@ -245,13 +262,27 @@ def veh_statistics(self, rl_id): lane = (self.k.vehicle.get_lane(rl_id) + 1) / 10.0 return np.array([speed, lane]) + def step(self, rl_actions): + """See parent class for more details; add option to reroute vehicles.""" + state, reward, done, info = super().step(rl_actions) + # handle the edge case where a vehicle hasn't been put back when the rollout terminates + if self.reroute_on_exit and done['__all__']: + for rl_id in self.observed_rl_ids: + if rl_id not in state.keys(): + done[rl_id] = True + reward[rl_id] = 0 + state[rl_id] = -1 * np.ones(self.observation_space.shape[0]) + return state, reward, done, info + class MultiStraightRoad(I210MultiEnv): """Partially observable multi-agent environment for a straight road. 
Look at superclass for more information.""" def __init__(self, env_params, sim_params, network, simulator): super().__init__(env_params, sim_params, network, simulator) - self.max_lanes = 1 + self.num_enter_lanes = 1 + self.entrance_edge = self.network.routes['highway_0'][0][0][0] + self.exit_edge = self.network.routes['highway_0'][0][0][-1] def _apply_rl_actions(self, rl_actions): """See class definition.""" diff --git a/flow/envs/straightroad_env.py b/flow/envs/straightroad_env.py new file mode 100644 index 000000000..92fbb855b --- /dev/null +++ b/flow/envs/straightroad_env.py @@ -0,0 +1,231 @@ +"""Environment for training vehicles to reduce congestion in the I210.""" + +from gym.spaces import Box +import numpy as np + +from flow.envs.base import Env + +# largest number of lanes on any given edge in the network +MAX_LANES = 6 +MAX_NUM_VEHS = 8 +SPEED_SCALE = 50 +HEADWAY_SCALE = 1000 + +ADDITIONAL_ENV_PARAMS = { + # maximum acceleration for autonomous vehicles, in m/s^2 + "max_accel": 1, + # maximum deceleration for autonomous vehicles, in m/s^2 + "max_decel": 1, + # whether we use an obs space that contains adjacent lane info or just the lead obs + "lead_obs": True, + # whether the reward should come from local vehicles instead of global rewards + "local_reward": True, + # if the environment terminates once a wave has occurred + "terminate_on_wave": False, + # the environment is not allowed to terminate below this horizon length + 'wave_termination_horizon': 500, + # the speed below which we consider a wave to have occured + 'wave_termination_speed': 10.0 +} + + +class I210SingleEnv(Env): + """Partially observable single-agent environment for the I-210 subnetworks. + + The policy is shared among the agents, so there can be a non-constant + number of RL vehicles throughout the simulation. + Required from env_params: + * max_accel: maximum acceleration for autonomous vehicles, in m/s^2 + * max_decel: maximum deceleration for autonomous vehicles, in m/s^2 + The following states, actions and rewards are considered for one autonomous + vehicle only, as they will be computed in the same way for each of them. + States + The observation consists of the speeds and bumper-to-bumper headways of + the vehicles immediately preceding and following autonomous vehicles in + all of the preceding lanes as well, a binary value indicating which of + these vehicles is autonomous, and the speed of the autonomous vehicle. + Missing vehicles are padded with zeros. + Actions + The action consists of an acceleration, bound according to the + environment parameters, as well as three values that will be converted + into probabilities via softmax to decide of a lane change (left, none + or right). NOTE: lane changing is currently not enabled. It's a TODO. + Rewards + The reward function encourages proximity of the system-level velocity + to a desired velocity specified in the environment parameters, while + slightly penalizing small time headways among autonomous vehicles. + Termination + A rollout is terminated if the time horizon is reached or if two + vehicles collide into one another. 
+ """ + + def __init__(self, env_params, sim_params, network, simulator='traci'): + super().__init__(env_params, sim_params, network, simulator) + self.lead_obs = env_params.additional_params.get("lead_obs") + self.max_lanes = MAX_LANES + self.total_reward = 0.0 + + @property + def observation_space(self): + """See class definition.""" + # speed, speed of leader, headway + if self.lead_obs: + return Box( + low=-float('inf'), + high=float('inf'), + shape=(3 * MAX_NUM_VEHS,), + dtype=np.float32 + ) + # speed, dist to ego vehicle, binary value which is 1 if the vehicle is + # an AV + else: + leading_obs = 3 * self.max_lanes + follow_obs = 3 * self.max_lanes + + # speed and lane + self_obs = 2 + + return Box( + low=-float('inf'), + high=float('inf'), + shape=(leading_obs + follow_obs + self_obs,), + dtype=np.float32 + ) + + @property + def action_space(self): + """See class definition.""" + return Box( + low=-np.abs(self.env_params.additional_params['max_decel']), + high=self.env_params.additional_params['max_accel'], + shape=(1 * MAX_NUM_VEHS,), # (4,), + dtype=np.float32) + + def _apply_rl_actions(self, rl_actions): + """See class definition.""" + # in the warmup steps, rl_actions is None + if rl_actions is not None: + accels = [] + veh_ids = [] + rl_ids = self.get_sorted_rl_ids() + + for i, rl_id in enumerate(self.rl_id_list): + accels.append(rl_actions[i]) + veh_ids.append(rl_id) + + # lane_change_softmax = np.exp(actions[1:4]) + # lane_change_softmax /= np.sum(lane_change_softmax) + # lane_change_action = np.random.choice([-1, 0, 1], + # p=lane_change_softmax) + + self.k.vehicle.apply_acceleration(rl_ids, accels) + + def get_state(self): + """See class definition.""" + rl_ids = self.get_sorted_rl_ids() + self.rl_id_list = rl_ids + veh_info = np.zeros(self.observation_space.shape[0]) + per_vehicle_obs = 3 + for i, rl_id in enumerate(rl_ids): + speed = self.k.vehicle.get_speed(rl_id) + lead_id = self.k.vehicle.get_leader(rl_id) + if lead_id in ["", None]: + # in case leader is not visible + lead_speed = SPEED_SCALE + headway = HEADWAY_SCALE + else: + lead_speed = self.k.vehicle.get_speed(lead_id) + headway = self.k.vehicle.get_headway(rl_id) + veh_info[i * per_vehicle_obs: (i + 1) * per_vehicle_obs] = [speed / SPEED_SCALE, + headway / HEADWAY_SCALE, + lead_speed / SPEED_SCALE] + return veh_info + + def compute_reward(self, rl_actions, **kwargs): + """See class definition.""" + # in the warmup steps + if rl_actions is None: + return {} + + rl_ids = self.get_sorted_rl_ids() + + des_speed = self.env_params.additional_params["target_velocity"] + rewards = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 + for speed in self.k.vehicle.get_speed(rl_ids)])) / (des_speed ** 2) + return rewards + + def get_sorted_rl_ids(self): + """Return the MAX_NUM_VEHS closest to the exit.""" + rl_ids = self.k.vehicle.get_rl_ids() + rl_ids = sorted(rl_ids, key=lambda veh_id: self.k.vehicle.get_x_by_id(veh_id)) + rl_ids = rl_ids[-MAX_NUM_VEHS:] + return rl_ids + + def additional_command(self): + """Define which vehicles are observed for visualization purposes.""" + # specify observed vehicles + for rl_id in self.k.vehicle.get_rl_ids(): + # leader + lead_id = self.k.vehicle.get_leader(rl_id) + if lead_id: + self.k.vehicle.set_observed(lead_id) + + def state_util(self, rl_id): + """Return an array of headway, tailway, leader speed, follower speed. + + Also return a 1 if leader is rl 0 otherwise, a 1 if follower is rl 0 otherwise. 
If there are fewer than MAX_LANES the extra
+        entries are filled with -1 to disambiguate from zeros.
+        """
+        veh = self.k.vehicle
+        lane_headways = veh.get_lane_headways(rl_id).copy()
+        lane_tailways = veh.get_lane_tailways(rl_id).copy()
+        lane_leader_speed = veh.get_lane_leaders_speed(rl_id).copy()
+        lane_follower_speed = veh.get_lane_followers_speed(rl_id).copy()
+        leader_ids = veh.get_lane_leaders(rl_id).copy()
+        follower_ids = veh.get_lane_followers(rl_id).copy()
+        rl_ids = self.k.vehicle.get_rl_ids()
+        is_leader_rl = [1 if l_id in rl_ids else 0 for l_id in leader_ids]
+        is_follow_rl = [1 if f_id in rl_ids else 0 for f_id in follower_ids]
+        diff = MAX_LANES - len(is_leader_rl)
+        if diff > 0:
+            # the minus 1 disambiguates missing cars from missing lanes
+            lane_headways += diff * [-1]
+            lane_tailways += diff * [-1]
+            lane_leader_speed += diff * [-1]
+            lane_follower_speed += diff * [-1]
+            is_leader_rl += diff * [-1]
+            is_follow_rl += diff * [-1]
+        lane_headways = np.asarray(lane_headways) / 1000
+        lane_tailways = np.asarray(lane_tailways) / 1000
+        lane_leader_speed = np.asarray(lane_leader_speed) / 100
+        lane_follower_speed = np.asarray(lane_follower_speed) / 100
+        return np.concatenate((lane_headways, lane_tailways, lane_leader_speed,
+                               lane_follower_speed, is_leader_rl,
+                               is_follow_rl))
+
+    def veh_statistics(self, rl_id):
+        """Return speed and lane information about the vehicle itself."""
+        speed = self.k.vehicle.get_speed(rl_id) / 100.0
+        lane = (self.k.vehicle.get_lane(rl_id) + 1) / 10.0
+        return np.array([speed, lane])
+
+
+class SingleStraightRoad(I210SingleEnv):
+    """Partially observable single-agent environment for a straight road. Look at superclass for more information."""
+
+    def __init__(self, env_params, sim_params, network, simulator):
+        super().__init__(env_params, sim_params, network, simulator)
+        self.max_lanes = 1
+
+    def step(self, rl_actions):
+        """See parent class."""
+        obs, rew, done, info = super().step(rl_actions)
+        mean_speed = np.nan_to_num(np.mean(self.k.vehicle.get_speed(self.k.vehicle.get_ids())))
+        if self.env_params.additional_params['terminate_on_wave'] and \
+                mean_speed < self.env_params.additional_params['wave_termination_speed'] \
+                and self.time_counter > self.env_params.additional_params['wave_termination_horizon'] \
+                and len(self.k.vehicle.get_ids()) > 0:
+            done = True
+
+        return obs, rew, done, info
diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py
index 7d777d769..ca8072c85 100644
--- a/flow/utils/rllib.py
+++ b/flow/utils/rllib.py
@@ -146,7 +146,7 @@ def get_flow_params(config):
     if flow_params["net"]["inflows"]:
         net.inflows.__dict__ = flow_params["net"]["inflows"].copy()
 
-    if len(net.template) > 0:
+    if net.template and len(net.template) > 0:
         dirname = os.getcwd()
         filename = os.path.join(dirname, '../../examples')
         split = net.template.split('examples')[1][1:]
diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py
index 8c38a91c1..c1dd83193 100644
--- a/flow/visualize/visualizer_rllib.py
+++ b/flow/visualize/visualizer_rllib.py
@@ -166,7 +166,7 @@ def visualizer_rllib(args):
     if multiagent:
         rets = {}
         # map the agent id to its policy
-        policy_map_fn = config['multiagent']['policy_mapping_fn'].func
+        policy_map_fn = config['multiagent']['policy_mapping_fn']
         for key in config['multiagent']['policies'].keys():
             rets[key] = []
     else:
@@ -177,7 +177,7 @@ def visualizer_rllib(args):
     if multiagent:
         state_init = {}
         # map the agent id to its policy
-        policy_map_fn = config['multiagent']['policy_mapping_fn'].func
+        policy_map_fn = config['multiagent']['policy_mapping_fn']
         size = config['model']['lstm_cell_size']
         for key in config['multiagent']['policies'].keys():
             state_init[key] = [np.zeros(size, np.float32),

From 6560f8c66c93f49990025abd0f66362dd2b86597 Mon Sep 17 00:00:00 2001
From: Yasharzf
Date: Fri, 8 May 2020 16:21:11 -0700
Subject: [PATCH 036/438] added lane change class

---
 flow/controllers/__init__.py                |   5 +-
 flow/controllers/base_controller.py         |   9 ++
 flow/controllers/car_following_models.py    |  43 ++++++
 flow/controllers/lane_change_controllers.py | 157 ++++++++++++++++++++
 flow/core/kernel/vehicle/traci.py           |   2 +-
 flow/envs/ring/lane_change_accel.py         |   1 +
 6 files changed, 214 insertions(+), 3 deletions(-)

diff --git a/flow/controllers/__init__.py b/flow/controllers/__init__.py
index 4dfcf05b7..a6c86260a 100755
--- a/flow/controllers/__init__.py
+++ b/flow/controllers/__init__.py
@@ -23,7 +23,7 @@
 from flow.controllers.base_lane_changing_controller import \
     BaseLaneChangeController
 from flow.controllers.lane_change_controllers import StaticLaneChanger, \
-    SimLaneChangeController
+    SimLaneChangeController, AILaneChangeController
 
 # routing controllers
 from flow.controllers.base_routing_controller import BaseRouter
@@ -36,5 +36,6 @@
     "IDMController", "SimCarFollowingController", "FollowerStopper",
     "PISaturation", "StaticLaneChanger", "SimLaneChangeController",
     "ContinuousRouter", "GridRouter", "BayBridgeRouter", "LACController",
-    "GippsController", "NonLocalFollowerStopper", "BandoFTLController"
+    "GippsController", "NonLocalFollowerStopper", "BandoFTLController",
+    "AILaneChangeController"
 ]
diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py
index 41780826b..36cd56721 100755
--- a/flow/controllers/base_controller.py
+++ b/flow/controllers/base_controller.py
@@ -67,6 +67,15 @@ def get_accel(self, env):
         """Return the acceleration of the controller."""
         raise NotImplementedError
 
+    def get_custom_accel(self, this_vel, lead_vel, h):
+        """Return the custom computed acceleration of the controller.
+
+        This method computes the acceleration from custom, caller-supplied
+        state information, while the get_accel() method computes it from the
+        current state information obtained from the environment.
+        """
+        raise NotImplementedError
+
     def get_action(self, env):
         """Convert the get_accel() acceleration into an action.
 
diff --git a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py
index 42c9b2a9b..7ec1ef34a 100755
--- a/flow/controllers/car_following_models.py
+++ b/flow/controllers/car_following_models.py
@@ -87,6 +87,10 @@ def get_accel(self, env):
         return self.k_d*(d_l - self.d_des) + self.k_v*(lead_vel - this_vel) + \
             self.k_c*(self.v_des - this_vel)
 
+    def get_custom_accel(self, this_vel, lead_vel, h):
+        """See parent class."""
+        raise NotImplementedError
+
 
 class BCMController(BaseController):
     """Bilateral car-following model controller.
@@ -175,6 +179,10 @@ def get_accel(self, env):
             self.k_v * ((lead_vel - this_vel) - (this_vel - trail_vel)) + \
             self.k_c * (self.v_des - this_vel)
 
+    def get_custom_accel(self, this_vel, lead_vel, h):
+        """See parent class."""
+        raise NotImplementedError
+
 
 class LACController(BaseController):
     """Linear Adaptive Cruise Control.
@@ -244,6 +252,10 @@ def get_accel(self, env):
 
         return self.a
 
+    def get_custom_accel(self, this_vel, lead_vel, h):
+        """See parent class."""
+        raise NotImplementedError
+
 
 class OVMController(BaseController):
     """Optimal Vehicle Model controller.
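
The get_accel()/get_custom_accel() split added above is the core of this patch: get_accel() reads the ego vehicle's state from the running simulation, while get_custom_accel() evaluates the same car-following law on state supplied by the caller, for example a hypothetical post-lane-change gap. A minimal usage sketch, assuming the IDMController override added later in this patch; the vehicle ID and the numeric state are illustrative only:

# Illustrative sketch, not part of the patch series.
from flow.controllers.car_following_models import IDMController
from flow.core.params import SumoCarFollowingParams

idm = IDMController("veh_0", car_following_params=SumoCarFollowingParams())

# Evaluate the IDM law for a hypothetical state (ego at 10 m/s, leader
# at 8 m/s, 15 m bumper-to-bumper gap) without touching the simulation.
accel = idm.get_custom_accel(this_vel=10.0, lead_vel=8.0, h=15.0)
print(accel)
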
@@ -327,6 +339,10 @@ def get_accel(self, env): return self.alpha * (v_h - this_vel) + self.beta * h_dot + def get_custom_accel(self, this_vel, lead_vel, h): + """See parent class.""" + raise NotImplementedError + class LinearOVM(BaseController): """Linear OVM controller. @@ -396,6 +412,10 @@ def get_accel(self, env): return (v_h - this_vel) / self.adaptation + def get_custom_accel(self, this_vel, lead_vel, h): + """See parent class.""" + raise NotImplementedError + class IDMController(BaseController): """Intelligent Driver Model (IDM) controller. @@ -481,6 +501,21 @@ def get_accel(self, env): return self.a * (1 - (v / self.v0)**self.delta - (s_star / h)**2) + def get_custom_accel(self, this_vel, lead_vel, h): + """See parent class.""" + # in order to deal with ZeroDivisionError + if abs(h) < 1e-3: + h = 1e-3 + + if lead_vel is None: # no car ahead + s_star = 0 + else: + s_star = self.s0 + max( + 0, this_vel * self.T + this_vel * (this_vel - lead_vel) / + (2 * np.sqrt(self.a * self.b))) + + return self.a * (1 - (this_vel / self.v0)**self.delta - (s_star / h)**2) + class SimCarFollowingController(BaseController): """Controller whose actions are purely defined by the simulator. @@ -496,6 +531,10 @@ def get_accel(self, env): """See parent class.""" return None + def get_custom_accel(self, this_vel, lead_vel, h): + """See parent class.""" + raise NotImplementedError + class GippsController(BaseController): """Gipps' Model controller. @@ -581,6 +620,10 @@ def get_accel(self, env): return (v_next-v)/env.sim_step + def get_custom_accel(self, this_vel, lead_vel, h): + """See parent class.""" + raise NotImplementedError + class BandoFTLController(BaseController): """Bando follow-the-leader controller. diff --git a/flow/controllers/lane_change_controllers.py b/flow/controllers/lane_change_controllers.py index 7f7a16ff3..c589be6f8 100755 --- a/flow/controllers/lane_change_controllers.py +++ b/flow/controllers/lane_change_controllers.py @@ -24,3 +24,160 @@ class StaticLaneChanger(BaseLaneChangeController): def get_lane_change_action(self, env): """See parent class.""" return 0 + + +class AILaneChangeController(BaseLaneChangeController): + """A lane-changing controller based on acceleration incentive model. + + Usage + ----- + See base class for usage example. + + Attributes + ---------- + veh_id : str + Vehicle ID for SUMO/Aimsun identification + lane_change_params : flow.core.param.SumoLaneChangeParams + see parent class + left_delta : float + used for the incentive criterion for left lane change (default: 2.6) + right_delta : float + used for the incentive criterion for right lane change (default: 2.7) + left_beta : float + used for the incentive criterion for left lane change (default: 2.6) + right_beta : float + used for the incentive criterion for right lane change (default: 2.7) + """ + def __init__(self, + veh_id, + lane_change_params=None, + left_delta=2.6, + right_delta=2.7, + left_beta=2.6, + right_beta=2.7): + """Instantiate an AI lane-change controller.""" + BaseLaneChangeController.__init__( + self, + veh_id, + lane_change_params, + ) + + self.veh_id = veh_id + self.left_delta = left_delta + self.right_delta = right_delta + self.left_beta = left_beta + self.right_beta = right_beta + + def get_lane_change_action(self, env): + """See parent class.""" + # acceleration if the ego vehicle remains in current lane. 
+ ego_accel_controller = env.k.vehicle.get_acc_controller(self.veh_id) + acc_in_present_lane = ego_accel_controller.get_accel(env) + + # get ego vehicle lane number, and velocity + ego_lane = env.k.vehicle.get_lane(self.veh_id) + ego_vel = env.k.vehicle.get_speed(self.veh_id) + + # get lane leaders, followers, headways, and tailways + lane_leaders = env.k.vehicle.get_lane_leaders(self.veh_id) + lane_followers = env.k.vehicle.get_lane_followers(self.veh_id) + lane_headways = env.k.vehicle.get_lane_headways(self.veh_id) + lane_tailways = env.k.vehicle.get_lane_tailways(self.veh_id) + + # determine left and right lane number + this_edge = env.k.vehicle.get_edge(self.veh_id) + num_lanes = env.k.network.num_lanes(this_edge) + l_lane = ego_lane - 1 if ego_lane > 0 else None + r_lane = ego_lane + 1 if ego_lane < num_lanes - 1 else None + + # compute ego and new follower accelerations if moving to left lane + if l_lane is not None: + # get left leader and follower vehicle ID + l_l = lane_leaders[l_lane] + l_f = lane_followers[l_lane] + + # ego acceleration if the ego vehicle is in the lane to the left + if l_l != '': + # left leader velocity and headway + l_l_vel = env.k.vehicle.get_speed(l_l) + l_l_headway = lane_headways[l_lane] + + acc_in_left_lane = ego_accel_controller.get_custom_accel( + this_vel=ego_vel, + lead_vel=l_l_vel, + h=l_l_headway) + else: # if left lane exists but left leader does not exist + # in this case we assign maximum acceleration + acc_in_left_lane = ego_accel_controller.max_accel # FIXME + + # follower acceleration if the ego vehicle is in the left lane + if l_f != '': + # left follower velocity and headway + l_f_vel = env.k.vehicle.get_speed(l_f) + l_f_tailway = lane_tailways[l_lane] + + l_f_accel_controller = env.k.vehicle.get_acc_controller(l_f) + left_lane_follower_acc = l_f_accel_controller.get_custom_accel( + this_vel=l_f_vel, + lead_vel=ego_vel, + h=l_f_tailway) + else: # if left lane exists but left follower does not exist + # in this case we assign maximum acceleration + left_lane_follower_acc = ego_accel_controller.max_accel + else: + acc_in_left_lane = None + left_lane_follower_acc = None + + # compute ego and new follower accelerations if moving to right lane + if r_lane is not None: + # get right leader and follower vehicle ID + r_l = lane_leaders[r_lane] + r_f = lane_followers[r_lane] + + # ego acceleration if the ego vehicle is in the lane to the right + if r_l != '': + # right leader velocity and headway + r_l_vel = env.k.vehicle.get_speed(r_l) + r_l_headway = lane_headways[r_lane] + + acc_in_right_lane = ego_accel_controller.get_custom_accel( + this_vel=ego_vel, + lead_vel=r_l_vel, + h=r_l_headway) + else: # if right lane exists but right leader does not exist + # assign maximum acceleration + acc_in_right_lane = ego_accel_controller.max_accel + + # follower acceleration if the ego vehicle is in the right lane + if r_f != '': + # right follower velocity and headway + r_f_vel = env.k.vehicle.get_speed(r_f) + r_f_headway = lane_tailways[r_lane] + + r_f_accel_controller = env.k.vehicle.get_acc_controller(r_f) + right_lane_follower_acc = r_f_accel_controller.get_custom_accel( + this_vel=r_f_vel, + lead_vel=ego_vel, + h=r_f_headway) + else: # if right lane exists but right follower does not exist + # assign maximum acceleration + right_lane_follower_acc = ego_accel_controller.max_accel + else: + acc_in_right_lane = None + right_lane_follower_acc = None + + # assert to make sure the CFM have the get_custom_accel() # TODO + + # determine lane change action + if 
l_lane is not None and acc_in_left_lane >= - self.left_beta and \ + left_lane_follower_acc >= -self.left_beta and \ + acc_in_left_lane >= acc_in_present_lane + self.left_delta: + action = 1 + elif r_lane is not None and acc_in_right_lane >= - self.right_beta and \ + right_lane_follower_acc >= -self.right_beta and \ + acc_in_right_lane >= acc_in_present_lane + self.right_delta: + action = -1 + else: + action = 0 + + return action diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 50cd106c9..a196322d6 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -727,7 +727,7 @@ def _multi_lane_headways(self): for lane in range(max_lanes): edge_dict[edge][lane].sort(key=lambda x: x[1]) - for veh_id in self.get_rl_ids(): + for veh_id in self.get_ids(): # FIXME (yf) use tracked vehicles # collect the lane leaders, followers, headways, and tailways for # each vehicle edge = self.get_edge(veh_id) diff --git a/flow/envs/ring/lane_change_accel.py b/flow/envs/ring/lane_change_accel.py index d464fc33d..09dcacbc3 100755 --- a/flow/envs/ring/lane_change_accel.py +++ b/flow/envs/ring/lane_change_accel.py @@ -159,6 +159,7 @@ def additional_command(self): if self.k.vehicle.num_rl_vehicles > 0: for veh_id in self.k.vehicle.get_human_ids(): self.k.vehicle.set_observed(veh_id) + print("lkhkhkhkhkhkhkh") class LaneChangeAccelPOEnv(LaneChangeAccelEnv): From 7398f5014ad990dd7539c52be32a8ca68edb0c8f Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Sat, 9 May 2020 15:31:44 -0700 Subject: [PATCH 037/438] changed _departed_ids, and _arrived_ids in the update function (#926) * changed _departed_ids, and _arrived_ids in the update function * fixed bug in get_departed_ids and get_arrived_ids --- flow/core/kernel/simulation/traci.py | 3 ++- flow/core/kernel/vehicle/traci.py | 27 ++++++++++----------------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py index 35b3c2612..2cd109024 100644 --- a/flow/core/kernel/simulation/traci.py +++ b/flow/core/kernel/simulation/traci.py @@ -52,7 +52,8 @@ def pass_api(self, kernel_api): tc.VAR_TIME_STEP, tc.VAR_DELTA_T, tc.VAR_LOADED_VEHICLES_NUMBER, - tc.VAR_DEPARTED_VEHICLES_NUMBER + tc.VAR_DEPARTED_VEHICLES_NUMBER, + tc.VAR_ARRIVED_VEHICLES_NUMBER ]) def simulation_step(self): diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 41b5093b2..d165dbdea 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -71,11 +71,11 @@ def __init__(self, # number of vehicles that entered the network for every time-step self._num_departed = [] - self._departed_ids = [] + self._departed_ids = 0 # number of vehicles to exit the network for every time-step self._num_arrived = [] - self._arrived_ids = [] + self._arrived_ids = 0 self._arrived_rl_ids = [] # whether or not to automatically color vehicles @@ -184,8 +184,8 @@ def update(self, reset): self.prev_last_lc[veh_id] = -float("inf") self._num_departed.clear() self._num_arrived.clear() - self._departed_ids.clear() - self._arrived_ids.clear() + self._departed_ids = 0 + self._arrived_ids = 0 self._arrived_rl_ids.clear() self.num_not_departed = 0 @@ -211,11 +211,10 @@ def update(self, reset): self.__vehicles[veh_id]["last_lc"] = self.time_counter # updated the list of departed and arrived vehicles - self._num_departed.append( - len(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS])) - 
self._num_arrived.append(len(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS])) - self._departed_ids.append(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]) - self._arrived_ids.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]) + self._num_departed.append(sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER]) + self._num_arrived.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_NUMBER]) + self._departed_ids = sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS] + self._arrived_ids = sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS] # update the number of not departed vehicles self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ @@ -517,10 +516,7 @@ def get_num_arrived(self): def get_arrived_ids(self): """See parent class.""" - if len(self._arrived_ids) > 0: - return self._arrived_ids[-1] - else: - return 0 + return self._arrived_ids def get_arrived_rl_ids(self): """See parent class.""" @@ -531,10 +527,7 @@ def get_arrived_rl_ids(self): def get_departed_ids(self): """See parent class.""" - if len(self._departed_ids) > 0: - return self._departed_ids[-1] - else: - return 0 + return self._departed_ids def get_num_not_departed(self): """See parent class.""" From 894bf407ce1bbe0553cdd38da1f93f3b67d75b8f Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Sun, 10 May 2020 17:51:12 -0700 Subject: [PATCH 038/438] added tracked vehicle flag for collecting neighbors --- flow/core/kernel/vehicle/traci.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index a196322d6..d75aaa21e 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -10,6 +10,7 @@ from flow.controllers.car_following_models import SimCarFollowingController from flow.controllers.rlcontroller import RLController from flow.controllers.lane_change_controllers import SimLaneChangeController +from flow.controllers.lane_change_controllers import AILaneChangeController from bisect import bisect_left import itertools from copy import deepcopy @@ -85,6 +86,9 @@ def __init__(self, # old speeds used to compute accelerations self.previous_speeds = {} + # flag to collect lane leaders/followers/headways/tailways for all + self.collect_info_all = False + def initialize(self, vehicles): """Initialize vehicle state information. 
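
The collect_info_all flag introduced above widens the per-step neighbor bookkeeping from RL vehicles to every vehicle whenever an AILaneChangeController is present, since human lane-changers now need lane leaders, followers, headways, and tailways too. A sketch of the toggle that a later hunk in this patch applies inside _multi_lane_headways(); the stub kernel below is illustrative only:

# Illustrative sketch, not part of the patch series.
class StubVehicleKernel:
    """Stand-in for the TraCI vehicle kernel, illustrative only."""

    collect_info_all = False

    def get_ids(self):
        return ["human_0", "human_1", "rl_0"]

    def get_rl_ids(self):
        return ["rl_0"]


kv = StubVehicleKernel()

# While collect_info_all is False only RL vehicles are tracked; it flips
# to True in _add_departed() once an AILaneChangeController appears.
tracked_vehs = kv.get_ids() if kv.collect_info_all else kv.get_rl_ids()
print(tracked_vehs)  # ['rl_0']
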
@@ -299,6 +303,11 @@ def _add_departed(self, veh_id, veh_type): self.__vehicles[veh_id]["lane_changer"] = \ lc_controller[0](veh_id=veh_id, **lc_controller[1]) + # if lane changer is AILaneChangeController, set collect info flag True + if lc_controller[0] == AILaneChangeController and \ + not self.collect_info_all: + self.collect_info_all = True + # specify the routing controller class rt_controller = self.type_parameters[veh_type]["routing_controller"] if rt_controller is not None: @@ -727,7 +736,11 @@ def _multi_lane_headways(self): for lane in range(max_lanes): edge_dict[edge][lane].sort(key=lambda x: x[1]) - for veh_id in self.get_ids(): # FIXME (yf) use tracked vehicles + # get tracked vehicles IDs, for these vehicles info will be collected + tracked_vehs = self.get_ids() if self.collect_info_all \ + else self.get_rl_ids() # FIXME (yf) better collect tracked vehicles + + for veh_id in tracked_vehs: # collect the lane leaders, followers, headways, and tailways for # each vehicle edge = self.get_edge(veh_id) @@ -1094,3 +1107,4 @@ def get_max_speed(self, veh_id, error=-1001): def set_max_speed(self, veh_id, max_speed): """See parent class.""" self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) + From 826be95a9be4defb554e5631a815e9876978f216 Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Sun, 10 May 2020 17:52:14 -0700 Subject: [PATCH 039/438] minor --- flow/envs/ring/lane_change_accel.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flow/envs/ring/lane_change_accel.py b/flow/envs/ring/lane_change_accel.py index 09dcacbc3..d464fc33d 100755 --- a/flow/envs/ring/lane_change_accel.py +++ b/flow/envs/ring/lane_change_accel.py @@ -159,7 +159,6 @@ def additional_command(self): if self.k.vehicle.num_rl_vehicles > 0: for veh_id in self.k.vehicle.get_human_ids(): self.k.vehicle.set_observed(veh_id) - print("lkhkhkhkhkhkhkh") class LaneChangeAccelPOEnv(LaneChangeAccelEnv): From 378da40deb1259141ce45d3dce197bc5039a684f Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Sun, 10 May 2020 19:12:26 -0700 Subject: [PATCH 040/438] added assertion for get_custom_accel() method, fixed no leader in adjacent lane case --- flow/controllers/lane_change_controllers.py | 122 +++++++++++++++----- 1 file changed, 95 insertions(+), 27 deletions(-) diff --git a/flow/controllers/lane_change_controllers.py b/flow/controllers/lane_change_controllers.py index c589be6f8..68920c434 100755 --- a/flow/controllers/lane_change_controllers.py +++ b/flow/controllers/lane_change_controllers.py @@ -1,5 +1,7 @@ """Contains a list of custom lane change controllers.""" +import sys + from flow.controllers.base_lane_changing_controller import \ BaseLaneChangeController @@ -48,6 +50,7 @@ class AILaneChangeController(BaseLaneChangeController): right_beta : float used for the incentive criterion for right lane change (default: 2.7) """ + def __init__(self, veh_id, lane_change_params=None, @@ -60,7 +63,7 @@ def __init__(self, self, veh_id, lane_change_params, - ) + ) self.veh_id = veh_id self.left_delta = left_delta @@ -97,30 +100,64 @@ def get_lane_change_action(self, env): l_f = lane_followers[l_lane] # ego acceleration if the ego vehicle is in the lane to the left - if l_l != '': + if l_l not in ['', None]: # left leader velocity and headway l_l_vel = env.k.vehicle.get_speed(l_l) l_l_headway = lane_headways[l_lane] - acc_in_left_lane = ego_accel_controller.get_custom_accel( - this_vel=ego_vel, - lead_vel=l_l_vel, - h=l_l_headway) + # assert to make sure the CFM have the get_custom_accel() + try: + acc_in_left_lane = 
ego_accel_controller.get_custom_accel( + this_vel=ego_vel, + lead_vel=l_l_vel, + h=l_l_headway) + except NotImplementedError: + print( + "====================================================\n" + "The get_custom_accel() method is not implemented for\n" + "the selected Car Following model. Please implement \n" + " the method or use another Car Following model \n" + "=====================================================") + sys.exit(1) else: # if left lane exists but left leader does not exist - # in this case we assign maximum acceleration - acc_in_left_lane = ego_accel_controller.max_accel # FIXME + # in this case we assign high velocity and headway to the leader + l_l_vel = 30 + l_l_headway = 1000 + try: + acc_in_left_lane = ego_accel_controller.get_custom_accel( + this_vel=ego_vel, + lead_vel=l_l_vel, + h=l_l_headway) + except NotImplementedError: + print( + "====================================================\n" + "The get_custom_accel() method is not implemented for\n" + "the selected Car Following model. Please implement \n" + " the method or use another Car Following model \n" + "=====================================================") + sys.exit(1) # follower acceleration if the ego vehicle is in the left lane - if l_f != '': + if l_f not in ['', None]: # left follower velocity and headway l_f_vel = env.k.vehicle.get_speed(l_f) l_f_tailway = lane_tailways[l_lane] l_f_accel_controller = env.k.vehicle.get_acc_controller(l_f) - left_lane_follower_acc = l_f_accel_controller.get_custom_accel( - this_vel=l_f_vel, - lead_vel=ego_vel, - h=l_f_tailway) + try: + left_lane_follower_acc = l_f_accel_controller. \ + get_custom_accel( + this_vel=l_f_vel, + lead_vel=ego_vel, + h=l_f_tailway) + except NotImplementedError: + print( + "====================================================\n" + "The get_custom_accel() method is not implemented for\n" + "the selected Car Following model. Please implement \n" + " the method or use another Car Following model \n" + "=====================================================") + sys.exit(1) else: # if left lane exists but left follower does not exist # in this case we assign maximum acceleration left_lane_follower_acc = ego_accel_controller.max_accel @@ -135,30 +172,63 @@ def get_lane_change_action(self, env): r_f = lane_followers[r_lane] # ego acceleration if the ego vehicle is in the lane to the right - if r_l != '': + if r_l not in ['', None]: # right leader velocity and headway r_l_vel = env.k.vehicle.get_speed(r_l) r_l_headway = lane_headways[r_lane] - acc_in_right_lane = ego_accel_controller.get_custom_accel( - this_vel=ego_vel, - lead_vel=r_l_vel, - h=r_l_headway) + try: + acc_in_right_lane = ego_accel_controller.get_custom_accel( + this_vel=ego_vel, + lead_vel=r_l_vel, + h=r_l_headway) + except NotImplementedError: + print( + "====================================================\n" + "The get_custom_accel() method is not implemented for\n" + "the selected Car Following model. 
Please implement \n" + " the method or use another Car Following model \n" + "=====================================================") + sys.exit(1) else: # if right lane exists but right leader does not exist - # assign maximum acceleration - acc_in_right_lane = ego_accel_controller.max_accel + # in this case we assign high velocity and headway to the leader + r_l_vel = 30 + r_l_headway = 1000 + try: + acc_in_right_lane = ego_accel_controller.get_custom_accel( + this_vel=ego_vel, + lead_vel=r_l_vel, + h=r_l_headway) + except NotImplementedError: + print( + "====================================================\n" + "The get_custom_accel() method is not implemented for\n" + "the selected Car Following model. Please implement \n" + " the method or use another Car Following model \n" + "=====================================================") + sys.exit(1) # follower acceleration if the ego vehicle is in the right lane - if r_f != '': + if r_f not in ['', None]: # right follower velocity and headway r_f_vel = env.k.vehicle.get_speed(r_f) r_f_headway = lane_tailways[r_lane] r_f_accel_controller = env.k.vehicle.get_acc_controller(r_f) - right_lane_follower_acc = r_f_accel_controller.get_custom_accel( - this_vel=r_f_vel, - lead_vel=ego_vel, - h=r_f_headway) + try: + right_lane_follower_acc = r_f_accel_controller.\ + get_custom_accel( + this_vel=r_f_vel, + lead_vel=ego_vel, + h=r_f_headway) + except NotImplementedError: + print( + "====================================================\n" + "The get_custom_accel() method is not implemented for\n" + "the selected Car Following model. Please implement \n" + " the method or use another Car Following model \n" + "=====================================================") + sys.exit(1) else: # if right lane exists but right follower does not exist # assign maximum acceleration right_lane_follower_acc = ego_accel_controller.max_accel @@ -166,8 +236,6 @@ def get_lane_change_action(self, env): acc_in_right_lane = None right_lane_follower_acc = None - # assert to make sure the CFM have the get_custom_accel() # TODO - # determine lane change action if l_lane is not None and acc_in_left_lane >= - self.left_beta and \ left_lane_follower_acc >= -self.left_beta and \ From 7a2063970027160099a9a1cc7b24703837b85f9d Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Sun, 10 May 2020 19:33:37 -0700 Subject: [PATCH 041/438] revised no leader in adjacent lane --- flow/controllers/lane_change_controllers.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flow/controllers/lane_change_controllers.py b/flow/controllers/lane_change_controllers.py index 68920c434..776ec9ee5 100755 --- a/flow/controllers/lane_change_controllers.py +++ b/flow/controllers/lane_change_controllers.py @@ -120,9 +120,9 @@ def get_lane_change_action(self, env): "=====================================================") sys.exit(1) else: # if left lane exists but left leader does not exist - # in this case we assign high velocity and headway to the leader - l_l_vel = 30 - l_l_headway = 1000 + # in this case we assign None to the leader velocity and headway + l_l_vel = None + l_l_headway = None try: acc_in_left_lane = ego_accel_controller.get_custom_accel( this_vel=ego_vel, @@ -191,9 +191,9 @@ def get_lane_change_action(self, env): "=====================================================") sys.exit(1) else: # if right lane exists but right leader does not exist - # in this case we assign high velocity and headway to the leader - r_l_vel = 30 - r_l_headway = 1000 + # in this case we 
assign None to the leader velocity and headway + r_l_vel = None + r_l_headway = None try: acc_in_right_lane = ego_accel_controller.get_custom_accel( this_vel=ego_vel, From 9d32421c48300f3db42b1abeaf6518f9c2ff75db Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Sun, 10 May 2020 19:40:00 -0700 Subject: [PATCH 042/438] minor edit --- flow/core/kernel/vehicle/traci.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index d75aaa21e..06ac8ab55 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1107,4 +1107,3 @@ def get_max_speed(self, veh_id, error=-1001): def set_max_speed(self, veh_id, max_speed): """See parent class.""" self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) - From 8d0c0201b82bd3a8e89fa3f13a30dc1cd065341b Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Mon, 11 May 2020 10:46:21 -0700 Subject: [PATCH 043/438] bug fix --- flow/controllers/lane_change_controllers.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/flow/controllers/lane_change_controllers.py b/flow/controllers/lane_change_controllers.py index 776ec9ee5..2c2a35524 100755 --- a/flow/controllers/lane_change_controllers.py +++ b/flow/controllers/lane_change_controllers.py @@ -120,9 +120,10 @@ def get_lane_change_action(self, env): "=====================================================") sys.exit(1) else: # if left lane exists but left leader does not exist - # in this case we assign None to the leader velocity and headway + # in this case we assign None to the leader velocity and + # large number to headway l_l_vel = None - l_l_headway = None + l_l_headway = 1000 try: acc_in_left_lane = ego_accel_controller.get_custom_accel( this_vel=ego_vel, @@ -191,9 +192,10 @@ def get_lane_change_action(self, env): "=====================================================") sys.exit(1) else: # if right lane exists but right leader does not exist - # in this case we assign None to the leader velocity and headway + # in this case we assign None to the leader velocity and + # large number to headway r_l_vel = None - r_l_headway = None + r_l_headway = 1000 try: acc_in_right_lane = ego_accel_controller.get_custom_accel( this_vel=ego_vel, From 6ef1f0f231d3a3e643b0ee74b540846826733ff5 Mon Sep 17 00:00:00 2001 From: chendiw <31671291+chendiw@users.noreply.github.com> Date: Tue, 21 Apr 2020 15:14:31 -0700 Subject: [PATCH 044/438] moved imports under functions in train.py (#903) * deleting unworking params from SumoChangeLaneParams * deleted unworking params, sublane working in highway : * moved imports inside functions * Apply suggestions from code review * bug fixes * bug fix Co-authored-by: Aboudy Kreidieh --- examples/train.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index d688f2b9a..9cfaf28c6 100644 --- a/examples/train.py +++ b/examples/train.py @@ -126,6 +126,9 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ + from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv + from stable_baselines import PPO2 + if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -174,6 +177,13 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ + from ray import tune + from ray.tune.registry import register_env + try: + from ray.rllib.agents.agent import get_agent_class + except 
ImportError:
+        from ray.rllib.agents.registry import get_agent_class
+
     horizon = flow_params['env'].horizon
 
     alg_run = flags.algorithm.upper()
@@ -255,6 +265,9 @@ def on_episode_end(info):
 
 def train_rllib(submodule, flags):
     """Train policies using the PPO algorithm in RLlib."""
+    import ray
+    from ray.tune import run_experiments
+
     flow_params = submodule.flow_params
     flow_params['sim'].render = flags.render
     policy_graphs = getattr(submodule, "POLICY_GRAPHS", None)
@@ -301,7 +314,7 @@ def train_h_baselines(flow_params, args, multiagent):
     """Train policies using SAC and TD3 with h-baselines."""
     from hbaselines.algorithms import OffPolicyRLAlgorithm
     from hbaselines.utils.train import parse_options, get_hyperparameters
-    from hbaselines.envs.mixed_autonomy.envs import FlowEnv
+    from hbaselines.envs.mixed_autonomy import FlowEnv
 
     flow_params = deepcopy(flow_params)
 
@@ -402,6 +415,9 @@ def train_h_baselines(flow_params, args, multiagent):
 
 def train_stable_baselines(submodule, flags):
     """Train policies using the PPO algorithm in stable-baselines."""
+    from stable_baselines.common.vec_env import DummyVecEnv
+    from stable_baselines import PPO2
+
     flow_params = submodule.flow_params
     # Path to the saved files
     exp_tag = flow_params['exp_tag']

From 771e504a413cdf0720de6304df7ebc48db43ddca Mon Sep 17 00:00:00 2001
From: Aboudy Kreidieh
Date: Sat, 2 May 2020 02:51:06 -0700
Subject: [PATCH 045/438] Bando / ghost edge (#917)

* added bando model
* added ghost edge to the highway network
* added highway-single example
* bug fixes
* more tests
---
 examples/exp_configs/non_rl/highway_single.py | 110 ++++++++++++++++++
 flow/controllers/__init__.py                  |   5 +-
 flow/controllers/car_following_models.py      |  83 +++++++++++++
 flow/networks/highway.py                      |  80 +++++++++++--
 tests/fast_tests/test_controllers.py          |  58 ++++++++-
 tests/fast_tests/test_examples.py             |   5 +
 tests/fast_tests/test_scenarios.py            |  61 +++++++++-
 tests/fast_tests/test_vehicles.py             |  16 ++-
 tests/setup_scripts.py                        |   4 +-
 9 files changed, 405 insertions(+), 17 deletions(-)
 create mode 100644 examples/exp_configs/non_rl/highway_single.py

diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py
new file mode 100644
index 000000000..46b18c0e9
--- /dev/null
+++ b/examples/exp_configs/non_rl/highway_single.py
@@ -0,0 +1,110 @@
+"""Single-lane highway example with a downstream ghost edge.
+
+Runs fully human-driven (Bando-FTL) traffic on a straight highway whose
+final 500m "ghost" edge carries a lower speed limit.
+"""
+from flow.controllers import BandoFTLController
+from flow.core.params import EnvParams
+from flow.core.params import NetParams
+from flow.core.params import InitialConfig
+from flow.core.params import InFlows
+from flow.core.params import VehicleParams
+from flow.core.params import SumoParams
+from flow.core.params import SumoLaneChangeParams
+from flow.networks import HighwayNetwork
+from flow.envs import TestEnv
+from flow.networks.highway import ADDITIONAL_NET_PARAMS
+
+TRAFFIC_SPEED = 11
+END_SPEED = 16
+TRAFFIC_FLOW = 2056
+HORIZON = 3600
+INCLUDE_NOISE = False
+
+additional_net_params = ADDITIONAL_NET_PARAMS.copy()
+additional_net_params.update({
+    # length of the highway
+    "length": 2500,
+    # number of lanes
+    "lanes": 1,
+    # speed limit for all edges
+    "speed_limit": 30,
+    # number of edges to divide the highway into
+    "num_edges": 2,
+    # whether to include a ghost edge of length 500m. This edge is provided a
+    # different speed limit.
+ "use_ghost_edge": True, + # speed limit for the ghost edge + "ghost_speed_limit": END_SPEED +}) + +vehicles = VehicleParams() +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(BandoFTLController, { + 'alpha': .5, + 'beta': 20.0, + 'h_st': 12.0, + 'h_go': 50.0, + 'v_max': 30.0, + 'noise': 1.0 if INCLUDE_NOISE else 0.0, + }), +) + +inflows = InFlows() +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=TRAFFIC_FLOW, + depart_lane="free", + depart_speed=TRAFFIC_SPEED, + name="idm_highway_inflow") + +# SET UP FLOW PARAMETERS + +flow_params = dict( + # name of the experiment + exp_tag='highway-single', + + # name of the flow environment the experiment is running on + env_name=TestEnv, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + restart_instance=False + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) diff --git a/flow/controllers/__init__.py b/flow/controllers/__init__.py index 6cb20077a..4dfcf05b7 100755 --- a/flow/controllers/__init__.py +++ b/flow/controllers/__init__.py @@ -14,7 +14,8 @@ from flow.controllers.base_controller import BaseController from flow.controllers.car_following_models import CFMController, \ BCMController, OVMController, LinearOVM, IDMController, \ - SimCarFollowingController, LACController, GippsController + SimCarFollowingController, LACController, GippsController, \ + BandoFTLController from flow.controllers.velocity_controllers import FollowerStopper, \ PISaturation, NonLocalFollowerStopper @@ -35,5 +36,5 @@ "IDMController", "SimCarFollowingController", "FollowerStopper", "PISaturation", "StaticLaneChanger", "SimLaneChangeController", "ContinuousRouter", "GridRouter", "BayBridgeRouter", "LACController", - "GippsController", "NonLocalFollowerStopper" + "GippsController", "NonLocalFollowerStopper", "BandoFTLController" ] diff --git a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py index f86c546e8..42c9b2a9b 100755 --- a/flow/controllers/car_following_models.py +++ b/flow/controllers/car_following_models.py @@ -580,3 +580,86 @@ def get_accel(self, env): v_next = min(v_acc, v_safe, self.v_desired) return (v_next-v)/env.sim_step + + +class BandoFTLController(BaseController): + """Bando follow-the-leader controller. + + Usage + ----- + See BaseController for usage example. 
+ + Attributes + ---------- + veh_id : str + Vehicle ID for SUMO identification + car_following_params : flow.core.params.SumoCarFollowingParams + see parent class + alpha : float + gain on desired velocity to current velocity difference + (default: 0.6) + beta : float + gain on lead car velocity and self velocity difference + (default: 0.9) + h_st : float + headway for stopping (default: 5) + h_go : float + headway for full speed (default: 35) + v_max : float + max velocity (default: 30) + time_delay : float + time delay (default: 0.5) + noise : float + std dev of normal perturbation to the acceleration (default: 0) + fail_safe : str + type of flow-imposed failsafe the vehicle should posses, defaults + to no failsafe (None) + """ + + def __init__(self, + veh_id, + car_following_params, + alpha=.5, + beta=20, + h_st=2, + h_go=10, + v_max=32, + want_max_accel=False, + time_delay=0, + noise=0, + fail_safe=None): + """Instantiate an Bando controller.""" + BaseController.__init__( + self, + veh_id, + car_following_params, + delay=time_delay, + fail_safe=fail_safe, + noise=noise, + ) + self.veh_id = veh_id + self.v_max = v_max + self.alpha = alpha + self.beta = beta + self.h_st = h_st + self.h_go = h_go + self.want_max_accel = want_max_accel + + def get_accel(self, env): + """See parent class.""" + lead_id = env.k.vehicle.get_leader(self.veh_id) + if not lead_id: # no car ahead + if self.want_max_accel: + return self.max_accel + + v_l = env.k.vehicle.get_speed(lead_id) + v = env.k.vehicle.get_speed(self.veh_id) + s = env.k.vehicle.get_headway(self.veh_id) + return self.accel_func(v, v_l, s) + + def accel_func(self, v, v_l, s): + """Compute the acceleration function.""" + v_h = self.v_max * ((np.tanh(s/self.h_st-2)+np.tanh(2))/(1+np.tanh(2))) + s_dot = v_l - v + u = self.alpha * (v_h - v) + self.beta * s_dot/(s**2) + return u diff --git a/flow/networks/highway.py b/flow/networks/highway.py index e1234053c..7e9c18ad5 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -13,7 +13,12 @@ # speed limit for all edges "speed_limit": 30, # number of edges to divide the highway into - "num_edges": 1 + "num_edges": 1, + # whether to include a ghost edge of length 500m. This edge is provided a + # different speed limit. + "use_ghost_edge": False, + # speed limit for the ghost edge + "ghost_speed_limit": 25, } @@ -29,6 +34,9 @@ class HighwayNetwork(Network): * **lanes** : number of lanes in the highway * **speed_limit** : max speed limit of the highway * **num_edges** : number of edges to divide the highway into + * **use_ghost_edge** : whether to include a ghost edge of length 500m. This + edge is provided a different speed limit. 
+ * **ghost_speed_limit** : speed limit for the ghost edge Usage ----- @@ -62,9 +70,7 @@ def __init__(self, if p not in net_params.additional_params: raise KeyError('Network parameter "{}" not supplied'.format(p)) - self.length = net_params.additional_params["length"] - self.lanes = net_params.additional_params["lanes"] - self.num_edges = net_params.additional_params.get("num_edges", 1) + self.end_length = 500 super().__init__(name, vehicles, net_params, initial_config, traffic_lights) @@ -83,6 +89,13 @@ def specify_nodes(self, net_params): "y": 0 }] + if self.net_params.additional_params["use_ghost_edge"]: + nodes += [{ + "id": "edge_{}".format(num_edges + 1), + "x": length + self.end_length, + "y": 0 + }] + return nodes def specify_edges(self, net_params): @@ -101,12 +114,22 @@ def specify_edges(self, net_params): "length": segment_length }] + if self.net_params.additional_params["use_ghost_edge"]: + edges += [{ + "id": "highway_end", + "type": "highway_end", + "from": "edge_{}".format(num_edges), + "to": "edge_{}".format(num_edges + 1), + "length": self.end_length + }] + return edges def specify_types(self, net_params): """See parent class.""" lanes = net_params.additional_params["lanes"] speed_limit = net_params.additional_params["speed_limit"] + end_speed_limit = net_params.additional_params["ghost_speed_limit"] types = [{ "id": "highwayType", @@ -114,6 +137,13 @@ def specify_types(self, net_params): "speed": speed_limit }] + if self.net_params.additional_params["use_ghost_edge"]: + types += [{ + "id": "highway_end", + "numLanes": lanes, + "speed": end_speed_limit + }] + return types def specify_routes(self, net_params): @@ -123,15 +153,51 @@ def specify_routes(self, net_params): for i in range(num_edges): rts["highway_{}".format(i)] = ["highway_{}".format(j) for j in range(i, num_edges)] + if self.net_params.additional_params["use_ghost_edge"]: + rts["highway_{}".format(i)].append("highway_end") return rts def specify_edge_starts(self): """See parent class.""" + junction_length = 0.1 + length = self.net_params.additional_params["length"] + num_edges = self.net_params.additional_params.get("num_edges", 1) + + # Add the main edges. + edge_starts = [ + ("highway_{}".format(i), + i * (length / num_edges + junction_length)) + for i in range(num_edges) + ] + + if self.net_params.additional_params["use_ghost_edge"]: + edge_starts += [ + ("highway_end", length + num_edges * junction_length) + ] + + return edge_starts + + def specify_internal_edge_starts(self): + """See parent class.""" + junction_length = 0.1 length = self.net_params.additional_params["length"] - edgestarts = [("highway_{}".format(i), (length / self.num_edges) * i) - for i in range(self.num_edges)] - return edgestarts + num_edges = self.net_params.additional_params.get("num_edges", 1) + + # Add the junctions. 
+ edge_starts = [ + (":edge_{}".format(i + 1), + (i + 1) * length / num_edges + i * junction_length) + for i in range(num_edges - 1) + ] + + if self.net_params.additional_params["use_ghost_edge"]: + edge_starts += [ + (":edge_{}".format(num_edges), + length + (num_edges - 1) * junction_length) + ] + + return edge_starts @staticmethod def gen_custom_start_pos(cls, net_params, initial_config, num_vehicles): diff --git a/tests/fast_tests/test_controllers.py b/tests/fast_tests/test_controllers.py index 76146dbe6..58967cef8 100644 --- a/tests/fast_tests/test_controllers.py +++ b/tests/fast_tests/test_controllers.py @@ -8,7 +8,7 @@ from flow.controllers.routing_controllers import ContinuousRouter from flow.controllers.car_following_models import IDMController, \ OVMController, BCMController, LinearOVM, CFMController, LACController, \ - GippsController + GippsController, BandoFTLController from flow.controllers import FollowerStopper, PISaturation, NonLocalFollowerStopper from tests.setup_scripts import ring_road_exp_setup import os @@ -709,7 +709,7 @@ def test_get_action(self): np.testing.assert_array_almost_equal(requested_accel, expected_accel) -class TestGippsontroller(unittest.TestCase): +class TestGippsController(unittest.TestCase): """ Tests that the Gipps Controller returning mathematically accurate values. """ @@ -765,5 +765,59 @@ def test_get_action(self): np.testing.assert_array_almost_equal(requested_accel, expected_accel) +class TestBandoFTLController(unittest.TestCase): + """ + Tests that the Bando Controller returning mathematically accurate values. + """ + + def setUp(self): + # add a few vehicles to the network using the requested model + # also make sure that the input params are what is expected + contr_params = { + "alpha": .5, + "beta": 20, + "h_st": 2, + "h_go": 10, + "v_max": 32, + "want_max_accel": False, + } + + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(BandoFTLController, contr_params), + routing_controller=(ContinuousRouter, {}), + car_following_params=SumoCarFollowingParams( + accel=15, decel=5), + num_vehicles=5) + + # create the environment and network classes for a ring road + self.env, _, _ = ring_road_exp_setup(vehicles=vehicles) + + def tearDown(self): + # terminate the traci instance + self.env.terminate() + + # free data used by the class + self.env = None + + def test_get_action(self): + self.env.reset() + ids = self.env.k.vehicle.get_ids() + + test_headways = [2, 4, 6, 8, 10] + for i, veh_id in enumerate(ids): + self.env.k.vehicle.set_headway(veh_id, test_headways[i]) + + requested_accel = [ + self.env.k.vehicle.get_acc_controller(veh_id).get_action(self.env) + for veh_id in ids + ] + + expected_accel = [1.649129, 7.853475, 14.057821, 15.70695, 15.959713] + + np.testing.assert_array_almost_equal(requested_accel, expected_accel) + + if __name__ == '__main__': unittest.main() diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index a05fed68e..336c17bf8 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -42,6 +42,7 @@ from examples.exp_configs.non_rl.minicity import flow_params as non_rl_minicity from examples.exp_configs.non_rl.ring import flow_params as non_rl_ring from examples.exp_configs.non_rl.i210_subnetwork import flow_params as non_rl_i210 +from examples.exp_configs.non_rl.highway_single import flow_params as non_rl_highway_single os.environ['TEST_FLAG'] = 'True' os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' @@ -110,6 +111,10 @@ def 
test_i210(self): """Verify that examples/exp_configs/non_rl/i210_subnetwork.py is working.""" self.run_simulation(non_rl_i210) + def test_highway_single(self): + """Verify that examples/exp_configs/non_rl/highway_single.py is working.""" + self.run_simulation(non_rl_highway_single) + @staticmethod def run_simulation(flow_params): # make the horizon small and set render to False diff --git a/tests/fast_tests/test_scenarios.py b/tests/fast_tests/test_scenarios.py index f9dd47c04..d72a50b17 100644 --- a/tests/fast_tests/test_scenarios.py +++ b/tests/fast_tests/test_scenarios.py @@ -5,6 +5,7 @@ from flow.networks import BottleneckNetwork, FigureEightNetwork, \ TrafficLightGridNetwork, HighwayNetwork, RingNetwork, MergeNetwork, \ MiniCityNetwork, MultiRingNetwork +from tests.setup_scripts import highway_exp_setup __all__ = [ "MultiRingNetwork", "MiniCityNetwork" @@ -94,11 +95,69 @@ def test_additional_net_params(self): "length": 1000, "lanes": 4, "speed_limit": 30, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25 } ) ) + def test_ghost_edge(self): + """Validate the functionality of the ghost edge feature.""" + # =================================================================== # + # Without a ghost edge # + # =================================================================== # + + # create the network + env, _, _ = highway_exp_setup( + net_params=NetParams(additional_params={ + "length": 1000, + "lanes": 4, + "speed_limit": 30, + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25 + }) + ) + env.reset() + + # check the network length + self.assertEqual(env.k.network.length(), 1000) + + # check the edge list + self.assertEqual(env.k.network.get_edge_list(), ["highway_0"]) + + # check the speed limits of the edges + self.assertEqual(env.k.network.speed_limit("highway_0"), 30) + + # =================================================================== # + # With a ghost edge # + # =================================================================== # + + # create the network + env, _, _ = highway_exp_setup( + net_params=NetParams(additional_params={ + "length": 1000, + "lanes": 4, + "speed_limit": 30, + "num_edges": 1, + "use_ghost_edge": True, + "ghost_speed_limit": 25 + }) + ) + env.reset() + + # check the network length + self.assertEqual(env.k.network.length(), 1500.1) + + # check the edge list + self.assertEqual(env.k.network.get_edge_list(), + ["highway_0", "highway_end"]) + + # check the speed limits of the edges + self.assertEqual(env.k.network.speed_limit("highway_0"), 30) + self.assertEqual(env.k.network.speed_limit("highway_end"), 25) + class TestRingNetwork(unittest.TestCase): diff --git a/tests/fast_tests/test_vehicles.py b/tests/fast_tests/test_vehicles.py index 485a6a072..b791bba64 100644 --- a/tests/fast_tests/test_vehicles.py +++ b/tests/fast_tests/test_vehicles.py @@ -258,7 +258,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -330,7 +332,9 @@ def test_no_junctions_highway(self): "lanes": 4, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -398,7 +402,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, 
"resolution": 40, - "num_edges": 3 + "num_edges": 3, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -465,7 +471,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 3 + "num_edges": 3, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() diff --git a/tests/setup_scripts.py b/tests/setup_scripts.py index 08d5b2c1e..ac88d2e42 100644 --- a/tests/setup_scripts.py +++ b/tests/setup_scripts.py @@ -343,7 +343,9 @@ def highway_exp_setup(sim_params=None, "lanes": 1, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) From bb3c14cfdbb742eca861a3846d6016aa0b237384 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Sun, 3 May 2020 23:47:51 -0700 Subject: [PATCH 046/438] Benchmark fix (#919) * Add the appropriate reward to the grid benchmark back * Put the bottleneck in a congested regime * Bump bottleneck inflows to put it in the congested regime --- flow/benchmarks/README.md | 6 +++--- flow/benchmarks/bottleneck0.py | 2 +- flow/benchmarks/bottleneck1.py | 2 +- flow/benchmarks/bottleneck2.py | 2 +- flow/benchmarks/grid0.py | 4 ++-- flow/benchmarks/grid1.py | 4 ++-- flow/envs/__init__.py | 3 ++- flow/envs/traffic_light_grid.py | 11 +++++++++++ 8 files changed, 23 insertions(+), 11 deletions(-) diff --git a/flow/benchmarks/README.md b/flow/benchmarks/README.md index 963ad5b70..bbcba9414 100644 --- a/flow/benchmarks/README.md +++ b/flow/benchmarks/README.md @@ -38,12 +38,12 @@ inflow = 300 veh/hour/lane S=(915,), A=(25,), T=400. this problem is to learn to avoid the *capacity drop* that is characteristic to bottleneck structures in transportation networks, and maximize the total outflow in a mixed-autonomy setting. -- `flow.benchmarks.bottleneck0` 4 lanes, inflow = 1900 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck0` 4 lanes, inflow = 2500 veh/hour, 10% CAV penetration, no vehicles are allowed to lane change, S=(141,), A=(20,), T=1000. -- `flow.benchmarks.bottleneck1` 4 lanes, inflow = 1900 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck1` 4 lanes, inflow = 2500 veh/hour, 10% CAV penetration, the human drivers follow the standard lane changing model in the simulator, S=(141,), A=(20,), T=1000. -- `flow.benchmarks.bottleneck2` 8 lanes, inflow = 3800 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck2` 8 lanes, inflow = 5000 veh/hour, 10% CAV penetration, no vehicles are allowed to lane change, S=(281,), A=(40,), T=1000. 
## Training on Custom Algorithms diff --git a/flow/benchmarks/bottleneck0.py b/flow/benchmarks/bottleneck0.py index b0e86844c..b07947ad7 100644 --- a/flow/benchmarks/bottleneck0.py +++ b/flow/benchmarks/bottleneck0.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/bottleneck1.py b/flow/benchmarks/bottleneck1.py index 26ae6527a..9c8d9c192 100644 --- a/flow/benchmarks/bottleneck1.py +++ b/flow/benchmarks/bottleneck1.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/bottleneck2.py b/flow/benchmarks/bottleneck2.py index 5052b3b88..4651d448b 100644 --- a/flow/benchmarks/bottleneck2.py +++ b/flow/benchmarks/bottleneck2.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/grid0.py b/flow/benchmarks/grid0.py index 1655c3b3c..5c4ee5349 100644 --- a/flow/benchmarks/grid0.py +++ b/flow/benchmarks/grid0.py @@ -4,7 +4,7 @@ - **Observation Dimension**: (339, ) - **Horizon**: 400 steps """ -from flow.envs import TrafficLightGridPOEnv +from flow.envs import TrafficLightGridBenchmarkEnv from flow.networks import TrafficLightGridNetwork from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ InFlows, SumoCarFollowingParams @@ -68,7 +68,7 @@ exp_tag="grid_0", # name of the flow environment the experiment is running on - env_name=TrafficLightGridPOEnv, + env_name=TrafficLightGridBenchmarkEnv, # name of the network class the experiment is running on network=TrafficLightGridNetwork, diff --git a/flow/benchmarks/grid1.py b/flow/benchmarks/grid1.py index ec2a27454..83055adfd 100644 --- a/flow/benchmarks/grid1.py +++ b/flow/benchmarks/grid1.py @@ -4,7 +4,7 @@ - **Observation Dimension**: (915, ) - **Horizon**: 400 steps """ -from flow.envs import TrafficLightGridPOEnv +from flow.envs import TrafficLightGridBenchmarkEnv from flow.networks import TrafficLightGridNetwork from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ InFlows, SumoCarFollowingParams @@ -68,7 +68,7 @@ exp_tag="grid_1", # name of the flow environment the experiment is running on - env_name=TrafficLightGridPOEnv, + env_name=TrafficLightGridBenchmarkEnv, # name of the network class the experiment is running on network=TrafficLightGridNetwork, diff --git a/flow/envs/__init__.py b/flow/envs/__init__.py index 6f4351cc0..8bea3dd4f 100755 --- a/flow/envs/__init__.py +++ b/flow/envs/__init__.py @@ -4,7 +4,7 @@ from flow.envs.bottleneck import BottleneckAccelEnv, BottleneckEnv, \ BottleneckDesiredVelocityEnv from flow.envs.traffic_light_grid import TrafficLightGridEnv, \ - TrafficLightGridPOEnv, TrafficLightGridTestEnv + TrafficLightGridPOEnv, TrafficLightGridTestEnv, TrafficLightGridBenchmarkEnv from flow.envs.ring.lane_change_accel import LaneChangeAccelEnv, \ LaneChangeAccelPOEnv from flow.envs.ring.accel import AccelEnv @@ -34,6 +34,7 @@ 'WaveAttenuationPOEnv', 'TrafficLightGridEnv', 'TrafficLightGridPOEnv', + 'TrafficLightGridBenchmarkEnv', 'BottleneckDesiredVelocityEnv', 'TestEnv', 'BayBridgeEnv', diff --git a/flow/envs/traffic_light_grid.py b/flow/envs/traffic_light_grid.py index 53391a329..8be0cb8a5 100644 --- a/flow/envs/traffic_light_grid.py +++ b/flow/envs/traffic_light_grid.py @@ -731,6 +731,17 @@ def 
additional_command(self):
         [self.k.vehicle.set_observed(veh_id) for veh_id in self.observed_ids]
 
 
+class TrafficLightGridBenchmarkEnv(TrafficLightGridPOEnv):
+    """Class used for the benchmarks in `Benchmarks for reinforcement learning in mixed-autonomy traffic`."""
+
+    def compute_reward(self, rl_actions, **kwargs):
+        """See class definition."""
+        if self.env_params.evaluate:
+            return -rewards.min_delay_unscaled(self)
+        else:
+            return rewards.desired_velocity(self)
+
+
 class TrafficLightGridTestEnv(TrafficLightGridEnv):
     """
     Class for use in testing.

From aa1d7133bda5d89c54cd5a68a792a83e9e0f09cc Mon Sep 17 00:00:00 2001
From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com>
Date: Thu, 7 May 2020 23:51:53 -0700
Subject: [PATCH 047/438] get not departed vehicles (#922)

* added function to kernel/vehicle to get number of not departed vehicles

* fixed over indentation of the docstring

* indentation edit

* pep8

Co-authored-by: AboudyKreidieh
---
 flow/core/kernel/simulation/traci.py | 10 +++++---
 flow/core/kernel/vehicle/base.py     |  7 ++++++
 flow/core/kernel/vehicle/traci.py    | 37 ++++++++++++++++++++++------
 3 files changed, 43 insertions(+), 11 deletions(-)

diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py
index 0ee29ada6..35b3c2612 100644
--- a/flow/core/kernel/simulation/traci.py
+++ b/flow/core/kernel/simulation/traci.py
@@ -46,9 +46,13 @@ def pass_api(self, kernel_api):
         # subscribe some simulation parameters needed to check for entering,
         # exiting, and colliding vehicles
         self.kernel_api.simulation.subscribe([
-            tc.VAR_DEPARTED_VEHICLES_IDS, tc.VAR_ARRIVED_VEHICLES_IDS,
-            tc.VAR_TELEPORT_STARTING_VEHICLES_IDS, tc.VAR_TIME_STEP,
-            tc.VAR_DELTA_T
+            tc.VAR_DEPARTED_VEHICLES_IDS,
+            tc.VAR_ARRIVED_VEHICLES_IDS,
+            tc.VAR_TELEPORT_STARTING_VEHICLES_IDS,
+            tc.VAR_TIME_STEP,
+            tc.VAR_DELTA_T,
+            tc.VAR_LOADED_VEHICLES_NUMBER,
+            tc.VAR_DEPARTED_VEHICLES_NUMBER
         ])
 
     def simulation_step(self):
diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py
index d9fc773cd..c68d68c3a 100644
--- a/flow/core/kernel/vehicle/base.py
+++ b/flow/core/kernel/vehicle/base.py
@@ -290,6 +290,13 @@ def get_departed_ids(self):
         """Return the ids of vehicles that departed in the last time step."""
         raise NotImplementedError
 
+    def get_num_not_departed(self):
+        """Return the number of vehicles not departed in the last time step.
+
+        This includes vehicles that were loaded but not departed.
+        """
+        raise NotImplementedError
+
     def get_speed(self, veh_id, error=-1001):
         """Return the speed of the specified vehicle.
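For reference, the TraCI implementation in the next diff maintains this counter by accumulating, at each step, the difference between how many vehicles SUMO loaded and how many it actually inserted. A toy illustration with hypothetical per-step counts:

```python
# Hypothetical (loaded, departed) counts over three simulation steps.
steps = [(5, 3), (2, 1), (0, 3)]

num_not_departed = 0
for loaded, departed in steps:
    # mirrors: VAR_LOADED_VEHICLES_NUMBER - VAR_DEPARTED_VEHICLES_NUMBER
    num_not_departed += loaded - departed

print(num_not_departed)  # 0 -> the insertion backlog has drained
```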
diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py
index 22dcc8837..9cb1a5f0f 100644
--- a/flow/core/kernel/vehicle/traci.py
+++ b/flow/core/kernel/vehicle/traci.py
@@ -22,7 +22,8 @@
 STEPS = 10
 rdelta = 255 / STEPS
 # smoothly go from red to green as the speed increases
-color_bins = [[int(255 - rdelta * i), int(rdelta * i), 0] for i in range(STEPS + 1)]
+color_bins = [[int(255 - rdelta * i), int(rdelta * i), 0] for i in
+              range(STEPS + 1)]
 
 
 class TraCIVehicle(KernelVehicle):
@@ -56,6 +57,8 @@ def __init__(self,
         self.num_vehicles = 0
         # number of rl vehicles in the network
         self.num_rl_vehicles = 0
+        # number of vehicles loaded but not yet departed
+        self.num_not_departed = 0
 
         # contains the parameters associated with each type of vehicle
         self.type_parameters = {}
@@ -101,6 +104,7 @@ def initialize(self, vehicles):
         self.minGap = vehicles.minGap
         self.num_vehicles = 0
         self.num_rl_vehicles = 0
+        self.num_not_departed = 0
 
         self.__vehicles.clear()
         for typ in vehicles.initial:
@@ -183,11 +187,12 @@ def update(self, reset):
                 self.prev_last_lc[veh_id] = -float("inf")
             self._num_departed.clear()
             self._num_arrived.clear()
             self._departed_ids.clear()
             self._arrived_ids.clear()
             self._arrived_rl_ids.clear()
+            self.num_not_departed = 0
 
             # add vehicles from a network template, if applicable
             if hasattr(self.master_kernel.network.network,
                        "template_vehicles"):
-                for veh_id in self.master_kernel.network.network.\
+                for veh_id in self.master_kernel.network.network. \
                         template_vehicles:
                     vals = deepcopy(self.master_kernel.network.network.
                                     template_vehicles[veh_id])
@@ -212,6 +217,10 @@ def update(self, reset):
         self._departed_ids.append(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS])
         self._arrived_ids.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS])
 
+        # update the number of not departed vehicles
+        self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \
+            sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER]
+
         # update the "headway", "leader", and "follower" variables
         for veh_id in self.__ids:
             try:
@@ -321,8 +330,12 @@ def _add_departed(self, veh_id, veh_type):
 
         # subscribe the new vehicle
         self.kernel_api.vehicle.subscribe(veh_id, [
-            tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION, tc.VAR_ROAD_ID,
-            tc.VAR_SPEED, tc.VAR_EDGES, tc.VAR_POSITION, tc.VAR_ANGLE,
+            tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION,
+            tc.VAR_ROAD_ID,
+            tc.VAR_SPEED,
+            tc.VAR_EDGES,
+            tc.VAR_POSITION,
+            tc.VAR_ANGLE,
             tc.VAR_SPEED_WITHOUT_TRACI
         ])
         self.kernel_api.vehicle.subscribeLeader(veh_id, 2000)
@@ -523,6 +536,10 @@ def get_departed_ids(self):
         else:
             return 0
 
+    def get_num_not_departed(self):
+        """See parent class."""
+        return self.num_not_departed
+
     def get_previous_speed(self, veh_id, error=-1001):
         """See parent class."""
         if isinstance(veh_id, (list, np.ndarray)):
@@ -1009,7 +1026,8 @@ def update_vehicle_colors(self):
         for veh_id in self.get_rl_ids():
             try:
                 # If vehicle is already being colored via argument to vehicles.add(), don't re-color it.
-                if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]:
+                if self._force_color_update or 'color' not in \
+                        self.type_parameters[self.get_type(veh_id)]:
                     # color rl vehicles red
                     self.set_color(veh_id=veh_id, color=RED)
             except (FatalTraCIError, TraCIException) as e:
@@ -1020,7 +1038,8 @@ def update_vehicle_colors(self):
             try:
                 color = CYAN if veh_id in self.get_observed_ids() else WHITE
                 # If vehicle is already being colored via argument to vehicles.add(), don't re-color it.
- if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color) except (FatalTraCIError, TraCIException) as e: print('Error when updating human vehicle colors:', e) @@ -1030,7 +1049,8 @@ def update_vehicle_colors(self): if 'av' in veh_id: color = RED # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color) except (FatalTraCIError, TraCIException) as e: print('Error when updating human vehicle colors:', e) @@ -1043,7 +1063,8 @@ def update_vehicle_colors(self): veh_speed = self.get_speed(veh_id) bin_index = np.digitize(veh_speed, speed_ranges) # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color_bins[bin_index]) # clear the list of observed vehicles From 5080514630615d232f3b9caf75c57c1623bdca7f Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Sat, 9 May 2020 15:31:44 -0700 Subject: [PATCH 048/438] changed _departed_ids, and _arrived_ids in the update function (#926) * changed _departed_ids, and _arrived_ids in the update function * fixed bug in get_departed_ids and get_arrived_ids --- flow/core/kernel/simulation/traci.py | 3 ++- flow/core/kernel/vehicle/traci.py | 27 ++++++++++----------------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py index 35b3c2612..2cd109024 100644 --- a/flow/core/kernel/simulation/traci.py +++ b/flow/core/kernel/simulation/traci.py @@ -52,7 +52,8 @@ def pass_api(self, kernel_api): tc.VAR_TIME_STEP, tc.VAR_DELTA_T, tc.VAR_LOADED_VEHICLES_NUMBER, - tc.VAR_DEPARTED_VEHICLES_NUMBER + tc.VAR_DEPARTED_VEHICLES_NUMBER, + tc.VAR_ARRIVED_VEHICLES_NUMBER ]) def simulation_step(self): diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 9cb1a5f0f..3439e98cc 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -71,11 +71,11 @@ def __init__(self, # number of vehicles that entered the network for every time-step self._num_departed = [] - self._departed_ids = [] + self._departed_ids = 0 # number of vehicles to exit the network for every time-step self._num_arrived = [] - self._arrived_ids = [] + self._arrived_ids = 0 self._arrived_rl_ids = [] # whether or not to automatically color vehicles @@ -184,8 +184,8 @@ def update(self, reset): self.prev_last_lc[veh_id] = -float("inf") self._num_departed.clear() self._num_arrived.clear() - self._departed_ids.clear() - self._arrived_ids.clear() + self._departed_ids = 0 + self._arrived_ids = 0 self._arrived_rl_ids.clear() self.num_not_departed = 0 @@ -211,11 +211,10 @@ def update(self, reset): self.__vehicles[veh_id]["last_lc"] = self.time_counter # updated the list of departed and arrived vehicles - self._num_departed.append( - len(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS])) - 
self._num_arrived.append(len(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS])) - self._departed_ids.append(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]) - self._arrived_ids.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]) + self._num_departed.append(sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER]) + self._num_arrived.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_NUMBER]) + self._departed_ids = sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS] + self._arrived_ids = sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS] # update the number of not departed vehicles self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ @@ -517,10 +516,7 @@ def get_num_arrived(self): def get_arrived_ids(self): """See parent class.""" - if len(self._arrived_ids) > 0: - return self._arrived_ids[-1] - else: - return 0 + return self._arrived_ids def get_arrived_rl_ids(self): """See parent class.""" @@ -531,10 +527,7 @@ def get_arrived_rl_ids(self): def get_departed_ids(self): """See parent class.""" - if len(self._departed_ids) > 0: - return self._departed_ids[-1] - else: - return 0 + return self._departed_ids def get_num_not_departed(self): """See parent class.""" From 1db687e557ffab1d4caffb0b3a72cc647d806892 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 16:43:22 -0700 Subject: [PATCH 049/438] Add an on ramp option --- .../exp_configs/non_rl/i210_subnetwork.py | 74 ++++--- flow/controllers/routing_controllers.py | 21 ++ flow/envs/base.py | 14 +- flow/networks/i210_subnetwork.py | 196 ++++++++++-------- 4 files changed, 184 insertions(+), 121 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index d993ae93a..87bab415a 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -4,6 +4,7 @@ import numpy as np from flow.controllers.car_following_models import IDMController +from flow.controllers.routing_controllers import I210Router from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -15,18 +16,35 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# create the base vehicle type that will be used for inflows -vehicles = VehicleParams() -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 - }), -) +ON_RAMP = True + +if ON_RAMP: + vehicles = VehicleParams() + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + routing_controller=(I210Router, {}) + ) + +else: + # create the base vehicle type that will be used for inflows + vehicles = VehicleParams() + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + ) inflow = InFlows() # main highway @@ -37,18 +55,19 @@ departLane="random", departSpeed=23) # on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321, -# departLane="random", -# departSpeed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421, -# departLane="random", -# departSpeed=20) +if ON_RAMP: + inflow.add( + veh_type="human", + edge="27414345", + vehs_per_hour=321, + 
departLane="random", + departSpeed=20) + inflow.add( + veh_type="human", + edge="27414342#0", + vehs_per_hour=421, + departLane="random", + departSpeed=20) NET_TEMPLATE = os.path.join( config.PROJECT_PATH, @@ -71,20 +90,21 @@ sim=SumoParams( sim_step=0.5, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=4500, + horizon=7200, ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, - template=NET_TEMPLATE + template=NET_TEMPLATE, + additional_params={"use_on_ramp": ON_RAMP} ), # vehicles to be placed in the network at the start of a rollout (see diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py index e6ccdde78..c880b5bbf 100755 --- a/flow/controllers/routing_controllers.py +++ b/flow/controllers/routing_controllers.py @@ -124,3 +124,24 @@ def choose_route(self, env): new_route = super().choose_route(env) return new_route + +class I210Router(ContinuousRouter): + """Assists in choosing routes in select cases for the Bay Bridge network. + Extension to the Continuous Router. + Usage + ----- + See base class for usage example. + """ + + def choose_route(self, env): + """See parent class.""" + edge = env.k.vehicle.get_edge(self.veh_id) + lane = env.k.vehicle.get_lane(self.veh_id) + + # vehicles on these edges in lanes 4 and 5 are not going to be able to make it out in time + if edge == "119257908#1-AddedOffRampEdge" and lane in [5, 4, 3]: + new_route = env.available_routes["119257908#1-AddedOffRampEdge"][0][0] + else: + new_route = super().choose_route(env) + + return new_route diff --git a/flow/envs/base.py b/flow/envs/base.py index adc959b9a..f033514ff 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -405,8 +405,18 @@ def step(self, rl_actions): # test if the environment should terminate due to a collision or the # time horizon being met done = (self.time_counter >= self.env_params.sims_per_step * - (self.env_params.warmup_steps + self.env_params.horizon) - or crash) + (self.env_params.warmup_steps + self.env_params.horizon)) + if crash: + print( + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************\n" + "WARNING: There was a crash. \n" + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************" + ) + # compute the info for each agent infos = {} diff --git a/flow/networks/i210_subnetwork.py b/flow/networks/i210_subnetwork.py index d8e05efb5..febb39b00 100644 --- a/flow/networks/i210_subnetwork.py +++ b/flow/networks/i210_subnetwork.py @@ -45,97 +45,109 @@ def specify_routes(self, net_params): Routes for vehicles moving through the bay bridge from Oakland to San Francisco. 
""" - rts = { - # Main highway - "119257914": [ - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) - # (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 17 / 8378) - ], - # "119257908#0": [ - # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOnRampEdge": [ - # (["119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1": [ - # (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOffRampEdge": [ - # (["119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#2": [ - # (["119257908#2", "119257908#3"], 1), - # ], - # "119257908#3": [ - # (["119257908#3"], 1), - # ], - # - # # On-ramp - # "27414345": [ - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 9 / 321), - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 9 / 321), - # ], - # "27414342#0": [ - # (["27414342#0", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 20 / 421), - # (["27414342#0", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 20 / 421), - # ], - # "27414342#1-AddedOnRampEdge": [ - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 0.5), - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - # ], - # - # # Off-ramp - # "173381935": [ - # (["173381935"], 1), - # ], - } + if net_params.additional_params["use_on_ramp"]: + rts = { + # Main highway + "119257914": [ + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + 17 / 8378) + ], + "119257908#0": [ + (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", + # "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), 
+ ], + "119257908#1-AddedOnRampEdge": [ + (["119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1-AddedOnRampEdge", "119257908#1", + # "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1": [ + (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1-AddedOffRampEdge": [ + (["119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#2": [ + (["119257908#2", "119257908#3"], 1), + ], + "119257908#3": [ + (["119257908#3"], 1), + ], + + # On-ramp + "27414345": [ + (["27414345", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1 - 9 / 321), + (["27414345", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "173381935"], + 9 / 321), + ], + "27414342#0": [ + (["27414342#0", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1 - 20 / 421), + (["27414342#0", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "173381935"], + 20 / 421), + ], + "27414342#1-AddedOnRampEdge": [ + (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 0.5), + (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + 0.5), + ], + + # Off-ramp + "173381935": [ + (["173381935"], 1), + ], + } + + else: + rts = { + # Main highway + "119257914": [ + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1), + ], + } return rts From 1a36503ba19034f1bd11891fc13896b22f7d5c25 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 11:32:12 -0700 Subject: [PATCH 050/438] Increased inflows to 10800 to match density in Bennis ring --- examples/exp_configs/non_rl/i210_subnetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 87bab415a..049ec032a 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -16,7 +16,7 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -ON_RAMP = True +ON_RAMP = False if ON_RAMP: vehicles = VehicleParams() @@ -51,7 +51,7 @@ inflow.add( veh_type="human", edge="119257914", - vehs_per_hour=8378, + vehs_per_hour=10800, departLane="random", departSpeed=23) # on ramp From 37161a60991187f71d20effb03b527481f657030 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 12:10:07 -0700 Subject: [PATCH 051/438] Upgrade the network to not have keepclear value on the junctions --- .../exp_configs/templates/sumo/test2.net.xml | 78 ++++++++++++++----- 1 file changed, 57 insertions(+), 21 deletions(-) diff --git a/examples/exp_configs/templates/sumo/test2.net.xml 
b/examples/exp_configs/templates/sumo/test2.net.xml index 00e3edcd5..16170b917 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,5 +1,41 @@ + + @@ -4680,24 +4716,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4801,10 +4837,10 @@ - + - - + + From d99b8b7271bbd6231b93b3035d837028257db490 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 12:41:31 -0700 Subject: [PATCH 052/438] Convert inflows to pick out the best lane to travel in instead of a random lane --- examples/exp_configs/non_rl/i210_subnetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 049ec032a..f87a31308 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -52,8 +52,8 @@ veh_type="human", edge="119257914", vehs_per_hour=10800, - departLane="random", - departSpeed=23) + departLane="best", + departSpeed=23.0) # on ramp if ON_RAMP: inflow.add( From edfd1496f0fb85c7798526b8c23bc22b331ad2cc Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Tue, 24 Mar 2020 22:49:17 -0700 Subject: [PATCH 053/438] Add 1 lane highway network for Benni --- examples/exp_configs/non_rl/highway.py | 40 +++++++------------ .../exp_configs/non_rl/i210_subnetwork.py | 2 +- flow/networks/highway.py | 2 +- 3 files changed, 16 insertions(+), 28 deletions(-) diff --git a/examples/exp_configs/non_rl/highway.py b/examples/exp_configs/non_rl/highway.py index e7505f2d7..1905e2f7f 100644 --- a/examples/exp_configs/non_rl/highway.py +++ b/examples/exp_configs/non_rl/highway.py @@ -5,25 +5,19 @@ from flow.core.params import VehicleParams, InFlows from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS -from flow.envs import LaneChangeAccelEnv +from flow.envs import TestEnv vehicles = VehicleParams() vehicles.add( - veh_id="human", - acceleration_controller=(IDMController, {}), - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), - num_vehicles=20) -vehicles.add( - veh_id="human2", - acceleration_controller=(IDMController, {}), - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), - num_vehicles=20) + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + ) env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) @@ -31,13 +25,7 @@ inflow.add( veh_type="human", edge="highway_0", - probability=0.25, - departLane="free", - departSpeed=20) -inflow.add( - veh_type="human2", - edge="highway_0", - probability=0.25, + vehs_per_hour=10800 / 5.0, departLane="free", departSpeed=20) @@ -47,7 +35,7 @@ exp_tag='highway', # name of the flow environment the experiment is running on - env_name=LaneChangeAccelEnv, + env_name=TestEnv, # name of the network class the experiment is running on network=HighwayNetwork, @@ -58,12 +46,12 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( render=True, - lateral_resolution=1.0, + sim_step=0.5 ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=1500, + horizon=4000, additional_params=ADDITIONAL_ENV_PARAMS.copy(), ), diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py 
b/examples/exp_configs/non_rl/i210_subnetwork.py
index f87a31308..8264fc286 100644
--- a/examples/exp_configs/non_rl/i210_subnetwork.py
+++ b/examples/exp_configs/non_rl/i210_subnetwork.py
@@ -5,7 +5,7 @@
 from flow.controllers.car_following_models import IDMController
 from flow.controllers.routing_controllers import I210Router
-from flow.core.params import SumoParams
+from flow.core.params import SumoParams, SumoCarFollowingParams
 from flow.core.params import EnvParams
 from flow.core.params import NetParams
 from flow.core.params import SumoLaneChangeParams
diff --git a/flow/networks/highway.py b/flow/networks/highway.py
index 7e9c18ad5..02b61f133 100644
--- a/flow/networks/highway.py
+++ b/flow/networks/highway.py
@@ -9,7 +9,7 @@
     # length of the highway
     "length": 1000,
     # number of lanes
-    "lanes": 4,
+    "lanes": 1,
     # speed limit for all edges
     "speed_limit": 30,
     # number of edges to divide the highway into

From 48e2642bd6da2be5696c2649eb73f1351b94769c Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Mon, 6 Apr 2020 15:28:57 -0700
Subject: [PATCH 054/438] data pipeline implemented

---
 examples/data_pipeline.py           | 179 ++++++++++++++++++++++
 examples/query.py                   |   8 ++
 examples/run_query.py               |  34 ++++++
 examples/simulate.py                |  10 +-
 flow/controllers/base_controller.py |  12 ++
 flow/core/experiment.py             |  27 ++++-
 flow/core/kernel/vehicle/base.py    |  16 +++
 flow/core/kernel/vehicle/traci.py   |  15 +++
 8 files changed, 299 insertions(+), 2 deletions(-)
 create mode 100644 examples/data_pipeline.py
 create mode 100644 examples/query.py
 create mode 100644 examples/run_query.py
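Taken together, the new modules form a small pipeline: the simulation writes an emission CSV, `generate_trajectory_table` joins it with the extra per-vehicle fields collected in `Experiment.run`, `upload_to_s3` pushes the result to a partitioned S3 prefix, and `AthenaQuery` runs SQL against it. A rough sketch of that flow (file paths are illustrative; the bucket and key layout mirror the defaults hard-coded below):

```python
# Hedged end-to-end sketch of the pipeline added in this patch.
from examples.data_pipeline import generate_trajectory_table, upload_to_s3

# abbreviated stand-in for the per-step records Experiment.run collects
extra_info = {"time": [0.5], "id": ["human_0"], "headway": [15.2],
              "acceleration": [0.3], "accel_without_noise": [0.25],
              "road_grade": [0]}

table_path = generate_trajectory_table(
    "data/example-emission.csv", extra_info, "default")
upload_to_s3("brent.experiments",
             "trajectory-output/partition_name=default/" + table_path.split("/")[-1],
             table_path)
```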
diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py
new file mode 100644
index 000000000..5fdc30cf2
--- /dev/null
+++ b/examples/data_pipeline.py
@@ -0,0 +1,179 @@
+import pandas as pd
+import boto3
+from botocore.exceptions import ClientError
+from examples.query import QueryStrings
+from time import time
+
+
+def generate_trajectory_table(data_path, extra_info, partition_name):
+    """ generate the desired output for the trajectory_table based on a standard SUMO emission file
+
+    Parameters
+    ----------
+    data_path : str
+        path to the standard SUMO emission file
+    extra_info: dict
+        extra information needed in the trajectory table, collected from flow
+    partition_name: str
+        the name of the partition to put this output to
+    Returns
+    -------
+    output_file_path: str
+        the local path of the outputted csv file
+    """
+    raw_output = pd.read_csv(data_path, index_col=["time", "id"])
+    required_cols = {"time", "id", "speed", "x", "y"}
+    raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1)
+
+    extra_info = pd.DataFrame.from_dict(extra_info)
+    extra_info.set_index(["time", "id"])
+    raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"])
+
+    # add the partition column
+    raw_output['partition'] = partition_name
+
+    output_file_path = data_path[:-4]+"_trajectory.csv"
+    raw_output.to_csv(output_file_path, index=False)
+    return output_file_path
+
+
+def upload_to_s3(bucket_name, bucket_key, file_path):
+    """ upload a file to S3 bucket
+
+    Parameters
+    ----------
+    bucket_name : str
+        the bucket to upload to
+    bucket_key: str
+        the key within the bucket for the file
+    file_path: str
+        the path of the file to be uploaded
+    """
+    s3 = boto3.resource("s3")
+    s3.Bucket(bucket_name).upload_file(file_path, bucket_key)
+    return
+
+
+class AthenaQuery:
+
+    def __init__(self):
+        self.MAX_WAIT = 60
+        self.client = boto3.client("athena")
+        self.existing_partitions = self.get_existing_partitions()
+
+    def get_existing_partitions(self):
+        """return the existing partitions in the S3 bucket"""
+
+        response = self.client.start_query_execution(
+            QueryString='SHOW PARTITIONS trajectory_table',
+            QueryExecutionContext={
+                'Database': 'simulation'
+            },
+            WorkGroup='primary'
+        )
+        if self.wait_for_execution(response['QueryExecutionId']):
+            raise RuntimeError("get current partitions timed out")
+        response = self.client.get_query_results(
+            QueryExecutionId=response['QueryExecutionId'],
+            MaxResults=1000
+        )
+        return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']]
+
+    def check_status(self, execution_id):
+        """ Return the status of the execution with given id
+
+        Parameters
+        ----------
+        execution_id : string
+            id of the execution that is checked for
+        Returns
+        -------
+        status: str
+            QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED
+        """
+
+        response = self.client.get_query_execution(
+            QueryExecutionId=execution_id
+        )
+        return response['QueryExecution']['Status']['State']
+
+    def wait_for_execution(self, execution_id):
+        """ wait for the execution to finish or time-out
+
+        Parameters
+        ----------
+        execution_id : str
+            id of the execution this is waiting for
+        Returns
+        -------
+        time_out: bool
+            True if time-out, False if success
+        Raises
+        ------
+        RuntimeError: if the execution failed or got canceled
+        """
+        start = time()
+        while time() - start < self.MAX_WAIT:
+            state = self.check_status(execution_id)
+            if state == 'FAILED' or state == 'CANCELLED':
+                raise RuntimeError("update partition failed")
+            elif state == 'SUCCEEDED':
+                return False
+        return True
+
+    def update_partition(self, partition):
+        """ load the given partition to the trajectory_table on Athena
+
+        Parameters
+        ----------
+        partition : str
+            the new partition that needs to be loaded
+        """
+        response = self.client.start_query_execution(
+            QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition),
+            QueryExecutionContext={
+                'Database': 'simulation'
+            },
+            WorkGroup='primary'
+        )
+        if self.wait_for_execution(response['QueryExecutionId']):
+            raise RuntimeError("update partition timed out")
+        self.existing_partitions.append(partition)
+        return
+
+    def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"):
+        """ start the execution of a query, does not wait for it to finish
+
+        Parameters
+        ----------
+        query_name : str
+            name of the query in QueryStrings enum that will be run
+        result_location: str, optional
+            location on the S3 bucket where the result will be stored
+        partition: str, optional
+            name of the partition to run this query on
+        Returns
+        -------
+        execution_id: str
+            the execution id of the execution started by this method
+        Raises
+        ------
+        ValueError: if the given query does not exist in the QueryStrings enum
+        """
+        if query_name not in QueryStrings.__members__:
+            raise ValueError("query does not exist: please add it to query.py")
+
+        if partition not in self.existing_partitions:
+            self.update_partition(partition)
+
+        response = self.client.start_query_execution(
+            QueryString=QueryStrings[query_name].value.format(partition=partition),
+            QueryExecutionContext={
+                'Database': 'simulation'
+            },
+            ResultConfiguration={
+                'OutputLocation': result_location,
+            },
+            WorkGroup='primary'
+        )
+        return response['QueryExecutionId']
\ No newline at end of file
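The class above is driven from the command line by `run_query.py` in the next file; programmatic use looks roughly like this (assumes boto3 credentials with Athena and S3 access; the result location mirrors the default baked into `run_query`):

```python
# Hedged usage sketch for the AthenaQuery wrapper defined above.
from examples.data_pipeline import AthenaQuery

engine = AthenaQuery()
print(engine.existing_partitions)  # partitions already registered

execution_id = engine.run_query(
    "SAMPLE",  # any member of QueryStrings
    result_location="s3://brent.experiments/query-result/",
    partition="default")
print(engine.check_status(execution_id))  # QUEUED|RUNNING|SUCCEEDED|...
```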
diff --git a/examples/query.py b/examples/query.py
new file mode 100644
index 000000000..3fbbe69e1
--- /dev/null
+++ b/examples/query.py
@@ -0,0 +1,8 @@
+from enum import Enum
+
+tags = {}
+
+
+class QueryStrings(Enum):
+    SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;"
+    UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');"
\ No newline at end of file
diff --git a/examples/run_query.py b/examples/run_query.py
new file mode 100644
index 000000000..7b4a5af7d
--- /dev/null
+++ b/examples/run_query.py
@@ -0,0 +1,34 @@
+import argparse
+import sys
+from examples.data_pipeline import AthenaQuery
+from examples.query import QueryStrings
+
+parser = argparse.ArgumentParser(prog="run_query", description="runs a query on AWS Athena and stores the result in "
+                                                               "an S3 location")
+parser.add_argument("--run", type=str, nargs="+")
+parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/")
+parser.add_argument("--partition", type=str, nargs='?', default="default")
+parser.add_argument("--list_partitions", action="store_true")
+parser.add_argument("--check_status", type=str, nargs='+')
+parser.add_argument("--list_queries", action="store_true")
+
+
+if __name__ == "__main__":
+    args = parser.parse_args()
+    queryEngine = AthenaQuery()
+
+    if args.run:
+        execution_ids = []
+        for query_name in args.run:
+            execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition))
+        print(execution_ids)
+    if args.list_partitions:
+        print(queryEngine.existing_partitions)
+    if args.check_status:
+        status = dict()
+        for execution_id in args.check_status:
+            status[execution_id] = queryEngine.check_status(execution_id)
+        print(status)
+    if args.list_queries:
+        for q in QueryStrings:
+            print(q)
diff --git a/examples/simulate.py b/examples/simulate.py
index 848f030a4..f54bb38d9 100644
--- a/examples/simulate.py
+++ b/examples/simulate.py
@@ -48,6 +48,12 @@ def parse_args(args):
         action='store_true',
         help='Specifies whether to generate an emission file from the '
              'simulation.')
+    parser.add_argument(
+        '--to_aws',
+        type=str, nargs='?', default=None, const="default",
+        help='Specifies the name of the partition to store the output '
+             'file on S3. Setting a non-None value for this argument '
+             'automatically sets gen_emission to True.')
 
     return parser.parse_known_args(args)[0]
 
@@ -55,6 +61,8 @@
 if __name__ == "__main__":
     flags = parse_args(sys.argv[1:])
 
+    flags.gen_emission = flags.gen_emission or flags.to_aws
+
     # Get the flow_params object.
     module = __import__("exp_configs.non_rl", fromlist=[flags.exp_config])
     flow_params = getattr(module, flags.exp_config).flow_params
@@ -83,4 +91,4 @@ def parse_args(args):
 
     exp = Experiment(flow_params, callables)
 
     # Run for the specified number of rollouts.
-    exp.run(flags.num_runs, convert_to_csv=flags.gen_emission)
+    exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws)
diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py
index 4004b1c4d..6e6734764 100755
--- a/flow/controllers/base_controller.py
+++ b/flow/controllers/base_controller.py
@@ -88,6 +88,9 @@ def get_action(self, env):
         float
             the modified form of the acceleration
         """
+        # clear the currently stored accel_without_noise of this vehicle by
+        # setting it to None
+        env.k.vehicle.update_accel_without_noise(self.veh_id, None)
+
         # this is to avoid abrupt decelerations when a vehicle has just entered
         # a network and its data is still not subscribed
         if len(env.k.vehicle.get_edge(self.veh_id)) == 0:
@@ -105,6 +108,15 @@ def get_action(self, env):
         if accel is None:
             return None
 
+        # store the acceleration without noise for each vehicle,
+        # running the fail safe if requested
+        accel_without_noise = accel
+        if self.fail_safe == 'instantaneous':
+            accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise)
+        elif self.fail_safe == 'safe_velocity':
+            accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise)
+        env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise)
+
         # add noise to the accelerations, if requested
         if self.accel_noise > 0:
             accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise)
diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index 69a78cb0e..a81f3b130 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -1,6 +1,7 @@
 """Contains an experiment class for running simulations."""
 from flow.core.util import emission_to_csv
 from flow.utils.registry import make_create_env
+from examples.data_pipeline import generate_trajectory_table, upload_to_s3
 import datetime
 import logging
 import time
@@ -85,7 +86,7 @@ def __init__(self, flow_params, custom_callables=None):
 
         logging.info("Initializing environment.")
 
-    def run(self, num_runs, rl_actions=None, convert_to_csv=False):
+    def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None):
         """Run the given network for a set number of runs.
 
         Parameters
         ----------
         num_runs : int
             number of runs the experiment should perform
         rl_actions : method, optional
             maps states to actions to be performed by the RL agents (if
             there are any)
         convert_to_csv : bool
             Specifies whether to convert the emission file created by sumo
             into a csv file
+        partition_name: str
+            Specifies the S3 partition you want to store the output file in;
+            it will be used later for queries. If None, the output won't be
+            uploaded to S3.
Returns ------- @@ -136,6 +141,8 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] + extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], + "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} for i in range(num_runs): ret = 0 @@ -153,6 +160,18 @@ def rl_actions(*_): vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) @@ -195,4 +214,10 @@ def rl_actions(*_): # Delete the .xml version of the emission file. os.remove(emission_path) + output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + output_file.split('/')[-1], output_file) + return info_dict diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index c68d68c3a..27e6b1ded 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -670,3 +670,19 @@ def get_max_speed(self, veh_id, error): float """ raise NotImplementedError + + ########################################################################### + # Methods for Datapipeline # + ########################################################################### + + def get_accel(self, veh_id): + """ see traci class """ + raise NotImplementedError + + def update_accel_without_noise(self, veh_id, accel_without_noise): + """ see traci class """ + raise NotImplementedError + + def get_accel_without_noise(self, veh_id): + """ see traci class """ + raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 3439e98cc..365b07a71 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,6 +113,7 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] + self.__vehicles[veh_id]["accel_without_noise"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1110,3 +1111,17 @@ def get_max_speed(self, veh_id, error=-1001): def set_max_speed(self, veh_id, max_speed): """See parent class.""" self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) + + # add for data pipeline + def get_accel(self, veh_id): + return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + + def update_accel_without_noise(self, veh_id, accel_without_noise): + 
self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + + def get_accel_without_noise(self, veh_id): + return self.__vehicles[veh_id]["accel_without_noise"] + + def get_road_grade(self, veh_id): + # TODO + return 0 From bd13f693bd58522dfa69b11c15bc12f26b862772 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 10 Apr 2020 19:54:30 -0700 Subject: [PATCH 055/438] multiple runs issue solved, testing added --- examples/data_pipeline.py | 55 +++++++- examples/datapipeline_test.py | 33 +++++ examples/query.py | 13 +- examples/run_query.py | 6 +- flow/core/experiment.py | 224 +----------------------------- flow/core/kernel/vehicle/base.py | 4 + flow/core/kernel/vehicle/traci.py | 3 + 7 files changed, 107 insertions(+), 231 deletions(-) create mode 100644 examples/datapipeline_test.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 5fdc30cf2..9d56548c2 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,7 +1,8 @@ import pandas as pd +import numpy as np import boto3 from botocore.exceptions import ClientError -from examples.query import QueryStrings +from examples.query import QueryStrings, testing_functions from time import time @@ -30,13 +31,22 @@ def generate_trajectory_table(data_path, extra_info, partition_name): raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) # add the partition column - raw_output['partition'] = partition_name - + # raw_output['partition'] = partition_name + raw_output = raw_output.sort_values(by=["time", "id"]) output_file_path = data_path[:-4]+"_trajectory.csv" raw_output.to_csv(output_file_path, index=False) return output_file_path +def generate_trajectory_from_flow(data_path, extra_info, partition_name): + extra_info = pd.DataFrame.from_dict(extra_info) + # extra_info["partition"] = partition_name + extra_info.to_csv(data_path, index=False) + upload_only_file_path = data_path[:-4] + "_upload" + ".csv" + extra_info.to_csv(upload_only_file_path, index=False, header=False) + return upload_only_file_path + + def upload_to_s3(bucket_name, bucket_key, file_path): """ upload a file to S3 bucket @@ -176,4 +186,41 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re }, WorkGroup='primary' ) - return response['QueryExecutionId'] \ No newline at end of file + return response['QueryExecutionId'] + +########################################################################### +# Helpers for testing the SQL Queries # +########################################################################### + + +def test_sql_query(query_name): + if query_name not in testing_functions: + raise ValueError("no tests supported for this query") + + # Run the respective sql query + queryEngine = AthenaQuery() + execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test", + partition="test") + if queryEngine.wait_for_execution(execution_id): + raise RuntimeError("execution timed out") + + # get the Athena query result from S3 + s3 = boto3.resource("s3") + s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv", + "data/athena_result.csv") + athena_result = pd.read_csv("data/athena_result.csv") + athena_result = athena_result.sort_values(by=["time", "id"]) + + # get the python expected result + expected_result = pd.read_csv("data/test_data.csv") + expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand") + 
expected_result.columns = ["time", "id", "power"] + expected_result = expected_result.sort_values(by=["time", "id"]) + + difference = athena_result["power"] - expected_result["power"] + print("average difference is: " + str(np.mean(difference))) + print("std of difference is: " + str(np.std(difference))) + print("average ratio of difference to expected is: " + + str(np.mean(np.divide(difference, expected_result["power"])))) + difference = pd.DataFrame(difference) + difference.to_csv("./difference.csv") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py new file mode 100644 index 000000000..564060d3b --- /dev/null +++ b/examples/datapipeline_test.py @@ -0,0 +1,33 @@ +import math + +# Vehicle Mass +M = 1200 +# Gravity +g = 9.81 +# Density of Air +ro_air = 1.225 +# Rolling resistance coefficient +C_r = .005 +# Aerodynamic drag coefficient +C_a = 0.3 +# Vehicle Cross sectional Area +A = 2.6 +# Road grade +theta = 0 + + +def heavyside(inp): + return 0 if inp <= 0 else 1 + + +def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) + accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) + rolling_friction = M * g * C_r * mu + air_drag = .5 * ro_air * A * C_a * mu**3 + power = accel_and_slope + rolling_friction + air_drag + return power + + +def apply_energy_one(row): + return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py index 3fbbe69e1..6354cec3b 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,8 +1,17 @@ from enum import Enum +from examples.datapipeline_test import apply_energy_one -tags = {} +tags = {"energy": ["ENERGY_ONE"]} + +testing_functions = {"ENERGY_ONE": apply_energy_one} class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file + UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" + ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py index 7b4a5af7d..ea8839b09 100644 --- a/examples/run_query.py +++ b/examples/run_query.py @@ -1,6 +1,5 @@ import argparse -import sys -from examples.data_pipeline import AthenaQuery +from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" @@ -11,6 +10,7 @@ parser.add_argument("--list_partitions", action="store_true") parser.add_argument("--check_status", type=str, nargs='+') parser.add_argument("--list_queries", action="store_true") +parser.add_argument("--test_query", nargs=1) if __name__ == "__main__": @@ -32,3 +32,5 @@ if args.list_queries: for q in QueryStrings: print(q) + if args.test_query: + test_sql_query(args.test_query[0]) \ No newline at end of file diff --git a/flow/core/experiment.py 
b/flow/core/experiment.py index a81f3b130..63c92e798 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,223 +1 @@ -"""Contains an experiment class for running simulations.""" -from flow.core.util import emission_to_csv -from flow.utils.registry import make_create_env -from examples.data_pipeline import generate_trajectory_table, upload_to_s3 -import datetime -import logging -import time -import os -import numpy as np - - -class Experiment: - """ - Class for systematically running simulations in any supported simulator. - - This class acts as a runner for a network and environment. In order to use - it to run an network and environment in the absence of a method specifying - the actions of RL agents in the network, type the following: - - >>> from flow.envs import Env - >>> flow_params = dict(...) # see the examples in exp_config - >>> exp = Experiment(flow_params) # for some experiment configuration - >>> exp.run(num_runs=1) - - If you wish to specify the actions of RL agents in the network, this may be - done as follows: - - >>> rl_actions = lambda state: 0 # replace with something appropriate - >>> exp.run(num_runs=1, rl_actions=rl_actions) - - Finally, if you would like to like to plot and visualize your results, this - class can generate csv files from emission files produced by sumo. These - files will contain the speeds, positions, edges, etc... of every vehicle - in the network at every time step. - - In order to ensure that the simulator constructs an emission file, set the - ``emission_path`` attribute in ``SimParams`` to some path. - - >>> from flow.core.params import SimParams - >>> flow_params['sim'] = SimParams(emission_path="./data") - - Once you have included this in your environment, run your Experiment object - as follows: - - >>> exp.run(num_runs=1, convert_to_csv=True) - - After the experiment is complete, look at the "./data" directory. There - will be two files, one with the suffix .xml and another with the suffix - .csv. The latter should be easily interpretable from any csv reader (e.g. - Excel), and can be parsed using tools such as numpy and pandas. - - Attributes - ---------- - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we want - to extract from the environment. The lambda will be called at each step - to extract information from the env and it will be stored in a dict - keyed by the str. - env : flow.envs.Env - the environment object the simulator will run - """ - - def __init__(self, flow_params, custom_callables=None): - """Instantiate the Experiment class. - - Parameters - ---------- - flow_params : dict - flow-specific parameters - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we - want to extract from the environment. The lambda will be called at - each step to extract information from the env and it will be stored - in a dict keyed by the str. - """ - self.custom_callables = custom_callables or {} - - # Get the env name and a creator for the environment. - create_env, _ = make_create_env(flow_params) - - # Create the environment. - self.env = create_env() - - logging.info(" Starting experiment {} at {}".format( - self.env.network.name, str(datetime.datetime.utcnow()))) - - logging.info("Initializing environment.") - - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): - """Run the given network for a set number of runs. 
- - Parameters - ---------- - num_runs : int - number of runs the experiment should perform - rl_actions : method, optional - maps states to actions to be performed by the RL agents (if - there are any) - convert_to_csv : bool - Specifies whether to convert the emission file created by sumo - into a csv file - partition_name: str - Specifies the S3 partition you want to store the output file, - will be used to later for query. If NONE, won't upload output - to S3. - - Returns - ------- - info_dict : dict < str, Any > - contains returns, average speed per step - """ - num_steps = self.env.env_params.horizon - - # raise an error if convert_to_csv is set to True but no emission - # file will be generated, to avoid getting an error at the end of the - # simulation - if convert_to_csv and self.env.sim_params.emission_path is None: - raise ValueError( - 'The experiment was run with convert_to_csv set ' - 'to True, but no emission file will be generated. If you wish ' - 'to generate an emission file, you should set the parameter ' - 'emission_path in the simulation parameters (SumoParams or ' - 'AimsunParams) to the path of the folder where emissions ' - 'output should be generated. If you do not wish to generate ' - 'emissions, set the convert_to_csv parameter to False.') - - # used to store - info_dict = { - "returns": [], - "velocities": [], - "outflows": [], - } - info_dict.update({ - key: [] for key in self.custom_callables.keys() - }) - - if rl_actions is None: - def rl_actions(*_): - return None - - # time profiling information - t = time.time() - times = [] - extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], - "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} - - for i in range(num_runs): - ret = 0 - vel = [] - custom_vals = {key: [] for key in self.custom_callables.keys()} - state = self.env.reset() - for j in range(num_steps): - t0 = time.time() - state, reward, done, _ = self.env.step(rl_actions(state)) - t1 = time.time() - times.append(1 / (t1 - t0)) - - # Compute the velocity speeds and cumulative returns. - veh_ids = self.env.k.vehicle.get_ids() - vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) - ret += reward - - # collect additional information for the data pipeline - for vid in veh_ids: - extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) - extra_info["id"].append(vid) - extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) - extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) - extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) - extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) - extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) - extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) - extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - - # Compute the results for the custom callables. - for (key, lambda_func) in self.custom_callables.items(): - custom_vals[key].append(lambda_func(self.env)) - - if done: - break - - # Store the information from the run in info_dict. 
- outflow = self.env.k.vehicle.get_outflow_rate(int(500)) - info_dict["returns"].append(ret) - info_dict["velocities"].append(np.mean(vel)) - info_dict["outflows"].append(outflow) - for key in custom_vals.keys(): - info_dict[key].append(np.mean(custom_vals[key])) - - print("Round {0}, return: {1}".format(i, ret)) - - # Print the averages/std for all variables in the info_dict. - for key in info_dict.keys(): - print("Average, std {}: {}, {}".format( - key, np.mean(info_dict[key]), np.std(info_dict[key]))) - - print("Total time:", time.time() - t) - print("steps/second:", np.mean(times)) - self.env.terminate() - - if convert_to_csv and self.env.simulator == "traci": - # wait a short period of time to ensure the xml file is readable - time.sleep(0.1) - - # collect the location of the emission file - dir_path = self.env.sim_params.emission_path - emission_filename = \ - "{0}-emission.xml".format(self.env.network.name) - emission_path = os.path.join(dir_path, emission_filename) - - # convert the emission file into a csv - emission_to_csv(emission_path) - - # Delete the .xml version of the emission file. - os.remove(emission_path) - - output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) - - if partition_name: - upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + output_file.split('/')[-1], output_file) - - return info_dict +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. 
The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = self.env.network.name for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. 
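As the docstring above spells out, the typical entry point looks like the following sketch, where flow_params stands in for one of the exp_config examples (the same placeholder convention the docstring uses):

    from flow.core.experiment import Experiment
    from flow.core.params import SumoParams

    flow_params = dict(...)  # see the examples in exp_config
    flow_params['sim'] = SumoParams(emission_path="./data")  # enable emission output

    exp = Experiment(flow_params)
    exp.run(num_runs=1, convert_to_csv=True)  # leaves a parsable .csv in ./data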
veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = emission_path[:-4] + "_trajectory.csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1], upload_file_path) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 27e6b1ded..f22a4ead3 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -683,6 +683,10 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): """ see traci class """ raise NotImplementedError + def get_2D_position(self, veh_id, error=-1001): + """ see traci class """ + raise NotImplementedError + def get_accel_without_noise(self, veh_id): """ see traci class """ raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 365b07a71..2c8d3173f 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1122,6 +1122,9 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): return self.__vehicles[veh_id]["accel_without_noise"] + def get_2D_position(self, veh_id, error=-1001): + return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) + def get_road_grade(self, veh_id): # TODO return 0 From 221bb9319a1df6e7550ecf18804278e6584ca4ea Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 22 Apr 2020 05:22:01 -0700 Subject: [PATCH 056/438] added more support for lambda function --- examples/data_pipeline.py | 28 ++++++++++++++++++++++++++-- examples/lambda_function.py | 26 ++++++++++++++++++++++++++ examples/simulate.py | 8 +++++++- flow/core/experiment.py | 2 +- 4 files changed, 60 insertions(+), 4 deletions(-) create mode 100644 examples/lambda_function.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 9d56548c2..28d3b5e73 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -39,6 +39,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based only on flow output + + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file that should be used for + upload to s3 only, it does not the human readable column names and + will be deleted after uploading to s3. 
A copy of this file with all + the column names will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -47,7 +65,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): return upload_only_file_path -def upload_to_s3(bucket_name, bucket_key, file_path): +def upload_to_s3(bucket_name, bucket_key, file_path, only_query): """ upload a file to S3 bucket Parameters @@ -58,9 +76,15 @@ the key within the bucket for the file file_path: str the path of the file to be uploaded + only_query: str + specify which queries should be run on this file by lambda: + if empty: run none of them + if "all": run all available analysis queries + if a string of a list of queries: run only those mentioned in the list """ s3 = boto3.resource("s3") - s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + s3.Bucket(bucket_name).upload_file(file_path, bucket_key, + ExtraArgs={"Metadata": {"run-query": only_query}}) return diff --git a/examples/lambda_function.py b/examples/lambda_function.py new file mode 100644 index 000000000..01ce1512a --- /dev/null +++ b/examples/lambda_function.py @@ -0,0 +1,26 @@ +import boto3 +from urllib.parse import unquote_plus +from examples.data_pipeline import AthenaQuery +from examples.query import tags + +s3 = boto3.client('s3') +queryEngine = AthenaQuery() + + +def lambda_handler(event, context): + for record in event['Records']: + bucket = record['s3']['bucket']['name'] + key = unquote_plus(record['s3']['object']['key']) + partition = key.split('/')[-2].split('=')[-1] + response = s3.head_object(Bucket=bucket, Key=key) + run_query = response["Metadata"]["run-query"] + + if bucket == 'brent.experiments' and 'trajectory-output/' in key: + if run_query == "all": + query_list = tags["analysis"] + elif not run_query: + break + else: + query_list = run_query.split("\', \'") + for query_name in query_list: + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file diff --git a/examples/simulate.py b/examples/simulate.py index f54bb38d9..69e11b2fb 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -54,6 +54,12 @@ def parse_args(args): help='Specifies the name of the partition to store the output ' 'file on S3. Passing a non-None value for this argument ' 'automatically sets gen_emission to True.') + parser.add_argument( + '--only_query', + nargs='*', default="[\'all\']", + help='specify which queries should be run by lambda; ' 'for details, see upload_to_s3 in data_pipeline.py' + ) return parser.parse_known_args(args)[0] @@ -91,4 +97,4 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts.
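A short editorial sketch of the metadata round trip this patch sets up (bucket and key names are made up): upload_to_s3 attaches the query selection as S3 object metadata, and the Lambda handler later recovers it with head_object without downloading the file:

    import boto3

    # writer side: attach the query selection as object metadata
    s3 = boto3.resource("s3")
    s3.Bucket("example-bucket").upload_file(
        "local_output.csv",
        "trajectory-output/partition_name=demo/output.csv",
        ExtraArgs={"Metadata": {"run-query": "all"}})

    # reader side (what lambda_handler does): read the metadata back
    client = boto3.client("s3")
    resp = client.head_object(Bucket="example-bucket",
                              Key="trajectory-output/partition_name=demo/output.csv")
    assert resp["Metadata"]["run-query"] == "all"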
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, only_query=flags.only_query) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 63c92e798..12391f9ae 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. 
self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = self.env.network.name for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. 
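For context, a small usage sketch of the custom_callables hook documented above; the callable shown is illustrative and flow_params is assumed to be defined as in the class docstring:

    import numpy as np
    from flow.core.experiment import Experiment

    # each lambda is called once per step with the env; means are stored per run
    callables = {
        "avg_speed": lambda env: np.mean(
            env.k.vehicle.get_speed(env.k.vehicle.get_ids())),
    }
    exp = Experiment(flow_params, custom_callables=callables)
    info = exp.run(num_runs=1)  # info["avg_speed"] holds one mean per run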
for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. os.remove(emission_path) trajectory_table_path = emission_path[:-4] + "_trajectory.csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1], upload_file_path) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. 
The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
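The upload call just below flattens only_query with str(only_query)[2:-2]; a quick editorial illustration of why that round-trips with the Lambda handler's split("', '"), and why it also works on the argparse default, which is already a string:

    only_query = ['POWER_DEMAND_MODEL', 'SAMPLE']
    flattened = str(only_query)[2:-2]             # "POWER_DEMAND_MODEL', 'SAMPLE"
    assert flattened.split("', '") == only_query  # the Lambda handler's split

    assert "['all']"[2:-2] == 'all'               # the CLI default, a plain string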
os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file From 8f05ec596edfe048487fcabf830b9cd04cedaf04 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 02:54:33 -0700 Subject: [PATCH 057/438] fix Windows line ending issue with experiment.py --- flow/core/experiment.py | 240 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 239 insertions(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 12391f9ae..80d607e7d 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1,239 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class.
Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. 
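A side note on the profiling above: np.mean(times) averages instantaneous per-step rates, which overweights fast steps and is not the same as total steps divided by total time. A tiny numeric illustration (durations hypothetical):

    import numpy as np

    durations = [0.1, 0.9]                  # seconds per step, hypothetical
    rates = [1 / d for d in durations]
    print(np.mean(rates))                   # ~5.56 steps/s, mean of per-step rates
    print(len(durations) / sum(durations))  # 2.0 steps/s, actual throughput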
veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" +from flow.core.util import emission_to_csv +from flow.utils.registry import make_create_env +from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 +import datetime +import logging +import time +import os +import numpy as np +import uuid + + +class Experiment: + """ + Class for systematically running simulations in any supported simulator. + + This class acts as a runner for a network and environment. 
In order to use + it to run a network and environment in the absence of a method specifying + the actions of RL agents in the network, type the following: + + >>> from flow.envs import Env + >>> flow_params = dict(...) # see the examples in exp_config + >>> exp = Experiment(flow_params) # for some experiment configuration + >>> exp.run(num_runs=1) + + If you wish to specify the actions of RL agents in the network, this may be + done as follows: + + >>> rl_actions = lambda state: 0 # replace with something appropriate + >>> exp.run(num_runs=1, rl_actions=rl_actions) + + Finally, if you would like to plot and visualize your results, this + class can generate csv files from emission files produced by sumo. These + files will contain the speeds, positions, edges, etc... of every vehicle + in the network at every time step. + + In order to ensure that the simulator constructs an emission file, set the + ``emission_path`` attribute in ``SimParams`` to some path. + + >>> from flow.core.params import SimParams + >>> flow_params['sim'] = SimParams(emission_path="./data") + + Once you have included this in your environment, run your Experiment object + as follows: + + >>> exp.run(num_runs=1, convert_to_csv=True) + + After the experiment is complete, look at the "./data" directory. There + will be two files, one with the suffix .xml and another with the suffix + .csv. The latter should be easily interpretable from any csv reader (e.g. + Excel), and can be parsed using tools such as numpy and pandas. + + Attributes + ---------- + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we want + to extract from the environment. The lambda will be called at each step + to extract information from the env and it will be stored in a dict + keyed by the str. + env : flow.envs.Env + the environment object the simulator will run + """ + + def __init__(self, flow_params, custom_callables=None): + """Instantiate the Experiment class. + + Parameters + ---------- + flow_params : dict + flow-specific parameters + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we + want to extract from the environment. The lambda will be called at + each step to extract information from the env and it will be stored + in a dict keyed by the str. + """ + self.custom_callables = custom_callables or {} + + # Get the env name and a creator for the environment. + create_env, _ = make_create_env(flow_params) + + # Create the environment. + self.env = create_env() + + logging.info(" Starting experiment {} at {}".format( + self.env.network.name, str(datetime.datetime.utcnow()))) + + logging.info("Initializing environment.") + + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): + """Run the given network for a set number of runs. + + Parameters + ---------- + num_runs : int + number of runs the experiment should perform + rl_actions : method, optional + maps states to actions to be performed by the RL agents (if + there are any) + convert_to_csv : bool + Specifies whether to convert the emission file created by sumo + into a csv file + partition_name: str + Specifies the S3 partition you want to store the output file, + will be used later for queries. If None, won't upload output + to S3.
+ only_query: str + Specifies whether queries should be automatically run on the + simulation data when it gets uploaded to S3 + + Returns + ------- + info_dict : dict < str, Any > + contains returns, average speed per step + """ + num_steps = self.env.env_params.horizon + + # raise an error if convert_to_csv is set to True but no emission + # file will be generated, to avoid getting an error at the end of the + # simulation + if convert_to_csv and self.env.sim_params.emission_path is None: + raise ValueError( + 'The experiment was run with convert_to_csv set ' + 'to True, but no emission file will be generated. If you wish ' + 'to generate an emission file, you should set the parameter ' + 'emission_path in the simulation parameters (SumoParams or ' + 'AimsunParams) to the path of the folder where emissions ' + 'output should be generated. If you do not wish to generate ' + 'emissions, set the convert_to_csv parameter to False.') + + # used to store the results from each run + info_dict = { + "returns": [], + "velocities": [], + "outflows": [], + } + info_dict.update({ + key: [] for key in self.custom_callables.keys() + }) + + if rl_actions is None: + def rl_actions(*_): + return None + + # time profiling information + t = time.time() + times = [] + extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], + "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], + "road_grade": [], "source_id": []} + source_id = uuid.uuid4().hex + + for i in range(num_runs): + ret = 0 + vel = [] + custom_vals = {key: [] for key in self.custom_callables.keys()} + state = self.env.reset() + for j in range(num_steps): + t0 = time.time() + state, reward, done, _ = self.env.step(rl_actions(state)) + t1 = time.time() + times.append(1 / (t1 - t0)) + + # Compute the mean vehicle speeds and cumulative returns. + veh_ids = self.env.k.vehicle.get_ids() + vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) + ret += reward + + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( + self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + position = self.env.k.vehicle.get_2D_position(vid) + extra_info["x"].append(position[0]) + extra_info["y"].append(position[1]) + extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) + extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) + + # Compute the results for the custom callables. + for (key, lambda_func) in self.custom_callables.items(): + custom_vals[key].append(lambda_func(self.env)) + + if done: + break + + # Store the information from the run in info_dict.
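For clarity, an editorial sketch of the naming scheme introduced above: source_id tags every collected row with the experiment and rollout it came from, and also names the local trajectory file used just below:

    import uuid

    source_id = uuid.uuid4().hex  # one id per Experiment.run call
    run_tags = [source_id + "run" + str(i) for i in range(2)]
    # e.g. ['3f2a...run0', '3f2a...run1']: one tag per rollout, matching the
    # extra_info["source_id"] column; './data/' + source_id + '.csv' is the
    # local trajectory table path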
+ outflow = self.env.k.vehicle.get_outflow_rate(int(500)) + info_dict["returns"].append(ret) + info_dict["velocities"].append(np.mean(vel)) + info_dict["outflows"].append(outflow) + for key in custom_vals.keys(): + info_dict[key].append(np.mean(custom_vals[key])) + + print("Round {0}, return: {1}".format(i, ret)) + + # Print the averages/std for all variables in the info_dict. + for key in info_dict.keys(): + print("Average, std {}: {}, {}".format( + key, np.mean(info_dict[key]), np.std(info_dict[key]))) + + print("Total time:", time.time() - t) + print("steps/second:", np.mean(times)) + self.env.terminate() + + if convert_to_csv and self.env.simulator == "traci": + # wait a short period of time to ensure the xml file is readable + time.sleep(0.1) + + # collect the location of the emission file + dir_path = self.env.sim_params.emission_path + emission_filename = \ + "{0}-emission.xml".format(self.env.network.name) + emission_path = os.path.join(dir_path, emission_filename) + + # convert the emission file into a csv + emission_to_csv(emission_path) + + # Delete the .xml version of the emission file. + os.remove(emission_path) + + trajectory_table_path = './data/' + source_id + ".csv" + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) + + # delete the S3-only version of the trajectory file + os.remove(upload_file_path) + + return info_dict From 29ebdb70d4ab1203edfee65d9d50bb03785ea235 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:35:54 -0700 Subject: [PATCH 058/438] fix style issue --- examples/data_pipeline.py | 113 ++++++++++++++++-------- examples/datapipeline_test.py | 4 + examples/lambda_function.py | 10 +++ examples/query.py | 11 ++- examples/run_query.py | 1 + flow/controllers/routing_controllers.py | 1 + flow/core/kernel/vehicle/base.py | 12 ++- flow/core/kernel/vehicle/traci.py | 5 ++ 8 files changed, 111 insertions(+), 46 deletions(-) diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 28d3b5e73..03b0f87e5 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,3 +1,4 @@ +"""contains class and helper functions for the data pipeline.""" import pandas as pd import numpy as np import boto3 @@ -7,21 +8,21 @@ def generate_trajectory_table(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based on standard SUMO emission + """Generate desired output for the trajectory_table based on standard SUMO emission. 
- Parameters - ---------- - data_path : str - path to the standard SUMO emission - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file - """ + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output in + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ raw_output = pd.read_csv(data_path, index_col=["time", "id"]) required_cols = {"time", "id", "speed", "x", "y"} raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) @@ -39,24 +40,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based only on flow output - - Parameters - ---------- - data_path : str - output file path - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file that should be used for - upload to s3 only, it does not the human readable column names and - will be deleted after uploading to s3. A copy of this file with all - the column name will remain in the ./data folder - """ + """Generate desired output for the trajectory_table based only on flow output. + + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output in + Returns + ------- + output_file_path: str + the local path of the outputted csv file that should be used for + upload to s3 only, it does not have the human readable column names and + will be deleted after uploading to s3. A copy of this file with all + the column names will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -66,7 +67,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): def upload_to_s3(bucket_name, bucket_key, file_path, only_query): - """ upload a file to S3 bucket + """Upload a file to S3 bucket. Parameters ---------- @@ -89,15 +90,40 @@ class AthenaQuery: + """ + Class used to run queries. + + Acts as a query engine that maintains an open session with AWS Athena. + + Attributes + ---------- + MAX_WAIT: int + maximum number of seconds to wait before declaring a time-out + client: boto3.client + the athena client that is used to run the query + existing_partitions: list + a list of partitions that are already recorded in Athena's data catalog, + this is obtained through a query at the initialization of this class + instance. + """ def __init__(self): + """Initialize AthenaQuery instance. + + Initialize a client session with AWS Athena and + query it to obtain existing_partitions.
+ """ self.MAX_WAIT = 60 self.client = boto3.client("athena") self.existing_partitions = self.get_existing_partitions() def get_existing_partitions(self): - """prints the existing partitions in the S3 bucket""" + """Return the existing partitions in the S3 bucket. + Returns + ------- + partitions: a list of existing partitions on S3 bucket + """ response = self.client.start_query_execution( QueryString='SHOW PARTITIONS trajectory_table', QueryExecutionContext={ @@ -114,7 +140,7 @@ def get_existing_partitions(self): return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] def check_status(self, execution_id): - """ Return the status of the execution with given id + """Return the status of the execution with given id. Parameters ---------- @@ -125,14 +151,13 @@ def check_status(self, execution_id): status: str QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED """ - response = self.client.get_query_execution( QueryExecutionId=execution_id ) return response['QueryExecution']['Status']['State'] def wait_for_execution(self, execution_id): - """ wait for the execution to finish or time-out + """Wait for the execution to finish or time-out. Parameters ---------- @@ -156,7 +181,7 @@ def wait_for_execution(self, execution_id): return True def update_partition(self, partition): - """ load the given partition to the trajectory_table on Athena + """Load the given partition to the trajectory_table on Athena. Parameters ---------- @@ -176,7 +201,7 @@ def update_partition(self, partition): return def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): - """ start the execution of a query, does not wait for it to finish + """Start the execution of a query, does not wait for it to finish. Parameters ---------- @@ -218,6 +243,16 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re def test_sql_query(query_name): + """Start the execution of a query, does not wait for it to finish. 
+ + Parameters + ---------- + query_name : str + name of the query in QueryStrings enum that will be tested + Raises + ------ + RuntimeError: if timeout + """ if query_name not in testing_functions: raise ValueError("no tests supported for this query") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py index 564060d3b..ae0ea382f 100644 --- a/examples/datapipeline_test.py +++ b/examples/datapipeline_test.py @@ -1,3 +1,4 @@ +"""functions that calculate the expected result for testing.""" import math # Vehicle Mass @@ -17,10 +18,12 @@ def heavyside(inp): + """Return 1 if the input is positive, else 0.""" return 0 if inp <= 0 else 1 def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + """Calculate the expected power for the POWER_DEMAND_MODEL query.""" acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) rolling_friction = M * g * C_r * mu @@ -30,4 +33,5 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): + """Apply the power calculation to a row of the dataframe.""" return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/lambda_function.py b/examples/lambda_function.py index 01ce1512a..4f7937c85 100644 --- a/examples/lambda_function.py +++ b/examples/lambda_function.py @@ -1,3 +1,4 @@ +"""lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus from examples.data_pipeline import AthenaQuery @@ -8,6 +9,15 @@ def lambda_handler(event, context): + """Invoked by AWS Lambda when triggered by an event. + + Parameters + ---------- + event : dict < str: dict > + an S3 event + context: + not used + """ for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) diff --git a/examples/query.py b/examples/query.py index 6354cec3b..0f0ee13b4 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,15 +1,20 @@ +"""stores all the pre-defined query strings.""" from enum import Enum from examples.datapipeline_test import apply_energy_one -tags = {"energy": ["ENERGY_ONE"]} +# tags for different queries +tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} -testing_functions = {"ENERGY_ONE": apply_energy_one} +# specify the function to calculate the expected result of each query +testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} class QueryStrings(Enum): + """An enumeration of all the pre-defined query strings.""" + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ diff --git a/examples/run_query.py b/examples/run_query.py index ea8839b09..64baa6656 100644 --- a/examples/run_query.py +++ b/examples/run_query.py @@ -1,3 +1,4 @@ +"""runner script for invoking queries manually.""" import argparse from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings diff --git a/flow/controllers/routing_controllers.py
b/flow/controllers/routing_controllers.py index c880b5bbf..24f8af3f3 100755 --- a/flow/controllers/routing_controllers.py +++ b/flow/controllers/routing_controllers.py @@ -127,6 +127,7 @@ def choose_route(self, env): class I210Router(ContinuousRouter): """Assists in choosing routes in select cases for the Bay Bridge network. + Extension to the Continuous Router. Usage ----- diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index f22a4ead3..351f95405 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -676,17 +676,21 @@ def get_max_speed(self, veh_id, error): ########################################################################### def get_accel(self, veh_id): - """ see traci class """ + """Return the acceleration of vehicle with veh_id.""" raise NotImplementedError def update_accel_without_noise(self, veh_id, accel_without_noise): - """ see traci class """ + """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError def get_2D_position(self, veh_id, error=-1001): - """ see traci class """ + """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError def get_accel_without_noise(self, veh_id): - """ see traci class """ + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_road_grade(self, veh_id): + """Return the road-grade of the vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 2c8d3173f..79221c09d 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1114,17 +1114,22 @@ def set_max_speed(self, veh_id, max_speed): # add for data pipeline def get_accel(self, veh_id): + """See parent class.""" return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step def update_accel_without_noise(self, veh_id, accel_without_noise): + """See parent class.""" self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise def get_accel_without_noise(self, veh_id): + """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] def get_2D_position(self, veh_id, error=-1001): + """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) def get_road_grade(self, veh_id): + """See parent class.""" # TODO return 0 From 23783bd6e70f471189c929086be0a5e0a18e7797 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:38:47 -0700 Subject: [PATCH 059/438] reorganized file locations --- {examples => flow/data_pipeline}/data_pipeline.py | 0 {examples => flow/data_pipeline}/datapipeline_test.py | 0 {examples => flow/data_pipeline}/lambda_function.py | 0 {examples => flow/data_pipeline}/query.py | 0 {examples => flow/data_pipeline}/run_query.py | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename {examples => flow/data_pipeline}/data_pipeline.py (100%) rename {examples => flow/data_pipeline}/datapipeline_test.py (100%) rename {examples => flow/data_pipeline}/lambda_function.py (100%) rename {examples => flow/data_pipeline}/query.py (100%) rename {examples => flow/data_pipeline}/run_query.py (100%) diff --git a/examples/data_pipeline.py b/flow/data_pipeline/data_pipeline.py similarity index 100% rename from examples/data_pipeline.py rename to flow/data_pipeline/data_pipeline.py diff --git a/examples/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py similarity index 100% rename from examples/datapipeline_test.py 
rename to flow/data_pipeline/datapipeline_test.py diff --git a/examples/lambda_function.py b/flow/data_pipeline/lambda_function.py similarity index 100% rename from examples/lambda_function.py rename to flow/data_pipeline/lambda_function.py diff --git a/examples/query.py b/flow/data_pipeline/query.py similarity index 100% rename from examples/query.py rename to flow/data_pipeline/query.py diff --git a/examples/run_query.py b/flow/data_pipeline/run_query.py similarity index 100% rename from examples/run_query.py rename to flow/data_pipeline/run_query.py From 3bd49eca1b39d998abbc2c4fbbbb737dd58786cc Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:58:44 -0700 Subject: [PATCH 060/438] fix some more style issues --- examples/simulate.py | 3 ++- flow/controllers/base_controller.py | 10 +++++----- flow/controllers/routing_controllers.py | 1 + flow/core/experiment.py | 3 ++- flow/core/kernel/vehicle/base.py | 2 +- flow/core/kernel/vehicle/traci.py | 2 +- flow/data_pipeline/__init__.py | 1 + flow/data_pipeline/data_pipeline.py | 3 +-- flow/data_pipeline/datapipeline_test.py | 2 +- flow/data_pipeline/lambda_function.py | 2 +- flow/data_pipeline/query.py | 12 ++++++------ flow/data_pipeline/run_query.py | 8 ++++---- flow/envs/base.py | 1 - 13 files changed, 26 insertions(+), 24 deletions(-) create mode 100644 flow/data_pipeline/__init__.py diff --git a/examples/simulate.py b/examples/simulate.py index 69e11b2fb..86d14aa14 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -97,4 +97,5 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. - exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, only_query=flags.only_query) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, + only_query=flags.only_query) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 6e6734764..7adcdf310 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -110,18 +110,18 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested - accel_without_noice = accel + accel_without_noise = accel if self.fail_safe == 'instantaneous': - accel_without_noice = self.get_safe_action_instantaneous(env, accel_without_noice) + accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) elif self.fail_safe == 'safe_velocity': - accel_without_noice = self.get_safe_velocity_action(env, accel_without_noice) - env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noice) + accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) - # run the failsafes, if requested + # run the fail-safes, if requested if self.fail_safe == 'instantaneous': accel = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py index 24f8af3f3..18d6c1842 100755 --- a/flow/controllers/routing_controllers.py +++ b/flow/controllers/routing_controllers.py @@ -125,6 +125,7 @@ def choose_route(self, env): return new_route + class I210Router(ContinuousRouter): """Assists in choosing routes in select 
cases for the Bay Bridge network. diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 80d607e7d..aa5028836 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -231,7 +231,8 @@ def rl_actions(*_): if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 351f95405..18d7b98a1 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -683,7 +683,7 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError - def get_2D_position(self, veh_id, error=-1001): + def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 79221c09d..6dd4077b9 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1125,7 +1125,7 @@ def get_accel_without_noise(self, veh_id): """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] - def get_2D_position(self, veh_id, error=-1001): + def get_2d_position(self, veh_id, error=-1001): """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) diff --git a/flow/data_pipeline/__init__.py b/flow/data_pipeline/__init__.py new file mode 100644 index 000000000..622e09b06 --- /dev/null +++ b/flow/data_pipeline/__init__.py @@ -0,0 +1 @@ +"""Empty init file to ensure that data_pipeline is recognized as a package""" diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 03b0f87e5..afbc09f92 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -2,8 +2,7 @@ import pandas as pd import numpy as np import boto3 -from botocore.exceptions import ClientError -from examples.query import QueryStrings, testing_functions +from flow.data_pipeline.query import QueryStrings, testing_functions from time import time diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py index ae0ea382f..0e1a50518 100644 --- a/flow/data_pipeline/datapipeline_test.py +++ b/flow/data_pipeline/datapipeline_test.py @@ -34,4 +34,4 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): """Apply the power calculation to a row of the dataframe.""" - return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file + return [row[0], row[1], calculate_power(row[4], row[6])] diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 4f7937c85..afef55a4b 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', 
partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 0f0ee13b4..af1b51ce7 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -1,6 +1,6 @@ """stores all the pre-defined query strings.""" from enum import Enum -from examples.datapipeline_test import apply_energy_one +from flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} @@ -15,8 +15,8 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ - "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" \ No newline at end of file + "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index 64baa6656..f065a726e 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -1,10 +1,10 @@ """runner script for invoking query manually.""" import argparse -from examples.data_pipeline import AthenaQuery, test_sql_query -from examples.query import QueryStrings +from flow.data_pipeline.data_pipeline import AthenaQuery, test_sql_query +from flow.data_pipeline.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" - "a S3 location") + "a S3 location") parser.add_argument("--run", type=str, nargs="+") parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") parser.add_argument("--partition", type=str, nargs='?', default="default") @@ -34,4 +34,4 @@ for q in QueryStrings: print(q) if args.test_query: - test_sql_query(args.test_query[0]) \ No newline at end of file + test_sql_query(args.test_query[0]) diff --git a/flow/envs/base.py b/flow/envs/base.py index f033514ff..e7ad57fde 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -417,7 +417,6 @@ def step(self, rl_actions): "**********************************************************" ) - # compute the info for each agent infos = {} From 6335dd847ef95b4e672616f27293bd612f8f6e1c Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 13:02:33 -0700 Subject: [PATCH 061/438] fix one more style issue --- flow/data_pipeline/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/__init__.py b/flow/data_pipeline/__init__.py index 622e09b06..d9d6a6573 100644 --- a/flow/data_pipeline/__init__.py +++ b/flow/data_pipeline/__init__.py @@ -1 +1 @@ -"""Empty init file to ensure that data_pipeline is recognized as a package""" +"""Empty init file to ensure that data_pipeline is recognized as a package.""" From 5d5606acad5b7f60c2eed3a3c67060d465d75733 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sat, 9 May 2020 22:06:30 -0700 
Subject: [PATCH 062/438] added two new queries --- flow/core/experiment.py | 4 ++-- flow/core/kernel/vehicle/base.py | 4 ++++ flow/core/kernel/vehicle/traci.py | 4 ++++ flow/data_pipeline/query.py | 38 ++++++++++++++++++++++++++++++- 4 files changed, 47 insertions(+), 3 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index aa5028836..37fcb03af 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,7 +1,7 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env -from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time @@ -178,7 +178,7 @@ def rl_actions(*_): self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - position = self.env.k.vehicle.get_2D_position(vid) + position = self.env.k.vehicle.get_2d_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 18d7b98a1..cb547cddb 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -691,6 +691,10 @@ def get_accel_without_noise(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError + def get_velocity_without_noise(self, veh_id): + """Return the velocity without noise of vehicle with veh_id.""" + raise NotImplementedError + def get_road_grade(self, veh_id): """Return the road-grade of the vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 6dd4077b9..c52cc2f22 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1125,6 +1125,10 @@ def get_accel_without_noise(self, veh_id): """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] + def get_velocity_without_noise(self, veh_id): + """See parent class.""" + return max([self.get_speed(veh_id) + self.get_accel_without_noise(veh_id) * self.sim_step, 0]) + def get_2d_position(self, veh_id, error=-1001): """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index af1b51ce7..0c87b3dcc 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -3,7 +3,8 @@ from flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} +tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], + "analysis": ["POWER_DEMAND_MODEL"]} # specify the function to calculate the expected result of each query testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} @@ -20,3 +21,38 @@ class QueryStrings(Enum): "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ "FROM trajectory_table " \ "WHERE partition_name=\'{partition}\'" + POWER_DEMAND_MODEL_DENOISED_ACCEL = \ + "SELECT id, time, 1200 * speed * " \ + "((CASE WHEN accel_without_noise > 0 THEN 1
ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" + POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = \ + "WITH sub1 AS ( " \ + "SELECT" \ + "time, id, speed, acceleration, accel_without_noise, road_grade, source_id," \ + "time - LAG(time, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step," \ + "LAG(speed, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed," \ + "LAG(acceleration, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel," \ + "LAG(accel_without_noise, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised" \ + "FROM trajectory_table" \ + "WHERE partition_name=\'{partition}\'" \ + ")," \ + "sub2 AS (" \ + "SELECT time, id, speed, acceleration, accel_without_noise, " \ + "road_grade, source_id, " \ + "speed-prev_accel*sim_step+prev_accel_denoised*sim_step AS speed_denoised" \ + "FROM sub1" \ + ") " \ + "SELECT id, time, speed_denoised, accel_without_noise," \ + "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ + "THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 " \ + "* SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 " \ + "* 2.6 * 0.3 * POW(speed_denoised,3) AS power, " \ + "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id" \ + "FROM sub2 " From bdd6068b9326f984b886037f9572b01013df2e05 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sun, 10 May 2020 23:03:35 -0700 Subject: [PATCH 063/438] including next_V for testing only --- flow/core/experiment.py | 1 + flow/core/kernel/vehicle/traci.py | 15 ++++++++++- flow/data_pipeline/query.py | 41 ++++++++++++++++--------------- 3 files changed, 36 insertions(+), 21 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 37fcb03af..8b5cbac02 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -182,6 +182,7 @@ def rl_actions(*_): extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) + #extra_info["next_v"].append(self.env.k.vehicle.get_next_v(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. 
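A minimal plain-Python sketch of the per-row arithmetic that the POWER_DEMAND_MODEL family of queries performs (the function and constant names here are ours, but the numeric values are the ones hard-coded in the SQL strings above; passing accel_without_noise in place of the raw acceleration gives the DENOISED_ACCEL variant):

import math

M, G, C_R = 1200, 9.81, 0.005   # vehicle mass (kg), gravity (m/s^2), rolling resistance
RHO, A, C_A = 1.225, 2.6, 0.3   # air density (kg/m^3), frontal area (m^2), drag coefficient


def power_demand(speed, accel, road_grade):
    """Power (W) for one trajectory row, mirroring the SQL expression."""
    # positive accelerations are scaled by (1 - 0.8); the constant 0.8 term
    # is always added, matching the CASE WHEN clause in the queries
    gated = (1 if accel > 0 else 0) * (1 - 0.8) * accel + 0.8
    return (M * speed * (gated + G * math.sin(road_grade))
            + M * G * C_R * speed
            + 0.5 * RHO * A * C_A * speed ** 3)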
diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index c52cc2f22..2fd978012 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -949,8 +949,10 @@ def apply_acceleration(self, veh_ids, acc): for i, vid in enumerate(veh_ids): if acc[i] is not None and vid in self.get_ids(): + self.__vehicles[vid]["accel"] = acc[i] this_vel = self.get_speed(vid) next_vel = max([this_vel + acc[i] * self.sim_step, 0]) + #self.__vehicles[vid]["next_v"] = next_vel self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3) def apply_lane_change(self, veh_ids, direction): @@ -1113,9 +1115,18 @@ def set_max_speed(self, veh_id, max_speed): self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) # add for data pipeline + def get_next_v(self, veh_id): + """See parent class.""" + if not "next_v" in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["next_v"] = None + return self.__vehicles[veh_id]["next_v"] + #return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + def get_accel(self, veh_id): """See parent class.""" - return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + if not "accel" in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel"] = None + return self.__vehicles[veh_id]["accel"] def update_accel_without_noise(self, veh_id, accel_without_noise): """See parent class.""" @@ -1123,6 +1134,8 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): """See parent class.""" + if not "accel_without_noise" in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_without_noise"] = None return self.__vehicles[veh_id]["accel_without_noise"] def get_velocity_without_noise(self, veh_id): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 0c87b3dcc..9054364e6 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -15,44 +15,45 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ + POWER_DEMAND_MODEL = "SELECT id, time, speed, acceleration, 1200 * speed * " \ "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL' AS energy_model_id, source_id " \ "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" + "WHERE partition_name=\'{partition}\' " \ + "ORDER BY id, time " POWER_DEMAND_MODEL_DENOISED_ACCEL = \ - "SELECT id, time, 1200 * speed * " \ + "SELECT id, time, speed, accel_without_noise, 1200 * speed * " \ "((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id " \ "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" + "WHERE partition_name=\'{partition}\' " \ + "ORDER BY id, time " POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = \ "WITH sub1 AS ( " \ - "SELECT" \ - "time, id, speed, acceleration, accel_without_noise, road_grade, 
source_id," \ + "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ "time - LAG(time, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step," \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, " \ "LAG(speed, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed," \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, " \ "LAG(acceleration, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel," \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, " \ "LAG(accel_without_noise, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised" \ - "FROM trajectory_table" \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised "\ + "FROM trajectory_table " \ "WHERE partition_name=\'{partition}\'" \ ")," \ "sub2 AS (" \ - "SELECT time, id, speed, acceleration, accel_without_noise, " \ - "road_grade, source_id, " \ - "speed-prev_accel*sim_step+prev_accel_denoised*sim_step AS speed_denoised" \ + "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ + "prev_speed+accel_without_noise*sim_step AS speed_denoised " \ "FROM sub1" \ ") " \ - "SELECT id, time, speed_denoised, accel_without_noise," \ - "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ + "SELECT id, time, speed_denoised, accel_without_noise, " \ + "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ "THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 " \ "* SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 " \ "* 2.6 * 0.3 * POW(speed_denoised,3) AS power, " \ - "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id" \ - "FROM sub2 " + "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id " \ + "FROM sub2 " \ + "ORDER BY id, time " From 3468747c4f824fcefcfc7b80ad3a695b4e8ae5d3 Mon Sep 17 00:00:00 2001 From: Kanaad Parvate Date: Thu, 14 May 2020 11:51:26 -0700 Subject: [PATCH 064/438] Replay Improvement / Fixes (#905) * added aggressive driver and made modifications to replay scripts * add numpy import * some more small changes and cleanup * remove aggressive driver * added distribution plots * Fixed minor but common matplotlib error * merge Co-authored-by: Kathy Jang --- .../rl/multiagent/multiagent_i210.py | 14 +- flow/controllers/velocity_controllers.py | 20 +-- flow/core/experiment.py | 2 +- flow/core/kernel/simulation/traci.py | 5 + flow/core/kernel/vehicle/traci.py | 4 +- flow/core/params.py | 6 +- flow/core/rewards.py | 28 +++ flow/envs/base.py | 2 +- flow/envs/multiagent/base.py | 2 +- flow/utils/rllib.py | 2 +- flow/visualize/i210_replay.py | 160 +++++++++++++++--- flow/visualize/plot_custom_callables.py | 42 ++++- flow/visualize/transfer/util.py | 56 +++--- 13 files changed, 264 insertions(+), 79 deletions(-) diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index b74f64027..7176abb06 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -9,7 +9,7 @@ from ray.tune.registry import register_env from flow.controllers import RLController 
-from flow.controllers.car_following_models import IDMController +from flow.controllers.car_following_models import IDMController, SimCarFollowingController import flow.config as config from flow.core.params import EnvParams from flow.core.params import NetParams @@ -18,6 +18,7 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams +from flow.core.params import SumoCarFollowingParams from flow.core.rewards import energy_consumption from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS @@ -57,11 +58,13 @@ num_vehicles=0, lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic"), acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), + car_following_params=SumoCarFollowingParams(speed_mode="no_collide"), ) vehicles.add( "av", acceleration_controller=(RLController, {}), num_vehicles=0, + color='red' ) inflow = InFlows() @@ -140,7 +143,8 @@ render=False, color_by_speed=False, restart_instance=True, - use_ballistic=True + use_ballistic=True, + disable_collisions=True ), # environment related parameters (see flow.core.params.EnvParams) @@ -195,7 +199,7 @@ def policy_mapping_fn(_): custom_callables = { "avg_speed": lambda env: np.mean([speed for speed in env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]), - "avg_outflow": lambda env: np.nan_to_num( - env.k.vehicle.get_outflow_rate(120)), - "avg_energy": lambda env: -1*energy_consumption(env, 0.1) + "avg_outflow": lambda env: np.nan_to_num(env.k.vehicle.get_outflow_rate(120)), + "avg_energy": lambda env: -1*energy_consumption(env, 0.1), + "avg_per_step_energy": lambda env: -1*energy_consumption(env, 0.1) / env.k.vehicle.num_vehicles, } diff --git a/flow/controllers/velocity_controllers.py b/flow/controllers/velocity_controllers.py index 2e4b7c22a..c3da6136d 100644 --- a/flow/controllers/velocity_controllers.py +++ b/flow/controllers/velocity_controllers.py @@ -25,11 +25,10 @@ class FollowerStopper(BaseController): def __init__(self, veh_id, car_following_params, - v_des=15, - danger_edges=None): + v_des=15): """Instantiate FollowerStopper.""" BaseController.__init__( - self, veh_id, car_following_params, delay=1.0, + self, veh_id, car_following_params, delay=0.0, fail_safe='safe_velocity') # desired speed of the vehicle @@ -45,7 +44,6 @@ def __init__(self, self.d_1 = 1.5 self.d_2 = 1.0 self.d_3 = 0.5 - self.danger_edges = danger_edges if danger_edges else {} def find_intersection_dist(self, env): """Find distance to intersection. 
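The get_accel hunk that follows converts the commanded velocity v_cmd into an acceleration bounded by the vehicle's dynamic limits. As a standalone sketch (the function name is ours, and max_accel and max_deaccel are assumed to come from the controller's car-following parameters):

import numpy as np


def accel_from_v_cmd(v_cmd, this_vel, sim_step, max_accel, max_deaccel):
    """Acceleration that reaches v_cmd in one step, clipped to dynamic limits."""
    return np.clip((v_cmd - this_vel) / sim_step, -abs(max_deaccel), max_accel)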
@@ -106,14 +104,9 @@ def get_accel(self, env): if edge == "": return None - - if self.find_intersection_dist(env) <= 10 and \ - env.k.vehicle.get_edge(self.veh_id) in self.danger_edges or \ - env.k.vehicle.get_edge(self.veh_id)[0] == ":": - return None else: # compute the acceleration from the desired velocity - return (v_cmd - this_vel) / env.sim_step + return np.clip((v_cmd - this_vel) / env.sim_step, -np.abs(self.max_deaccel), self.max_accel) class NonLocalFollowerStopper(FollowerStopper): @@ -154,11 +147,6 @@ def get_accel(self, env): if edge == "": return None - - if self.find_intersection_dist(env) <= 10 and \ - env.k.vehicle.get_edge(self.veh_id) in self.danger_edges or \ - env.k.vehicle.get_edge(self.veh_id)[0] == ":": - return None else: # compute the acceleration from the desired velocity return (v_cmd - this_vel) / env.sim_step @@ -184,7 +172,7 @@ class PISaturation(BaseController): def __init__(self, veh_id, car_following_params): """Instantiate PISaturation.""" - BaseController.__init__(self, veh_id, car_following_params, delay=1.0) + BaseController.__init__(self, veh_id, car_following_params, delay=0.0) # maximum achievable acceleration by the vehicle self.max_accel = car_following_params.controller_params['accel'] diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 69a78cb0e..a0497b595 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -157,7 +157,7 @@ def rl_actions(*_): for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) - if done: + if type(done) is dict and done['__all__'] or type(done) is not dict and done: break # Store the information from the run in info_dict. diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py index 0ee29ada6..0accdeddf 100644 --- a/flow/core/kernel/simulation/traci.py +++ b/flow/core/kernel/simulation/traci.py @@ -91,6 +91,11 @@ def start_simulation(self, network, sim_params): "--step-length", str(sim_params.sim_step) ] + # disable all collisions and teleporting in the simulation. 
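+ # (a mingap-factor of 0 registers a collision only on actual physical
+ # overlap, and collision.action "none" leaves colliding vehicles in the
+ # network rather than teleporting or removing them)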
+ if sim_params.disable_collisions: + sumo_call.extend(["--collision.mingap-factor", str(0), + "--collision.action", str("none")]) + # use a ballistic integration step (if request) if sim_params.use_ballistic: sumo_call.append("--step-method.ballistic") diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 22dcc8837..14720cdce 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -727,7 +727,7 @@ def _multi_lane_headways(self): for lane in range(max_lanes): edge_dict[edge][lane].sort(key=lambda x: x[1]) - for veh_id in self.get_rl_ids(): + for veh_id in self.get_ids(): # collect the lane leaders, followers, headways, and tailways for # each vehicle edge = self.get_edge(veh_id) @@ -970,7 +970,7 @@ def apply_lane_change(self, veh_ids, direction): # perform the requested lane action action in TraCI if target_lane != this_lane: self.kernel_api.vehicle.changeLane( - veh_id, int(target_lane), 100000) + veh_id, int(target_lane), self.sim_step) if veh_id in self.get_rl_ids(): self.prev_last_lc[veh_id] = \ diff --git a/flow/core/params.py b/flow/core/params.py index afead7017..0527b33c2 100755 --- a/flow/core/params.py +++ b/flow/core/params.py @@ -568,6 +568,8 @@ class SumoParams(SimParams): current time step use_ballistic: bool, optional If true, use a ballistic integration step instead of an euler step + disable_collisions: bool, optional + If true, disables explicit collision checking and teleporting in SUMO """ def __init__(self, @@ -589,7 +591,8 @@ def __init__(self, teleport_time=-1, num_clients=1, color_by_speed=False, - use_ballistic=False): + use_ballistic=False, + disable_collisions=False): """Instantiate SumoParams.""" super(SumoParams, self).__init__( sim_step, render, restart_instance, emission_path, save_render, @@ -604,6 +607,7 @@ def __init__(self, self.num_clients = num_clients self.color_by_speed = color_by_speed self.use_ballistic = use_ballistic + self.disable_collisions = disable_collisions class EnvParams: diff --git a/flow/core/rewards.py b/flow/core/rewards.py index 6de472af2..60760f357 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -322,6 +322,8 @@ def energy_consumption(env, gain=.001): rho = 1.225 # air density (kg/m^3) A = 2.6 # vehicle cross sectional area (m^2) for veh_id in env.k.vehicle.get_ids(): + if veh_id not in env.k.vehicle.previous_speeds: + continue speed = env.k.vehicle.get_speed(veh_id) prev_speed = env.k.vehicle.get_previous_speed(veh_id) @@ -330,3 +332,29 @@ def energy_consumption(env, gain=.001): power += M * speed * accel + M * g * Cr * speed + 0.5 * rho * A * Ca * speed ** 3 return -gain * power + +def vehicle_energy_consumption(env, veh_id, gain=.001): + """Calculate power consumption of a vehicle. + + Assumes vehicle is an average sized vehicle. + The power calculated here is the lower bound of the actual power consumed + by a vehicle. 
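+ Returns 0 for vehicles that do not yet have a recorded previous speed.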
+ """ + power = 0 + + M = 1200 # mass of average sized vehicle (kg) + g = 9.81 # gravitational acceleration (m/s^2) + Cr = 0.005 # rolling resistance coefficient + Ca = 0.3 # aerodynamic drag coefficient + rho = 1.225 # air density (kg/m^3) + A = 2.6 # vehicle cross sectional area (m^2) + if veh_id not in env.k.vehicle.previous_speeds: + return 0 + speed = env.k.vehicle.get_speed(veh_id) + prev_speed = env.k.vehicle.get_previous_speed(veh_id) + + accel = abs(speed - prev_speed) / env.sim_step + + power += M * speed * accel + M * g * Cr * speed + 0.5 * rho * A * Ca * speed ** 3 + + return -gain * power diff --git a/flow/envs/base.py b/flow/envs/base.py index adc959b9a..baf8270b5 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -385,7 +385,7 @@ def step(self, rl_actions): # crash encodes whether the simulator experienced a collision crash = self.k.simulation.check_collision() - + self.crash = crash # stop collecting new simulation steps if there is a collision if crash: break diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index 126107b00..594fb2fdb 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -104,7 +104,7 @@ def step(self, rl_actions): # crash encodes whether the simulator experienced a collision crash = self.k.simulation.check_collision() - + self.crash = crash # stop collecting new simulation steps if there is a collision if crash: print('A CRASH! A CRASH!!!!!! AAAAAAAAAH!!!!!') diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py index ca8072c85..e3404a61f 100644 --- a/flow/utils/rllib.py +++ b/flow/utils/rllib.py @@ -146,7 +146,7 @@ def get_flow_params(config): if flow_params["net"]["inflows"]: net.inflows.__dict__ = flow_params["net"]["inflows"].copy() - if net.template and len(net.template) > 0: + if net.template is not None and len(net.template) > 0: dirname = os.getcwd() filename = os.path.join(dirname, '../../examples') split = net.template.split('examples')[1][1:] diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 502d646aa..ac4cc031d 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -1,10 +1,13 @@ """Transfer and replay for i210 environment.""" import argparse +from datetime import datetime from collections import defaultdict from copy import deepcopy import numpy as np import json import os +import pytz +import subprocess import time import ray @@ -14,15 +17,16 @@ from ray.rllib.agents.registry import get_agent_class from ray.tune.registry import register_env -from flow.core.util import emission_to_csv +from flow.core.util import emission_to_csv, ensure_dir +from flow.core.rewards import vehicle_energy_consumption from flow.utils.registry import make_create_env from flow.utils.rllib import get_flow_params from flow.utils.rllib import get_rllib_config from flow.utils.rllib import get_rllib_pkl from flow.utils.rllib import FlowParamsEncoder - from flow.visualize.transfer.util import inflows_range +from flow.visualize.plot_custom_callables import plot_trip_distribution from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables @@ -38,8 +42,8 @@ 2 - the number of the checkpoint """ - -def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=None, result_dir=None): +@ray.remote +def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=None, result_dir=None, max_completed_trips=None, 
v_des=12): """Replay or run transfer test (defined by transfer_fn) by modif. Arguments: @@ -55,8 +59,9 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= """ assert bool(args.controller) ^ bool(rllib_config), \ "Need to specify either controller or rllib_config, but not both" - - if args.run_transfer: + if transfer_test is not None: + if type(transfer_test) == bytes: + transfer_test = ray.cloudpickle.loads(transfer_test) flow_params = transfer_test.flow_params_modifier_fn(flow_params) if args.controller: @@ -71,7 +76,8 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= elif args.controller == 'follower_stopper': from flow.controllers.velocity_controllers import FollowerStopper controller = FollowerStopper - test_params.update({'v_des': 15}) + test_params.update({'v_des': v_des}) + # flow_params['veh'].type_parameters['av']['car_following_params'] elif args.controller == 'sumo': from flow.controllers.car_following_models import SimCarFollowingController controller = SimCarFollowingController @@ -185,17 +191,29 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= info_dict = { "velocities": [], "outflows": [], + "avg_trip_energy": [], + "avg_trip_time": [], + "total_completed_trips": [] } + all_trip_energy_distribution = defaultdict(lambda: []) + all_trip_time_distribution = defaultdict(lambda: []) + info_dict.update({ key: [] for key in custom_callables.keys() }) - for i in range(args.num_rollouts): + i = 0 + while i < args.num_rollouts: + print("Rollout iter", i) vel = [] + per_vehicle_energy_trace = defaultdict(lambda: []) + completed_veh_types = {} + completed_vehicle_avg_energy = {} + completed_vehicle_travel_time = {} custom_vals = {key: [] for key in custom_callables.keys()} state = env.reset() + initial_vehicles = set(env.k.vehicle.get_ids()) for _ in range(env_params.horizon): - if rllib_config: if multiagent: action = {} @@ -226,17 +244,41 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= for (key, lambda_func) in custom_callables.items(): custom_vals[key].append(lambda_func(env)) + for past_veh_id in per_vehicle_energy_trace.keys(): + if past_veh_id not in veh_ids and past_veh_id not in completed_vehicle_avg_energy: + all_trip_energy_distribution[completed_veh_types[past_veh_id]].append(np.sum(per_vehicle_energy_trace[past_veh_id])) + all_trip_time_distribution[completed_veh_types[past_veh_id]].append(len(per_vehicle_energy_trace[past_veh_id])) + completed_vehicle_avg_energy[past_veh_id] = np.sum(per_vehicle_energy_trace[past_veh_id]) + completed_vehicle_travel_time[past_veh_id] = len(per_vehicle_energy_trace[past_veh_id]) + + for veh_id in veh_ids: + if veh_id not in initial_vehicles: + if veh_id not in per_vehicle_energy_trace: + # we have to skip the first step's energy calculation + per_vehicle_energy_trace[veh_id].append(0) + completed_veh_types[veh_id] = env.k.vehicle.get_type(veh_id) + else: + per_vehicle_energy_trace[veh_id].append(-1*vehicle_energy_consumption(env, veh_id)) + if type(done) is dict and done['__all__']: break elif type(done) is not dict and done: break - - # Store the information from the run in info_dict. 
- outflow = env.k.vehicle.get_outflow_rate(int(500)) - info_dict["velocities"].append(np.mean(vel)) - info_dict["outflows"].append(outflow) - for key in custom_vals.keys(): - info_dict[key].append(np.mean(custom_vals[key])) + elif max_completed_trips is not None and len(completed_vehicle_avg_energy) > max_completed_trips: + break + if env.crash: + print("Crash on iter", i) + else: + # Store the information from the run in info_dict. + outflow = env.k.vehicle.get_outflow_rate(int(500)) + info_dict["velocities"].append(np.mean(vel)) + info_dict["outflows"].append(outflow) + info_dict["avg_trip_energy"].append(np.mean(list(completed_vehicle_avg_energy.values()))) + info_dict["avg_trip_time"].append(np.mean(list(completed_vehicle_travel_time.values()))) + info_dict["total_completed_trips"].append(len(list(completed_vehicle_avg_energy.values()))) + for key in custom_vals.keys(): + info_dict[key].append(np.mean(custom_vals[key])) + i += 1 print('======== Summary of results ========') if args.run_transfer: @@ -252,6 +294,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= env.unwrapped.terminate() if output_dir: + ensure_dir(output_dir) if args.run_transfer: exp_name = "{}-replay".format(transfer_test.transfer_str) else: @@ -276,6 +319,14 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # delete the .xml version of the emission file os.remove(emission_path) + all_trip_energies = os.path.join(output_dir, '{}-all_trip_energies.npy'.format(exp_name)) + np.save(all_trip_energies, dict(all_trip_energy_distribution)) + fig_names, figs = plot_trip_distribution(all_trip_energy_distribution) + + for fig_name, fig in zip(fig_names, figs): + edist_out = os.path.join(output_dir, '{}_energy_distribution.png'.format(fig_name)) + fig.savefig(edist_out) + # Create the flow_params object with open(os.path.join(output_dir, exp_name) + '.json', 'w') as outfile: json.dump(flow_params, outfile, @@ -342,16 +393,42 @@ def create_parser(): action='store_true', help='Runs transfer tests if true' ) + parser.add_argument( + '-pr', + '--penetration_rate', + type=float, + help='Specifies percentage of AVs.', + required=False) + parser.add_argument( + '-mct', + '--max_completed_trips', + type=int, + help='Terminate rollout after max_completed_trips vehicles have started and ended.', + default=None) + parser.add_argument( + '--v_des_sweep', + action='store_true', + help='Runs a sweep over v_des params.', + default=None) parser.add_argument( '--output_dir', type=str, help='Directory to save results.', default=None ) + parser.add_argument('--use_s3', action='store_true', help='If true, upload results to s3') + parser.add_argument('--num_cpus', type=int, default=1, help='Number of cpus to run experiment with') + parser.add_argument('--multi_node', action='store_true', help='Set to true if this will ' + 'be run in cluster mode') + parser.add_argument('--exp_title', type=str, required=False, default=None, + help='Informative experiment title to help distinguish results') return parser if __name__ == '__main__': + date = datetime.now(tz=pytz.utc) + date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") + parser = create_parser() args = parser.parse_args() @@ -365,14 +442,51 @@ def create_parser(): flow_params = deepcopy(I210_MA_DEFAULT_FLOW_PARAMS) - if args.local: - ray.init(num_cpus=1, object_store_memory=200 * 1024 * 1024) + if args.multi_node: + ray.init(redis_address='localhost:6379') + elif args.local: + ray.init(local_mode=True, 
object_store_memory=200 * 1024 * 1024) else: - ray.init(num_cpus=1) + ray.init(num_cpus=args.num_cpus + 1, object_store_memory=200 * 1024 * 1024) + + if args.exp_title: + output_dir = os.path.join(args.output_dir, args.exp_title) + else: + output_dir = args.output_dir if args.run_transfer: - for transfer_test in inflows_range(penetration_rates=[0.05, 0.1, 0.2], flow_rate_coefs=[0.8, 1.0, 1.2]): - replay(args, flow_params, output_dir=args.output_dir, transfer_test=transfer_test, - rllib_config=rllib_config, result_dir=rllib_result_dir) + s = [ray.cloudpickle.dumps(transfer_test) for transfer_test in inflows_range(penetration_rates=[0.0, 0.1, 0.2, 0.3])] + ray_output = [replay.remote(args, flow_params, output_dir=output_dir, transfer_test=transfer_test, + rllib_config=rllib_config, result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips) + for transfer_test in s] + ray.get(ray_output) + + elif args.v_des_sweep: + assert args.controller == 'follower_stopper' + + ray_output = [replay.remote(args, flow_params, output_dir="{}/{}".format(output_dir, v_des), rllib_config=rllib_config, + result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips, v_des=v_des) + for v_des in range(8, 17, 2)] + ray.get(ray_output) + else: - replay(args, flow_params, output_dir=args.output_dir, rllib_config=rllib_config, result_dir=rllib_result_dir) + if args.penetration_rate is not None: + pr = args.penetration_rate if args.penetration_rate is not None else 0 + single_transfer = next(inflows_range(penetration_rates=pr)) + ray.get(replay.remote(args, flow_params, output_dir=output_dir, transfer_test=single_transfer, + rllib_config=rllib_config, result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips)) + else: + ray.get(replay.remote(args, flow_params, output_dir=output_dir, + rllib_config=rllib_config, result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips)) + + if args.use_s3: + s3_string = 's3://kanaad.experiments/i210_replay/' + date + if args.exp_title: + s3_string += '/' + args.exp_title + + for i in range(4): + try: + p1 = subprocess.Popen("aws s3 sync {} {}".format(output_dir, s3_string).split(' ')) + p1.wait(50) + except Exception as e: + print('This is the error ', e) diff --git a/flow/visualize/plot_custom_callables.py b/flow/visualize/plot_custom_callables.py index 8df0e7f62..f82377cef 100644 --- a/flow/visualize/plot_custom_callables.py +++ b/flow/visualize/plot_custom_callables.py @@ -4,11 +4,43 @@ from datetime import datetime import errno import numpy as np -import matplotlib.pyplot as plt +try: + from matplotlib import pyplot as plt +except ImportError: + import matplotlib + matplotlib.use('TkAgg') + from matplotlib import pyplot as plt import os import pytz import sys +def make_bar_plot(vals, title): + print(len(vals)) + fig = plt.figure() + plt.hist(vals, 10, facecolor='blue', alpha=0.5) + plt.title(title) + plt.xlim(1000,3000) + return fig + +def plot_trip_distribution(all_trip_energy_distribution): + non_av_vals = [] + figures = [] + figure_names = [] + for key in all_trip_energy_distribution: + if key != 'av': + non_av_vals.extend(all_trip_energy_distribution[key]) + figures.append(make_bar_plot(all_trip_energy_distribution[key], key)) + figure_names.append(key) + + figure_names.append('All Non-AV') + figures.append(make_bar_plot(non_av_vals, 'All Non-AV')) + + figure_names.append('All') + figures.append(make_bar_plot(non_av_vals + all_trip_energy_distribution['av'], 'All')) + + return figure_names, figures + + def 
parse_flags(args): """Parse training options user can specify in command line. @@ -51,13 +83,17 @@ def parse_flags(args): for (dirpath, dir_names, file_names) in os.walk(flags.target_folder): for file_name in file_names: if file_name[-4:] == ".npy": - exp_name = os.path.basename(os.path.dirname(dirpath)) + exp_name = os.path.basename(dirpath) info_dict = np.load(os.path.join(dirpath, file_name), allow_pickle=True).item() info_dicts.append(info_dict) exp_names.append(exp_name) custom_callable_names.update(info_dict.keys()) + idxs = np.argsort(exp_names) + exp_names = [exp_names[i] for i in idxs] + info_dicts = [info_dicts[i] for i in idxs] + for name in custom_callable_names: y_vals = [np.mean(info_dict[name]) for info_dict in info_dicts] y_stds = [np.std(info_dict[name]) for info_dict in info_dicts] @@ -65,7 +101,7 @@ def parse_flags(args): plt.bar(x_pos, y_vals, align='center', alpha=0.5) plt.xticks(x_pos, [exp_name for exp_name in exp_names], rotation=60) - plt.ylabel('Experiment') + plt.xlabel('Experiment') plt.title('I210 Replay Result: {}'.format(name)) plt.tight_layout() if flags.output_folder: diff --git a/flow/visualize/transfer/util.py b/flow/visualize/transfer/util.py index 50b503956..d6c8d9f88 100644 --- a/flow/visualize/transfer/util.py +++ b/flow/visualize/transfer/util.py @@ -6,13 +6,13 @@ VEH_PER_HOUR_BASE_27414345, VEH_PER_HOUR_BASE_27414342 -def make_inflows(penetration_rate=0.1, flow_rate_coef=1.0, departSpeed=20, on_ramp=False): +def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False): """Generate inflows object from parameters. Uses default inflows from multiagent_i210. Keyword Arguments: ----------------- - penetration_rate {float} -- [AV Penetration Rate] (default: {0.1}) - flow_rate_coef {float} -- [Scale flow rate by] (default: {1.0}) + pr {float} -- [AV Penetration Rate] (default: {0.1}) + fr_coef {float} -- [Scale flow rate by] (default: {1.0}) departSpeed {int} -- [Initial speed of all flows] (default: {20}) Returns @@ -22,39 +22,45 @@ def make_inflows(penetration_rate=0.1, flow_rate_coef=1.0, departSpeed=20, on_ra """ inflow = InFlows() # main highway - assert penetration_rate < 1.0, "your penetration rate is over 100%" - assert penetration_rate > 0.0, "your penetration rate should be above zero" + assert pr < 1.0, "your penetration rate is over 100%" + + all_inflows = [] inflow_119257914 = dict(veh_type="human", edge="119257914", - vehs_per_hour=VEH_PER_HOUR_BASE_119257914 * penetration_rate * flow_rate_coef, + vehs_per_hour=VEH_PER_HOUR_BASE_119257914 * + (1 - (pr)) * fr_coef, # probability=1.0, departLane="random", departSpeed=departSpeed) - inflow_119257914_av = dict(veh_type="av", - edge="119257914", - vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * penetration_rate * flow_rate_coef), - # probability=1.0, - departLane="random", - departSpeed=departSpeed) + all_inflows.append(inflow_119257914) + + if pr > 0.0: + inflow_119257914_av = dict(veh_type="av", + edge="119257914", + vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * pr * fr_coef), + # probability=1.0, + departLane="random", + departSpeed=departSpeed) + all_inflows.append(inflow_119257914_av) + if on_ramp: inflow_27414345 = dict(veh_type="human", edge="27414345", - vehs_per_hour=VEH_PER_HOUR_BASE_27414345 * penetration_rate * flow_rate_coef, + vehs_per_hour=VEH_PER_HOUR_BASE_27414345 * + (1 - (pr)) * fr_coef, departLane="random", departSpeed=departSpeed) - - inflow_27414342 = dict(veh_type="human", - edge="27414342#0", - vehs_per_hour=VEH_PER_HOUR_BASE_27414342 * penetration_rate * 
flow_rate_coef, - departLane="random", - departSpeed=departSpeed) - - all_inflow_defs = (inflow_119257914, inflow_27414345, inflow_27414342, inflow_119257914_av) - else: - all_inflow_defs = (inflow_119257914, inflow_119257914_av) - - for inflow_def in all_inflow_defs: + all_inflows.append(inflow_27414345) + if pr > 0.0: + inflow_27414342 = dict(veh_type="human", + edge="27414342#0", + vehs_per_hour=VEH_PER_HOUR_BASE_27414342 * pr * fr_coef, + departLane="random", + departSpeed=departSpeed) + all_inflows.append(inflow_27414342) + + for inflow_def in all_inflows: inflow.add(**inflow_def) return inflow From 3154f2659246bc0f1c74502d4db98df338dfa108 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Mon, 18 May 2020 13:07:30 -0400 Subject: [PATCH 065/438] Add MPG reward (#931) Add an MPG reward --- flow/core/kernel/vehicle/base.py | 14 ++++ flow/core/kernel/vehicle/traci.py | 12 +++- flow/core/rewards.py | 106 ++++++++++++++++++++++++++++++ 3 files changed, 131 insertions(+), 1 deletion(-) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index c68d68c3a..706504027 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -297,6 +297,20 @@ def get_num_not_departed(self): """ raise NotImplementedError + def get_fuel_consumption(self, veh_id, error=-1001): + """Return the fuel consumption (gallons/s) of the specified vehicle. + + Parameters + ---------- + veh_id : str or list of str + vehicle id, or list of vehicle ids + error : any, optional + value that is returned if the vehicle is not found + Returns + ------- + float + """ + def get_speed(self, veh_id, error=-1001): """Return the speed of the specified vehicle. diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index d165dbdea..134bac49f 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -335,7 +335,8 @@ def _add_departed(self, veh_id, veh_type): tc.VAR_EDGES, tc.VAR_POSITION, tc.VAR_ANGLE, - tc.VAR_SPEED_WITHOUT_TRACI + tc.VAR_SPEED_WITHOUT_TRACI, + tc.VAR_FUELCONSUMPTION ]) self.kernel_api.vehicle.subscribeLeader(veh_id, 2000) @@ -370,6 +371,8 @@ def _add_departed(self, veh_id, veh_type): self.kernel_api.vehicle.getLaneIndex(veh_id) self.__sumo_obs[veh_id][tc.VAR_SPEED] = \ self.kernel_api.vehicle.getSpeed(veh_id) + self.__sumo_obs[veh_id][tc.VAR_FUELCONSUMPTION] = \ + self.kernel_api.vehicle.getFuelConsumption(veh_id) # make sure that the order of rl_ids is kept sorted self.__rl_ids.sort() @@ -533,6 +536,13 @@ def get_num_not_departed(self): """See parent class.""" return self.num_not_departed + def get_fuel_consumption(self, veh_id, error=-1001): + """Return fuel consumption in gallons/s.""" + ml_to_gallons = 0.000264172 + if isinstance(veh_id, (list, np.ndarray)): + return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] + return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons + def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): diff --git a/flow/core/rewards.py b/flow/core/rewards.py index 6de472af2..3cca916f5 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -330,3 +330,109 @@ def energy_consumption(env, gain=.001): power += M * speed * accel + M * g * Cr * speed + 0.5 * rho * A * Ca * speed ** 3 return -gain * power + + +def veh_energy_consumption(env, veh_id, gain=.001): + """Calculate power consumption of a vehicle. + + Assumes vehicle is an average sized vehicle.
+ The power calculated here is the lower bound of the actual power consumed + by a vehicle. + """ + power = 0 + + M = 1200 # mass of average sized vehicle (kg) + g = 9.81 # gravitational acceleration (m/s^2) + Cr = 0.005 # rolling resistance coefficient + Ca = 0.3 # aerodynamic drag coefficient + rho = 1.225 # air density (kg/m^3) + A = 2.6 # vehicle cross sectional area (m^2) + speed = env.k.vehicle.get_speed(veh_id) + prev_speed = env.k.vehicle.get_previous_speed(veh_id) + + accel = abs(speed - prev_speed) / env.sim_step + + power += M * speed * accel + M * g * Cr * speed + 0.5 * rho * A * Ca * speed ** 3 + + return -gain * power + + +def miles_per_megajoule(env, veh_ids=None, gain=.001): + """Calculate miles per mega-joule of either a particular vehicle or the total average of all the vehicles. + + Assumes vehicle is an average sized vehicle. + The power calculated here is the lower bound of the actual power consumed + by a vehicle. + + Parameters + ---------- + env : flow.envs.Env + the environment variable, which contains information on the current + state of the system. + veh_ids : [list] + list of veh_ids to compute the reward over + gain : float + scaling factor for the reward + """ + mpj = 0 + counter = 0 + if veh_ids is None: + veh_ids = env.k.vehicle.get_ids() + elif not isinstance(veh_ids, list): + veh_ids = [veh_ids] + for veh_id in veh_ids: + speed = env.k.vehicle.get_speed(veh_id) + # convert to be positive since the function called is a penalty + power = -veh_energy_consumption(env, veh_id, gain=1.0) + if power > 0 and speed >= 0.0: + counter += 1 + # meters / joule is (v * \delta t) / (power * \delta t) + mpj += speed / power + if counter > 0: + mpj /= counter + + # convert from meters per joule to miles per joule + mpj /= 1609.0 + # convert from miles per joule to miles per megajoule + mpj *= 10**6 + + return mpj * gain + + +def miles_per_gallon(env, veh_ids=None, gain=.001): + """Calculate mpg of either a particular vehicle or the total average of all the vehicles. + + Assumes vehicle is an average sized vehicle. + The power calculated here is the lower bound of the actual power consumed + by a vehicle. + + Parameters + ---------- + env : flow.envs.Env + the environment variable, which contains information on the current + state of the system. 
+    veh_ids : [list]
+        list of veh_ids to compute the reward over
+    gain : float
+        scaling factor for the reward
+    """
+    mpg = 0
+    counter = 0
+    if veh_ids is None:
+        veh_ids = env.k.vehicle.get_ids()
+    elif not isinstance(veh_ids, list):
+        veh_ids = [veh_ids]
+    for veh_id in veh_ids:
+        speed = env.k.vehicle.get_speed(veh_id)
+        gallons_per_s = env.k.vehicle.get_fuel_consumption(veh_id)
+        if gallons_per_s > 0 and speed >= 0.0:
+            counter += 1
+            # meters / gallon is (v * \delta t) / (gallons_per_s * \delta t)
+            mpg += speed / gallons_per_s
+    if counter > 0:
+        mpg /= counter
+
+    # convert from meters per gallon to miles per gallon
+    mpg /= 1609.0
+
+    return mpg * gain

From 0f45dbe356b79067915e933f5795ab3760d69930 Mon Sep 17 00:00:00 2001
From: Eugene Vinitsky
Date: Mon, 18 May 2020 14:06:22 -0400
Subject: [PATCH 066/438] Mpg reward2 (#933)

Add an MPG and MPJ reward
---
 examples/exp_configs/non_rl/highway_single.py | 144 ++++++++++++++++++
 .../rl/multiagent/multiagent_i210.py          |   2 +-
 examples/train.py                             |   4 +-
 flow/benchmarks/README.md                     |   6 +-
 flow/benchmarks/bottleneck0.py                |   2 +-
 flow/benchmarks/bottleneck1.py                |   2 +-
 flow/benchmarks/bottleneck2.py                |   2 +-
 flow/benchmarks/grid0.py                      |   4 +-
 flow/benchmarks/grid1.py                      |   4 +-
 flow/controllers/__init__.py                  |   5 +-
 flow/controllers/car_following_models.py      |  83 ++++++++++
 flow/core/kernel/simulation/traci.py          |  14 +-
 flow/core/kernel/vehicle/base.py              |  21 +++
 flow/core/kernel/vehicle/traci.py             |  76 +++++----
 flow/core/rewards.py                          |  83 ++++++++++
 flow/envs/__init__.py                         |   3 +-
 flow/envs/traffic_light_grid.py               |  11 ++
 flow/networks/highway.py                      |  80 +++++++++-
 flow/visualize/i210_replay.py                 |  40 +++--
 flow/visualize/plot_custom_callables.py       |  13 +-
 flow/visualize/transfer/util.py               |  14 +-
 tests/fast_tests/test_controllers.py          |  58 ++++++-
 tests/fast_tests/test_examples.py             |   5 +
 tests/fast_tests/test_scenarios.py            |  61 +++++++-
 tests/fast_tests/test_vehicles.py             |  16 +-
 tests/setup_scripts.py                        |   4 +-
 26 files changed, 668 insertions(+), 89 deletions(-)
 create mode 100644 examples/exp_configs/non_rl/highway_single.py

diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py
new file mode 100644
index 000000000..a2e44032a
--- /dev/null
+++ b/examples/exp_configs/non_rl/highway_single.py
@@ -0,0 +1,144 @@
+"""Example of an open, single-lane highway network.
+
+Human-driven vehicles follow the Bando-FTL model; a share of the inflow, set
+by PENETRATION_RATE, consists of automated FollowerStopper vehicles.
+""" +import numpy as np + +from flow.controllers import BandoFTLController +from flow.controllers.velocity_controllers import FollowerStopper +from flow.core.params import EnvParams +from flow.core.params import NetParams +from flow.core.params import InitialConfig +from flow.core.params import InFlows +from flow.core.params import VehicleParams +from flow.core.params import SumoParams +from flow.core.params import SumoLaneChangeParams +from flow.core.rewards import miles_per_megajoule +from flow.networks import HighwayNetwork +from flow.envs import TestEnv +from flow.networks.highway import ADDITIONAL_NET_PARAMS + +TRAFFIC_SPEED = 11 +END_SPEED = 16 +TRAFFIC_FLOW = 2056 +HORIZON = 2000 +INCLUDE_NOISE = False + +# percentage of autonomous vehicles compared to human vehicles on highway +PENETRATION_RATE = 10.0 + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params.update({ + # length of the highway + "length": 2500, + # number of lanes + "lanes": 1, + # speed limit for all edges + "speed_limit": 30, + # number of edges to divide the highway into + "num_edges": 2, + # whether to include a ghost edge of length 500m. This edge is provided a + # different speed limit. + "use_ghost_edge": True, + # speed limit for the ghost edge + "ghost_speed_limit": END_SPEED +}) + +vehicles = VehicleParams() +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(BandoFTLController, { + 'alpha': .5, + 'beta': 20.0, + 'h_st': 12.0, + 'h_go': 50.0, + 'v_max': 30.0, + 'noise': 1.0 if INCLUDE_NOISE else 0.0, + }), +) + +if PENETRATION_RATE > 0.0: + vehicles.add( + "av", + num_vehicles=0, + acceleration_controller=(FollowerStopper, {"v_des": 11.0}), + ) + +inflows = InFlows() + +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=int(TRAFFIC_FLOW * (1 - PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23", + name="idm_highway_inflow") + +if PENETRATION_RATE > 0.0: + inflows.add( + veh_type="av", + edge="highway_0", + vehs_per_hour=int(TRAFFIC_FLOW * (PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23", + name="av_highway_inflow") + +# SET UP FLOW PARAMETERS + +flow_params = dict( + # name of the experiment + exp_tag='highway-single', + + # name of the flow environment the experiment is running on + env_name=TestEnv, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + restart_instance=False + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) + +custom_callables = { + "avg_merge_speed": lambda env: np.nan_to_num(np.mean( + env.k.vehicle.get_speed(env.k.vehicle.get_ids()))), + "avg_outflow": lambda env: np.nan_to_num( + 
env.k.vehicle.get_outflow_rate(120)), + "miles_per_megajoule": lambda env: np.nan_to_num( + miles_per_megajoule(env, env.k.vehicle.get_ids(), gain=1.0) + ) +} diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 7176abb06..a7d707068 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -9,7 +9,7 @@ from ray.tune.registry import register_env from flow.controllers import RLController -from flow.controllers.car_following_models import IDMController, SimCarFollowingController +from flow.controllers.car_following_models import IDMController import flow.config as config from flow.core.params import EnvParams from flow.core.params import NetParams diff --git a/examples/train.py b/examples/train.py index d688f2b9a..5b5431a3d 100644 --- a/examples/train.py +++ b/examples/train.py @@ -25,8 +25,8 @@ import ray from ray import tune -from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper from ray.tune.registry import register_env +from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper try: from ray.rllib.agents.agent import get_agent_class except ImportError: @@ -301,7 +301,7 @@ def train_h_baselines(flow_params, args, multiagent): """Train policies using SAC and TD3 with h-baselines.""" from hbaselines.algorithms import OffPolicyRLAlgorithm from hbaselines.utils.train import parse_options, get_hyperparameters - from hbaselines.envs.mixed_autonomy.envs import FlowEnv + from hbaselines.envs.mixed_autonomy import FlowEnv flow_params = deepcopy(flow_params) diff --git a/flow/benchmarks/README.md b/flow/benchmarks/README.md index 963ad5b70..bbcba9414 100644 --- a/flow/benchmarks/README.md +++ b/flow/benchmarks/README.md @@ -38,12 +38,12 @@ inflow = 300 veh/hour/lane S=(915,), A=(25,), T=400. this problem is to learn to avoid the *capacity drop* that is characteristic to bottleneck structures in transportation networks, and maximize the total outflow in a mixed-autonomy setting. -- `flow.benchmarks.bottleneck0` 4 lanes, inflow = 1900 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck0` 4 lanes, inflow = 2500 veh/hour, 10% CAV penetration, no vehicles are allowed to lane change, S=(141,), A=(20,), T=1000. -- `flow.benchmarks.bottleneck1` 4 lanes, inflow = 1900 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck1` 4 lanes, inflow = 2500 veh/hour, 10% CAV penetration, the human drivers follow the standard lane changing model in the simulator, S=(141,), A=(20,), T=1000. -- `flow.benchmarks.bottleneck2` 8 lanes, inflow = 3800 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck2` 8 lanes, inflow = 5000 veh/hour, 10% CAV penetration, no vehicles are allowed to lane change, S=(281,), A=(40,), T=1000. 
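As a quick orientation to how these benchmark modules are consumed, here is a minimal non-RL sketch. It assumes only the `flow_params` attribute that the benchmark files below expose and the `Experiment` API that appears later in this patch series; it is an illustration, not part of these commits:

```python
# Hedged usage sketch: run a benchmark config without training.
# Assumes flow.benchmarks.bottleneck0 exposes `flow_params` (see the
# bottleneck0.py hunk below) and the Experiment(flow_params) constructor
# shown in flow/core/experiment.py later in this series.
from flow.benchmarks.bottleneck0 import flow_params
from flow.core.experiment import Experiment

exp = Experiment(flow_params)
info = exp.run(num_runs=1)  # returns per-run summary statistics
```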
## Training on Custom Algorithms diff --git a/flow/benchmarks/bottleneck0.py b/flow/benchmarks/bottleneck0.py index b0e86844c..b07947ad7 100644 --- a/flow/benchmarks/bottleneck0.py +++ b/flow/benchmarks/bottleneck0.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/bottleneck1.py b/flow/benchmarks/bottleneck1.py index 26ae6527a..9c8d9c192 100644 --- a/flow/benchmarks/bottleneck1.py +++ b/flow/benchmarks/bottleneck1.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/bottleneck2.py b/flow/benchmarks/bottleneck2.py index 5052b3b88..4651d448b 100644 --- a/flow/benchmarks/bottleneck2.py +++ b/flow/benchmarks/bottleneck2.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/grid0.py b/flow/benchmarks/grid0.py index 1655c3b3c..5c4ee5349 100644 --- a/flow/benchmarks/grid0.py +++ b/flow/benchmarks/grid0.py @@ -4,7 +4,7 @@ - **Observation Dimension**: (339, ) - **Horizon**: 400 steps """ -from flow.envs import TrafficLightGridPOEnv +from flow.envs import TrafficLightGridBenchmarkEnv from flow.networks import TrafficLightGridNetwork from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ InFlows, SumoCarFollowingParams @@ -68,7 +68,7 @@ exp_tag="grid_0", # name of the flow environment the experiment is running on - env_name=TrafficLightGridPOEnv, + env_name=TrafficLightGridBenchmarkEnv, # name of the network class the experiment is running on network=TrafficLightGridNetwork, diff --git a/flow/benchmarks/grid1.py b/flow/benchmarks/grid1.py index ec2a27454..83055adfd 100644 --- a/flow/benchmarks/grid1.py +++ b/flow/benchmarks/grid1.py @@ -4,7 +4,7 @@ - **Observation Dimension**: (915, ) - **Horizon**: 400 steps """ -from flow.envs import TrafficLightGridPOEnv +from flow.envs import TrafficLightGridBenchmarkEnv from flow.networks import TrafficLightGridNetwork from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ InFlows, SumoCarFollowingParams @@ -68,7 +68,7 @@ exp_tag="grid_1", # name of the flow environment the experiment is running on - env_name=TrafficLightGridPOEnv, + env_name=TrafficLightGridBenchmarkEnv, # name of the network class the experiment is running on network=TrafficLightGridNetwork, diff --git a/flow/controllers/__init__.py b/flow/controllers/__init__.py index 6cb20077a..4dfcf05b7 100755 --- a/flow/controllers/__init__.py +++ b/flow/controllers/__init__.py @@ -14,7 +14,8 @@ from flow.controllers.base_controller import BaseController from flow.controllers.car_following_models import CFMController, \ BCMController, OVMController, LinearOVM, IDMController, \ - SimCarFollowingController, LACController, GippsController + SimCarFollowingController, LACController, GippsController, \ + BandoFTLController from flow.controllers.velocity_controllers import FollowerStopper, \ PISaturation, NonLocalFollowerStopper @@ -35,5 +36,5 @@ "IDMController", "SimCarFollowingController", "FollowerStopper", "PISaturation", "StaticLaneChanger", "SimLaneChangeController", "ContinuousRouter", "GridRouter", "BayBridgeRouter", "LACController", - "GippsController", "NonLocalFollowerStopper" + "GippsController", "NonLocalFollowerStopper", "BandoFTLController" ] diff --git 
a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py
index f86c546e8..42c9b2a9b 100755
--- a/flow/controllers/car_following_models.py
+++ b/flow/controllers/car_following_models.py
@@ -580,3 +580,86 @@ def get_accel(self, env):
         v_next = min(v_acc, v_safe, self.v_desired)
 
         return (v_next-v)/env.sim_step
+
+
+class BandoFTLController(BaseController):
+    """Bando follow-the-leader controller.
+
+    Usage
+    -----
+    See BaseController for usage example.
+
+    Attributes
+    ----------
+    veh_id : str
+        Vehicle ID for SUMO identification
+    car_following_params : flow.core.params.SumoCarFollowingParams
+        see parent class
+    alpha : float
+        gain on desired velocity to current velocity difference
+        (default: 0.5)
+    beta : float
+        gain on lead car velocity and self velocity difference
+        (default: 20)
+    h_st : float
+        headway for stopping (default: 2)
+    h_go : float
+        headway for full speed (default: 10)
+    v_max : float
+        max velocity (default: 32)
+    time_delay : float
+        time delay (default: 0)
+    noise : float
+        std dev of normal perturbation to the acceleration (default: 0)
+    fail_safe : str
+        type of flow-imposed failsafe the vehicle should possess, defaults
+        to no failsafe (None)
+    """
+
+    def __init__(self,
+                 veh_id,
+                 car_following_params,
+                 alpha=.5,
+                 beta=20,
+                 h_st=2,
+                 h_go=10,
+                 v_max=32,
+                 want_max_accel=False,
+                 time_delay=0,
+                 noise=0,
+                 fail_safe=None):
+        """Instantiate a Bando controller."""
+        BaseController.__init__(
+            self,
+            veh_id,
+            car_following_params,
+            delay=time_delay,
+            fail_safe=fail_safe,
+            noise=noise,
+        )
+        self.veh_id = veh_id
+        self.v_max = v_max
+        self.alpha = alpha
+        self.beta = beta
+        self.h_st = h_st
+        self.h_go = h_go
+        self.want_max_accel = want_max_accel
+
+    def get_accel(self, env):
+        """See parent class."""
+        lead_id = env.k.vehicle.get_leader(self.veh_id)
+        if not lead_id:  # no car ahead
+            if self.want_max_accel:
+                return self.max_accel
+
+        v_l = env.k.vehicle.get_speed(lead_id)
+        v = env.k.vehicle.get_speed(self.veh_id)
+        s = env.k.vehicle.get_headway(self.veh_id)
+        return self.accel_func(v, v_l, s)
+
+    def accel_func(self, v, v_l, s):
+        """Compute the acceleration function."""
+        v_h = self.v_max * ((np.tanh(s/self.h_st-2)+np.tanh(2))/(1+np.tanh(2)))
+        s_dot = v_l - v
+        u = self.alpha * (v_h - v) + self.beta * s_dot/(s**2)
+        return u
diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py
index 0accdeddf..09ea03907 100644
--- a/flow/core/kernel/simulation/traci.py
+++ b/flow/core/kernel/simulation/traci.py
@@ -12,7 +12,6 @@
 import subprocess
 import signal
 
-
 # Number of retries on restarting SUMO before giving up
 RETRIES_ON_ERROR = 10
 
@@ -46,9 +45,14 @@ def pass_api(self, kernel_api):
         # subscribe some simulation parameters needed to check for entering,
         # exiting, and colliding vehicles
         self.kernel_api.simulation.subscribe([
-            tc.VAR_DEPARTED_VEHICLES_IDS, tc.VAR_ARRIVED_VEHICLES_IDS,
-            tc.VAR_TELEPORT_STARTING_VEHICLES_IDS, tc.VAR_TIME_STEP,
-            tc.VAR_DELTA_T
+            tc.VAR_DEPARTED_VEHICLES_IDS,
+            tc.VAR_ARRIVED_VEHICLES_IDS,
+            tc.VAR_TELEPORT_STARTING_VEHICLES_IDS,
+            tc.VAR_TIME_STEP,
+            tc.VAR_DELTA_T,
+            tc.VAR_LOADED_VEHICLES_NUMBER,
+            tc.VAR_DEPARTED_VEHICLES_NUMBER,
+            tc.VAR_ARRIVED_VEHICLES_NUMBER
         ])
 
     def simulation_step(self):
@@ -94,7 +98,7 @@ def start_simulation(self, network, sim_params):
         # disable all collisions and teleporting in the simulation.
if sim_params.disable_collisions:
             sumo_call.extend(["--collision.mingap-factor", str(0),
-                             "--collision.action", str("none")])
+                              "--collision.action", str("none")])
 
         # use a ballistic integration step (if request)
         if sim_params.use_ballistic:
diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py
index d9fc773cd..706504027 100644
--- a/flow/core/kernel/vehicle/base.py
+++ b/flow/core/kernel/vehicle/base.py
@@ -290,6 +290,27 @@ def get_departed_ids(self):
         """Return the ids of vehicles that departed in the last time step."""
         raise NotImplementedError
 
+    def get_num_not_departed(self):
+        """Return the number of vehicles not departed in the last time step.
+
+        This includes vehicles that were loaded but not departed.
+        """
+        raise NotImplementedError
+
+    def get_fuel_consumption(self, veh_id, error=-1001):
+        """Return the fuel consumption rate (gallons/s) of the specified vehicle.
+
+        Parameters
+        ----------
+        veh_id : str or list of str
+            vehicle id, or list of vehicle ids
+        error : any, optional
+            value that is returned if the vehicle is not found
+        Returns
+        -------
+        float
+        """
+
     def get_speed(self, veh_id, error=-1001):
         """Return the speed of the specified vehicle.
 
diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py
index 14720cdce..b89e981be 100644
--- a/flow/core/kernel/vehicle/traci.py
+++ b/flow/core/kernel/vehicle/traci.py
@@ -22,7 +22,8 @@
 STEPS = 10
 rdelta = 255 / STEPS
 # smoothly go from red to green as the speed increases
-color_bins = [[int(255 - rdelta * i), int(rdelta * i), 0] for i in range(STEPS + 1)]
+color_bins = [[int(255 - rdelta * i), int(rdelta * i), 0] for i in
+              range(STEPS + 1)]
 
 
 class TraCIVehicle(KernelVehicle):
@@ -56,6 +57,8 @@ def __init__(self,
         self.num_vehicles = 0
         # number of rl vehicles in the network
         self.num_rl_vehicles = 0
+        # number of vehicles loaded but not yet departed
+        self.num_not_departed = 0
 
         # contains the parameters associated with each type of vehicle
         self.type_parameters = {}
@@ -68,11 +71,11 @@ def __init__(self,
 
         # number of vehicles that entered the network for every time-step
         self._num_departed = []
-        self._departed_ids = []
+        self._departed_ids = 0
 
         # number of vehicles to exit the network for every time-step
         self._num_arrived = []
-        self._arrived_ids = []
+        self._arrived_ids = 0
         self._arrived_rl_ids = []
 
         # whether or not to automatically color vehicles
@@ -101,6 +104,7 @@ def initialize(self, vehicles):
         self.minGap = vehicles.minGap
         self.num_vehicles = 0
         self.num_rl_vehicles = 0
+        self.num_not_departed = 0
 
         self.__vehicles.clear()
         for typ in vehicles.initial:
@@ -180,14 +184,15 @@ def update(self, reset):
                 self.prev_last_lc[veh_id] = -float("inf")
             self._num_departed.clear()
             self._num_arrived.clear()
-            self._departed_ids.clear()
-            self._arrived_ids.clear()
+            self._departed_ids = 0
+            self._arrived_ids = 0
             self._arrived_rl_ids.clear()
+            self.num_not_departed = 0
 
         # add vehicles from a network template, if applicable
         if hasattr(self.master_kernel.network.network,
                    "template_vehicles"):
-            for veh_id in self.master_kernel.network.network.\
+            for veh_id in self.master_kernel.network.network. \
                     template_vehicles:
                 vals = deepcopy(self.master_kernel.network.network.
template_vehicles[veh_id]) @@ -206,11 +211,14 @@ def update(self, reset): self.__vehicles[veh_id]["last_lc"] = self.time_counter # updated the list of departed and arrived vehicles - self._num_departed.append( - len(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS])) - self._num_arrived.append(len(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS])) - self._departed_ids.append(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]) - self._arrived_ids.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]) + self._num_departed.append(sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER]) + self._num_arrived.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_NUMBER]) + self._departed_ids = sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS] + self._arrived_ids = sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS] + + # update the number of not departed vehicles + self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ + sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: @@ -321,9 +329,14 @@ def _add_departed(self, veh_id, veh_type): # subscribe the new vehicle self.kernel_api.vehicle.subscribe(veh_id, [ - tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION, tc.VAR_ROAD_ID, - tc.VAR_SPEED, tc.VAR_EDGES, tc.VAR_POSITION, tc.VAR_ANGLE, - tc.VAR_SPEED_WITHOUT_TRACI + tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION, + tc.VAR_ROAD_ID, + tc.VAR_SPEED, + tc.VAR_EDGES, + tc.VAR_POSITION, + tc.VAR_ANGLE, + tc.VAR_SPEED_WITHOUT_TRACI, + tc.VAR_FUELCONSUMPTION ]) self.kernel_api.vehicle.subscribeLeader(veh_id, 2000) @@ -358,6 +371,8 @@ def _add_departed(self, veh_id, veh_type): self.kernel_api.vehicle.getLaneIndex(veh_id) self.__sumo_obs[veh_id][tc.VAR_SPEED] = \ self.kernel_api.vehicle.getSpeed(veh_id) + self.__sumo_obs[veh_id][tc.VAR_FUELCONSUMPTION] = \ + self.kernel_api.vehicle.getFuelConsumption(veh_id) # make sure that the order of rl_ids is kept sorted self.__rl_ids.sort() @@ -504,10 +519,7 @@ def get_num_arrived(self): def get_arrived_ids(self): """See parent class.""" - if len(self._arrived_ids) > 0: - return self._arrived_ids[-1] - else: - return 0 + return self._arrived_ids def get_arrived_rl_ids(self): """See parent class.""" @@ -518,10 +530,18 @@ def get_arrived_rl_ids(self): def get_departed_ids(self): """See parent class.""" - if len(self._departed_ids) > 0: - return self._departed_ids[-1] - else: - return 0 + return self._departed_ids + + def get_num_not_departed(self): + """See parent class.""" + return self.num_not_departed + + def get_fuel_consumption(self, veh_id, error=-1001): + """Return fuel consumption in gallons/s.""" + ml_to_gallons = 0.000264172 + if isinstance(veh_id, (list, np.ndarray)): + return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] + return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" @@ -1009,7 +1029,8 @@ def update_vehicle_colors(self): for veh_id in self.get_rl_ids(): try: # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: # color rl vehicles red self.set_color(veh_id=veh_id, color=RED) except (FatalTraCIError, TraCIException) as e: @@ -1020,7 +1041,8 @@ def update_vehicle_colors(self): try: color = CYAN if veh_id in self.get_observed_ids() else WHITE # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. 
- if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color) except (FatalTraCIError, TraCIException) as e: print('Error when updating human vehicle colors:', e) @@ -1030,7 +1052,8 @@ def update_vehicle_colors(self): if 'av' in veh_id: color = RED # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color) except (FatalTraCIError, TraCIException) as e: print('Error when updating human vehicle colors:', e) @@ -1043,7 +1066,8 @@ def update_vehicle_colors(self): veh_speed = self.get_speed(veh_id) bin_index = np.digitize(veh_speed, speed_ranges) # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color_bins[bin_index]) # clear the list of observed vehicles diff --git a/flow/core/rewards.py b/flow/core/rewards.py index 60760f357..1434636e6 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -333,6 +333,7 @@ def energy_consumption(env, gain=.001): return -gain * power + def vehicle_energy_consumption(env, veh_id, gain=.001): """Calculate power consumption of a vehicle. @@ -348,6 +349,7 @@ def vehicle_energy_consumption(env, veh_id, gain=.001): Ca = 0.3 # aerodynamic drag coefficient rho = 1.225 # air density (kg/m^3) A = 2.6 # vehicle cross sectional area (m^2) + if veh_id not in env.k.vehicle.previous_speeds: return 0 speed = env.k.vehicle.get_speed(veh_id) @@ -358,3 +360,84 @@ def vehicle_energy_consumption(env, veh_id, gain=.001): power += M * speed * accel + M * g * Cr * speed + 0.5 * rho * A * Ca * speed ** 3 return -gain * power + + +def miles_per_megajoule(env, veh_ids=None, gain=.001): + """Calculate miles per mega-joule of either a particular vehicle or the total average of all the vehicles. + + Assumes vehicle is an average sized vehicle. + The power calculated here is the lower bound of the actual power consumed + by a vehicle. + + Parameters + ---------- + env : flow.envs.Env + the environment variable, which contains information on the current + state of the system. + veh_ids : [list] + list of veh_ids to compute the reward over + gain : float + scaling factor for the reward + """ + mpj = 0 + counter = 0 + if veh_ids is None: + veh_ids = env.k.vehicle.get_ids() + elif not isinstance(veh_ids, list): + veh_ids = [veh_ids] + for veh_id in veh_ids: + speed = env.k.vehicle.get_speed(veh_id) + # convert to be positive since the function called is a penalty + power = -vehicle_energy_consumption(env, veh_id, gain=1.0) + if power > 0 and speed >= 0.0: + counter += 1 + # meters / joule is (v * \delta t) / (power * \delta t) + mpj += speed / power + if counter > 0: + mpj /= counter + + # convert from meters per joule to miles per joule + mpj /= 1609.0 + # convert from miles per joule to miles per megajoule + mpj *= 10 ** 6 + + return mpj * gain + + +def miles_per_gallon(env, veh_ids=None, gain=.001): + """Calculate mpg of either a particular vehicle or the total average of all the vehicles. 
+
+    Assumes vehicle is an average sized vehicle.
+    The power calculated here is the lower bound of the actual power consumed
+    by a vehicle.
+
+    Parameters
+    ----------
+    env : flow.envs.Env
+        the environment variable, which contains information on the current
+        state of the system.
+    veh_ids : [list]
+        list of veh_ids to compute the reward over
+    gain : float
+        scaling factor for the reward
+    """
+    mpg = 0
+    counter = 0
+    if veh_ids is None:
+        veh_ids = env.k.vehicle.get_ids()
+    elif not isinstance(veh_ids, list):
+        veh_ids = [veh_ids]
+    for veh_id in veh_ids:
+        speed = env.k.vehicle.get_speed(veh_id)
+        gallons_per_s = env.k.vehicle.get_fuel_consumption(veh_id)
+        if gallons_per_s > 0 and speed >= 0.0:
+            counter += 1
+            # meters / gallon is (v * \delta t) / (gallons_per_s * \delta t)
+            mpg += speed / gallons_per_s
+    if counter > 0:
+        mpg /= counter
+
+    # convert from meters per gallon to miles per gallon
+    mpg /= 1609.0
+
+    return mpg * gain
diff --git a/flow/envs/__init__.py b/flow/envs/__init__.py
index 6f4351cc0..8bea3dd4f 100755
--- a/flow/envs/__init__.py
+++ b/flow/envs/__init__.py
@@ -4,7 +4,7 @@
 from flow.envs.bottleneck import BottleneckAccelEnv, BottleneckEnv, \
     BottleneckDesiredVelocityEnv
 from flow.envs.traffic_light_grid import TrafficLightGridEnv, \
-    TrafficLightGridPOEnv, TrafficLightGridTestEnv
+    TrafficLightGridPOEnv, TrafficLightGridTestEnv, TrafficLightGridBenchmarkEnv
 from flow.envs.ring.lane_change_accel import LaneChangeAccelEnv, \
     LaneChangeAccelPOEnv
 from flow.envs.ring.accel import AccelEnv
@@ -34,6 +34,7 @@
     'WaveAttenuationPOEnv',
     'TrafficLightGridEnv',
     'TrafficLightGridPOEnv',
+    'TrafficLightGridBenchmarkEnv',
     'BottleneckDesiredVelocityEnv',
     'TestEnv',
     'BayBridgeEnv',
diff --git a/flow/envs/traffic_light_grid.py b/flow/envs/traffic_light_grid.py
index 53391a329..8be0cb8a5 100644
--- a/flow/envs/traffic_light_grid.py
+++ b/flow/envs/traffic_light_grid.py
@@ -731,6 +731,17 @@ def additional_command(self):
         [self.k.vehicle.set_observed(veh_id) for veh_id in self.observed_ids]
 
 
+class TrafficLightGridBenchmarkEnv(TrafficLightGridPOEnv):
+    """Class used for the benchmarks in `Benchmarks for reinforcement learning in mixed-autonomy traffic`."""
+
+    def compute_reward(self, rl_actions, **kwargs):
+        """See class definition."""
+        if self.env_params.evaluate:
+            return - rewards.min_delay_unscaled(self)
+        else:
+            return rewards.desired_velocity(self)
+
+
 class TrafficLightGridTestEnv(TrafficLightGridEnv):
     """
     Class for use in testing.
diff --git a/flow/networks/highway.py b/flow/networks/highway.py
index e1234053c..7e9c18ad5 100644
--- a/flow/networks/highway.py
+++ b/flow/networks/highway.py
@@ -13,7 +13,12 @@
     # speed limit for all edges
     "speed_limit": 30,
     # number of edges to divide the highway into
-    "num_edges": 1
+    "num_edges": 1,
+    # whether to include a ghost edge of length 500m. This edge is provided a
+    # different speed limit.
+    "use_ghost_edge": False,
+    # speed limit for the ghost edge
+    "ghost_speed_limit": 25,
 }
 
 
@@ -29,6 +34,9 @@ class HighwayNetwork(Network):
     * **lanes** : number of lanes in the highway
     * **speed_limit** : max speed limit of the highway
     * **num_edges** : number of edges to divide the highway into
+    * **use_ghost_edge** : whether to include a ghost edge of length 500m. This
+      edge is provided a different speed limit.
+ * **ghost_speed_limit** : speed limit for the ghost edge Usage ----- @@ -62,9 +70,7 @@ def __init__(self, if p not in net_params.additional_params: raise KeyError('Network parameter "{}" not supplied'.format(p)) - self.length = net_params.additional_params["length"] - self.lanes = net_params.additional_params["lanes"] - self.num_edges = net_params.additional_params.get("num_edges", 1) + self.end_length = 500 super().__init__(name, vehicles, net_params, initial_config, traffic_lights) @@ -83,6 +89,13 @@ def specify_nodes(self, net_params): "y": 0 }] + if self.net_params.additional_params["use_ghost_edge"]: + nodes += [{ + "id": "edge_{}".format(num_edges + 1), + "x": length + self.end_length, + "y": 0 + }] + return nodes def specify_edges(self, net_params): @@ -101,12 +114,22 @@ def specify_edges(self, net_params): "length": segment_length }] + if self.net_params.additional_params["use_ghost_edge"]: + edges += [{ + "id": "highway_end", + "type": "highway_end", + "from": "edge_{}".format(num_edges), + "to": "edge_{}".format(num_edges + 1), + "length": self.end_length + }] + return edges def specify_types(self, net_params): """See parent class.""" lanes = net_params.additional_params["lanes"] speed_limit = net_params.additional_params["speed_limit"] + end_speed_limit = net_params.additional_params["ghost_speed_limit"] types = [{ "id": "highwayType", @@ -114,6 +137,13 @@ def specify_types(self, net_params): "speed": speed_limit }] + if self.net_params.additional_params["use_ghost_edge"]: + types += [{ + "id": "highway_end", + "numLanes": lanes, + "speed": end_speed_limit + }] + return types def specify_routes(self, net_params): @@ -123,15 +153,51 @@ def specify_routes(self, net_params): for i in range(num_edges): rts["highway_{}".format(i)] = ["highway_{}".format(j) for j in range(i, num_edges)] + if self.net_params.additional_params["use_ghost_edge"]: + rts["highway_{}".format(i)].append("highway_end") return rts def specify_edge_starts(self): """See parent class.""" + junction_length = 0.1 + length = self.net_params.additional_params["length"] + num_edges = self.net_params.additional_params.get("num_edges", 1) + + # Add the main edges. + edge_starts = [ + ("highway_{}".format(i), + i * (length / num_edges + junction_length)) + for i in range(num_edges) + ] + + if self.net_params.additional_params["use_ghost_edge"]: + edge_starts += [ + ("highway_end", length + num_edges * junction_length) + ] + + return edge_starts + + def specify_internal_edge_starts(self): + """See parent class.""" + junction_length = 0.1 length = self.net_params.additional_params["length"] - edgestarts = [("highway_{}".format(i), (length / self.num_edges) * i) - for i in range(self.num_edges)] - return edgestarts + num_edges = self.net_params.additional_params.get("num_edges", 1) + + # Add the junctions. 
+ edge_starts = [ + (":edge_{}".format(i + 1), + (i + 1) * length / num_edges + i * junction_length) + for i in range(num_edges - 1) + ] + + if self.net_params.additional_params["use_ghost_edge"]: + edge_starts += [ + (":edge_{}".format(num_edges), + length + (num_edges - 1) * junction_length) + ] + + return edge_starts @staticmethod def gen_custom_start_pos(cls, net_params, initial_config, num_vehicles): diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index ac4cc031d..a37bac95b 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -11,6 +11,7 @@ import time import ray + try: from ray.rllib.agents.agent import get_agent_class except ImportError: @@ -26,7 +27,7 @@ from flow.utils.rllib import FlowParamsEncoder from flow.visualize.transfer.util import inflows_range -from flow.visualize.plot_custom_callables import plot_trip_distribution +from flow.visualize.plot_custom_callables import plot_trip_distribution from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables @@ -42,8 +43,10 @@ 2 - the number of the checkpoint """ + @ray.remote -def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=None, result_dir=None, max_completed_trips=None, v_des=12): +def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=None, result_dir=None, + max_completed_trips=None, v_des=12): """Replay or run transfer test (defined by transfer_fn) by modif. Arguments: @@ -221,8 +224,8 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= if use_lstm: action[agent_id], lstm_state[agent_id], _ = \ agent.compute_action( - state[agent_id], state=lstm_state[agent_id], - policy_id=policy_map_fn(agent_id)) + state[agent_id], state=lstm_state[agent_id], + policy_id=policy_map_fn(agent_id)) else: action[agent_id] = agent.compute_action( state[agent_id], policy_id=policy_map_fn(agent_id)) @@ -246,8 +249,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= for past_veh_id in per_vehicle_energy_trace.keys(): if past_veh_id not in veh_ids and past_veh_id not in completed_vehicle_avg_energy: - all_trip_energy_distribution[completed_veh_types[past_veh_id]].append(np.sum(per_vehicle_energy_trace[past_veh_id])) - all_trip_time_distribution[completed_veh_types[past_veh_id]].append(len(per_vehicle_energy_trace[past_veh_id])) + all_trip_energy_distribution[completed_veh_types[past_veh_id]].append( + np.sum(per_vehicle_energy_trace[past_veh_id])) + all_trip_time_distribution[completed_veh_types[past_veh_id]].append( + len(per_vehicle_energy_trace[past_veh_id])) completed_vehicle_avg_energy[past_veh_id] = np.sum(per_vehicle_energy_trace[past_veh_id]) completed_vehicle_travel_time[past_veh_id] = len(per_vehicle_energy_trace[past_veh_id]) @@ -258,7 +263,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= per_vehicle_energy_trace[veh_id].append(0) completed_veh_types[veh_id] = env.k.vehicle.get_type(veh_id) else: - per_vehicle_energy_trace[veh_id].append(-1*vehicle_energy_consumption(env, veh_id)) + per_vehicle_energy_trace[veh_id].append(-1 * vehicle_energy_consumption(env, veh_id)) if type(done) is dict and done['__all__']: break @@ -322,7 +327,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= all_trip_energies = os.path.join(output_dir, '{}-all_trip_energies.npy'.format(exp_name)) 
np.save(all_trip_energies, dict(all_trip_energy_distribution)) fig_names, figs = plot_trip_distribution(all_trip_energy_distribution) - + for fig_name, fig in zip(fig_names, figs): edist_out = os.path.join(output_dir, '{}_energy_distribution.png'.format(fig_name)) fig.savefig(edist_out) @@ -455,18 +460,21 @@ def create_parser(): output_dir = args.output_dir if args.run_transfer: - s = [ray.cloudpickle.dumps(transfer_test) for transfer_test in inflows_range(penetration_rates=[0.0, 0.1, 0.2, 0.3])] + s = [ray.cloudpickle.dumps(transfer_test) for transfer_test in + inflows_range(penetration_rates=[0.0, 0.1, 0.2, 0.3])] ray_output = [replay.remote(args, flow_params, output_dir=output_dir, transfer_test=transfer_test, - rllib_config=rllib_config, result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips) + rllib_config=rllib_config, result_dir=rllib_result_dir, + max_completed_trips=args.max_completed_trips) for transfer_test in s] ray.get(ray_output) elif args.v_des_sweep: assert args.controller == 'follower_stopper' - ray_output = [replay.remote(args, flow_params, output_dir="{}/{}".format(output_dir, v_des), rllib_config=rllib_config, - result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips, v_des=v_des) - for v_des in range(8, 17, 2)] + ray_output = [ + replay.remote(args, flow_params, output_dir="{}/{}".format(output_dir, v_des), rllib_config=rllib_config, + result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips, v_des=v_des) + for v_des in range(8, 17, 2)] ray.get(ray_output) else: @@ -474,10 +482,12 @@ def create_parser(): pr = args.penetration_rate if args.penetration_rate is not None else 0 single_transfer = next(inflows_range(penetration_rates=pr)) ray.get(replay.remote(args, flow_params, output_dir=output_dir, transfer_test=single_transfer, - rllib_config=rllib_config, result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips)) + rllib_config=rllib_config, result_dir=rllib_result_dir, + max_completed_trips=args.max_completed_trips)) else: ray.get(replay.remote(args, flow_params, output_dir=output_dir, - rllib_config=rllib_config, result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips)) + rllib_config=rllib_config, result_dir=rllib_result_dir, + max_completed_trips=args.max_completed_trips)) if args.use_s3: s3_string = 's3://kanaad.experiments/i210_replay/' + date diff --git a/flow/visualize/plot_custom_callables.py b/flow/visualize/plot_custom_callables.py index f82377cef..ee9a10c1d 100644 --- a/flow/visualize/plot_custom_callables.py +++ b/flow/visualize/plot_custom_callables.py @@ -4,25 +4,31 @@ from datetime import datetime import errno import numpy as np + try: from matplotlib import pyplot as plt except ImportError: import matplotlib + matplotlib.use('TkAgg') from matplotlib import pyplot as plt import os import pytz import sys + def make_bar_plot(vals, title): + """Make a bar plot.""" print(len(vals)) fig = plt.figure() plt.hist(vals, 10, facecolor='blue', alpha=0.5) plt.title(title) - plt.xlim(1000,3000) + plt.xlim(1000, 3000) return fig + def plot_trip_distribution(all_trip_energy_distribution): + """Plot a distribution of trips.""" non_av_vals = [] figures = [] figure_names = [] @@ -31,7 +37,7 @@ def plot_trip_distribution(all_trip_energy_distribution): non_av_vals.extend(all_trip_energy_distribution[key]) figures.append(make_bar_plot(all_trip_energy_distribution[key], key)) figure_names.append(key) - + figure_names.append('All Non-AV') figures.append(make_bar_plot(non_av_vals, 'All 
Non-AV'))
 
@@ -39,8 +45,7 @@ def plot_trip_distribution(all_trip_energy_distribution):
     figure_names.append('All')
     figures.append(make_bar_plot(non_av_vals + all_trip_energy_distribution['av'], 'All'))
 
     return figure_names, figures
-
-
+
 
 def parse_flags(args):
     """Parse training options user can specify in command line.
diff --git a/flow/visualize/transfer/util.py b/flow/visualize/transfer/util.py
index d6c8d9f88..107e6d026 100644
--- a/flow/visualize/transfer/util.py
+++ b/flow/visualize/transfer/util.py
@@ -28,8 +28,7 @@ def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False):
     inflow_119257914 = dict(veh_type="human",
                             edge="119257914",
-                            vehs_per_hour=VEH_PER_HOUR_BASE_119257914 *
-                            (1 - (pr)) * fr_coef,
+                            vehs_per_hour=VEH_PER_HOUR_BASE_119257914 * (1 - (pr)) * fr_coef,
                             # probability=1.0,
                             departLane="random",
                             departSpeed=departSpeed)
@@ -47,17 +46,16 @@ def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False):
     if on_ramp:
         inflow_27414345 = dict(veh_type="human",
                                edge="27414345",
-                               vehs_per_hour=VEH_PER_HOUR_BASE_27414345 *
-                               (1 - (pr)) * fr_coef,
+                               vehs_per_hour=VEH_PER_HOUR_BASE_27414345 * (1 - (pr)) * fr_coef,
                                departLane="random",
                                departSpeed=departSpeed)
         all_inflows.append(inflow_27414345)
         if pr > 0.0:
             inflow_27414342 = dict(veh_type="human",
-                                  edge="27414342#0",
-                                  vehs_per_hour=VEH_PER_HOUR_BASE_27414342 * pr * fr_coef,
-                                  departLane="random",
-                                  departSpeed=departSpeed)
+                                   edge="27414342#0",
+                                   vehs_per_hour=VEH_PER_HOUR_BASE_27414342 * pr * fr_coef,
+                                   departLane="random",
+                                   departSpeed=departSpeed)
             all_inflows.append(inflow_27414342)
 
     for inflow_def in all_inflows:
diff --git a/tests/fast_tests/test_controllers.py b/tests/fast_tests/test_controllers.py
index 76146dbe6..58967cef8 100644
--- a/tests/fast_tests/test_controllers.py
+++ b/tests/fast_tests/test_controllers.py
@@ -8,7 +8,7 @@
 from flow.controllers.routing_controllers import ContinuousRouter
 from flow.controllers.car_following_models import IDMController, \
     OVMController, BCMController, LinearOVM, CFMController, LACController, \
-    GippsController
+    GippsController, BandoFTLController
 from flow.controllers import FollowerStopper, PISaturation, NonLocalFollowerStopper
 from tests.setup_scripts import ring_road_exp_setup
 import os
@@ -709,7 +709,7 @@ def test_get_action(self):
         np.testing.assert_array_almost_equal(requested_accel, expected_accel)
 
 
-class TestGippsontroller(unittest.TestCase):
+class TestGippsController(unittest.TestCase):
     """
     Tests that the Gipps Controller returning mathematically accurate values.
     """
@@ -765,5 +765,59 @@ def test_get_action(self):
         np.testing.assert_array_almost_equal(requested_accel, expected_accel)
 
 
+class TestBandoFTLController(unittest.TestCase):
+    """
+    Tests that the Bando controller returns mathematically accurate values.
+ """ + + def setUp(self): + # add a few vehicles to the network using the requested model + # also make sure that the input params are what is expected + contr_params = { + "alpha": .5, + "beta": 20, + "h_st": 2, + "h_go": 10, + "v_max": 32, + "want_max_accel": False, + } + + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(BandoFTLController, contr_params), + routing_controller=(ContinuousRouter, {}), + car_following_params=SumoCarFollowingParams( + accel=15, decel=5), + num_vehicles=5) + + # create the environment and network classes for a ring road + self.env, _, _ = ring_road_exp_setup(vehicles=vehicles) + + def tearDown(self): + # terminate the traci instance + self.env.terminate() + + # free data used by the class + self.env = None + + def test_get_action(self): + self.env.reset() + ids = self.env.k.vehicle.get_ids() + + test_headways = [2, 4, 6, 8, 10] + for i, veh_id in enumerate(ids): + self.env.k.vehicle.set_headway(veh_id, test_headways[i]) + + requested_accel = [ + self.env.k.vehicle.get_acc_controller(veh_id).get_action(self.env) + for veh_id in ids + ] + + expected_accel = [1.649129, 7.853475, 14.057821, 15.70695, 15.959713] + + np.testing.assert_array_almost_equal(requested_accel, expected_accel) + + if __name__ == '__main__': unittest.main() diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index a05fed68e..336c17bf8 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -42,6 +42,7 @@ from examples.exp_configs.non_rl.minicity import flow_params as non_rl_minicity from examples.exp_configs.non_rl.ring import flow_params as non_rl_ring from examples.exp_configs.non_rl.i210_subnetwork import flow_params as non_rl_i210 +from examples.exp_configs.non_rl.highway_single import flow_params as non_rl_highway_single os.environ['TEST_FLAG'] = 'True' os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' @@ -110,6 +111,10 @@ def test_i210(self): """Verify that examples/exp_configs/non_rl/i210_subnetwork.py is working.""" self.run_simulation(non_rl_i210) + def test_highway_single(self): + """Verify that examples/exp_configs/non_rl/highway_single.py is working.""" + self.run_simulation(non_rl_highway_single) + @staticmethod def run_simulation(flow_params): # make the horizon small and set render to False diff --git a/tests/fast_tests/test_scenarios.py b/tests/fast_tests/test_scenarios.py index f9dd47c04..d72a50b17 100644 --- a/tests/fast_tests/test_scenarios.py +++ b/tests/fast_tests/test_scenarios.py @@ -5,6 +5,7 @@ from flow.networks import BottleneckNetwork, FigureEightNetwork, \ TrafficLightGridNetwork, HighwayNetwork, RingNetwork, MergeNetwork, \ MiniCityNetwork, MultiRingNetwork +from tests.setup_scripts import highway_exp_setup __all__ = [ "MultiRingNetwork", "MiniCityNetwork" @@ -94,11 +95,69 @@ def test_additional_net_params(self): "length": 1000, "lanes": 4, "speed_limit": 30, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25 } ) ) + def test_ghost_edge(self): + """Validate the functionality of the ghost edge feature.""" + # =================================================================== # + # Without a ghost edge # + # =================================================================== # + + # create the network + env, _, _ = highway_exp_setup( + net_params=NetParams(additional_params={ + "length": 1000, + "lanes": 4, + "speed_limit": 30, + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25 + }) + ) + env.reset() + + # 
check the network length + self.assertEqual(env.k.network.length(), 1000) + + # check the edge list + self.assertEqual(env.k.network.get_edge_list(), ["highway_0"]) + + # check the speed limits of the edges + self.assertEqual(env.k.network.speed_limit("highway_0"), 30) + + # =================================================================== # + # With a ghost edge # + # =================================================================== # + + # create the network + env, _, _ = highway_exp_setup( + net_params=NetParams(additional_params={ + "length": 1000, + "lanes": 4, + "speed_limit": 30, + "num_edges": 1, + "use_ghost_edge": True, + "ghost_speed_limit": 25 + }) + ) + env.reset() + + # check the network length + self.assertEqual(env.k.network.length(), 1500.1) + + # check the edge list + self.assertEqual(env.k.network.get_edge_list(), + ["highway_0", "highway_end"]) + + # check the speed limits of the edges + self.assertEqual(env.k.network.speed_limit("highway_0"), 30) + self.assertEqual(env.k.network.speed_limit("highway_end"), 25) + class TestRingNetwork(unittest.TestCase): diff --git a/tests/fast_tests/test_vehicles.py b/tests/fast_tests/test_vehicles.py index 485a6a072..b791bba64 100644 --- a/tests/fast_tests/test_vehicles.py +++ b/tests/fast_tests/test_vehicles.py @@ -258,7 +258,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -330,7 +332,9 @@ def test_no_junctions_highway(self): "lanes": 4, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -398,7 +402,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 3 + "num_edges": 3, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -465,7 +471,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 3 + "num_edges": 3, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() diff --git a/tests/setup_scripts.py b/tests/setup_scripts.py index 08d5b2c1e..ac88d2e42 100644 --- a/tests/setup_scripts.py +++ b/tests/setup_scripts.py @@ -343,7 +343,9 @@ def highway_exp_setup(sim_params=None, "lanes": 1, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) From 153da9d7dfd6b811c13634284f06000dceca9842 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 18 May 2020 12:25:00 -0700 Subject: [PATCH 067/438] change the bucket to a common bucket --- examples/exp_configs/non_rl/highway_single.py | 2 +- flow/core/experiment.py | 29 +++++----------- flow/core/kernel/vehicle/base.py | 4 +-- flow/core/kernel/vehicle/traci.py | 13 ++----- flow/data_pipeline/data_pipeline.py | 34 +++++++++++++++++-- flow/data_pipeline/lambda_function.py | 4 +-- flow/visualize/i210_replay.py | 10 ++++++ 7 files changed, 57 insertions(+), 39 deletions(-) diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py index 
46b18c0e9..c2df0759a 100644 --- a/examples/exp_configs/non_rl/highway_single.py +++ b/examples/exp_configs/non_rl/highway_single.py @@ -19,7 +19,7 @@ END_SPEED = 16 TRAFFIC_FLOW = 2056 HORIZON = 3600 -INCLUDE_NOISE = False +INCLUDE_NOISE = True additional_net_params = ADDITIONAL_NET_PARAMS.copy() additional_net_params.update({ diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 8b5cbac02..2296ef635 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,10 +1,11 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3 +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info import datetime import logging import time +from datetime import date import os import numpy as np import uuid @@ -145,9 +146,7 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] - extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], - "road_grade": [], "source_id": []} + extra_info = extra_init() source_id = uuid.uuid4().hex for i in range(num_runs): @@ -167,22 +166,7 @@ def rl_actions(*_): ret += reward # collect additional information for the data pipeline - for vid in veh_ids: - extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) - extra_info["id"].append(vid) - extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) - extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) - extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) - extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) - extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( - self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) - extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) - extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - position = self.env.k.vehicle.get_2d_position(vid) - extra_info["x"].append(position[0]) - extra_info["y"].append(position[1]) - extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) - #extra_info["next_v"].append(self.env.k.vehicle.get_next_v(vid)) + get_extra_info(self.env.k.vehicle, extra_info, veh_ids) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. 
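Between the two hunks of this experiment.py diff, the shape of the refactored collection loop is easier to see in isolation. A minimal sketch, assuming the `extra_init`/`get_extra_info` helpers added in flow/data_pipeline/data_pipeline.py further down this commit; `env`, `num_runs`, `num_steps`, and `rl_actions` are illustrative names, not part of the patch:

```python
# Sketch of the per-step data-pipeline collection pattern this commit
# introduces. extra_init() returns a dict of empty column lists;
# get_extra_info() appends one row per vehicle per simulation step.
import uuid
from flow.data_pipeline.data_pipeline import extra_init, get_extra_info

extra_info = extra_init()
source_id = uuid.uuid4().hex  # unique tag for this simulation
for i in range(num_runs):
    state = env.reset()
    for _ in range(num_steps):
        state, reward, done, _ = env.step(rl_actions(state))
        veh_ids = env.k.vehicle.get_ids()
        get_extra_info(env.k.vehicle, extra_info, veh_ids)
        extra_info["source_id"].extend([source_id + "run" + str(i)] * len(veh_ids))
```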
@@ -231,7 +215,10 @@ def rl_actions(*_):
             upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name)
 
             if partition_name:
-                upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/'
+                if partition_name == "default":
+                    partition_name = source_id[0:3]
+                partition_name = date.today().isoformat() + " " + partition_name
+                upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/'
                              + upload_file_path.split('/')[-1].split('_')[0] + '.csv',
                              upload_file_path, str(only_query)[2:-2])
 
diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py
index cb547cddb..7f001ed13 100644
--- a/flow/core/kernel/vehicle/base.py
+++ b/flow/core/kernel/vehicle/base.py
@@ -691,8 +691,8 @@ def get_accel_without_noise(self, veh_id):
         """Return the acceleration without noise of vehicle with veh_id."""
         raise NotImplementedError
 
-    def get_velocity_without_noise(self, veh_id):
-        """Return the velocity without noise of vehicle with veh_id."""
+    def get_realized_accel(self, veh_id):
+        """Return the acceleration that the vehicle actually realized."""
         raise NotImplementedError
 
     def get_road_grade(self, veh_id):
diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py
index 2fd978012..f1dbee8bf 100644
--- a/flow/core/kernel/vehicle/traci.py
+++ b/flow/core/kernel/vehicle/traci.py
@@ -1115,13 +1115,6 @@ def set_max_speed(self, veh_id, max_speed):
         self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed)
 
     # add for data pipeline
-    def get_next_v(self, veh_id):
-        """See parent class."""
-        if not "next_v" in self.__vehicles[veh_id]:
-            self.__vehicles[veh_id]["next_v"] = None
-        return self.__vehicles[veh_id]["next_v"]
-        #return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step
-
     def get_accel(self, veh_id):
         """See parent class."""
         if not "accel" in self.__vehicles[veh_id]:
@@ -1138,9 +1131,9 @@ def get_accel_without_noise(self, veh_id):
             self.__vehicles[veh_id]["accel_without_noise"] = None
         return self.__vehicles[veh_id]["accel_without_noise"]
 
-    def get_velocity_without_noise(self, veh_id):
+    def get_realized_accel(self, veh_id):
         """See parent class."""
-        return max([self.get_speed(veh_id) + self.get_accel_without_noise(veh_id) * self.sim_step, 0])
+        return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step
 
     def get_2d_position(self, veh_id, error=-1001):
         """See parent class."""
@@ -1148,5 +1141,5 @@ def get_2d_position(self, veh_id, error=-1001):
 
     def get_road_grade(self, veh_id):
         """See parent class."""
-        # TODO
+        # TODO : Brent
         return 0
diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py
index afbc09f92..0cd0cbc79 100644
--- a/flow/data_pipeline/data_pipeline.py
+++ b/flow/data_pipeline/data_pipeline.py
@@ -88,6 +88,34 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query):
     return
 
 
+def extra_init():
+    """Return a dictionary with every field pre-populated with an empty list."""
+    extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [],
+                  "accel_without_noise": [], "realized_accel": [], "leader_id": [], "follower_id": [],
+                  "leader_rel_speed": [], "road_grade": [], "source_id": []}
+    return extra_info
+
+
+def get_extra_info(veh_kernel, extra_info, veh_ids):
+    """Get all the necessary information for the trajectory output from flow."""
+    for vid in veh_ids:
+        extra_info["time"].append(veh_kernel.get_timestep(vid) / 1000)
extra_info["id"].append(vid)
+        extra_info["headway"].append(veh_kernel.get_headway(vid))
+        extra_info["acceleration"].append(veh_kernel.get_accel(vid))
+        extra_info["leader_id"].append(veh_kernel.get_leader(vid))
+        extra_info["follower_id"].append(veh_kernel.get_follower(vid))
+        extra_info["leader_rel_speed"].append(veh_kernel.get_speed(
+            veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid))
+        extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid))
+        extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid))
+        extra_info["road_grade"].append(veh_kernel.get_road_grade(vid))
+        position = veh_kernel.get_2d_position(vid)
+        extra_info["x"].append(position[0])
+        extra_info["y"].append(position[1])
+        extra_info["speed"].append(veh_kernel.get_speed(vid))
+
+
 class AthenaQuery:
     """
     Class used to run query.
@@ -199,7 +227,7 @@ def update_partition(self, partition):
             self.existing_partitions.append(partition)
         return
 
-    def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"):
+    def run_query(self, query_name, result_location="s3://circles.data/query-result/", partition="default"):
         """Start the execution of a query, does not wait for it to finish.
 
         Parameters
@@ -257,14 +285,14 @@ def test_sql_query(query_name):
 
     # Run the respective sql query
     queryEngine = AthenaQuery()
-    execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test",
+    execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data/query-result/query-test",
                                          partition="test")
     if queryEngine.wait_for_execution(execution_id):
         raise RuntimeError("execution timed out")
 
     # get the Athena query result from S3
     s3 = boto3.resource("s3")
-    s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv",
+    s3.Bucket("circles.data").download_file("query-result/query-test/"+execution_id+".csv",
                                                  "data/athena_result.csv")
     athena_result = pd.read_csv("data/athena_result.csv")
     athena_result = athena_result.sort_values(by=["time", "id"])
diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py
index afef55a4b..3f0abb8a1 100644
--- a/flow/data_pipeline/lambda_function.py
+++ b/flow/data_pipeline/lambda_function.py
@@ -25,7 +25,7 @@ def lambda_handler(event, context):
     response = s3.head_object(Bucket=bucket, Key=key)
     run_query = response["Metadata"]["run-query"]
 
-    if bucket == 'brent.experiments' and 'trajectory-output/' in key:
+    if bucket == 'circles.data' and 'trajectory-output/' in key:
         if run_query == "all":
             query_list = tags["analysis"]
         elif not run_query:
@@ -33,4 +33,4 @@ def lambda_handler(event, context):
         else:
             query_list = run_query.split("\', \'")
         for query_name in query_list:
-            queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition)
+            queryEngine.run_query(query_name, 's3://circles.data/query-result/auto/', partition)
diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py
index 502d646aa..9e41009e8 100644
--- a/flow/visualize/i210_replay.py
+++ b/flow/visualize/i210_replay.py
@@ -27,6 +27,9 @@
 from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS
 from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables
 
+from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info
+import uuid
+
 EXAMPLE_USAGE = """
 example usage:
     python i210_replay.py -r
diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py
index afef55a4b..3f0abb8a1 100644
--- a/flow/data_pipeline/lambda_function.py
+++ b/flow/data_pipeline/lambda_function.py
@@ -25,7 +25,7 @@ def lambda_handler(event, context):
         response = s3.head_object(Bucket=bucket, Key=key)
         run_query = response["Metadata"]["run-query"]
 
-    if bucket == 'brent.experiments' and 'trajectory-output/' in key:
+    if bucket == 'circles.data' and 'trajectory-output/' in key:
         if run_query == "all":
             query_list = tags["analysis"]
         elif not run_query:
@@ -33,4 +33,4 @@ def lambda_handler(event, context):
         else:
             query_list = run_query.split("\', \'")
         for query_name in query_list:
-            queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition)
+            queryEngine.run_query(query_name, 's3://circles.data/query-result/auto/', partition)
diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py
index 502d646aa..9e41009e8 100644
--- a/flow/visualize/i210_replay.py
+++ b/flow/visualize/i210_replay.py
@@ -27,6 +27,9 @@
 from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS
 from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables
 
+from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info
+import uuid
+
 EXAMPLE_USAGE = """
 example usage:
     python i210_replay.py -r /ray_results/experiment_dir/result_dir -c 1
@@ -190,6 +193,9 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
         key: [] for key in custom_callables.keys()
     })
 
+    extra_info = extra_init()
+    source_id = uuid.uuid4().hex
+
     for i in range(args.num_rollouts):
         vel = []
         custom_vals = {key: [] for key in custom_callables.keys()}
@@ -222,6 +228,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
                 veh_ids = env.k.vehicle.get_ids()
                 vel.append(np.mean(env.k.vehicle.get_speed(veh_ids)))
 
+                # Collect information from flow for the trajectory output
+                get_extra_info(env.k.vehicle, extra_info, veh_ids)
+                extra_info["source_id"].extend([source_id + "run" + str(i)] * len(veh_ids))
+
                 # Compute the results for the custom callables.
                 for (key, lambda_func) in custom_callables.items():
                     custom_vals[key].append(lambda_func(env))

From 2851e8a6b7089756c33a4519f6148c373b763a77 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Mon, 18 May 2020 12:28:17 -0700
Subject: [PATCH 068/438] removed the old tests

---
 flow/data_pipeline/datapipeline_test.py | 37 -------------------------
 1 file changed, 37 deletions(-)
 delete mode 100644 flow/data_pipeline/datapipeline_test.py

diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py
deleted file mode 100644
index 0e1a50518..000000000
--- a/flow/data_pipeline/datapipeline_test.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""functions that calculates the expected result for testing."""
-import math
-
-# Vehicle Mass
-M = 1200
-# Gravity
-g = 9.81
-# Density of Air
-ro_air = 1.225
-# Rolling resistance coefficient
-C_r = .005
-# Aerodynamic drag coefficient
-C_a = 0.3
-# Vehicle Cross sectional Area
-A = 2.6
-# Road grade
-theta = 0
-
-
-def heavyside(inp):
-    """Return 1 if input is positive."""
-    return 0 if inp <= 0 else 1
-
-
-def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a):
-    """Calculate the expected power for POWER_DEMAND_MODEL query."""
-    acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration)
-    accel_and_slope = M * mu * (acceleration + g * math.sin(theta))
-    rolling_friction = M * g * C_r * mu
-    air_drag = .5 * ro_air * A * C_a * mu**3
-    power = accel_and_slope + rolling_friction + air_drag
-    return power
-
-
-def apply_energy_one(row):
-    """Apply the power calculation to a row of the dataframe."""
-    return [row[0], row[1], calculate_power(row[4], row[6])]

From c01f235891baa45b8af010e730e0daeefb557ae5 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Mon, 18 May 2020 12:49:11 -0700
Subject: [PATCH 069/438] fix merge issue in i210_replay

---
 flow/visualize/i210_replay.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py
index f6898d1ec..abb13bbc9 100644
--- a/flow/visualize/i210_replay.py
+++ b/flow/visualize/i210_replay.py
@@ -211,7 +211,6 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
     extra_info = extra_init()
     source_id = uuid.uuid4().hex
 
-    for i in range(args.num_rollouts):
     i = 0
     while i < args.num_rollouts:
         print("Rollout iter", i)
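A note on the `str(only_query)[2:-2]` idiom that keeps reappearing around `upload_to_s3` in these patches: `only_query` is a list of query names, and slicing its string representation yields the comma-separated value that `lambda_handler` later recovers with `run_query.split("\', \'")`. A quick illustration (plain Python, not code from this series):

    only_query = ['POWER_DEMAND_MODEL', 'POWER_DEMAND_MODEL_DENOISED_ACCEL']

    # str(only_query) == "['POWER_DEMAND_MODEL', 'POWER_DEMAND_MODEL_DENOISED_ACCEL']"
    metadata_value = str(only_query)[2:-2]
    # "POWER_DEMAND_MODEL', 'POWER_DEMAND_MODEL_DENOISED_ACCEL"

    # lambda_function.py reverses the transformation:
    query_list = metadata_value.split("', '")
    # ['POWER_DEMAND_MODEL', 'POWER_DEMAND_MODEL_DENOISED_ACCEL']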
From 27445157469851cf146b7eb4b08d811929155033 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Tue, 19 May 2020 04:01:41 -0700
Subject: [PATCH 070/438] added auto upload to s3 feature for the replay script
 and fixed some other minor issues

---
 flow/core/experiment.py               | 15 ++---
 flow/core/kernel/vehicle/traci.py     |  5 +-
 flow/data_pipeline/data_pipeline.py   | 12 ++--
 flow/data_pipeline/lambda_function.py |  4 +-
 flow/data_pipeline/query.py           | 86 ++++++++++++++-------------
 flow/data_pipeline/run_query.py       |  2 +-
 flow/visualize/i210_replay.py         | 21 ++++++-
 7 files changed, 82 insertions(+), 63 deletions(-)

diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index 8e0ba0bbf..6e9be9aea 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -88,7 +88,7 @@ def __init__(self, flow_params, custom_callables=None):
 
         logging.info("Initializing environment.")
 
-    def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None):
+    def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=""):
         """Run the given network for a set number of runs.
 
         Parameters
@@ -106,8 +106,9 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=No
             will be used to later for query. If NONE, won't upload output
             to S3.
         only_query: str
-            Specifies whether queries should be automatically run the
-            simulation data when it gets uploaded to s3
+            Specifies which queries should be automatically run when the
+            simulation data gets uploaded to S3. If an empty str is passed in,
+            then no queries will be run on the data.
 
         Returns
         -------
@@ -147,7 +148,7 @@ def rl_actions(*_):
         t = time.time()
         times = []
         extra_info = extra_init()
-        source_id = uuid.uuid4().hex
+        source_id = 'flow_{}'.format(uuid.uuid4().hex)
 
         for i in range(num_runs):
             ret = 0
@@ -167,7 +168,7 @@ def rl_actions(*_):
 
                 # collect additional information for the data pipeline
                 get_extra_info(self.env.k.vehicle, extra_info, veh_ids)
-                extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids))
+                extra_info["source_id"].extend(['{}_run_{}'.format(source_id, i)] * len(veh_ids))
 
                 # Compute the results for the custom callables.
for (key, lambda_func) in self.custom_callables.items(): @@ -218,8 +219,8 @@ def rl_actions(*_): if partition_name == "default": partition_name = source_id[0:3] partition_name = date.today().isoformat() + " " + partition_name - upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_to_s3('circles.data.pipeline', 'trajectory-output/partition_name={}/{}.csv'.format( + partition_name, upload_file_path.split('/')[-1].split('_')[0]), upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index b2fd66073..2a4e06257 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -962,7 +962,6 @@ def apply_acceleration(self, veh_ids, acc): self.__vehicles[vid]["accel"] = acc[i] this_vel = self.get_speed(vid) next_vel = max([this_vel + acc[i] * self.sim_step, 0]) - #self.__vehicles[vid]["next_v"] = next_vel self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3) def apply_lane_change(self, veh_ids, direction): @@ -1127,7 +1126,7 @@ def set_max_speed(self, veh_id, max_speed): # add for data pipeline def get_accel(self, veh_id): """See parent class.""" - if not "accel" in self.__vehicles[veh_id]: + if "accel" not in self.__vehicles[veh_id]: self.__vehicles[veh_id]["accel"] = None return self.__vehicles[veh_id]["accel"] @@ -1137,7 +1136,7 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): """See parent class.""" - if not "accel_without_noise" in self.__vehicles[veh_id]: + if "accel_without_noise" not in self.__vehicles[veh_id]: self.__vehicles[veh_id]["accel_without_noise"] = None return self.__vehicles[veh_id]["accel_without_noise"] diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 0cd0cbc79..fbd975c5e 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -38,7 +38,7 @@ def generate_trajectory_table(data_path, extra_info, partition_name): return output_file_path -def generate_trajectory_from_flow(data_path, extra_info, partition_name): +def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): """Generate desired output for the trajectory_table based only on flow output. Parameters @@ -227,7 +227,7 @@ def update_partition(self, partition): self.existing_partitions.append(partition) return - def run_query(self, query_name, result_location="s3://circles.data/query-result/", partition="default"): + def run_query(self, query_name, result_location="s3://circles.data.pipeline/query-result/", partition="default"): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -285,15 +285,15 @@ def test_sql_query(query_name): # Run the respective sql query queryEngine = AthenaQuery() - execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data/query-result/query-test", - partition="test") + execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data.pipeline/" + "query-result/query-test", partition="test") if queryEngine.wait_for_execution(execution_id): raise RuntimeError("execution timed out") # get the Athena query result from S3 s3 = boto3.resource("s3") - s3.Bucket("circles.data").download_file("query-result/query-test/"+execution_id+".csv", - "data/athena_result.csv") + s3.Bucket("circles.data.pipeline").download_file("query-result/query-test/"+execution_id+".csv", + "data/athena_result.csv") athena_result = pd.read_csv("data/athena_result.csv") athena_result = athena_result.sort_values(by=["time", "id"]) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 3f0abb8a1..fd50ba8f5 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -25,7 +25,7 @@ def lambda_handler(event, context): response = s3.head_object(Bucket=bucket, Key=key) run_query = response["Metadata"]["run-query"] - if bucket == 'circles.data' and 'trajectory-output/' in key: + if bucket == 'circles.data.pipeline' and 'trajectory-output/' in key: if run_query == "all": query_list = tags["analysis"] elif not run_query: @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://circles.data/query-result/auto/', partition) + queryEngine.run_query(query_name, 's3://circles.data.pipeline/query-result/auto/', partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2d34570f9..78960456d 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -11,45 +11,47 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = "SELECT id, time, speed, acceleration, 1200 * speed * " \ - "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL' AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\' " \ - "ORDER BY id, time " - POWER_DEMAND_MODEL_DENOISED_ACCEL = \ - "SELECT id, time, speed, accel_without_noise, 1200 * speed * " \ - "((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\' " \ - "ORDER BY id, time " - POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = \ - "WITH sub1 AS ( " \ - "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ - "time - LAG(time, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, " \ - "LAG(speed, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, " \ - 
"LAG(acceleration, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, " \ - "LAG(accel_without_noise, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised "\ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" \ - ")," \ - "sub2 AS (" \ - "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ - "prev_speed+accel_without_noise*sim_step AS speed_denoised " \ - "FROM sub1" \ - ") " \ - "SELECT id, time, speed_denoised, accel_without_noise, " \ - "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ - "THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 " \ - "* SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 " \ - "* 2.6 * 0.3 * POW(speed_denoised,3) AS power, " \ - "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id " \ - "FROM sub2 " \ - "ORDER BY id, time " + POWER_DEMAND_MODEL = """ + SELECT id, time, speed, acceleration, 1200 * speed * + ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 + + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 + * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time """ + POWER_DEMAND_MODEL_DENOISED_ACCEL = """ + SELECT id, time, speed, accel_without_noise, + 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) + * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) + + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time """ + POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ + WITH lagged_timestep AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + time - LAG(time, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + LAG(speed, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, + LAG(acceleration, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, + LAG(accel_without_noise, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised + FROM trajectory_table + WHERE partition_name=\'{partition}\'), + speed_denoised_table AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + prev_speed+accel_without_noise*sim_step AS speed_denoised + FROM lagged_timestep + ) + SELECT id, time, speed_denoised, accel_without_noise, + 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 + THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 + * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 + * 2.6 * 0.3 * POW(speed_denoised,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id + FROM speed_denoised_table + ORDER BY id, time """ diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index f065a726e..ac927c749 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -6,7 +6,7 @@ parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" "a S3 location") 
parser.add_argument("--run", type=str, nargs="+") -parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") +parser.add_argument("--result_location", type=str, nargs='?', default="s3://circles.data.pipeline/query-result/") parser.add_argument("--partition", type=str, nargs='?', default="default") parser.add_argument("--list_partitions", action="store_true") parser.add_argument("--check_status", type=str, nargs='+') diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index abb13bbc9..5fd3142ad 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -209,7 +209,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= }) extra_info = extra_init() - source_id = uuid.uuid4().hex + source_id = 'flow_{}'.format(uuid.uuid4().hex) i = 0 while i < args.num_rollouts: @@ -251,7 +251,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # Collect information from flow for the trajectory output get_extra_info(env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend([source_id + "run" + str(i)] * len(veh_ids)) + extra_info["source_id"].extend(['{}_run_{}'.format(source_id, i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in custom_callables.items(): @@ -328,6 +328,17 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # convert the emission file into a csv file emission_to_csv(emission_path, output_path=output_path) + # generate the trajectory output file + trajectory_table_path = './data/' + source_id + ".csv" + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) + + # upload to s3 if asked + if args.use_s3: + partition_name = date.today().isoformat() + " " + source_id[0:3] + upload_to_s3('circles.data.pipeline', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_file_path, str(args.only_query)[2:-2]) + # print the location of the emission csv file print("\nGenerated emission file at " + output_path) @@ -437,6 +448,12 @@ def create_parser(): 'be run in cluster mode') parser.add_argument('--exp_title', type=str, required=False, default=None, help='Informative experiment title to help distinguish results') + parser.add_argument( + '--only_query', + nargs='*', default="[\'all\']", + help='specify which query should be run by lambda' + 'for detail, see upload_to_s3 in data_pipeline.py' + ) return parser From 462f4bb877a6e179ed27a926e0e660e9b37d6700 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 04:07:29 -0700 Subject: [PATCH 071/438] fix trailing white space style issue --- flow/data_pipeline/query.py | 79 ++++++++++++++++++------------------- 1 file changed, 39 insertions(+), 40 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 78960456d..06150eadc 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -12,46 +12,45 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" POWER_DEMAND_MODEL = """ - SELECT id, time, speed, acceleration, 1200 * speed * - ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 - + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 
* 2.6 * 0.3 - * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time """ + SELECT id, time, speed, acceleration, 1200 * speed * + ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 + + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 + * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time""" POWER_DEMAND_MODEL_DENOISED_ACCEL = """ - SELECT id, time, speed, accel_without_noise, - 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) - * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) - + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time """ + SELECT id, time, speed, accel_without_noise, + 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) + * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) + + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time""" POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ - WITH lagged_timestep AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - time - LAG(time, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, - LAG(speed, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, - LAG(acceleration, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, - LAG(accel_without_noise, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised - FROM trajectory_table + WITH lagged_timestep AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + time - LAG(time, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + LAG(speed, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, + LAG(acceleration, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, + LAG(accel_without_noise, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised + FROM trajectory_table WHERE partition_name=\'{partition}\'), - speed_denoised_table AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - prev_speed+accel_without_noise*sim_step AS speed_denoised - FROM lagged_timestep - ) - SELECT id, time, speed_denoised, accel_without_noise, - 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 - THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 - * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 - * 2.6 * 0.3 * POW(speed_denoised,3) AS power, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id - FROM speed_denoised_table - ORDER BY id, time """ + speed_denoised_table AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + 
prev_speed+accel_without_noise*sim_step AS speed_denoised + FROM lagged_timestep) + SELECT id, time, speed_denoised, accel_without_noise, + 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 + THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 + * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 + * 2.6 * 0.3 * POW(speed_denoised,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id + FROM speed_denoised_table + ORDER BY id, time""" From c2513e9e1d0f22065e513c34e8edee178bef1602 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 04:10:43 -0700 Subject: [PATCH 072/438] some minor issue fixed --- flow/data_pipeline/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 06150eadc..4f34945d8 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -22,7 +22,7 @@ class QueryStrings(Enum): ORDER BY id, time""" POWER_DEMAND_MODEL_DENOISED_ACCEL = """ SELECT id, time, speed, accel_without_noise, - 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) + 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id From 1dcf6a654f59a2c36406b3b6cf732e1fac79d3fb Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 08:42:29 -0700 Subject: [PATCH 073/438] reformatting energy queries --- flow/data_pipeline/query.py | 112 ++++++++++++++++++++++-------------- 1 file changed, 69 insertions(+), 43 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 4f34945d8..e194b5648 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -5,52 +5,78 @@ tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], "analysis": ["POWER_DEMAND_MODEL"]} +VEHICLE_POWER_DEMAND_SUBQUERY = """ + SELECT + id, + "time", + speed, + acceleration, + road_grade, + 1200 * speed * ( + (CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + + 0.8 + 9.81 * SIN(road_grade) + ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL' AS energy_model_id, + source_id + FROM {} + WHERE 1 = 1 + AND partition_name=\'{partition}\' + ORDER BY id, "time" + """ class QueryStrings(Enum): """An enumeration of all the pre-defined query strings.""" - SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = """ - SELECT id, time, speed, acceleration, 1200 * speed * - ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 - + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 - * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time""" + SAMPLE = """ + SELECT * + FROM trajectory_table + WHERE partition_name=\'{partition}\' + LIMIT 15; + """ + + UPDATE_PARTITION = """ + ALTER TABLE trajectory_table + ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); + """ + + POWER_DEMAND_MODEL = 
VEHICLE_POWER_DEMAND_SUBQUERY.format('trajectory_table')
+
+    POWER_DEMAND_MODEL_DENOISED_ACCEL = """
+        WITH denoised_accel_cte AS (
+            SELECT
+                id,
+                "time",
+                speed,
+                accel_without_noise AS acceleration,
+                road_grade,
+                source_id
+            FROM trajectory_table
+        )
+        {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_accel_cte'))
+
+    POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """
+        WITH lagged_timestep AS (
+            SELECT
+                "time",
+                id,
+                accel_without_noise,
+                road_grade,
+                source_id,
+                "time" - LAG("time", 1)
+                  OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step,
+                LAG(speed, 1)
+                  OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed
+            FROM trajectory_table
+            WHERE 1 = 1
+                AND partition_name=\'{partition}\'
+        ), denoised_speed_cte AS (
+            SELECT
+                id,
+                "time",
+                prev_speed + accel_without_noise * sim_step AS speed,
+                accel_without_noise AS acceleration,
+                road_grade,
+                source_id
+            FROM lagged_timestep
+        )
+        {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_speed_cte'))
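All three POWER_DEMAND_MODEL* queries now funnel different speed/acceleration columns through the same final select. For readers who want the formula outside of SQL, here is a plain-Python transcription of that power-demand expression; it is the same expression the `calculate_power` helper deleted in PATCH 068 encoded. This is a sketch for reference only, and `power_demand` is an illustrative name, not a function in the Flow codebase:

    import math

    # Constants as they appear in the query: 1200 kg vehicle mass, g = 9.81,
    # rolling-resistance coefficient 0.005, air density 1.225, cross-sectional
    # area 2.6 m^2, aerodynamic drag coefficient 0.3.
    M, G, C_R, RHO_AIR, AREA, C_A = 1200.0, 9.81, 0.005, 1.225, 2.6, 0.3


    def power_demand(speed, accel, road_grade=0.0):
        """Python version of the SQL expression in VEHICLE_POWER_DEMAND_SUBQUERY."""
        # the CASE WHEN branch: only positive accelerations are scaled by (1 - 0.8)
        accel_and_slope = M * speed * (
            (1 - 0.8) * accel * (accel > 0) + 0.8 + G * math.sin(road_grade))
        rolling_friction = M * G * C_R * speed
        air_drag = 0.5 * RHO_AIR * AREA * C_A * speed ** 3
        return accel_and_slope + rolling_friction + air_drag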
From 8a68fb93be587339fc1535b5c58c5e79731e1cd7 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Tue, 19 May 2020 08:52:17 -0700
Subject: [PATCH 074/438] rename vehicle power demand query

---
 flow/data_pipeline/query.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index e194b5648..db1283f9d 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -5,7 +5,7 @@
 tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"],
         "analysis": ["POWER_DEMAND_MODEL"]}
 
-VEHICLE_POWER_DEMAND_SUBQUERY = """
+VEHICLE_POWER_DEMAND_FINAL_SELECT = """
     SELECT
         id,
         "time",
@@ -39,7 +39,7 @@ class QueryStrings(Enum):
         ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');
         """
 
-    POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_SUBQUERY.format('trajectory_table')
+    POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_FINAL_SELECT.format('trajectory_table')
 
     POWER_DEMAND_MODEL_DENOISED_ACCEL = """
         WITH denoised_accel_cte AS (
@@ -52,7 +52,7 @@ class QueryStrings(Enum):
                 source_id
             FROM trajectory_table
         )
-        {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_accel_cte'))
+        {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte'))
 
     POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """
         WITH lagged_timestep AS (
@@ -79,4 +79,4 @@ class QueryStrings(Enum):
                 source_id
             FROM lagged_timestep
         )
-        {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_speed_cte'))
+        {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_speed_cte'))

From f021d5a6381e777eaea0a917b6a8f7e95ca3a1e0 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Tue, 19 May 2020 10:44:06 -0700
Subject: [PATCH 075/438] move partition condition to CTEs

---
 flow/data_pipeline/query.py | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index db1283f9d..28564afde 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -19,8 +19,6 @@
         'POWER_DEMAND_MODEL' AS energy_model_id,
         source_id
     FROM {}
-    WHERE 1 = 1
-        AND partition_name=\'{partition}\'
     ORDER BY id, "time"
     """
 
@@ -39,7 +37,20 @@ class QueryStrings(Enum):
         ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');
         """
 
-    POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_FINAL_SELECT.format('trajectory_table')
+    POWER_DEMAND_MODEL = """
+        WITH regular_cte AS (
+            SELECT
+                id,
+                "time",
+                speed,
+                acceleration,
+                road_grade,
+                source_id
+            FROM trajectory_table
+            WHERE 1 = 1
+                AND partition_name=\'{partition}\'
+        )
+        {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte'))
 
     POWER_DEMAND_MODEL_DENOISED_ACCEL = """
         WITH denoised_accel_cte AS (
@@ -51,14 +62,16 @@ class QueryStrings(Enum):
                 road_grade,
                 source_id
             FROM trajectory_table
+            WHERE 1 = 1
+                AND partition_name=\'{partition}\'
         )
         {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte'))
 
     POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """
         WITH lagged_timestep AS (
             SELECT
-                "time",
                 id,
+                "time",
                 accel_without_noise,
                 road_grade,
                 source_id,

From 3d16a5ad4e308da3628354191f14b2c949a59526 Mon Sep 17 00:00:00 2001
From: Eugene Vinitsky
Date: Tue, 19 May 2020 15:53:30 -0400
Subject: [PATCH 076/438] Ev i210 highway updated (#937)

Merge in wave calibration for the straight road

@AboudyKreidieh
---
 examples/exp_configs/non_rl/highway_single.py |   69 +-
 .../exp_configs/non_rl/i210_subnetwork.py     |  110 +-
 .../non_rl/i210_subnetwork_sweep.py           |  151 -
 .../rl/multiagent/multiagent_i210.py          |   12 +-
 .../rl/multiagent/multiagent_straight_road.py |   45 +-
 .../templates/sumo/i210_with_ghost_cell.xml   | 5719 +++++++++++++++++
 .../i210_with_ghost_cell_with_downstream.xml  | 5719 +++++++++++++++++
 examples/train.py                             |   18 +-
 flow/controllers/__init__.py                  |    5 +-
 flow/controllers/routing_controllers.py       |   26 +
 flow/networks/highway.py                      |   17 +-
 flow/networks/i210_subnetwork.py              |  247 +-
 flow/networks/ring.py                         |    2 +-
 tests/fast_tests/test_scenarios.py            |    6 +-
 tests/fast_tests/test_vehicles.py             |    1 +
 tests/setup_scripts.py                        |    1 +
 16 files changed, 11812 insertions(+), 336 deletions(-)
 delete mode 100644 examples/exp_configs/non_rl/i210_subnetwork_sweep.py
 create mode 100644 examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml
 create mode 100644
examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py index a2e44032a..7e0a5eb49 100644 --- a/examples/exp_configs/non_rl/highway_single.py +++ b/examples/exp_configs/non_rl/highway_single.py @@ -1,11 +1,7 @@ -"""Multi-agent highway with ramps example. - -Trains a non-constant number of agents, all sharing the same policy, on the -highway with ramps network. -""" +"""Example of an open network with human-driven vehicles and a wave.""" import numpy as np -from flow.controllers import BandoFTLController +from flow.controllers import IDMController from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import EnvParams from flow.core.params import NetParams @@ -14,18 +10,23 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams +from flow.core.params import SumoCarFollowingParams from flow.core.rewards import miles_per_megajoule from flow.networks import HighwayNetwork from flow.envs import TestEnv from flow.networks.highway import ADDITIONAL_NET_PARAMS -TRAFFIC_SPEED = 11 -END_SPEED = 16 -TRAFFIC_FLOW = 2056 -HORIZON = 2000 -INCLUDE_NOISE = False +# the speed of vehicles entering the network +TRAFFIC_SPEED = 24.1 +# the maximum speed at the downstream boundary edge +END_SPEED = 6.0 +# the inflow rate of vehicles +TRAFFIC_FLOW = 2215 +# the simulation time horizon (in steps) +HORIZON = 1500 +# whether to include noise in the car-following models +INCLUDE_NOISE = True -# percentage of autonomous vehicles compared to human vehicles on highway PENETRATION_RATE = 10.0 additional_net_params = ADDITIONAL_NET_PARAMS.copy() @@ -38,35 +39,38 @@ "speed_limit": 30, # number of edges to divide the highway into "num_edges": 2, - # whether to include a ghost edge of length 500m. This edge is provided a - # different speed limit. 
+ # whether to include a ghost edge "use_ghost_edge": True, # speed limit for the ghost edge - "ghost_speed_limit": END_SPEED + "ghost_speed_limit": END_SPEED, + # length of the cell imposing a boundary + "boundary_cell_length": 300, }) vehicles = VehicleParams() vehicles.add( "human", - num_vehicles=0, + acceleration_controller=(IDMController, { + 'a': 1.3, + 'b': 2.0, + 'noise': 0.3 if INCLUDE_NOISE else 0.0 + }), + car_following_params=SumoCarFollowingParams( + min_gap=0.5 + ), lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", + model="SL2015", + lc_sublane=2.0, ), - acceleration_controller=(BandoFTLController, { - 'alpha': .5, - 'beta': 20.0, - 'h_st': 12.0, - 'h_go': 50.0, - 'v_max': 30.0, - 'noise': 1.0 if INCLUDE_NOISE else 0.0, - }), ) +inflows = InFlows() + if PENETRATION_RATE > 0.0: vehicles.add( "av", num_vehicles=0, - acceleration_controller=(FollowerStopper, {"v_des": 11.0}), + acceleration_controller=(FollowerStopper, {"v_des": 6.0}), ) inflows = InFlows() @@ -76,7 +80,7 @@ edge="highway_0", vehs_per_hour=int(TRAFFIC_FLOW * (1 - PENETRATION_RATE / 100)), depart_lane="free", - depart_speed="23", + depart_speed=TRAFFIC_SPEED, name="idm_highway_inflow") if PENETRATION_RATE > 0.0: @@ -85,7 +89,7 @@ edge="highway_0", vehs_per_hour=int(TRAFFIC_FLOW * (PENETRATION_RATE / 100)), depart_lane="free", - depart_speed="23", + depart_speed=TRAFFIC_SPEED, name="av_highway_inflow") # SET UP FLOW PARAMETERS @@ -106,15 +110,16 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, - warmup_steps=0, - sims_per_step=1, + warmup_steps=500, + sims_per_step=3, ), # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( - sim_step=0.5, + sim_step=0.4, render=False, - restart_instance=False + restart_instance=False, + use_ballistic=True ), # network-related parameters (see flow.core.params.NetParams and the diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index d993ae93a..25565bb49 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -1,9 +1,9 @@ """I-210 subnetwork example.""" import os - import numpy as np -from flow.controllers.car_following_models import IDMController +from flow.controllers import IDMController +from flow.controllers import I210Router from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -15,7 +15,49 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# create the base vehicle type that will be used for inflows +# =========================================================================== # +# Specify some configurable constants. # +# =========================================================================== # + +# whether to include the upstream ghost edge in the network +WANT_GHOST_CELL = True +# whether to include the downstream slow-down edge in the network +WANT_DOWNSTREAM_BOUNDARY = True +# whether to include vehicles on the on-ramp +ON_RAMP = True +# the inflow rate of vehicles (in veh/hr) +INFLOW_RATE = 5 * 2215 +# the speed of inflowing vehicles from the main edge (in m/s) +INFLOW_SPEED = 24.1 + +# =========================================================================== # +# Specify the path to the network template. 
# +# =========================================================================== # + +if WANT_DOWNSTREAM_BOUNDARY: + net_template = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_" + "downstream.xml") +elif WANT_GHOST_CELL: + net_template = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml") +else: + net_template = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/test2.net.xml") + +# If the ghost cell is not being used, remove it from the initial edges that +# vehicles can be placed on. +edges_distribution = EDGES_DISTRIBUTION.copy() +if not WANT_GHOST_CELL: + edges_distribution.remove("ghost0") + +# =========================================================================== # +# Specify vehicle-specific information and inflows. # +# =========================================================================== # + vehicles = VehicleParams() vehicles.add( "human", @@ -24,35 +66,39 @@ lane_change_mode="strategic", ), acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 + "a": 1.3, + "b": 2.0, + "noise": 0.3, }), + routing_controller=(I210Router, {}) if ON_RAMP else None, ) inflow = InFlows() # main highway inflow.add( veh_type="human", - edge="119257914", - vehs_per_hour=8378, - departLane="random", - departSpeed=23) + edge="ghost0" if WANT_GHOST_CELL else "119257914", + vehs_per_hour=INFLOW_RATE, + departLane="best", + departSpeed=INFLOW_SPEED) # on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321, -# departLane="random", -# departSpeed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421, -# departLane="random", -# departSpeed=20) - -NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") +if ON_RAMP: + inflow.add( + veh_type="human", + edge="27414345", + vehs_per_hour=500, + departLane="random", + departSpeed=10) + inflow.add( + veh_type="human", + edge="27414342#0", + vehs_per_hour=500, + departLane="random", + departSpeed=10) + +# =========================================================================== # +# Generate the flow_params dict with all relevant simulation information. # +# =========================================================================== # flow_params = dict( # name of the experiment @@ -69,7 +115,7 @@ # simulation-related parameters sim=SumoParams( - sim_step=0.5, + sim_step=0.4, render=False, color_by_speed=True, use_ballistic=True @@ -77,14 +123,18 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=4500, + horizon=10000, ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, - template=NET_TEMPLATE + template=net_template, + additional_params={ + "on_ramp": ON_RAMP, + "ghost_edge": WANT_GHOST_CELL, + } ), # vehicles to be placed in the network at the start of a rollout (see @@ -94,10 +144,14 @@ # parameters specifying the positioning of vehicles upon initialization/ # reset (see flow.core.params.InitialConfig) initial=InitialConfig( - edges_distribution=EDGES_DISTRIBUTION, + edges_distribution=edges_distribution, ), ) +# =========================================================================== # +# Specify custom callable that is logged during simulation runtime. 
# +# =========================================================================== # + edge_id = "119257908#1-AddedOnRampEdge" custom_callables = { "avg_merge_speed": lambda env: np.nan_to_num(np.mean( diff --git a/examples/exp_configs/non_rl/i210_subnetwork_sweep.py b/examples/exp_configs/non_rl/i210_subnetwork_sweep.py deleted file mode 100644 index 28cba81ce..000000000 --- a/examples/exp_configs/non_rl/i210_subnetwork_sweep.py +++ /dev/null @@ -1,151 +0,0 @@ -"""I-210 subnetwork example. - -In this case flow_params is a list of dicts. This is to test the effects of -multiple human-driver model parameters on the flow traffic. -""" -from collections import OrderedDict -from copy import deepcopy -import itertools -import os -import numpy as np - -from flow.core.params import SumoParams -from flow.core.params import EnvParams -from flow.core.params import NetParams -from flow.core.params import SumoLaneChangeParams -from flow.core.params import VehicleParams -from flow.core.params import InitialConfig -from flow.core.params import InFlows -import flow.config as config -from flow.envs import TestEnv -from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION - -# the default parameters for all lane change parameters -default_dict = { - "lane_change_mode": "strategic", - "model": "LC2013", - "lc_strategic": 1.0, - "lc_cooperative": 1.0, - "lc_speed_gain": 1.0, - "lc_keep_right": 1.0, - "lc_look_ahead_left": 2.0, - "lc_speed_gain_right": 1.0, - "lc_sublane": 1.0, - "lc_pushy": 0, - "lc_pushy_gap": 0.6, - "lc_assertive": 1, - "lc_accel_lat": 1.0 -} - -# values to sweep through for some lane change parameters -sweep_dict = OrderedDict({ - "lc_strategic": [1.0, 2.0, 4.0, 8.0], - "lc_cooperative": [1.0, 2.0], - "lc_look_ahead_left": [2.0, 4.0] -}) - -# Create a list of possible lane change parameter combinations. -all_names = sorted(sweep_dict) -combinations = itertools.product(*(sweep_dict[name] for name in all_names)) -combination_list = list(combinations) -res = [] -for val in combination_list: - curr_dict = {} - for elem, name in zip(val, all_names): - curr_dict[name] = elem - res.append(curr_dict) - -# Create a list of all possible flow_params dictionaries to sweep through the -# different lane change parameters. -flow_params = [] - -for lane_change_dict in res: - # no vehicles in the network. The lane change parameters of inflowing - # vehicles are updated here. 
- vehicles = VehicleParams() - update_dict = deepcopy(default_dict) - update_dict.update(lane_change_dict) - vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams(**update_dict) - ) - - inflow = InFlows() - # main highway - inflow.add( - veh_type="human", - edge="119257914", - vehs_per_hour=8378, - # probability=1.0, - departLane="random", - departSpeed=20) - # on ramp - inflow.add( - veh_type="human", - edge="27414345", - vehs_per_hour=321, - departLane="random", - departSpeed=20) - inflow.add( - veh_type="human", - edge="27414342#0", - vehs_per_hour=421, - departLane="random", - departSpeed=20) - - NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") - - params = dict( - # name of the experiment - exp_tag='I-210_subnetwork', - - # name of the flow environment the experiment is running on - env_name=TestEnv, - - # name of the network class the experiment is running on - network=I210SubNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # simulation-related parameters - sim=SumoParams( - sim_step=0.8, - render=True, - color_by_speed=True - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=4500, # one hour of run time - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflow, - template=NET_TEMPLATE - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon - # initialization/reset (see flow.core.params.InitialConfig) - initial=InitialConfig( - edges_distribution=EDGES_DISTRIBUTION, - ), - ) - - # Store the next flow_params dict. 
- flow_params.append(params) - - -custom_callables = { - "avg_merge_speed": lambda env: np.mean(env.k.vehicle.get_speed( - env.k.vehicle.get_ids_by_edge("119257908#1-AddedOnRampEdge"))) -} diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index a7d707068..01b9e6082 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -36,6 +36,10 @@ # percentage of autonomous vehicles compared to human vehicles on highway PENETRATION_RATE = 10 +# TODO: temporary fix +edges_distribution = EDGES_DISTRIBUTION.copy() +edges_distribution.remove("ghost0") + # SET UP PARAMETERS FOR THE ENVIRONMENT additional_env_params = ADDITIONAL_ENV_PARAMS.copy() additional_env_params.update({ @@ -160,7 +164,11 @@ # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, - template=NET_TEMPLATE + template=NET_TEMPLATE, + additional_params={ + "on_ramp": False, + "ghost_edge": False + } ), # vehicles to be placed in the network at the start of a rollout (see @@ -170,7 +178,7 @@ # parameters specifying the positioning of vehicles upon initialization/ # reset (see flow.core.params.InitialConfig) initial=InitialConfig( - edges_distribution=EDGES_DISTRIBUTION, + edges_distribution=edges_distribution, ), ) diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py index a15471539..ec71a2f42 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py +++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py @@ -5,7 +5,7 @@ """ from flow.controllers import RLController, IDMController from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ - VehicleParams, SumoParams, SumoLaneChangeParams + VehicleParams, SumoParams, SumoLaneChangeParams, SumoCarFollowingParams from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS from flow.networks import HighwayNetwork from flow.envs.multiagent import MultiStraightRoad @@ -16,27 +16,35 @@ # SET UP PARAMETERS FOR THE SIMULATION -# number of steps per rollout -HORIZON = 2000 +# the speed of vehicles entering the network +TRAFFIC_SPEED = 24.1 +# the maximum speed at the downstream boundary edge +END_SPEED = 6.0 +# the inflow rate of vehicles +HIGHWAY_INFLOW_RATE = 2215 +# the simulation time horizon (in steps) +HORIZON = 1500 +# whether to include noise in the car-following models +INCLUDE_NOISE = True -# inflow rate on the highway in vehicles per hour -HIGHWAY_INFLOW_RATE = 10800 / 5 -# percentage of autonomous vehicles compared to human vehicles on highway -PENETRATION_RATE = 10 - - -# SET UP PARAMETERS FOR THE NETWORK +PENETRATION_RATE = 10.0 additional_net_params = ADDITIONAL_NET_PARAMS.copy() additional_net_params.update({ # length of the highway - "length": 2000, + "length": 2500, # number of lanes "lanes": 1, # speed limit for all edges "speed_limit": 30, # number of edges to divide the highway into - "num_edges": 2 + "num_edges": 2, + # whether to include a ghost edge + "use_ghost_edge": True, + # speed limit for the ghost edge + "ghost_speed_limit": END_SPEED, + # length of the cell imposing a boundary + "boundary_cell_length": 300, }) @@ -62,11 +70,18 @@ # human vehicles vehicles.add( "human", - num_vehicles=0, + acceleration_controller=(IDMController, { + 'a': 1.3, + 'b': 2.0, + 'noise': 0.3 if INCLUDE_NOISE else 0.0 + }), + car_following_params=SumoCarFollowingParams( + min_gap=0.5 
+    ),
     lane_change_params=SumoLaneChangeParams(
-        lane_change_mode="strategic",
+        model="SL2015",
+        lc_sublane=2.0,
     ),
-    acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}),
 )
 
 # autonomous vehicles
diff --git a/examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml b/examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml
new file mode 100644
index 000000000..66e5a1131
--- /dev/null
+++ b/examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml
@@ -0,0 +1,5719 @@
[5719 lines of SUMO network template XML omitted]
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml new file mode 100644 index 000000000..10d4d8d45 --- /dev/null +++ b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml @@ -0,0 +1,5719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/train.py b/examples/train.py index 5b5431a3d..e34b2935c 100644 --- a/examples/train.py +++ b/examples/train.py @@ -23,9 +23,7 @@ except ImportError: print("Stable-baselines not installed") -import ray from ray import tune -from ray.tune.registry import register_env from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper try: from ray.rllib.agents.agent import get_agent_class @@ -126,6 +124,9 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ + from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv + from stable_baselines import PPO2 + if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -174,6 +175,13 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ + from ray import tune + from ray.tune.registry import register_env + try: + from ray.rllib.agents.agent import get_agent_class + except ImportError: + from ray.rllib.agents.registry import get_agent_class + horizon = flow_params['env'].horizon alg_run = flags.algorithm.upper() @@ -255,6 +263,9 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm 
in RLlib.""" + import ray + from ray.tune import run_experiments + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) @@ -402,6 +413,9 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" + from stable_baselines.common.vec_env import DummyVecEnv + from stable_baselines import PPO2 + flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] diff --git a/flow/controllers/__init__.py b/flow/controllers/__init__.py index 4dfcf05b7..a61d16980 100755 --- a/flow/controllers/__init__.py +++ b/flow/controllers/__init__.py @@ -28,7 +28,7 @@ # routing controllers from flow.controllers.base_routing_controller import BaseRouter from flow.controllers.routing_controllers import ContinuousRouter, \ - GridRouter, BayBridgeRouter + GridRouter, BayBridgeRouter, I210Router __all__ = [ "RLController", "BaseController", "BaseLaneChangeController", "BaseRouter", @@ -36,5 +36,6 @@ "IDMController", "SimCarFollowingController", "FollowerStopper", "PISaturation", "StaticLaneChanger", "SimLaneChangeController", "ContinuousRouter", "GridRouter", "BayBridgeRouter", "LACController", - "GippsController", "NonLocalFollowerStopper", "BandoFTLController" + "GippsController", "NonLocalFollowerStopper", "BandoFTLController", + "I210Router" ] diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py index e6ccdde78..02aa34cb4 100755 --- a/flow/controllers/routing_controllers.py +++ b/flow/controllers/routing_controllers.py @@ -124,3 +124,29 @@ def choose_route(self, env): new_route = super().choose_route(env) return new_route + + +class I210Router(ContinuousRouter): + """Assists in choosing routes in select cases for the I-210 sub-network. + + Extension to the Continuous Router. + + Usage + ----- + See base class for usage example. + """ + + def choose_route(self, env): + """See parent class.""" + edge = env.k.vehicle.get_edge(self.veh_id) + lane = env.k.vehicle.get_lane(self.veh_id) + + # vehicles on these edges in lanes 4 and 5 are not going to be able to + # make it out in time + if edge == "119257908#1-AddedOffRampEdge" and lane in [5, 4, 3]: + new_route = env.available_routes[ + "119257908#1-AddedOffRampEdge"][0][0] + else: + new_route = super().choose_route(env) + + return new_route diff --git a/flow/networks/highway.py b/flow/networks/highway.py index 7e9c18ad5..6f10d3279 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -14,11 +14,13 @@ "speed_limit": 30, # number of edges to divide the highway into "num_edges": 1, - # whether to include a ghost edge of length 500m. This edge is provided a - # different speed limit. + # whether to include a ghost edge. This edge is provided a different speed + # limit. "use_ghost_edge": False, # speed limit for the ghost edge "ghost_speed_limit": 25, + # length of the cell imposing a boundary + "boundary_cell_length": 500 } @@ -34,9 +36,10 @@ class HighwayNetwork(Network): * **lanes** : number of lanes in the highway * **speed_limit** : max speed limit of the highway * **num_edges** : number of edges to divide the highway into - * **use_ghost_edge** : whether to include a ghost edge of length 500m. This - edge is provided a different speed limit. + * **use_ghost_edge** : whether to include a ghost edge. This edge is + provided a different speed limit. 
* **ghost_speed_limit** : speed limit for the ghost edge + * **boundary_cell_length** : length of the cell imposing a boundary Usage ----- @@ -80,6 +83,7 @@ def specify_nodes(self, net_params): length = net_params.additional_params["length"] num_edges = net_params.additional_params.get("num_edges", 1) segment_lengths = np.linspace(0, length, num_edges+1) + end_length = net_params.additional_params["boundary_cell_length"] nodes = [] for i in range(num_edges+1): @@ -92,7 +96,7 @@ def specify_nodes(self, net_params): if self.net_params.additional_params["use_ghost_edge"]: nodes += [{ "id": "edge_{}".format(num_edges + 1), - "x": length + self.end_length, + "x": length + end_length, "y": 0 }] @@ -103,6 +107,7 @@ def specify_edges(self, net_params): length = net_params.additional_params["length"] num_edges = net_params.additional_params.get("num_edges", 1) segment_length = length/float(num_edges) + end_length = net_params.additional_params["boundary_cell_length"] edges = [] for i in range(num_edges): @@ -120,7 +125,7 @@ def specify_edges(self, net_params): "type": "highway_end", "from": "edge_{}".format(num_edges), "to": "edge_{}".format(num_edges + 1), - "length": self.end_length + "length": end_length }] return edges diff --git a/flow/networks/i210_subnetwork.py b/flow/networks/i210_subnetwork.py index d8e05efb5..b86a0dc8a 100644 --- a/flow/networks/i210_subnetwork.py +++ b/flow/networks/i210_subnetwork.py @@ -1,9 +1,18 @@ """Contains the I-210 sub-network class.""" - from flow.networks.base import Network +from flow.core.params import InitialConfig +from flow.core.params import TrafficLightParams + +ADDITIONAL_NET_PARAMS = { + # whether to include vehicle on the on-ramp + "on_ramp": False, + # whether to include the downstream slow-down edge in the network + "ghost_edge": False, +} EDGES_DISTRIBUTION = [ # Main highway + "ghost0", "119257914", "119257908#0", "119257908#1-AddedOnRampEdge", @@ -25,6 +34,12 @@ class I210SubNetwork(Network): """A network used to simulate the I-210 sub-network. + Requires from net_params: + + * **on_ramp** : whether to include vehicle on the on-ramp + * **ghost_edge** : whether to include the downstream slow-down edge in the + network + Usage ----- >>> from flow.core.params import NetParams @@ -39,103 +54,145 @@ class I210SubNetwork(Network): >>> ) """ - def specify_routes(self, net_params): - """See parent class. + def __init__(self, + name, + vehicles, + net_params, + initial_config=InitialConfig(), + traffic_lights=TrafficLightParams()): + """Initialize the I210 sub-network scenario.""" + for p in ADDITIONAL_NET_PARAMS.keys(): + if p not in net_params.additional_params: + raise KeyError('Network parameter "{}" not supplied'.format(p)) + + super(I210SubNetwork, self).__init__( + name=name, + vehicles=vehicles, + net_params=net_params, + initial_config=initial_config, + traffic_lights=traffic_lights, + ) - Routes for vehicles moving through the bay bridge from Oakland to San - Francisco. 
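
(The parameter check introduced in the new __init__ above makes both flags mandatory, so older configs fail with a KeyError until they are updated. A minimal sketch of how a config on this revision might supply them; the template path below is invented for illustration, and the real example configs build it from flow.config.PROJECT_PATH.)

    import os

    from flow.core.params import NetParams

    # invented location; the example configs derive this path from
    # flow.config.PROJECT_PATH instead
    NET_TEMPLATE = os.path.join(
        "examples", "exp_configs", "templates", "sumo", "test2.net.xml")

    # both keys are now required: the __init__ above raises a KeyError if
    # either one is missing from additional_params
    net_params = NetParams(
        template=NET_TEMPLATE,
        additional_params={
            "on_ramp": False,     # leave out the on-ramp/off-ramp routes
            "ghost_edge": False,  # leave out the upstream ghost edge
        },
    )
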
- """ + def specify_routes(self, net_params): + """See parent class.""" rts = { - # Main highway "119257914": [ - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) - # (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 17 / 8378) - ], - # "119257908#0": [ - # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOnRampEdge": [ - # (["119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1": [ - # (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOffRampEdge": [ - # (["119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#2": [ - # (["119257908#2", "119257908#3"], 1), - # ], - # "119257908#3": [ - # (["119257908#3"], 1), - # ], - # - # # On-ramp - # "27414345": [ - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 9 / 321), - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 9 / 321), - # ], - # "27414342#0": [ - # (["27414342#0", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 20 / 421), - # (["27414342#0", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 20 / 421), - # ], - # "27414342#1-AddedOnRampEdge": [ - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 0.5), - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - # ], - # - # # Off-ramp - # "173381935": [ - # (["173381935"], 1), - # ], + (["119257914", + "119257908#0", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + ] } + if net_params.additional_params["ghost_edge"]: + rts.update({ + "ghost0": [ + (["ghost0", + "119257914", + "119257908#0", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1), + ], + }) + + if net_params.additional_params["on_ramp"]: + rts.update({ + # Main highway + "119257908#0": [ + (["119257908#0", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + 
], + "119257908#1-AddedOnRampEdge": [ + (["119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + ], + "119257908#1": [ + (["119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + ], + "119257908#1-AddedOffRampEdge": [ + (["119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + ], + "119257908#2": [ + (["119257908#2", + "119257908#3"], 1), + ], + "119257908#3": [ + (["119257908#3"], 1), + ], + + # On-ramp + "27414345": [ + (["27414345", + "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1 - 9 / 321), + (["27414345", + "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "173381935"], 9 / 321), + ], + "27414342#0": [ + (["27414342#0", + "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1 - 20 / 421), + (["27414342#0", + "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "173381935"], 20 / 421), + ], + "27414342#1-AddedOnRampEdge": [ + (["27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 0.5), + (["27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "173381935"], 0.5), + ], + + # Off-ramp + "173381935": [ + (["173381935"], 1), + ], + }) + return rts diff --git a/flow/networks/ring.py b/flow/networks/ring.py index de4d17503..ceef22a78 100755 --- a/flow/networks/ring.py +++ b/flow/networks/ring.py @@ -37,7 +37,7 @@ class RingNetwork(Network): >>> from flow.core.params import NetParams >>> from flow.core.params import VehicleParams >>> from flow.core.params import InitialConfig - >>> from flow.scenarios import RingNetwork + >>> from flow.networks import RingNetwork >>> >>> network = RingNetwork( >>> name='ring_road', diff --git a/tests/fast_tests/test_scenarios.py b/tests/fast_tests/test_scenarios.py index d72a50b17..2263f3474 100644 --- a/tests/fast_tests/test_scenarios.py +++ b/tests/fast_tests/test_scenarios.py @@ -97,7 +97,8 @@ def test_additional_net_params(self): "speed_limit": 30, "num_edges": 1, "use_ghost_edge": False, - "ghost_speed_limit": 25 + "ghost_speed_limit": 25, + "boundary_cell_length": 300, } ) ) @@ -116,7 +117,8 @@ def test_ghost_edge(self): "speed_limit": 30, "num_edges": 1, "use_ghost_edge": False, - "ghost_speed_limit": 25 + "ghost_speed_limit": 25, + "boundary_cell_length": 300, }) ) env.reset() diff --git a/tests/fast_tests/test_vehicles.py b/tests/fast_tests/test_vehicles.py index b791bba64..a37b235ff 100644 --- a/tests/fast_tests/test_vehicles.py +++ b/tests/fast_tests/test_vehicles.py @@ -261,6 +261,7 @@ def test_no_junctions_highway(self): "num_edges": 1, "use_ghost_edge": False, "ghost_speed_limit": 25, + "boundary_cell_length": 300, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() diff --git a/tests/setup_scripts.py b/tests/setup_scripts.py index ac88d2e42..343bad906 100644 --- a/tests/setup_scripts.py +++ b/tests/setup_scripts.py @@ -346,6 +346,7 @@ def highway_exp_setup(sim_params=None, "num_edges": 1, "use_ghost_edge": 
False, "ghost_speed_limit": 25, + "boundary_cell_length": 300, } net_params = NetParams(additional_params=additional_net_params) From f43d0e43a0b4ef158a15c680d4e3131bd7dee0fb Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 13:41:17 -0700 Subject: [PATCH 077/438] fix some query string formatting issue --- examples/train.py | 2 -- flow/data_pipeline/query.py | 6 +++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/examples/train.py b/examples/train.py index 1ba9586af..e34b2935c 100644 --- a/examples/train.py +++ b/examples/train.py @@ -23,9 +23,7 @@ except ImportError: print("Stable-baselines not installed") -import ray from ray import tune -from ray.tune.registry import register_env from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper try: from ray.rllib.agents.agent import get_agent_class diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 28564afde..2b5052f66 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -48,7 +48,7 @@ class QueryStrings(Enum): source_id FROM trajectory_table WHERE 1 = 1 - AND partition_name=\'{partition}\' + AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) @@ -63,7 +63,7 @@ class QueryStrings(Enum): source_id FROM trajectory_table WHERE 1 = 1 - AND partition_name=\'{partition}\' + AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) @@ -81,7 +81,7 @@ class QueryStrings(Enum): OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed FROM trajectory_table WHERE 1 = 1 - AND partition_name=\'{partition}\' + AND partition_name=\'{{partition}}\' ), denoised_speed_cte AS ( SELECT id, From 78e47457c034dce1cfbe3d18695ba6ff2a466159 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 15:28:54 -0700 Subject: [PATCH 078/438] fix some style issue --- flow/data_pipeline/query.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2b5052f66..96fc86497 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -22,6 +22,7 @@ ORDER BY id, "time" """ + class QueryStrings(Enum): """An enumeration of all the pre-defined query strings.""" From 437f8cf4103626bae222268b3e9380f397f26469 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 15:56:53 -0700 Subject: [PATCH 079/438] get up to date with i210_dev --- examples/exp_configs/non_rl/highway.py | 40 ++++++---- .../exp_configs/non_rl/i210_subnetwork.py | 2 +- .../exp_configs/templates/sumo/test2.net.xml | 78 +++++-------------- 3 files changed, 48 insertions(+), 72 deletions(-) diff --git a/examples/exp_configs/non_rl/highway.py b/examples/exp_configs/non_rl/highway.py index 1905e2f7f..e7505f2d7 100644 --- a/examples/exp_configs/non_rl/highway.py +++ b/examples/exp_configs/non_rl/highway.py @@ -5,19 +5,25 @@ from flow.core.params import VehicleParams, InFlows from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS -from flow.envs import TestEnv +from flow.envs import LaneChangeAccelEnv vehicles = VehicleParams() vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 - }), - ) + veh_id="human", + acceleration_controller=(IDMController, {}), + 
    lane_change_params=SumoLaneChangeParams(
+        model="SL2015",
+        lc_sublane=2.0,
+    ),
+    num_vehicles=20)
+vehicles.add(
+    veh_id="human2",
+    acceleration_controller=(IDMController, {}),
+    lane_change_params=SumoLaneChangeParams(
+        model="SL2015",
+        lc_sublane=2.0,
+    ),
+    num_vehicles=20)
 
 env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS)
 
@@ -25,7 +31,13 @@
 inflow.add(
     veh_type="human",
     edge="highway_0",
-    vehs_per_hour=10800 / 5.0,
+    probability=0.25,
+    departLane="free",
+    departSpeed=20)
+inflow.add(
+    veh_type="human2",
+    edge="highway_0",
+    probability=0.25,
     departLane="free",
     departSpeed=20)
 
@@ -35,7 +47,7 @@
     exp_tag='highway',
 
     # name of the flow environment the experiment is running on
-    env_name=TestEnv,
+    env_name=LaneChangeAccelEnv,
 
     # name of the network class the experiment is running on
     network=HighwayNetwork,
@@ -46,12 +58,12 @@
     # sumo-related parameters (see flow.core.params.SumoParams)
     sim=SumoParams(
         render=True,
-        sim_step=0.5
+        lateral_resolution=1.0,
     ),
 
     # environment related parameters (see flow.core.params.EnvParams)
     env=EnvParams(
-        horizon=4000,
+        horizon=1500,
         additional_params=ADDITIONAL_ENV_PARAMS.copy(),
     ),
 
diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py
index 194da1099..25565bb49 100644
--- a/examples/exp_configs/non_rl/i210_subnetwork.py
+++ b/examples/exp_configs/non_rl/i210_subnetwork.py
@@ -117,7 +117,7 @@
     sim=SumoParams(
         sim_step=0.4,
         render=False,
-        color_by_speed=False,
+        color_by_speed=True,
         use_ballistic=True
     ),
 
diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml
index 16170b917..00e3edcd5 100644
--- a/examples/exp_configs/templates/sumo/test2.net.xml
+++ b/examples/exp_configs/templates/sumo/test2.net.xml
@@ -1,41 +1,5 @@
@@ -4716,24 +4680,24 @@
@@ -4837,10 +4801,10 @@
[XML hunk bodies not recoverable from this copy of the patch; only the hunk headers above survive]

From e6db29b9013b541f9e066383f1aa7f3090f885 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Tue, 19 May 2020 20:45:08 -0700
Subject: [PATCH 080/438] remove dupe imports

---
 examples/train.py | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/examples/train.py b/examples/train.py
index e34b2935c..7cb84d361 100644
--- a/examples/train.py
+++ b/examples/train.py
@@ -124,8 +124,6 @@ def run_model_stablebaseline(flow_params,
     stable_baselines.*
         the trained model
     """
-    from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv
-    from stable_baselines import PPO2
 
     if num_cpus == 1:
         constructor = env_constructor(params=flow_params, version=0)()
         # The algorithms require a vectorized environment to run
@@ -175,12 +173,7 @@ def setup_exps_rllib(flow_params,
     dict
         training configuration parameters
     """
-    from ray import tune
     from ray.tune.registry import register_env
-    try:
-        from ray.rllib.agents.agent import get_agent_class
-    except ImportError:
-        from ray.rllib.agents.registry import get_agent_class
 
     horizon = flow_params['env'].horizon
 
@@ -263,8 +256,6 @@ def on_episode_end(info):
 
 def train_rllib(submodule, flags):
     """Train policies using the PPO algorithm in RLlib."""
-    import ray
-    from ray.tune import run_experiments
 
     flow_params = submodule.flow_params
     flow_params['sim'].render = flags.render
     policy_graphs = getattr(submodule, "POLICY_GRAPHS", None)
@@ -413,8 +404,6 @@ def train_h_baselines(flow_params, args, multiagent):
 
 def train_stable_baselines(submodule, flags):
     """Train policies using the PPO algorithm in stable-baselines."""
-    from stable_baselines.common.vec_env import DummyVecEnv
-    from stable_baselines import PPO2
 
     flow_params = submodule.flow_params
     # Path
to the saved files From 6b5111b93db5760235d05f26e6ef163591b36497 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:51:14 -0700 Subject: [PATCH 081/438] remove blank lines after docstrings --- examples/train.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/train.py b/examples/train.py index 7cb84d361..5a9ab5903 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,7 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -256,7 +255,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) @@ -404,7 +402,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] From 4d206b374e843ee611f46d5519de62119d8fb1b2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:59:00 -0700 Subject: [PATCH 082/438] add back ray import --- examples/train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/train.py b/examples/train.py index 5a9ab5903..50720b756 100644 --- a/examples/train.py +++ b/examples/train.py @@ -255,6 +255,8 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" + import ray + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From 18a88bcaef5b677a1f81e65beb9dbab6d3a17f29 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 21:04:56 -0700 Subject: [PATCH 083/438] remove whitespace --- examples/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index 50720b756..1689d846f 100644 --- a/examples/train.py +++ b/examples/train.py @@ -256,7 +256,7 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" import ray - + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From 7306298d97fbaa1fd03fb9f4a4ea816631b300b5 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 21:18:43 -0700 Subject: [PATCH 084/438] update lambda function, change partition into multi-column --- flow/core/experiment.py | 8 +-- flow/data_pipeline/data_pipeline.py | 84 ++++++++------------------- flow/data_pipeline/lambda_function.py | 26 +++------ flow/data_pipeline/query.py | 29 ++++----- flow/data_pipeline/run_query.py | 6 +- flow/visualize/i210_replay.py | 7 ++- 6 files changed, 58 insertions(+), 102 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 6e9be9aea..f46f802a5 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -217,10 +217,10 @@ def rl_actions(*_): if partition_name: if partition_name == "default": - partition_name = source_id[0:3] - partition_name = date.today().isoformat() + " " + partition_name - upload_to_s3('circles.data.pipeline', 'trajectory-output/partition_name={}/{}.csv'.format( - partition_name, 
upload_file_path.split('/')[-1].split('_')[0]), + partition_name = source_id[-3:] + cur_date = date.today().isoformat() + upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index fbd975c5e..111c41994 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -2,8 +2,9 @@ import pandas as pd import numpy as np import boto3 -from flow.data_pipeline.query import QueryStrings, testing_functions +from flow.data_pipeline.query import QueryStrings from time import time +from datetime import date def generate_trajectory_table(data_path, extra_info, partition_name): @@ -90,7 +91,7 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): def extra_init(): """Return the dictionary with all the feild pre-populated with empty list.""" - extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], + extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "road_grade": [], "source_id": []} return extra_info @@ -99,7 +100,7 @@ def extra_init(): def get_extra_info(veh_kernel, extra_info, veh_ids): """Get all the necessary information for the trajectory output from flow.""" for vid in veh_ids: - extra_info["time"].append(veh_kernel.get_timestep(vid) / 1000) + extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(veh_kernel.get_headway(vid)) extra_info["acceleration"].append(veh_kernel.get_accel(vid)) @@ -154,7 +155,7 @@ def get_existing_partitions(self): response = self.client.start_query_execution( QueryString='SHOW PARTITIONS trajectory_table', QueryExecutionContext={ - 'Database': 'simulation' + 'Database': 'circles' }, WorkGroup='primary' ) @@ -164,7 +165,7 @@ def get_existing_partitions(self): QueryExecutionId=response['QueryExecutionId'], MaxResults=1000 ) - return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] + return [data['Data'][0]['VarCharValue'] for data in response['ResultSet']['Rows']] def check_status(self, execution_id): """Return the status of the execution with given id. @@ -207,27 +208,30 @@ def wait_for_execution(self, execution_id): return False return True - def update_partition(self, partition): + def update_partition(self, query_date, partition): """Load the given partition to the trajectory_table on Athena. 
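
(The date=/partition_name= segments written into the object key above follow the Hive partitioning convention, which is what lets the new two-column Athena partitions map directly onto the S3 layout. A small self-contained sketch of the key construction, with invented identifiers:)

    from datetime import date

    # invented identifiers; in the pipeline, source_id comes from the
    # experiment and the file name from the local emission output
    source_id = "flow_a1b2c3"
    partition_name = source_id[-3:]      # "2c3"
    cur_date = date.today().isoformat()  # e.g. "2020-05-19"

    # Hive-style key: each "k=v" path segment becomes a partition column
    # ("date", "partition_name") that Athena can prune on
    key = 'trajectory-output/date={}/partition_name={}/{}.csv'.format(
        cur_date, partition_name, source_id)
    # -> trajectory-output/date=2020-05-19/partition_name=2c3/flow_a1b2c3.csv
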
Parameters ---------- + query_date : str + the new partition date that needs to be loaded partition : str the new partition that needs to be loaded """ response = self.client.start_query_execution( - QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(date=query_date, partition=partition), QueryExecutionContext={ - 'Database': 'simulation' + 'Database': 'circles' }, WorkGroup='primary' ) if self.wait_for_execution(response['QueryExecutionId']): raise RuntimeError("update partition timed out") - self.existing_partitions.append(partition) + self.existing_partitions.append("date={}/partition_name={}".format(query_date, partition)) return - def run_query(self, query_name, result_location="s3://circles.data.pipeline/query-result/", partition="default"): + def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", + query_date="today", partition="default"): """Start the execution of a query, does not wait for it to finish. Parameters @@ -236,6 +240,8 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/quer name of the query in QueryStrings enum that will be run result_location: str, optional location on the S3 bucket where the result will be stored + query_date : str + name of the partition date to run this query on partition: str, optional name of the partition to run this query on Returns @@ -249,13 +255,16 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/quer if query_name not in QueryStrings.__members__: raise ValueError("query not existed: please add it to query.py") - if partition not in self.existing_partitions: - self.update_partition(partition) + if query_date == "today": + query_date = date.today().isoformat() + + if "date={}/partition_name={}".format(query_date, partition) not in self.existing_partitions: + self.update_partition(query_date, partition) response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(partition=partition), + QueryString=QueryStrings[query_name].value.format(date=query_date, partition=partition), QueryExecutionContext={ - 'Database': 'simulation' + 'Database': 'circles' }, ResultConfiguration={ 'OutputLocation': result_location, @@ -263,50 +272,3 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/quer WorkGroup='primary' ) return response['QueryExecutionId'] - -########################################################################### -# Helpers for testing the SQL Queries # -########################################################################### - - -def test_sql_query(query_name): - """Start the execution of a query, does not wait for it to finish. 
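
(For reference, the DDL that update_partition renders and submits is the UPDATE_PARTITION template quoted further down in this patch; with invented values it expands roughly as follows:)

    # template string copied from QueryStrings.UPDATE_PARTITION below;
    # the date and partition values here are invented
    template = ("ALTER TABLE trajectory_table "
                "ADD IF NOT EXISTS PARTITION "
                "(date = '{date}', partition_name='{partition}');")
    print(template.format(date="2020-05-19", partition="2c3"))
    # ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION
    # (date = '2020-05-19', partition_name='2c3');
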
- - Parameters - ---------- - query_name : str - name of the query in QueryStrings enum that will be tested - Raises - ------ - RuntimeError: if timeout - """ - if query_name not in testing_functions: - raise ValueError("no tests supported for this query") - - # Run the respective sql query - queryEngine = AthenaQuery() - execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data.pipeline/" - "query-result/query-test", partition="test") - if queryEngine.wait_for_execution(execution_id): - raise RuntimeError("execution timed out") - - # get the Athena query result from S3 - s3 = boto3.resource("s3") - s3.Bucket("circles.data.pipeline").download_file("query-result/query-test/"+execution_id+".csv", - "data/athena_result.csv") - athena_result = pd.read_csv("data/athena_result.csv") - athena_result = athena_result.sort_values(by=["time", "id"]) - - # get the python expected result - expected_result = pd.read_csv("data/test_data.csv") - expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand") - expected_result.columns = ["time", "id", "power"] - expected_result = expected_result.sort_values(by=["time", "id"]) - - difference = athena_result["power"] - expected_result["power"] - print("average difference is: " + str(np.mean(difference))) - print("std of difference is: " + str(np.std(difference))) - print("average ratio of difference to expected is: " + - str(np.mean(np.divide(difference, expected_result["power"])))) - difference = pd.DataFrame(difference) - difference.to_csv("./difference.csv") diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index fd50ba8f5..35dcbfba8 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -1,36 +1,28 @@ """lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus -from examples.data_pipeline import AthenaQuery -from examples.query import tags +from flow.data_pipeline.data_pipeline import AthenaQuery +from flow.data_pipeline.query import tags s3 = boto3.client('s3') queryEngine = AthenaQuery() def lambda_handler(event, context): - """Invoke by AWS Lambda upon triggered by an event. 
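
(The pair of split calls added to the handler recover the two partition columns from the S3 object key; a standalone sketch against a fabricated, URL-encoded event key:)

    from urllib.parse import unquote_plus

    # fabricated S3 object key, following the layout written by upload_to_s3
    key = unquote_plus('trajectory-output/date%3D2020-05-19/'
                       'partition_name%3D2c3/flow_a1b2c3.csv')

    query_date = key.split('/')[-3].split('=')[-1]  # '2020-05-19'
    partition = key.split('/')[-2].split('=')[-1]   # '2c3'
    assert (query_date, partition) == ('2020-05-19', '2c3')
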
- - Parameters - ---------- - event : dic < str: dic > - an S3 event - context: - not used - """ for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) + query_date = key.split('/')[-3].split('=')[-1] partition = key.split('/')[-2].split('=')[-1] response = s3.head_object(Bucket=bucket, Key=key) - run_query = response["Metadata"]["run-query"] + required_query = response["Metadata"]["run-query"] if bucket == 'circles.data.pipeline' and 'trajectory-output/' in key: - if run_query == "all": - query_list = tags["analysis"] - elif not run_query: + if required_query == "all": + query_list = tags["energy"] + elif not required_query: break else: - query_list = run_query.split("\', \'") + query_list = required_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://circles.data.pipeline/query-result/auto/', partition) + queryEngine.run_query(query_name, 's3://circles.data.pipeline/result/auto/', query_date, partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 96fc86497..2e137946d 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,13 +2,12 @@ from enum import Enum # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], - "analysis": ["POWER_DEMAND_MODEL"]} +tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT id, - "time", + time_step, speed, acceleration, road_grade, @@ -19,7 +18,7 @@ 'POWER_DEMAND_MODEL' AS energy_model_id, source_id FROM {} - ORDER BY id, "time" + ORDER BY id, time_step """ @@ -29,26 +28,28 @@ class QueryStrings(Enum): SAMPLE = """ SELECT * FROM trajectory_table - WHERE partition_name=\'{partition}\' + WHERE date = \'{date}\' + AND partition_name=\'{partition}\' LIMIT 15; """ UPDATE_PARTITION = """ ALTER TABLE trajectory_table - ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); + ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ POWER_DEMAND_MODEL = """ WITH regular_cte AS ( SELECT id, - "time", + time_step, speed, acceleration, road_grade, source_id FROM trajectory_table WHERE 1 = 1 + AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) @@ -57,13 +58,14 @@ class QueryStrings(Enum): WITH denoised_accel_cte AS ( SELECT id, - "time", + time_step, speed, accel_without_noise AS acceleration, road_grade, source_id FROM trajectory_table WHERE 1 = 1 + AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) @@ -72,21 +74,22 @@ class QueryStrings(Enum): WITH lagged_timestep AS ( SELECT id, - "time", + time_step, accel_without_noise, road_grade, source_id, - "time" - LAG("time", 1) - OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + time_step - LAG(time_step, 1) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) - OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed FROM trajectory_table WHERE 1 = 1 + AND date = \'{{date}}\' AND 
partition_name=\'{{partition}}\' ), denoised_speed_cte AS ( SELECT id, - "time", + time_step, prev_speed + accel_without_noise * sim_step AS speed, accel_without_noise AS acceleration, road_grade, diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index ac927c749..1eb802205 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -1,6 +1,6 @@ """runner script for invoking query manually.""" import argparse -from flow.data_pipeline.data_pipeline import AthenaQuery, test_sql_query +from flow.data_pipeline.data_pipeline import AthenaQuery from flow.data_pipeline.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" @@ -21,7 +21,7 @@ if args.run: execution_ids = [] for query_name in args.run: - execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) + execution_ids.append(queryEngine.run_query(query_name, args.result_location, partition=args.partition)) print(execution_ids) if args.list_partitions: print(queryEngine.existing_partitions) @@ -33,5 +33,3 @@ if args.list_queries: for q in QueryStrings: print(q) - if args.test_query: - test_sql_query(args.test_query[0]) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 5fd3142ad..8e62bb0d8 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -334,9 +334,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # upload to s3 if asked if args.use_s3: - partition_name = date.today().isoformat() + " " + source_id[0:3] - upload_to_s3('circles.data.pipeline', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + partition_name = source_id[-3:] + cur_date = date.today().isoformat() + upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(args.only_query)[2:-2]) # print the location of the emission csv file From efa60f79f096f44f5b12fcbf70fa57c344d0a587 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 21:32:39 -0700 Subject: [PATCH 085/438] style fix --- .../non_rl/Highway_Downstream_Congestion.py | 112 ++++++++++++ flow/data_pipeline/lambda_function.py | 1 + flow/networks/SpeedChange.py | 173 ++++++++++++++++++ 3 files changed, 286 insertions(+) create mode 100644 examples/exp_configs/non_rl/Highway_Downstream_Congestion.py create mode 100644 flow/networks/SpeedChange.py diff --git a/examples/exp_configs/non_rl/Highway_Downstream_Congestion.py b/examples/exp_configs/non_rl/Highway_Downstream_Congestion.py new file mode 100644 index 000000000..ddf3542f1 --- /dev/null +++ b/examples/exp_configs/non_rl/Highway_Downstream_Congestion.py @@ -0,0 +1,112 @@ +"""Example of an open multi-lane network with human-driven vehicles.""" + +from flow.controllers import IDMController,LinearOVM,BandoFTLController +from flow.core.params import SumoParams, EnvParams, NetParams, InitialConfig, SumoLaneChangeParams +from flow.core.params import VehicleParams, InFlows +from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS +from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS +from flow.networks.SpeedChange import HighwayNetwork_Modified, ADDITIONAL_NET_PARAMS +from flow.envs import LaneChangeAccelEnv + +# accel_data = 
(BandoFTL_Controller,{'alpha':.5,'beta':20.0,'h_st':12.0,'h_go':50.0,'v_max':30.0,'noise':0.0}) +# traffic_speed = 28.6 +# traffic_flow = 2172 + +accel_data = (IDMController,{'a':1.3,'b':2.0,'noise':0.3}) +traffic_speed = 24.1 +traffic_flow = 2215 + + + +vehicles = VehicleParams() +vehicles.add( + veh_id="human", + acceleration_controller=accel_data, + lane_change_params=SumoLaneChangeParams( + model="SL2015", + lc_sublane=2.0, + ), +) + +# Does this break the sim? +# vehicles.add( +# veh_id="human2", +# acceleration_controller=(LinearOVM,{'v_max':traffic_speed}), +# lane_change_params=SumoLaneChangeParams( +# model="SL2015", +# lc_sublane=2.0, +# ), +# num_vehicles=1) + +env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) + +inflow = InFlows() +inflow.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=traffic_flow, + departLane="free", + departSpeed=traffic_speed) + +# inflow.add( +# veh_type="human2", +# edge="highway_0", +# probability=0.25, +# departLane="free", +# departSpeed=20) + + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params['lanes'] = 1 +additional_net_params['length'] = 1000 +additional_net_params['end_speed_limit'] = 6.0 +additional_net_params['boundary_cell_length'] = 300 + + + + +flow_params = dict( + # name of the experiment + exp_tag='highway', + + # name of the flow environment the experiment is running on + env_name=LaneChangeAccelEnv, + + # name of the network class the experiment is running on + network=HighwayNetwork_Modified, + + # simulator that is used by the experiment + simulator='traci', + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.4, + render=False, + color_by_speed=True, + use_ballistic=True + ), + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=3000, + additional_params=ADDITIONAL_ENV_PARAMS.copy(), + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflow, + additional_params=additional_net_params, + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig( + spacing="uniform", + shuffle=True, + ), +) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 35dcbfba8..fe8efe3c0 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -9,6 +9,7 @@ def lambda_handler(event, context): + """Run on AWS Lambda to start query automatically.""" for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) diff --git a/flow/networks/SpeedChange.py b/flow/networks/SpeedChange.py new file mode 100644 index 000000000..c24fb767b --- /dev/null +++ b/flow/networks/SpeedChange.py @@ -0,0 +1,173 @@ +"""Contains the highway network class.""" + +from flow.networks.base import Network +from flow.core.params import InitialConfig +from flow.core.params import TrafficLightParams +import numpy as np + +ADDITIONAL_NET_PARAMS = { + # length of the highway + "length": 1000, + # number of lanes + "lanes": 4, + # speed limit for all edges + "speed_limit": 30, + # end speed limit + "end_speed_limit": 25, + # number of edges to divide the highway into + "num_edges": 1, + # 
Length of the cell imposing a boundary + "boundary_cell_length": 500 +} + + +class HighwayNetwork_Modified(Network): + """Highway network class. + + This network consists of `num_edges` different straight highway sections + with a total characteristic length and number of lanes. + + Requires from net_params: + + * **length** : length of the highway + * **lanes** : number of lanes in the highway + * **speed_limit** : max speed limit of the highway + * **num_edges** : number of edges to divide the highway into + + Usage + ----- + >>> from flow.core.params import NetParams + >>> from flow.core.params import VehicleParams + >>> from flow.core.params import InitialConfig + >>> from flow.networks import HighwayNetwork + >>> + >>> network = HighwayNetwork( + >>> name='highway', + >>> vehicles=VehicleParams(), + >>> net_params=NetParams( + >>> additional_params={ + >>> 'length': 230, + >>> 'lanes': 1, + >>> 'speed_limit': 30, + >>> 'num_edges': 1 + >>> }, + >>> ) + >>> ) + """ + + def __init__(self, + name, + vehicles, + net_params, + initial_config=InitialConfig(), + traffic_lights=TrafficLightParams()): + """Initialize a highway network.""" + for p in ADDITIONAL_NET_PARAMS.keys(): + if p not in net_params.additional_params: + raise KeyError('Network parameter "{}" not supplied'.format(p)) + + self.length = net_params.additional_params["length"] + self.lanes = net_params.additional_params["lanes"] + self.num_edges = net_params.additional_params.get("num_edges", 1) + self.end_length = net_params.additional_params["boundary_cell_length"] + + super().__init__(name, vehicles, net_params, initial_config, + traffic_lights) + + def specify_nodes(self, net_params): + """See parent class.""" + length = net_params.additional_params["length"] + num_edges = net_params.additional_params.get("num_edges", 1) + segment_lengths = np.linspace(0, length, num_edges+1) + + nodes = [] + for i in range(num_edges+1): + nodes += [{ + "id": "edge_{}".format(i), + "x": segment_lengths[i], + "y": 0 + }] + + nodes +=[{"id": "edge_{}".format(num_edges+1), + "x": length+self.end_length, + "y": 0}] + + return nodes + + def specify_edges(self, net_params): + """See parent class.""" + length = net_params.additional_params["length"] + num_edges = net_params.additional_params.get("num_edges", 1) + segment_length = length/float(num_edges) + + edges = [] + for i in range(num_edges): + edges += [{ + "id": "highway_{}".format(i), + "type": "highwayType", + "from": "edge_{}".format(i), + "to": "edge_{}".format(i+1), + "length": segment_length + }] + + edges += [{ + "id": "highway_end", + "type": "highway_end", + "from": "edge_{}".format(num_edges), + "to": "edge_{}".format(num_edges+1), + "length": self.end_length + }] + + return edges + + def specify_types(self, net_params): + """See parent class.""" + lanes = net_params.additional_params["lanes"] + speed_limit = net_params.additional_params["speed_limit"] + end_speed_limt = net_params.additional_params["end_speed_limit"] + + types = [{ + "id": "highwayType", + "numLanes": lanes, + "speed": speed_limit + }] + + types +=[{ + "id":"highway_end", + "numLanes":lanes, + "speed":end_speed_limt}] + + return types + + def specify_routes(self, net_params): + """See parent class.""" + num_edges = net_params.additional_params.get("num_edges", 1) + rts = {} + for i in range(num_edges): + rts["highway_{}".format(i)] = ["highway_{}".format(j) for + j in range(i, num_edges)] + rts["highway_{}".format(i)].append("highway_end") + + return rts + + def specify_edge_starts(self): + """See parent class.""" 
+ num_edges = self.num_edges + edgestarts = [("highway_{}".format(i), 0) + for i in range(num_edges)] + + + # Adding this line fixes the problem that the simulation breaks when it reaches + # the slow down segment, but then the simulation doesn't advance past the first step. + + edgestarts += [("highway_end",self.length)] + return edgestarts + + @staticmethod + def gen_custom_start_pos(cls, net_params, initial_config, num_vehicles): + """Generate a user defined set of starting positions. + + This method is just used for testing. + """ + return initial_config.additional_params["start_positions"], \ + initial_config.additional_params["start_lanes"] From d4923f6a76f3d3f8756c41021c24d50e72bf3094 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 21:37:12 -0700 Subject: [PATCH 086/438] remove the IDM config file from another campus --- .../non_rl/Highway_Downstream_Congestion.py | 112 ------------ flow/networks/SpeedChange.py | 173 ------------------ 2 files changed, 285 deletions(-) delete mode 100644 examples/exp_configs/non_rl/Highway_Downstream_Congestion.py delete mode 100644 flow/networks/SpeedChange.py diff --git a/examples/exp_configs/non_rl/Highway_Downstream_Congestion.py b/examples/exp_configs/non_rl/Highway_Downstream_Congestion.py deleted file mode 100644 index ddf3542f1..000000000 --- a/examples/exp_configs/non_rl/Highway_Downstream_Congestion.py +++ /dev/null @@ -1,112 +0,0 @@ -"""Example of an open multi-lane network with human-driven vehicles.""" - -from flow.controllers import IDMController,LinearOVM,BandoFTLController -from flow.core.params import SumoParams, EnvParams, NetParams, InitialConfig, SumoLaneChangeParams -from flow.core.params import VehicleParams, InFlows -from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS -from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS -from flow.networks.SpeedChange import HighwayNetwork_Modified, ADDITIONAL_NET_PARAMS -from flow.envs import LaneChangeAccelEnv - -# accel_data = (BandoFTL_Controller,{'alpha':.5,'beta':20.0,'h_st':12.0,'h_go':50.0,'v_max':30.0,'noise':0.0}) -# traffic_speed = 28.6 -# traffic_flow = 2172 - -accel_data = (IDMController,{'a':1.3,'b':2.0,'noise':0.3}) -traffic_speed = 24.1 -traffic_flow = 2215 - - - -vehicles = VehicleParams() -vehicles.add( - veh_id="human", - acceleration_controller=accel_data, - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), -) - -# Does this break the sim? 
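For reference, HighwayNetwork_Modified (deleted below together with this config) validates additional_params against every key of its ADDITIONAL_NET_PARAMS, including the two keys that its usage docstring omits. A minimal sketch of a valid parameter set, mirroring the values used in this config; the variable name is illustrative:

from flow.core.params import NetParams

# all six keys are required: HighwayNetwork_Modified.__init__ raises a
# KeyError for any key of ADDITIONAL_NET_PARAMS that is missing
net_params = NetParams(additional_params={
    "length": 1000,               # length of the highway
    "lanes": 1,                   # number of lanes
    "speed_limit": 30,            # speed limit on the main segments
    "end_speed_limit": 6.0,       # reduced limit on the boundary cell
    "num_edges": 1,               # number of edges to divide the highway into
    "boundary_cell_length": 300,  # length of the slow-down segment
})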
-# vehicles.add( -# veh_id="human2", -# acceleration_controller=(LinearOVM,{'v_max':traffic_speed}), -# lane_change_params=SumoLaneChangeParams( -# model="SL2015", -# lc_sublane=2.0, -# ), -# num_vehicles=1) - -env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) - -inflow = InFlows() -inflow.add( - veh_type="human", - edge="highway_0", - vehs_per_hour=traffic_flow, - departLane="free", - departSpeed=traffic_speed) - -# inflow.add( -# veh_type="human2", -# edge="highway_0", -# probability=0.25, -# departLane="free", -# departSpeed=20) - - -additional_net_params = ADDITIONAL_NET_PARAMS.copy() -additional_net_params['lanes'] = 1 -additional_net_params['length'] = 1000 -additional_net_params['end_speed_limit'] = 6.0 -additional_net_params['boundary_cell_length'] = 300 - - - - -flow_params = dict( - # name of the experiment - exp_tag='highway', - - # name of the flow environment the experiment is running on - env_name=LaneChangeAccelEnv, - - # name of the network class the experiment is running on - network=HighwayNetwork_Modified, - - # simulator that is used by the experiment - simulator='traci', - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.4, - render=False, - color_by_speed=True, - use_ballistic=True - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=3000, - additional_params=ADDITIONAL_ENV_PARAMS.copy(), - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflow, - additional_params=additional_net_params, - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig( - spacing="uniform", - shuffle=True, - ), -) diff --git a/flow/networks/SpeedChange.py b/flow/networks/SpeedChange.py deleted file mode 100644 index c24fb767b..000000000 --- a/flow/networks/SpeedChange.py +++ /dev/null @@ -1,173 +0,0 @@ -"""Contains the highway network class.""" - -from flow.networks.base import Network -from flow.core.params import InitialConfig -from flow.core.params import TrafficLightParams -import numpy as np - -ADDITIONAL_NET_PARAMS = { - # length of the highway - "length": 1000, - # number of lanes - "lanes": 4, - # speed limit for all edges - "speed_limit": 30, - # end speed limit - "end_speed_limit": 25, - # number of edges to divide the highway into - "num_edges": 1, - # Length of the cell imposing a boundary - "boundary_cell_length": 500 -} - - -class HighwayNetwork_Modified(Network): - """Highway network class. - - This network consists of `num_edges` different straight highway sections - with a total characteristic length and number of lanes. 
- - Requires from net_params: - - * **length** : length of the highway - * **lanes** : number of lanes in the highway - * **speed_limit** : max speed limit of the highway - * **num_edges** : number of edges to divide the highway into - - Usage - ----- - >>> from flow.core.params import NetParams - >>> from flow.core.params import VehicleParams - >>> from flow.core.params import InitialConfig - >>> from flow.networks import HighwayNetwork - >>> - >>> network = HighwayNetwork( - >>> name='highway', - >>> vehicles=VehicleParams(), - >>> net_params=NetParams( - >>> additional_params={ - >>> 'length': 230, - >>> 'lanes': 1, - >>> 'speed_limit': 30, - >>> 'num_edges': 1 - >>> }, - >>> ) - >>> ) - """ - - def __init__(self, - name, - vehicles, - net_params, - initial_config=InitialConfig(), - traffic_lights=TrafficLightParams()): - """Initialize a highway network.""" - for p in ADDITIONAL_NET_PARAMS.keys(): - if p not in net_params.additional_params: - raise KeyError('Network parameter "{}" not supplied'.format(p)) - - self.length = net_params.additional_params["length"] - self.lanes = net_params.additional_params["lanes"] - self.num_edges = net_params.additional_params.get("num_edges", 1) - self.end_length = net_params.additional_params["boundary_cell_length"] - - super().__init__(name, vehicles, net_params, initial_config, - traffic_lights) - - def specify_nodes(self, net_params): - """See parent class.""" - length = net_params.additional_params["length"] - num_edges = net_params.additional_params.get("num_edges", 1) - segment_lengths = np.linspace(0, length, num_edges+1) - - nodes = [] - for i in range(num_edges+1): - nodes += [{ - "id": "edge_{}".format(i), - "x": segment_lengths[i], - "y": 0 - }] - - nodes +=[{"id": "edge_{}".format(num_edges+1), - "x": length+self.end_length, - "y": 0}] - - return nodes - - def specify_edges(self, net_params): - """See parent class.""" - length = net_params.additional_params["length"] - num_edges = net_params.additional_params.get("num_edges", 1) - segment_length = length/float(num_edges) - - edges = [] - for i in range(num_edges): - edges += [{ - "id": "highway_{}".format(i), - "type": "highwayType", - "from": "edge_{}".format(i), - "to": "edge_{}".format(i+1), - "length": segment_length - }] - - edges += [{ - "id": "highway_end", - "type": "highway_end", - "from": "edge_{}".format(num_edges), - "to": "edge_{}".format(num_edges+1), - "length": self.end_length - }] - - return edges - - def specify_types(self, net_params): - """See parent class.""" - lanes = net_params.additional_params["lanes"] - speed_limit = net_params.additional_params["speed_limit"] - end_speed_limt = net_params.additional_params["end_speed_limit"] - - types = [{ - "id": "highwayType", - "numLanes": lanes, - "speed": speed_limit - }] - - types +=[{ - "id":"highway_end", - "numLanes":lanes, - "speed":end_speed_limt}] - - return types - - def specify_routes(self, net_params): - """See parent class.""" - num_edges = net_params.additional_params.get("num_edges", 1) - rts = {} - for i in range(num_edges): - rts["highway_{}".format(i)] = ["highway_{}".format(j) for - j in range(i, num_edges)] - rts["highway_{}".format(i)].append("highway_end") - - return rts - - def specify_edge_starts(self): - """See parent class.""" - num_edges = self.num_edges - edgestarts = [("highway_{}".format(i), 0) - for i in range(num_edges)] - - - # Adding this line fixes the problem that the simulation breaks when it reaches - # the slow down segment, but then the simulation doesn't advance past the first step. 
- - edgestarts += [("highway_end",self.length)] - return edgestarts - - @staticmethod - def gen_custom_start_pos(cls, net_params, initial_config, num_vehicles): - """Generate a user defined set of starting positions. - - This method is just used for testing. - """ - return initial_config.additional_params["start_positions"], \ - initial_config.additional_params["start_lanes"] From a60b023d233335c2a0f4776f404d5a79f47e9b02 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 21:47:19 -0700 Subject: [PATCH 087/438] style fixed --- flow/data_pipeline/data_pipeline.py | 1 - flow/data_pipeline/query.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 111c41994..a999b6eb1 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -1,6 +1,5 @@ """contains class and helper functions for the data pipeline.""" import pandas as pd -import numpy as np import boto3 from flow.data_pipeline.query import QueryStrings from time import time diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2e137946d..e8c341444 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -79,9 +79,9 @@ class QueryStrings(Enum): road_grade, source_id, time_step - LAG(time_step, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed FROM trajectory_table WHERE 1 = 1 AND date = \'{{date}}\' From 0ea7ffc571c441e0d6b4c8c42d0edc4df7186fc5 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 11:49:38 -0700 Subject: [PATCH 088/438] specify power demand model names --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index e8c341444..2ee794507 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -15,7 +15,7 @@ (CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 + 9.81 * SIN(road_grade) ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL' AS energy_model_id, + \'{}\' AS energy_model_id, source_id FROM {} ORDER BY id, time_step @@ -52,7 +52,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -68,7 +68,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', 'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -96,4 +96,4 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_speed_cte')) + 
{}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte'))

From da243f946c109259e4c75943bd24515dd4d9e516 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Wed, 20 May 2020 21:31:20 -0700
Subject: [PATCH 089/438] Add several accelerations (with/without noise,
 with/without failsafes) to custom output

---
 flow/controllers/base_controller.py | 18 +++++++++------
 flow/core/kernel/vehicle/base.py    | 20 ++++++++++++++--
 flow/core/kernel/vehicle/traci.py   | 36 +++++++++++++++++++++++------
 flow/data_pipeline/data_pipeline.py | 19 ++++++++++-----
 4 files changed, 71 insertions(+), 22 deletions(-)

diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py
index 7adcdf310..c417bb73a 100755
--- a/flow/controllers/base_controller.py
+++ b/flow/controllers/base_controller.py
@@ -88,8 +88,10 @@ def get_action(self, env):
         float
            the modified form of the acceleration
         """
-        # clear the current stored accel_without_noise of this vehicle None
-        env.k.vehicle.update_accel_without_noise(self.veh_id, None)
+        # reset all stored accelerations for this vehicle to None
+        env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, None)
+        env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, None)
+        env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, None)

         # this is to avoid abrupt decelerations when a vehicle has just entered
         # a network and its data is still not subscribed
@@ -110,23 +112,26 @@ def get_action(self, env):

         # store the acceleration without noise to each vehicle
         # run fail safe if requested
-        accel_without_noise = accel
+        env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel)
+        accel_no_noise_with_failsafe = accel  # fallback when no fail_safe is set
         if self.fail_safe == 'instantaneous':
-            accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise)
+            accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel)
         elif self.fail_safe == 'safe_velocity':
-            accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise)
-        env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise)
+            accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel)
+        env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, accel_no_noise_with_failsafe)

         # add noise to the accelerations, if requested
         if self.accel_noise > 0:
             accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise)
+        env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, accel)

         # run the fail-safes, if requested
         if self.fail_safe == 'instantaneous':
             accel = self.get_safe_action_instantaneous(env, accel)
         elif self.fail_safe == 'safe_velocity':
             accel = self.get_safe_velocity_action(env, accel)
-
+        env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel)
+        
         return accel

     def get_safe_action_instantaneous(self, env, action):
diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py
index 647ef37fe..ed53773cb 100644
--- a/flow/core/kernel/vehicle/base.py
+++ b/flow/core/kernel/vehicle/base.py
@@ -693,7 +693,15 @@ def get_accel(self, veh_id):
         """Return the acceleration of vehicle with veh_id."""
         raise NotImplementedError

-    def update_accel_without_noise(self, veh_id, accel_without_noise):
+    def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe):
+        """Update stored acceleration (no noise, no failsafe) of vehicle with veh_id."""
+        raise NotImplementedError
+
+    def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe):
+        """Update stored acceleration (no noise, with failsafe) of vehicle with veh_id."""
+        raise NotImplementedError
+
+    def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe):
-        """Update stored acceleration without noise of vehicle with veh_id."""
+        """Update stored acceleration (with noise, no failsafe) of vehicle with veh_id."""
         raise NotImplementedError

@@ -701,7 +709,15 @@ def get_2d_position(self, veh_id, error=-1001):
         """Return (x, y) position of vehicle with veh_id."""
         raise NotImplementedError

-    def get_accel_without_noise(self, veh_id):
+    def get_accel_no_noise_no_failsafe(self, veh_id):
+        """Return the acceleration (no noise, no failsafe) of vehicle with veh_id."""
+        raise NotImplementedError
+
+    def get_accel_no_noise_with_failsafe(self, veh_id):
+        """Return the acceleration (no noise, with failsafe) of vehicle with veh_id."""
+        raise NotImplementedError
+
+    def get_accel_with_noise_no_failsafe(self, veh_id):
-        """Return the acceleration without noise of vehicle with veh_id."""
+        """Return the acceleration (with noise, no failsafe) of vehicle with veh_id."""
         raise NotImplementedError

diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py
index 2a4e06257..4a1916617 100644
--- a/flow/core/kernel/vehicle/traci.py
+++ b/flow/core/kernel/vehicle/traci.py
@@ -113,7 +113,9 @@ def initialize(self, vehicles):
             self.__vehicles[veh_id] = dict()
             self.__vehicles[veh_id]['type'] = typ['veh_id']
             self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed']
-            self.__vehicles[veh_id]["accel_without_noise"] = None
+            self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None
+            self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None
+            self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None
             self.num_vehicles += 1
             if typ['acceleration_controller'][0] == RLController:
                 self.num_rl_vehicles += 1
@@ -1130,15 +1132,35 @@ def get_accel(self, veh_id):
             self.__vehicles[veh_id]["accel"] = None
         return self.__vehicles[veh_id]["accel"]

-    def update_accel_without_noise(self, veh_id, accel_without_noise):
+    def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe):
         """See parent class."""
-        self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise
+        self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = accel_no_noise_no_failsafe
+
+    def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe):
+        """See parent class."""
+        self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = accel_no_noise_with_failsafe
+
+    def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe):
+        """See parent class."""
+        self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = accel_with_noise_no_failsafe
+
+    def get_accel_no_noise_no_failsafe(self, veh_id):
+        """See parent class."""
+        if "accel_no_noise_no_failsafe" not in self.__vehicles[veh_id]:
+            self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None
+        return self.__vehicles[veh_id]["accel_no_noise_no_failsafe"]
+
+    def get_accel_no_noise_with_failsafe(self, veh_id):
+        """See parent class."""
+        if "accel_no_noise_with_failsafe" not in self.__vehicles[veh_id]:
+            self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None
+        return self.__vehicles[veh_id]["accel_no_noise_with_failsafe"]
+
+    def get_accel_with_noise_no_failsafe(self, veh_id):
+        """See parent class."""
+        if "accel_with_noise_no_failsafe" not in self.__vehicles[veh_id]:
+            self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None
+        return 
self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] def get_realized_accel(self, veh_id): """See parent class.""" diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index a999b6eb1..11d85cb0d 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -89,9 +89,11 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): def extra_init(): - """Return the dictionary with all the feild pre-populated with empty list.""" - extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], + """Return the dictionary with all the field pre-populated with empty list.""" + extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], + "target_accel_with_noise_with_failsafe": [], "target_accel_no_noise_no_failsafe": [], + "target_accel_with_noise_no_failsafe": [], "target_accel_no_noise_with_failsafe": [], + "realized_accel": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "road_grade": [], "source_id": []} return extra_info @@ -102,13 +104,18 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(veh_kernel.get_headway(vid)) - extra_info["acceleration"].append(veh_kernel.get_accel(vid)) + extra_info["target_accel_with_noise_with_failsafe"].append(veh_kernel.get_accel(vid)) extra_info["leader_id"].append(veh_kernel.get_leader(vid)) extra_info["follower_id"].append(veh_kernel.get_follower(vid)) extra_info["leader_rel_speed"].append(veh_kernel.get_speed( veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) - extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) - extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid)) + extra_info["target_accel_no_noise_no_failsafe"].append( + veh_kernel.get_accel_no_noise_no_failsafe(vid)) + extra_info["target_accel_with_noise_no_failsafe"].append( + veh_kernel.get_accel_with_noise_no_failsafe(vid)) + extra_info["target_accel_no_noise_with_failsafe"].append( + veh_kernel.get_accel_no_noise_with_failsafe(vid)) + extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) position = veh_kernel.get_2d_position(vid) extra_info["x"].append(position[0]) From 951c755672a37bffcec0ac723545746b5e1d0e73 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:44:15 -0700 Subject: [PATCH 090/438] update queries with new column names --- flow/data_pipeline/query.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2ee794507..43ad45216 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -27,14 +27,14 @@ class QueryStrings(Enum): SAMPLE = """ SELECT * - FROM trajectory_table + FROM fact_vehicle_trace WHERE date = \'{date}\' AND partition_name=\'{partition}\' LIMIT 15; """ UPDATE_PARTITION = """ - ALTER TABLE trajectory_table + ALTER TABLE fact_vehicle_trace ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ @@ -44,10 +44,10 @@ class QueryStrings(Enum): id, time_step, speed, - acceleration, + target_accel_with_noise_with_failsafe AS acceleration, road_grade, source_id - FROM trajectory_table + FROM 
fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -60,10 +60,10 @@ class QueryStrings(Enum): id, time_step, speed, - accel_without_noise AS acceleration, + target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -75,14 +75,14 @@ class QueryStrings(Enum): SELECT id, time_step, - accel_without_noise, + target_accel_no_noise_with_failsafe, road_grade, source_id, time_step - LAG(time_step, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -90,8 +90,8 @@ class QueryStrings(Enum): SELECT id, time_step, - prev_speed + accel_without_noise * sim_step AS speed, - accel_without_noise AS acceleration, + prev_speed + target_accel_no_noise_with_failsafe * sim_step AS speed, + target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id FROM lagged_timestep From df0bb664e80e5fe9819c6246663b8602212da243 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:47:44 -0700 Subject: [PATCH 091/438] fix flake8 issues --- flow/controllers/base_controller.py | 2 +- flow/data_pipeline/query.py | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index c417bb73a..3f6a0f4ae 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -130,7 +130,7 @@ def get_action(self, env): elif self.fail_safe == 'safe_velocity': accel = self.get_safe_velocity_action(env, accel) env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) - + return accel def get_safe_action_instantaneous(self, env, action): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 43ad45216..e403b51f8 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,7 +2,13 @@ from enum import Enum # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = { + "energy": [ + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" + ] + } VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT @@ -52,7 +58,8 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', + 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -68,7 +75,8 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', 'denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', + 'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -96,4 +104,5 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 
'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) From 863f360809eac6fad1ae26eba0b197759a7c666c Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:51:46 -0700 Subject: [PATCH 092/438] remove trailing whitespaces --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index e403b51f8..8e8196f6f 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -4,8 +4,8 @@ # tags for different queries tags = { "energy": [ - "POWER_DEMAND_MODEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" ] } @@ -58,7 +58,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ @@ -104,5 +104,5 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) From a524f4dabb9b56333e45fc10d53e1666dfa7fef0 Mon Sep 17 00:00:00 2001 From: Kathy Jang Date: Thu, 21 May 2020 12:37:05 -0700 Subject: [PATCH 093/438] Updated ray_autoscale and requirements.txt --- requirements.txt | 2 +- scripts/ray_autoscale.yaml | 13 +++++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 546cb4e26..ccb971a99 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ gym==0.14.0 -numpy==1.16.0 +numpy==1.18.4 scipy==1.1.0 lxml==4.4.1 pyprind==2.11.2 diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml index 5bf2a9c4a..0800ce324 100644 --- a/scripts/ray_autoscale.yaml +++ b/scripts/ray_autoscale.yaml @@ -68,11 +68,20 @@ worker_nodes: setup_commands: - cd flow && git fetch && git checkout origin/master - -head_setup_commands: + - pip install ray==0.8.0 + - pip install tabulate - pip install boto3==1.10.45 # 1.4.8 adds InstanceMarketOptions - pip install awscli==1.16.309 + - pip install stable-baselines - pip install pytz + - pip install torch==1.3.1 + - pip install tensorflow==2.0.0 + - pip install lz4 + - pip install dm-tree + - pip install numpy==1.18.4 + - ./flow/scripts/setup_sumo_ubuntu1604.sh + +head_setup_commands: [] # Custom commands that will be run on worker nodes after common setup. 
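# Usage sketch (paths and experiment name assumed): a config like this is
# exercised with Ray's cluster launcher, e.g.
#   ray up scripts/ray_autoscale.yaml -y
#   ray submit scripts/ray_autoscale.yaml examples/train.py --args="singleagent_ring"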
worker_setup_commands: []

From 3b7364b9ee26642d8c9700874541899de447de9a Mon Sep 17 00:00:00 2001
From: Kathy Jang
Date: Thu, 21 May 2020 12:41:32 -0700
Subject: [PATCH 094/438] Updated ray_autoscale and requirements.txt

---
 requirements.txt           |  2 +-
 scripts/ray_autoscale.yaml | 11 ++++++++---
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index c069a6cb6..f06c3c69f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 gym==0.14.0
-numpy==1.16.0
+numpy==1.18.4
 scipy==1.1.0
 lxml==4.4.1
 pyprind==2.11.2
diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml
index d0c9cccbb..5cf0eca96 100644
--- a/scripts/ray_autoscale.yaml
+++ b/scripts/ray_autoscale.yaml
@@ -68,15 +68,20 @@ worker_nodes:
 
 setup_commands:
     - cd flow && git fetch && git checkout origin/i210_dev
-
-head_setup_commands:
    - pip install ray==0.8.0
    - pip install tabulate
    - pip install boto3==1.10.45  # 1.4.8 adds InstanceMarketOptions
    - pip install awscli==1.16.309
    - pip install stable-baselines
-    - pip install torch==1.4.0
    - pip install pytz
+    - pip install torch==1.3.1
+    - pip install tensorflow==2.0.0
+    - pip install lz4
+    - pip install dm-tree
+    - pip install numpy==1.18.4
+    - ./flow/scripts/setup_sumo_ubuntu1604.sh
+
+head_setup_commands: []
 
 # Custom commands that will be run on worker nodes after common setup.
 worker_setup_commands: []

From 7f782b895027c265dc84f5d397c3edf99432969e Mon Sep 17 00:00:00 2001
From: Kathy Jang
Date: Thu, 21 May 2020 13:21:11 -0700
Subject: [PATCH 095/438] Reverted to original master's ray_autoscale.yaml and
 added 2 lines

---
 scripts/ray_autoscale.yaml | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml
index 0800ce324..27ac0898e 100644
--- a/scripts/ray_autoscale.yaml
+++ b/scripts/ray_autoscale.yaml
@@ -68,16 +68,9 @@ worker_nodes:
 
 setup_commands:
     - cd flow && git fetch && git checkout origin/master
-    - pip install ray==0.8.0
-    - pip install tabulate
    - pip install boto3==1.10.45  # 1.4.8 adds InstanceMarketOptions
    - pip install awscli==1.16.309
-    - pip install stable-baselines
    - pip install pytz
-    - pip install torch==1.3.1
-    - pip install tensorflow==2.0.0
-    - pip install lz4
-    - pip install dm-tree
    - pip install numpy==1.18.4
    - ./flow/scripts/setup_sumo_ubuntu1604.sh
 
From 7493d9b6d56b5c06c2a8b3b000f938288b506314 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Thu, 21 May 2020 16:28:52 -0700
Subject: [PATCH 096/438] more queries added

---
 examples/simulate.py                |   2 +-
 flow/core/experiment.py             |  12 +-
 flow/data_pipeline/data_pipeline.py |   4 +-
 flow/data_pipeline/query.py         | 367 +++++++++++++++++++++++++++-
 flow/visualize/i210_replay.py       |   3 +-
 5 files changed, 374 insertions(+), 14 deletions(-)

diff --git a/examples/simulate.py b/examples/simulate.py
index 86d14aa14..0b183649b 100644
--- a/examples/simulate.py
+++ b/examples/simulate.py
@@ -97,5 +97,5 @@ def parse_args(args):
     exp = Experiment(flow_params, callables)
 
     # Run for the specified number of rollouts.
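    # (Example invocation, flag names per parse_args above and partition value
    # hypothetical: python examples/simulate.py ring --gen_emission --to_aws <partition>.
    # After this change the flag only gates the S3 upload, since the partition
    # name is derived from source_id.)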
-    exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws,
+    exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, to_aws=flags.to_aws,
             only_query=flags.only_query)
diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index f46f802a5..1652da1ad 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -88,7 +88,7 @@ def __init__(self, flow_params, custom_callables=None):
 
         logging.info("Initializing environment.")
 
-    def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=""):
+    def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only_query=""):
         """Run the given network for a set number of runs.
 
         Parameters
@@ -101,7 +101,7 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=No
         convert_to_csv : bool
             Specifies whether to convert the emission file created by sumo
             into a csv file
-        partition_name: str
-            Specifies the S3 partition you want to store the output file,
-            will be used to later for query. If NONE, won't upload output
-            to S3.
+        to_aws: str
+            Specifies the S3 partition in which to store the output file;
+            it will be used later for queries. If None, the output will
+            not be uploaded to S3.
@@ -213,14 +213,12 @@ def rl_actions(*_):
                 os.remove(emission_path)
 
             trajectory_table_path = './data/' + source_id + ".csv"
-            upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name)
+            upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info)
 
-            if partition_name:
-                if partition_name == "default":
-                    partition_name = source_id[-3:]
+            if to_aws:
                 cur_date = date.today().isoformat()
                 upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format(
-                    cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]),
+                    cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]),
                              upload_file_path, str(only_query)[2:-2])
 
             # delete the S3-only version of the trajectory file
diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py
index a999b6eb1..c83734f4c 100644
--- a/flow/data_pipeline/data_pipeline.py
+++ b/flow/data_pipeline/data_pipeline.py
@@ -91,7 +91,7 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query):
 def extra_init():
     """Return the dictionary with all the field pre-populated with empty list."""
     extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [],
-                  "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [],
+                  "accel_without_noise": [], "realized_accel": [], "leader_id": [], "follower_id": [],
                   "leader_rel_speed": [], "road_grade": [], "source_id": []}
     return extra_info
@@ -108,7 +108,7 @@ def get_extra_info(veh_kernel, extra_info, veh_ids):
         extra_info["leader_rel_speed"].append(veh_kernel.get_speed(
             veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid))
         extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid))
-        extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid))
+        extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid))
         extra_info["road_grade"].append(veh_kernel.get_road_grade(vid))
         position = veh_kernel.get_2d_position(vid)
         extra_info["x"].append(position[0])
diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index 2ee794507..7459a9903 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -2,7 +2,8 @@
 from enum import Enum
 
 # tags for different queries
-tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]}
+tags = {"vehicle_energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL",
"POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = {"vehicle_energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT @@ -96,4 +97,366 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) + + FACT_NETWORK_THROUGHPUT_AGG = """ + WITH agg AS ( + SELECT + source_id, + COUNT(DISTINCT id) AS n_vehicles, + MAX(time_step) - MIN(time_step) AS total_time_seconds + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1 + ) + SELECT + source_id, + n_vehicles * 3600 / total_time_seconds AS throughput_per_hour + FROM agg + ;""" + + FACT_VEHICLE_FUEL_EFFICIENCY_AGG = """ + WITH distance AS ( + SELECT + id, + source_id, + MAX(x) AS distance_meters + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND source_id = + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), energy AS ( + SELECT + id, + source_id, + energy_model_id, + (MAX(time_step) - MIN(time_step)) / (COUNT(DISTINCT time_step) - 1) AS time_step_size_seconds, + SUM(power) AS power_watts + FROM fact_energy_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2, 3 + HAVING COUNT(DISTINCT time_step) > 1 + ) + SELECT + d.id, + d.source_id, + e.energy_model_id, + distance_meters, + power_watts * time_step_size_seconds AS energy_joules, + distance_meters / (power_watts * time_step_size_seconds) AS efficiency_meters_per_joules, + 74564 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon + FROM distance d + JOIN energy e ON 1=1 + AND d.id = e.id + AND d.source_id = e.source_id + ; + """ + + FACT_NETWORK_FUEL_EFFICIENCY_AGG = """ + SELECT + source_id, + energy_model_id, + SUM(distance_meters) AS distance_meters, + SUM(energy_joules) AS energy_joules, + SUM(distance_meters) / SUM(energy_joules) AS efficiency_meters_per_joules, + 74564 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon + FROM fact_vehicle_fuel_efficiency_agg + WHERE 1 = 1 + AND date = \'{{date}}\' + AND parititon_name = \'{{partition}}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + GROUP BY 1, 2 + ;""" + + LEADERBOARD_CHART = """ + SELECT + t.source_id, + e.energy_model_id, + e.efficiency_meters_per_joules, + 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon + t.throughput_per_hour + FROM fact_network_throughput_agg t + JOIN fact_network_fuel_efficiency_agg e ON 1 = 1 + AND t.date = \'{{date}}\' + AND t.partition_name = \'{{partition}}_FACT_NETWORK_THROUGHPUT_AGG\' + AND e.date = \'{{date}}\' + AND e.partition_name = \'{{partition}}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' + AND t.source_id = e.source_id + AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + WHERE 1 = 1 + ;""" + + FACT_NETWORK_INFLOWS_OUTFLOWS = """ + WITH min_max_time_step AS ( + SELECT + id, + source_id, + MIN(time_step) AS min_time_step, + MAX(time_step) AS 
max_time_step + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), inflows AS ( + SELECT + INT(min_time_step / 60) * 60 AS time_step, + source_id, + 60 * COUNT(DISTINCT id) AS inflow_rate + FROM min_max_time_step + GROUP BY 1, 2 + ), outflows AS ( + SELECT + INT(max_time_step / 60) * 60 AS time_step, + source_id, + 60 * COUNT(DISTINCT id) AS outflow_rate + FROM min_max_time_step + GROUP BY 1, 2 + ) + SELECT + COALESCE(i.time_step, o.time_step) AS time_step, + COALESCE(i.source_id, o.source_id) AS source_id, + COALESCE(i.inflow_rate, 0) AS inflow_rate, + COALESCE(o.outflow_rate, 0) AS outflow_rate + FROM inflows i + FULL OUTER JOIN outflows o ON 1 = 1 + AND i.time_step = o.time_step + AND i.source_id = o.source_id + ;""" + + FACT_NETWORK_METRICS_BY_DISTANCE_AGG = """ + WITH joined_trace AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + time_step - LAG(time_step, 1) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + SUM(power) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + cumulative_power + FROM fact_vehicle_trace vt + JOIN fact_energy_trace et ON 1 = 1 + AND vt.date = \'{{date}}\' + AND vt.partition_name = \'{{partition}}\' + AND et.date = \'{{date}}\' + AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.id = et.id + AND vt.source_id = et.source_id + AND vt.time_step = et.time_step + AND vt.x BETWEEN 500 AND 2300 + AND vt.time_step >= 600 + AND et.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + WHERE 1 = 1 + ), cumulative_energy AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + cumulative_power * sim_step AS energy_joules + FROM joined_trace + WHERE 1 = 1 + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2, 3 + HAVING COUNT(DISTINCT time_step) > 1 + ), binned_cumulative_energy AS ( + SELECT + source_id, + INT(x/10) * 10 AS distance_meters_bin, + AVG(energy_joules) AS cumulative_energy_avg, + AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + FROM cumulative_energy + GROUP BY 1, 2 + ), binned_speed_accel AS ( + SELECT + source_id, + INT(x/10) * 10 AS distance_meters_bin, + AVG(speed) AS speed_avg, + AVG(speed) + STDEV(speed) AS speed_upper_bound, + AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(accel_without_noise) AS accel_avg, + AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), binned_energy_start_end AS ( + SELECT DISTINCT + source_id, + id, + INT(x/10) * 10 AS distance_meters_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + FROM cumulative_energy + ), binned_energy AS ( + SELECT + source_id, + distance_meters_bin, + AVG(energy_end - energy_start) AS instantaneous_energy_avg, + AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS 
instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + FROM binned_energy_start_end + GROUP BY 1, 2 + ) + SELECT + COALESCE(bce.source_id, bsa.source_id, be.source_id) AS source_id, + COALESCE(bce.distance_meters_bin, bsa.distance_meters_bin, be.distance_meters_bin) AS distance_meters_bin, + bce.cumulative_energy_avg, + bce.cumulative_energy_lower_bound, + bce.cumulative_energy_upper_bound, + bsa.speed_avg, + bsa.speed_upper_bound, + bsa.speed_lower_bound, + bsa.accel_avg, + bsa.accel_upper_bound, + bsa.accel_lower_bound, + be.instantaneous_energy_avg, + be.instantaneous_energy_upper_bound, + be.instantaneous_energy_lower_bound + FROM binned_cumulative_energy bce + FULL OUTER JOIN binned_speed_accel bsa ON 1 = 1 + AND bce.source_id = bsa.source_id + AND bce.distance_meters_bin = bsa.distance_meters_bin + FULL OUTER JOIN binned_energy be ON 1 = 1 + AND COALESCE(bce.source_id, bsa.source_id) = be.source_id + AND COALESCE(bce.distance_meters_bin, bce.distance_meters_bin) = be.distance_meters_bin + ;""" + + FACT_NETWORK_METRICS_BY_TIME_AGG = """ + WITH joined_trace AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + time_step - LAG(time_step, 1) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + SUM(power) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + cumulative_power + FROM fact_vehicle_trace vt + JOIN fact_energy_trace et ON 1 = 1 + AND vt.date = \'{{date}}\' + AND vt.partition_name = \'{{partition}}\' + AND et.date = \'{{date}}\' + AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.id = et.id + AND vt.source_id = et.source_id + AND vt.time_step = et.time_step + AND vt.x BETWEEN 500 AND 2300 + AND vt.time_step >= 600 + AND et.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + WHERE 1 = 1 + ), cumulative_energy AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + cumulative_power * sim_step AS energy_joules + FROM joined_trace + WHERE 1 = 1 + AND date = + AND partition_name = + AND source_id = + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2, 3 + HAVING COUNT(DISTINCT time_step) > 1 + ), binned_cumulative_energy AS ( + SELECT + source_id, + INT(time_step/60) * 60 AS time_seconds_bin, + AVG(energy_joules) AS cumulative_energy_avg, + AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + FROM cumulative_energy + GROUP BY 1, 2 + ), binned_speed_accel AS ( + SELECT + source_id, + INT(time_step/60) * 60 AS time_seconds_bin, + AVG(speed) AS speed_avg, + AVG(speed) + STDEV(speed) AS speed_upper_bound, + AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(accel_without_noise) AS accel_avg, + AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), binned_energy_start_end AS ( + SELECT DISTINCT + source_id, + id, + INT(time_step/60) * 60 AS time_seconds_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) 
OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + FROM cumulative_energy + ), binned_energy AS ( + SELECT + source_id, + time_seconds_bin, + AVG(energy_end - energy_start) AS instantaneous_energy_avg, + AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + FROM binned_energy_start_end + GROUP BY 1, 2 + ) + SELECT + COALESCE(bce.source_id, bsa.source_id, be.source_id) AS source_id, + COALESCE(bce.time_seconds_bin, bsa.time_seconds_bin, be.time_seconds_bin) AS time_seconds_bin, + bce.cumulative_energy_avg, + bce.cumulative_energy_lower_bound, + bce.cumulative_energy_upper_bound, + bsa.speed_avg, + bsa.speed_upper_bound, + bsa.speed_lower_bound, + bsa.accel_avg, + bsa.accel_upper_bound, + bsa.accel_lower_bound, + be.instantaneous_energy_avg, + be.instantaneous_energy_upper_bound, + be.instantaneous_energy_lower_bound + FROM binned_cumulative_energy bce + FULL OUTER JOIN binned_speed_accel bsa ON 1 = 1 + AND bce.source_id = bsa.source_id + AND bce.time_seconds_bin = bsa.time_seconds_bin + FULL OUTER JOIN binned_energy be ON 1 = 1 + AND COALESCE(bce.source_id, bsa.source_id) = be.source_id + AND COALESCE(bce.time_seconds_bin, bce.time_seconds_bin) = be.time_seconds_bin + ;""" diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 8e62bb0d8..23a7de1d8 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -334,10 +334,9 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # upload to s3 if asked if args.use_s3: - partition_name = source_id[-3:] cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( - cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), + cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(args.only_query)[2:-2]) # print the location of the emission csv file From 40ace2578737333db4c8ddf3e4311d93f56528ad Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 21 May 2020 19:54:59 -0700 Subject: [PATCH 097/438] update the architecture and fix some bugs --- flow/core/experiment.py | 2 +- flow/data_pipeline/data_pipeline.py | 31 +++++++--- flow/data_pipeline/query.py | 96 ++++++++++++++++------------- flow/visualize/i210_replay.py | 2 +- 4 files changed, 75 insertions(+), 56 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index eb9beeca9..1652da1ad 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -217,7 +217,7 @@ def rl_actions(*_): if to_aws: cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(only_query)[2:-2]) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index c83734f4c..290ac70e9 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -142,9 +142,9 @@ def __init__(self): """ self.MAX_WAIT = 60 self.client = boto3.client("athena") - self.existing_partitions = self.get_existing_partitions() + self.existing_partitions = {} - def get_existing_partitions(self): + def 
get_existing_partitions(self, table): """Return the existing partitions in the S3 bucket. Returns @@ -152,7 +152,7 @@ def get_existing_partitions(self): partitions: a list of existing partitions on S3 bucket """ response = self.client.start_query_execution( - QueryString='SHOW PARTITIONS trajectory_table', + QueryString='SHOW PARTITIONS {}'.format(table), QueryExecutionContext={ 'Database': 'circles' }, @@ -207,18 +207,21 @@ def wait_for_execution(self, execution_id): return False return True - def update_partition(self, query_date, partition): + def update_partition(self, table, query_date, partition): """Load the given partition to the trajectory_table on Athena. Parameters ---------- + table : str + the name of the table to update query_date : str the new partition date that needs to be loaded partition : str the new partition that needs to be loaded """ response = self.client.start_query_execution( - QueryString=QueryStrings['UPDATE_PARTITION'].value.format(date=query_date, partition=partition), + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(table=table, date=query_date, + partition=partition), QueryExecutionContext={ 'Database': 'circles' }, @@ -226,11 +229,11 @@ def update_partition(self, query_date, partition): ) if self.wait_for_execution(response['QueryExecutionId']): raise RuntimeError("update partition timed out") - self.existing_partitions.append("date={}/partition_name={}".format(query_date, partition)) + self.existing_partitions[table].append("date={}/partition_name={}".format(query_date, partition)) return def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", - query_date="today", partition="default"): + query_date="today", partition="default", primary_table=""): """Start the execution of a query, does not wait for it to finish. 
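+
+        Example
+        -------
+        Illustrative only; the object name and partition value are assumed.
+
+        >>> queryEngine = AthenaQuery()
+        >>> queryEngine.run_query("POWER_DEMAND_MODEL_DENOISED_ACCEL",
+        >>>                       query_date="2020-05-21",
+        >>>                       partition="flow_abc",
+        >>>                       primary_table="fact_vehicle_trace")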
Parameters @@ -243,6 +246,8 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu name of the partition date to run this query on partition: str, optional name of the partition to run this query on + primary_table: str + the table whose partition that may need update Returns ------- execution_id: str @@ -257,11 +262,17 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu if query_date == "today": query_date = date.today().isoformat() - if "date={}/partition_name={}".format(query_date, partition) not in self.existing_partitions: - self.update_partition(query_date, partition) + source_id = "flow_{}".format(partition.split('_')[1]) + + if primary_table: + if primary_table not in self.existing_partitions.keys(): + self.existing_partitions[primary_table] = self.get_existing_partitions(primary_table) + if "date={}/partition_name={}".format(query_date, partition) not in \ + self.existing_partitions[primary_table]: + self.update_partition(primary_table, query_date, partition) response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(date=query_date, partition=partition), + QueryString=QueryStrings[query_name].value.format(date=query_date, partition=source_id), QueryExecutionContext={ 'Database': 'circles' }, diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 7459a9903..152eefc52 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,8 +2,16 @@ from enum import Enum # tags for different queries -tags = {"vehicle_energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = {"fact_vehicle_trace": {"fact_energy_trace": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], + "fact_network_throughput_agg": ["FACT_NETWORK_THROUGHPUT_AGG"], + "fact_network_inflows_outflows": ["FACT_NETWORK_INFLOWS_OUTFLOWS"]}, + "fact_energy_trace": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], + "fact_network_metrics_by_distance_agg": ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], + "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, + "fact_vehicle_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"], + "fact_network_fuel_efficiency_agg": ["LEADERBOARD_CHART"] + } VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT @@ -35,7 +43,7 @@ class QueryStrings(Enum): """ UPDATE_PARTITION = """ - ALTER TABLE trajectory_table + ALTER TABLE {table} ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ @@ -48,7 +56,7 @@ class QueryStrings(Enum): acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -64,7 +72,7 @@ class QueryStrings(Enum): accel_without_noise AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -83,7 +91,7 @@ class QueryStrings(Enum): OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -108,8 +116,8 @@ class QueryStrings(Enum): MAX(time_step) - MIN(time_step) AS total_time_seconds 
FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1 @@ -128,8 +136,8 @@ class QueryStrings(Enum): MAX(x) AS distance_meters FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND source_id = AND x BETWEEN 500 AND 2300 AND time_step >= 600 @@ -143,8 +151,8 @@ class QueryStrings(Enum): SUM(power) AS power_watts FROM fact_energy_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND date = \'{date}\' + AND partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 @@ -176,8 +184,8 @@ class QueryStrings(Enum): 74564 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 - AND date = \'{{date}}\' - AND parititon_name = \'{{partition}}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND date = \'{date}\' + AND parititon_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' GROUP BY 1, 2 ;""" @@ -191,10 +199,10 @@ class QueryStrings(Enum): t.throughput_per_hour FROM fact_network_throughput_agg t JOIN fact_network_fuel_efficiency_agg e ON 1 = 1 - AND t.date = \'{{date}}\' - AND t.partition_name = \'{{partition}}_FACT_NETWORK_THROUGHPUT_AGG\' - AND e.date = \'{{date}}\' - AND e.partition_name = \'{{partition}}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' + AND t.date = \'{date}\' + AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' + AND e.date = \'{date}\' + AND e.partition_name = \'{partition}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' AND t.source_id = e.source_id AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' WHERE 1 = 1 @@ -209,21 +217,21 @@ class QueryStrings(Enum): MAX(time_step) AS max_time_step FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1, 2 ), inflows AS ( SELECT - INT(min_time_step / 60) * 60 AS time_step, + CAST(min_time_step / 60 AS INTEGER) * 60 AS time_step, source_id, 60 * COUNT(DISTINCT id) AS inflow_rate FROM min_max_time_step GROUP BY 1, 2 ), outflows AS ( SELECT - INT(max_time_step / 60) * 60 AS time_step, + CAST(max_time_step / 60 AS INTEGER) * 60 AS time_step, source_id, 60 * COUNT(DISTINCT id) AS outflow_rate FROM min_max_time_step @@ -255,10 +263,10 @@ class QueryStrings(Enum): cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 - AND vt.date = \'{{date}}\' - AND vt.partition_name = \'{{partition}}\' - AND et.date = \'{{date}}\' - AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND et.date = \'{date}\' + AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step @@ -284,7 +292,7 @@ class QueryStrings(Enum): ), binned_cumulative_energy AS ( SELECT source_id, - INT(x/10) * 10 AS distance_meters_bin, + CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(energy_joules) AS 
cumulative_energy_avg, AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound @@ -293,7 +301,7 @@ class QueryStrings(Enum): ), binned_speed_accel AS ( SELECT source_id, - INT(x/10) * 10 AS distance_meters_bin, + CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(speed) AS speed_avg, AVG(speed) + STDEV(speed) AS speed_upper_bound, AVG(speed) - STDEV(speed) AS speed_lower_bound, @@ -302,8 +310,8 @@ class QueryStrings(Enum): AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1, 2 @@ -311,9 +319,9 @@ class QueryStrings(Enum): SELECT DISTINCT source_id, id, - INT(x/10) * 10 AS distance_meters_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT @@ -364,10 +372,10 @@ class QueryStrings(Enum): cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 - AND vt.date = \'{{date}}\' - AND vt.partition_name = \'{{partition}}\' - AND et.date = \'{{date}}\' - AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND et.date = \'{date}\' + AND et.partition_name = \'{partitio}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step @@ -396,7 +404,7 @@ class QueryStrings(Enum): ), binned_cumulative_energy AS ( SELECT source_id, - INT(time_step/60) * 60 AS time_seconds_bin, + CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(energy_joules) AS cumulative_energy_avg, AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound @@ -405,7 +413,7 @@ class QueryStrings(Enum): ), binned_speed_accel AS ( SELECT source_id, - INT(time_step/60) * 60 AS time_seconds_bin, + CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(speed) AS speed_avg, AVG(speed) + STDEV(speed) AS speed_upper_bound, AVG(speed) - STDEV(speed) AS speed_lower_bound, @@ -414,8 +422,8 @@ class QueryStrings(Enum): AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1, 2 @@ -423,9 +431,9 @@ class QueryStrings(Enum): SELECT DISTINCT source_id, id, - INT(time_step/60) * 60 AS time_seconds_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY 
x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 23a7de1d8..a28dadec4 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -335,7 +335,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # upload to s3 if asked if args.use_s3: cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(args.only_query)[2:-2]) From ba2c3da129826f42171efde224c09b8b7f093bc9 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 22 May 2020 00:18:53 -0700 Subject: [PATCH 098/438] fix inflow issue --- flow/data_pipeline/query.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 152eefc52..40698e39c 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -109,17 +109,25 @@ class QueryStrings(Enum): 'denoised_speed_cte')) FACT_NETWORK_THROUGHPUT_AGG = """ - WITH agg AS ( - SELECT + WITH min_time AS ( + SELECT source_id, - COUNT(DISTINCT id) AS n_vehicles, - MAX(time_step) - MIN(time_step) AS total_time_seconds + id, + MIN(time_step) AS enter_time FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 - AND time_step >= 600 + GROUP BY 1, 2 + ), agg AS ( + SELECT + source_id, + COUNT(DISTINCT id) AS n_vehicles, + MAX(enter_time) - MIN(enter_time) AS total_time_seconds + FROM min_time + WHERE 1 = 1 + AND enter_time >= 600 GROUP BY 1 ) SELECT @@ -133,7 +141,7 @@ class QueryStrings(Enum): SELECT id, source_id, - MAX(x) AS distance_meters + MAX(x)-MIN(x) AS distance_meters FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' From 2232861f572b1ff2f4644af3392f1eddddf5715b Mon Sep 17 00:00:00 2001 From: bill zhao Date: Fri, 22 May 2020 10:41:21 -0700 Subject: [PATCH 099/438] Address MacOS Catalina SegFault issue when running sumo-gui (#938) --- docs/source/flow_setup.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docs/source/flow_setup.rst b/docs/source/flow_setup.rst index 60734b7b1..cbe585d36 100644 --- a/docs/source/flow_setup.rst +++ b/docs/source/flow_setup.rst @@ -112,6 +112,22 @@ If you are a Mac user and the above command gives you the error ``FXApp:openDisplay: unable to open display :0.0``, make sure to open the application XQuartz. +*Troubleshooting*: +If you are a Mac user and the above command gives you the error +``Segmentation fault: 11``, make sure to reinstall ``fox`` using brew. 
+:: + + # Uninstall Catalina bottle of fox: + $ brew uninstall --ignore-dependencies fox + + # Edit brew Formula of fox: + $ brew edit fox + + # Comment out or delete the following line: sha256 "c6697be294c9a0458580564d59f8db32791beb5e67a05a6246e0b969ffc068bc" => :catalina + # Install Mojave bottle of fox: + $ brew install fox + + Testing your SUMO and Flow installation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From 9dbf4fc3964d83a202e90708bc84d6cacea43d55 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sun, 24 May 2020 20:07:30 -0700 Subject: [PATCH 100/438] the extended new pipeline constructed and works --- flow/core/experiment.py | 8 +- flow/data_pipeline/data_pipeline.py | 6 +- flow/data_pipeline/query.py | 152 +++++++++++++++------------- 3 files changed, 89 insertions(+), 77 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 1652da1ad..6c6a5fbfb 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -213,15 +213,15 @@ def rl_actions(*_): os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" - upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) + generate_trajectory_from_flow(trajectory_table_path, extra_info) if to_aws: cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( - cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), - upload_file_path, str(only_query)[2:-2]) + cur_date, source_id, source_id), + trajectory_table_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file - os.remove(upload_file_path) + # os.remove(upload_file_path) return info_dict diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 290ac70e9..d74725590 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -60,9 +60,9 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) - upload_only_file_path = data_path[:-4] + "_upload" + ".csv" - extra_info.to_csv(upload_only_file_path, index=False, header=False) - return upload_only_file_path + # upload_only_file_path = data_path[:-4] + "_upload" + ".csv" + # extra_info.to_csv(upload_only_file_path, index=False, header=False) + return def upload_to_s3(bucket_name, bucket_key, file_path, only_query): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 40698e39c..595379367 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -6,13 +6,19 @@ "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], "fact_network_throughput_agg": ["FACT_NETWORK_THROUGHPUT_AGG"], "fact_network_inflows_outflows": ["FACT_NETWORK_INFLOWS_OUTFLOWS"]}, - "fact_energy_trace": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], - "fact_network_metrics_by_distance_agg": ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], - "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, - "fact_vehicle_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"], - "fact_network_fuel_efficiency_agg": ["LEADERBOARD_CHART"] + "fact_energy_trace": {}, + "POWER_DEMAND_MODEL_DENOISED_ACCEL": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], + "fact_network_metrics_by_distance_agg": + ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], + "fact_network_metrics_by_time_agg": 
["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, + "fact_vehicle_fuel_efficiency_agg": {"fact_network_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"]}, + "fact_network_fuel_efficiency_agg": {"leaderboard_chart": ["LEADERBOARD_CHART"]} } +tables = ["fact_vehicle_trace", "fact_energy_trace", "fact_network_throughput_agg", "fact_network_inflows_outflows", + "fact_vehicle_fuel_efficiency_agg", "fact_network_metrics_by_distance_agg", + "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart"] + VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT id, @@ -53,7 +59,7 @@ class QueryStrings(Enum): id, time_step, speed, - acceleration, + COALESCE (acceleration, realized_accel) AS acceleration, road_grade, source_id FROM fact_vehicle_trace @@ -69,7 +75,7 @@ class QueryStrings(Enum): id, time_step, speed, - accel_without_noise AS acceleration, + COALESCE (accel_without_noise, acceleration, realized_accel) AS acceleration, road_grade, source_id FROM fact_vehicle_trace @@ -84,9 +90,10 @@ class QueryStrings(Enum): SELECT id, time_step, - accel_without_noise, + COALESCE (accel_without_noise, acceleration, realized_accel) AS acceleration, road_grade, source_id, + speed AS cur_speed, time_step - LAG(time_step, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) @@ -99,8 +106,8 @@ class QueryStrings(Enum): SELECT id, time_step, - prev_speed + accel_without_noise * sim_step AS speed, - accel_without_noise AS acceleration, + COALESCE (prev_speed + acceleration * sim_step, cur_speed) AS speed, + acceleration, road_grade, source_id FROM lagged_timestep @@ -137,35 +144,46 @@ class QueryStrings(Enum): ;""" FACT_VEHICLE_FUEL_EFFICIENCY_AGG = """ - WITH distance AS ( - SELECT + WITH sub_fact_vehicle_trace AS ( + SELECT id, - source_id, - MAX(x)-MIN(x) AS distance_meters + time_step, + x, + source_id FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' - AND source_id = - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 - GROUP BY 1, 2 - ), energy AS ( + ), distance AS ( SELECT id, source_id, - energy_model_id, - (MAX(time_step) - MIN(time_step)) / (COUNT(DISTINCT time_step) - 1) AS time_step_size_seconds, - SUM(power) AS power_watts - FROM fact_energy_trace + MAX(x)-MIN(x) AS distance_meters + FROM sub_fact_vehicle_trace WHERE 1 = 1 - AND date = \'{date}\' - AND partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' - AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 + GROUP BY 1, 2 + ), energy AS ( + SELECT + e.id, + e.source_id, + e.energy_model_id, + (MAX(e.time_step) - MIN(e.time_step)) / (COUNT(DISTINCT e.time_step) - 1) AS time_step_size_seconds, + SUM(e.power) AS power_watts + FROM fact_energy_trace AS e + JOIN sub_fact_vehicle_trace AS v ON 1 = 1 + AND e.id = v.id + AND e.time_step = v.time_step + AND e.source_id = v.source_id + WHERE 1 = 1 + AND e.date = \'{date}\' + AND e.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND v.x BETWEEN 500 AND 2300 + AND e.time_step >= 600 GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT time_step) > 1 + HAVING COUNT(DISTINCT e.time_step) > 1 ) SELECT d.id, @@ -193,7 +211,7 @@ class QueryStrings(Enum): FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 AND date = \'{date}\' - AND parititon_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND partition_name = 
\'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' GROUP BY 1, 2 ;""" @@ -203,10 +221,10 @@ class QueryStrings(Enum): t.source_id, e.energy_model_id, e.efficiency_meters_per_joules, - 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon + 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, t.throughput_per_hour - FROM fact_network_throughput_agg t - JOIN fact_network_fuel_efficiency_agg e ON 1 = 1 + FROM fact_network_throughput_agg AS t + JOIN fact_network_fuel_efficiency_agg AS e ON 1 = 1 AND t.date = \'{date}\' AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' AND e.date = \'{date}\' @@ -259,15 +277,15 @@ class QueryStrings(Enum): FACT_NETWORK_METRICS_BY_DISTANCE_AGG = """ WITH joined_trace AS ( SELECT - id, - source_id, - time_step, - x, + vt.id, + vt.source_id, + vt.time_step, + vt.x, energy_model_id, - time_step - LAG(time_step, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + vt.time_step - LAG(vt.time_step, 1) + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, SUM(power) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 @@ -295,27 +313,26 @@ class QueryStrings(Enum): AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 - GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT time_step) > 1 ), binned_cumulative_energy AS ( SELECT source_id, CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(energy_joules) AS cumulative_energy_avg, - AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, - AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound FROM cumulative_energy GROUP BY 1, 2 + HAVING COUNT(DISTINCT time_step) > 1 ), binned_speed_accel AS ( SELECT source_id, CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(speed) AS speed_avg, - AVG(speed) + STDEV(speed) AS speed_upper_bound, - AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(speed) + STDDEV(speed) AS speed_upper_bound, + AVG(speed) - STDDEV(speed) AS speed_lower_bound, AVG(accel_without_noise) AS accel_avg, - AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, - AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + AVG(accel_without_noise) + STDDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' @@ -336,8 +353,8 @@ class QueryStrings(Enum): source_id, distance_meters_bin, AVG(energy_end - energy_start) AS instantaneous_energy_avg, - AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, - AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + AVG(energy_end - energy_start) + STDDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound FROM 
binned_energy_start_end GROUP BY 1, 2 ) @@ -368,22 +385,22 @@ class QueryStrings(Enum): FACT_NETWORK_METRICS_BY_TIME_AGG = """ WITH joined_trace AS ( SELECT - id, - source_id, - time_step, - x, + vt.id, + vt.source_id, + vt.time_step, + vt.x, energy_model_id, - time_step - LAG(time_step, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + vt.time_step - LAG(vt.time_step, 1) + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, SUM(power) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' AND et.date = \'{date}\' - AND et.partition_name = \'{partitio}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step @@ -401,33 +418,28 @@ class QueryStrings(Enum): cumulative_power * sim_step AS energy_joules FROM joined_trace WHERE 1 = 1 - AND date = - AND partition_name = - AND source_id = - AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 - GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT time_step) > 1 ), binned_cumulative_energy AS ( SELECT source_id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(energy_joules) AS cumulative_energy_avg, - AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, - AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound FROM cumulative_energy GROUP BY 1, 2 + HAVING COUNT(DISTINCT time_step) > 1 ), binned_speed_accel AS ( SELECT source_id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(speed) AS speed_avg, - AVG(speed) + STDEV(speed) AS speed_upper_bound, - AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(speed) + STDDEV(speed) AS speed_upper_bound, + AVG(speed) - STDDEV(speed) AS speed_lower_bound, AVG(accel_without_noise) AS accel_avg, - AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, - AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + AVG(accel_without_noise) + STDDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' @@ -448,8 +460,8 @@ class QueryStrings(Enum): source_id, time_seconds_bin, AVG(energy_end - energy_start) AS instantaneous_energy_avg, - AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, - AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + AVG(energy_end - energy_start) + STDDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound FROM binned_energy_start_end GROUP BY 1, 2 ) From 57b42ca5544b72a5d6641ac38c50f9677117d940 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 24 May 2020 21:29:11 -0700 Subject: [PATCH 101/438] fix bug in vehicle 
 power demand
---
 flow/data_pipeline/query.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index 2ee794507..8dde9474d 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -11,9 +11,12 @@
         speed,
         acceleration,
         road_grade,
-        1200 * speed * (
-            (CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration)
-            + 0.8 + 9.81 * SIN(road_grade)
+        1200 * speed * MAX(0, (
+            CASE
+                WHEN acceleration > 0 THEN 1
+                WHEN acceleration < 0 THEN 0
+                ELSE 0.5
+            END * (1 - 0.8) + 0.8) * acceleration + 9.81 * SIN(road_grade)
         ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power,
         \'{}\' AS energy_model_id,
         source_id

From 87dcff271ac1aa5450516f2592940eddeb149fe4 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Wed, 20 May 2020 21:31:20 -0700
Subject: [PATCH 102/438] Add several accelerations (with/without noise,
 with/without failsafes) to custom output

---
 flow/controllers/base_controller.py | 18 +++++++++------
 flow/core/kernel/vehicle/base.py | 20 ++++++++++++++--
 flow/core/kernel/vehicle/traci.py | 36 +++++++++++++++++++++++------
 flow/data_pipeline/data_pipeline.py | 19 ++++++++++-----
 4 files changed, 71 insertions(+), 22 deletions(-)

diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py
index 7adcdf310..c417bb73a 100755
--- a/flow/controllers/base_controller.py
+++ b/flow/controllers/base_controller.py
@@ -88,8 +88,10 @@ def get_action(self, env):
         float
             the modified form of the acceleration
         """
-        # clear the current stored accel_without_noise of this vehicle None
-        env.k.vehicle.update_accel_without_noise(self.veh_id, None)
+        # clear this vehicle's currently stored accelerations (reset to None)
+        env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, None)
+        env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, None)
+        env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, None)

         # this is to avoid abrupt decelerations when a vehicle has just entered
         # a network and it's data is still not subscribed
@@ -110,23 +112,25 @@ def get_action(self, env):

         # store the acceleration without noise to each vehicle
         # run fail safe if requested
-        accel_without_noise = accel
+        env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel)
         if self.fail_safe == 'instantaneous':
-            accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise)
+            accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel)
         elif self.fail_safe == 'safe_velocity':
-            accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise)
-        env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise)
+            accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel)
+        env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, accel_no_noise_with_failsafe)

         # add noise to the accelerations, if requested
         if self.accel_noise > 0:
             accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise)
+        env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, accel)

         # run the fail-safes, if requested
         if self.fail_safe == 'instantaneous':
             accel = self.get_safe_action_instantaneous(env, accel)
         elif self.fail_safe == 'safe_velocity':
             accel = self.get_safe_velocity_action(env, accel)
-
+        env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel)
+
         return accel

     def get_safe_action_instantaneous(self, env, action):
diff --git a/flow/core/kernel/vehicle/base.py
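For intuition, the corrected expression above reads as a tractive-power term plus rolling-resistance and aerodynamic-drag terms. A plain-Python transcription, with every constant taken directly from the query (1200 kg mass, 9.81 m/s^2 gravity, 0.005 rolling coefficient, 0.5 * 1.225 * 2.6 * 0.3 for drag), offered only as a rough sanity check of the SQL as patched here::

    import math

    def power_demand_watts(speed, accel, road_grade):
        """Mirror of the patched POWER_DEMAND_MODEL expression."""
        # piecewise weighting of acceleration, as in the SQL CASE statement
        if accel > 0:
            theta = 1.0
        elif accel < 0:
            theta = 0.0
        else:
            theta = 0.5
        tractive = 1200 * speed * max(0.0, (theta * (1 - 0.8) + 0.8) * accel
                                      + 9.81 * math.sin(road_grade))
        rolling = 1200 * 9.81 * 0.005 * speed
        drag = 0.5 * 1.225 * 2.6 * 0.3 * speed ** 3
        return tractive + rolling + drag

    print(power_demand_watts(25.0, 0.0, 0.0))  # ~8.9 kW cruising at 25 m/s on a flat road

Multiplying such a power trace by the simulation step size and summing gives energy in joules; the 74564 factor appearing in the downstream fuel-efficiency queries is then plausibly just the unit conversion from meters per joule to miles per gallon (roughly 1.2e8 J per gallon of gasoline divided by 1609.3 m per mile).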
b/flow/core/kernel/vehicle/base.py index 647ef37fe..ed53773cb 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -693,7 +693,15 @@ def get_accel(self, veh_id): """Return the acceleration of vehicle with veh_id.""" raise NotImplementedError - def update_accel_without_noise(self, veh_id, accel_without_noise): + def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): + """Update stored acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): + """Update stored acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError @@ -701,7 +709,15 @@ def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError - def get_accel_without_noise(self, veh_id): + def get_accel_no_noise_no_failsafe(self, veh_id): + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_no_noise_with_failsafe(self, veh_id): + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_with_noise_no_failsafe(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 2a4e06257..4a1916617 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,7 +113,9 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] - self.__vehicles[veh_id]["accel_without_noise"] = None + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1130,15 +1132,35 @@ def get_accel(self, veh_id): self.__vehicles[veh_id]["accel"] = None return self.__vehicles[veh_id]["accel"] - def update_accel_without_noise(self, veh_id, accel_without_noise): + def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): """See parent class.""" - self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = accel_no_noise_no_failsafe - def get_accel_without_noise(self, veh_id): + def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): """See parent class.""" - if "accel_without_noise" not in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["accel_without_noise"] = None - return self.__vehicles[veh_id]["accel_without_noise"] + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = accel_no_noise_with_failsafe + + def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): + """See parent class.""" + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = accel_with_noise_no_failsafe + + def get_accel_no_noise_no_failsafe(self, veh_id): + """See parent class.""" + if "accel_no_noise_no_failsafe" not in self.__vehicles[veh_id]: + 
self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None + return self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] + + def get_accel_no_noise_with_failsafe(self, veh_id): + """See parent class.""" + if "accel_no_noise_with_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None + return self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] + + def get_accel_with_noise_no_failsafe(self, veh_id): + """See parent class.""" + if "accel_with_noise_no_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None + return self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] def get_realized_accel(self, veh_id): """See parent class.""" diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index a999b6eb1..11d85cb0d 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -89,9 +89,11 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): def extra_init(): - """Return the dictionary with all the feild pre-populated with empty list.""" - extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], + """Return the dictionary with all the field pre-populated with empty list.""" + extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], + "target_accel_with_noise_with_failsafe": [], "target_accel_no_noise_no_failsafe": [], + "target_accel_with_noise_no_failsafe": [], "target_accel_no_noise_with_failsafe": [], + "realized_accel": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "road_grade": [], "source_id": []} return extra_info @@ -102,13 +104,18 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(veh_kernel.get_headway(vid)) - extra_info["acceleration"].append(veh_kernel.get_accel(vid)) + extra_info["target_accel_with_noise_with_failsafe"].append(veh_kernel.get_accel(vid)) extra_info["leader_id"].append(veh_kernel.get_leader(vid)) extra_info["follower_id"].append(veh_kernel.get_follower(vid)) extra_info["leader_rel_speed"].append(veh_kernel.get_speed( veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) - extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) - extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid)) + extra_info["target_accel_no_noise_no_failsafe"].append( + veh_kernel.get_accel_no_noise_no_failsafe(vid)) + extra_info["target_accel_with_noise_no_failsafe"].append( + veh_kernel.get_accel_with_noise_no_failsafe(vid)) + extra_info["target_accel_no_noise_with_failsafe"].append( + veh_kernel.get_accel_no_noise_with_failsafe(vid)) + extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) position = veh_kernel.get_2d_position(vid) extra_info["x"].append(position[0]) From d192a9f5dc1ca2622875cd26997198c1889d213c Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:44:15 -0700 Subject: [PATCH 103/438] update queries with new column names --- flow/data_pipeline/query.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 8dde9474d..689818e65 100644 --- 
a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -30,14 +30,14 @@ class QueryStrings(Enum): SAMPLE = """ SELECT * - FROM trajectory_table + FROM fact_vehicle_trace WHERE date = \'{date}\' AND partition_name=\'{partition}\' LIMIT 15; """ UPDATE_PARTITION = """ - ALTER TABLE trajectory_table + ALTER TABLE fact_vehicle_trace ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ @@ -47,10 +47,10 @@ class QueryStrings(Enum): id, time_step, speed, - acceleration, + target_accel_with_noise_with_failsafe AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -63,10 +63,10 @@ class QueryStrings(Enum): id, time_step, speed, - accel_without_noise AS acceleration, + target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -78,14 +78,14 @@ class QueryStrings(Enum): SELECT id, time_step, - accel_without_noise, + target_accel_no_noise_with_failsafe, road_grade, source_id, time_step - LAG(time_step, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -93,8 +93,8 @@ class QueryStrings(Enum): SELECT id, time_step, - prev_speed + accel_without_noise * sim_step AS speed, - accel_without_noise AS acceleration, + prev_speed + target_accel_no_noise_with_failsafe * sim_step AS speed, + target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id FROM lagged_timestep From 92a745dd3d517135f3bef6b69782c212ffbfd336 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:47:44 -0700 Subject: [PATCH 104/438] fix flake8 issues --- flow/controllers/base_controller.py | 2 +- flow/data_pipeline/query.py | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index c417bb73a..3f6a0f4ae 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -130,7 +130,7 @@ def get_action(self, env): elif self.fail_safe == 'safe_velocity': accel = self.get_safe_velocity_action(env, accel) env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) - + return accel def get_safe_action_instantaneous(self, env, action): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 689818e65..928bb5d47 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,7 +2,13 @@ from enum import Enum # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = { + "energy": [ + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" + ] + } VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT @@ -55,7 +61,8 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', + 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL 
= """ WITH denoised_accel_cte AS ( @@ -71,7 +78,8 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', 'denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', + 'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -99,4 +107,5 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) From 215d4abb938e7b7032f227b9a2e6997092164bc6 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:51:46 -0700 Subject: [PATCH 105/438] remove trailing whitespaces --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 928bb5d47..e8ac34abc 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -4,8 +4,8 @@ # tags for different queries tags = { "energy": [ - "POWER_DEMAND_MODEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" ] } @@ -61,7 +61,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ @@ -107,5 +107,5 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) From 97f3ccdf34d4fb1323a25d479abc4ccab616c8f1 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 24 May 2020 23:20:29 -0700 Subject: [PATCH 106/438] fix accel with noise with failsafe output --- flow/controllers/base_controller.py | 1 + flow/core/kernel/vehicle/base.py | 20 ++++++++++++++------ flow/core/kernel/vehicle/traci.py | 10 ++++++++++ 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 3f6a0f4ae..1169ce5b8 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -113,6 +113,7 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel) + accel_no_noise_with_failsafe = accel if self.fail_safe == 'instantaneous': accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index ed53773cb..f6f8ee382 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -694,15 +694,19 @@ def get_accel(self, veh_id): raise NotImplementedError def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): - """Update stored acceleration without noise of vehicle with veh_id.""" + """Update stored acceleration without noise without failsafe of 
vehicle with veh_id.""" raise NotImplementedError def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): - """Update stored acceleration without noise of vehicle with veh_id.""" + """Update stored acceleration without noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): - """Update stored acceleration without noise of vehicle with veh_id.""" + """Update stored acceleration with noise without failsafe of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_with_noise_with_failsafe(self, veh_id, accel_with_noise_with_failsafe): + """Update stored acceleration with noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def get_2d_position(self, veh_id, error=-1001): @@ -710,15 +714,19 @@ def get_2d_position(self, veh_id, error=-1001): raise NotImplementedError def get_accel_no_noise_no_failsafe(self, veh_id): - """Return the acceleration without noise of vehicle with veh_id.""" + """Return the acceleration without noise without failsafe of vehicle with veh_id.""" raise NotImplementedError def get_accel_no_noise_with_failsafe(self, veh_id): - """Return the acceleration without noise of vehicle with veh_id.""" + """Return the acceleration without noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def get_accel_with_noise_no_failsafe(self, veh_id): - """Return the acceleration without noise of vehicle with veh_id.""" + """Return the acceleration with noise without failsafe of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_with_noise_with_failsafe(self, veh_id): + """Return the acceleration with noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def get_realized_accel(self, veh_id): diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 4a1916617..1c0b5f19b 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1144,6 +1144,10 @@ def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsa """See parent class.""" self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = accel_with_noise_no_failsafe + def update_accel_with_noise_with_failsafe(self, veh_id, accel_with_noise_with_failsafe): + """See parent class.""" + self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = accel_with_noise_with_failsafe + def get_accel_no_noise_no_failsafe(self, veh_id): """See parent class.""" if "accel_no_noise_no_failsafe" not in self.__vehicles[veh_id]: @@ -1162,6 +1166,12 @@ def get_accel_with_noise_no_failsafe(self, veh_id): self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None return self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] + def get_accel_with_noise_with_failsafe(self, veh_id): + """See parent class.""" + if "accel_with_noise_with_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = None + return self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] + def get_realized_accel(self, veh_id): """See parent class.""" return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step From e9e66a77e5ea38e2deda69ae46191d3aeee72723 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 14:27:27 -0700 Subject: [PATCH 107/438] remove extra_init() in favor of collections.defaultdict() --- flow/core/experiment.py | 5 +++-- flow/data_pipeline/data_pipeline.py | 8 -------- 
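With this fix, each vehicle carries a complete 2x2 grid of commanded accelerations (noise on/off crossed with failsafe on/off). A short sketch of how downstream logging code might read all four back through the kernel API added in these patches; the helper itself is illustrative, assuming a running env with the patched vehicle kernel::

    def accel_variants(env, veh_id):
        """Return the four stored accelerations for one vehicle (None until set)."""
        k = env.k.vehicle
        return {
            "no_noise_no_failsafe": k.get_accel_no_noise_no_failsafe(veh_id),
            "no_noise_with_failsafe": k.get_accel_no_noise_with_failsafe(veh_id),
            "with_noise_no_failsafe": k.get_accel_with_noise_no_failsafe(veh_id),
            "with_noise_with_failsafe": k.get_accel_with_noise_with_failsafe(veh_id),
        }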
flow/visualize/i210_replay.py | 4 ++-- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index f46f802a5..779fdb0f4 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,7 +1,8 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, get_extra_info +from collections import defaultdict import datetime import logging import time @@ -147,7 +148,7 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] - extra_info = extra_init() + extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) for i in range(num_runs): diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index a999b6eb1..8cd00358c 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -88,14 +88,6 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): return -def extra_init(): - """Return the dictionary with all the feild pre-populated with empty list.""" - extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], - "leader_rel_speed": [], "road_grade": [], "source_id": []} - return extra_info - - def get_extra_info(veh_kernel, extra_info, veh_ids): """Get all the necessary information for the trajectory output from flow.""" for vid in veh_ids: diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 8e62bb0d8..57e72586a 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -32,7 +32,7 @@ from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, get_extra_info import uuid EXAMPLE_USAGE = """ @@ -208,7 +208,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= key: [] for key in custom_callables.keys() }) - extra_info = extra_init() + extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) i = 0 From d0df0a3d9271584c80c26fe691d7d2cb4a70f5f4 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 14:27:50 -0700 Subject: [PATCH 108/438] revert temporary change --- flow/networks/highway.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/networks/highway.py b/flow/networks/highway.py index 871e7f415..6f10d3279 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -9,7 +9,7 @@ # length of the highway "length": 1000, # number of lanes - "lanes": 1, + "lanes": 4, # speed limit for all edges "speed_limit": 30, # number of edges to divide the highway into From 3b93994d4be805e9b12d79906e29857eeca76312 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 14:31:43 -0700 Subject: [PATCH 109/438] update energy query to MVP params --- flow/data_pipeline/query.py | 28 
++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index 8dde9474d..078bdd129 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -2,22 +2,28 @@
 from enum import Enum

 # tags for different queries
-tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]}
+tags = {
+    "energy": [
+        "POWER_DEMAND_MODEL",
+        "POWER_DEMAND_MODEL_DENOISED_ACCEL",
+        "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"
+    ]
+}

-VEHICLE_POWER_DEMAND_FINAL_SELECT = """
+VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """
     SELECT
         id,
         time_step,
         speed,
         acceleration,
         road_grade,
-        1200 * speed * MAX(0, (
+        MAX(0, 1200 * speed * (
             CASE
                 WHEN acceleration > 0 THEN 1
                 WHEN acceleration < 0 THEN 0
                 ELSE 0.5
-            END * (1 - 0.8) + 0.8) * acceleration + 9.81 * SIN(road_grade)
-        ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power,
+            END * (1 - {}) + {}) * acceleration + 9.81 * SIN(road_grade)
+        ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3)) AS power,
         \'{}\' AS energy_model_id,
         source_id
     FROM {}
@@ -55,7 +61,9 @@ class QueryStrings(Enum):
             AND date = \'{{date}}\'
             AND partition_name=\'{{partition}}\'
         )
-        {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte'))
+        {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1,
+                                                                         'POWER_DEMAND_MODEL',
+                                                                         'regular_cte'))

     POWER_DEMAND_MODEL_DENOISED_ACCEL = """
         WITH denoised_accel_cte AS (
@@ -71,7 +79,9 @@ class QueryStrings(Enum):
             AND date = \'{{date}}\'
             AND partition_name=\'{{partition}}\'
         )
-        {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', 'denoised_accel_cte'))
+        {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1,
+                                                                         'POWER_DEMAND_MODEL_DENOISED_ACCEL',
+                                                                         'denoised_accel_cte'))

     POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """
         WITH lagged_timestep AS (
@@ -99,4 +109,6 @@ class QueryStrings(Enum):
             source_id
         FROM lagged_timestep
         )
-        {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte'))
+        {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1,
+                                                                         'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL',
+                                                                         'denoised_speed_cte'))

From f773ff342e28daeef3df2d20413dbe5ccbdb60a1 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Thu, 21 May 2020 16:28:52 -0700
Subject: [PATCH 110/438] more quieres added

---
 examples/simulate.py | 2 +-
 flow/core/experiment.py | 12 +-
 flow/data_pipeline/data_pipeline.py | 2 +-
 flow/data_pipeline/query.py | 375 +++++++++++++++++++++++++++-
 flow/visualize/i210_replay.py | 3 +-
 5 files changed, 376 insertions(+), 18 deletions(-)

diff --git a/examples/simulate.py b/examples/simulate.py
index 86d14aa14..0b183649b 100644
--- a/examples/simulate.py
+++ b/examples/simulate.py
@@ -97,5 +97,5 @@ def parse_args(args):
     exp = Experiment(flow_params, callables)

     # Run for the specified number of rollouts.
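One formatting subtlety in the query templates being reworked here: the power-demand strings are formatted twice, first to splice in the model constant, model name, and source CTE (the ``.format(...)`` calls above), and later by ``run_query`` to fill in the date and partition. That is why their runtime fields are escaped as ``{{date}}``/``{{partition}}`` while single-pass queries use ``{date}``/``{partition}``. A minimal, self-contained illustration of the two-pass mechanics::

    template = "SELECT * FROM {cte} WHERE date = '{{date}}'"
    first_pass = template.format(cte="denoised_accel_cte")
    assert first_pass == "SELECT * FROM denoised_accel_cte WHERE date = '{date}'"
    second_pass = first_pass.format(date="2020-05-22")
    assert second_pass == "SELECT * FROM denoised_accel_cte WHERE date = '2020-05-22'"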
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, to_aws=flags.to_aws, only_query=flags.only_query) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 779fdb0f4..9984692f6 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -89,7 +89,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=""): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only_query=""): """Run the given network for a set number of runs. Parameters @@ -102,7 +102,7 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=No convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file - partition_name: str + to_aws: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. @@ -214,14 +214,12 @@ def rl_actions(*_): os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" - upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) - if partition_name: - if partition_name == "default": - partition_name = source_id[-3:] + if to_aws: cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( - cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), + cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 8cd00358c..0d8f91c39 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -100,7 +100,7 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["leader_rel_speed"].append(veh_kernel.get_speed( veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) - extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid)) + extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) position = veh_kernel.get_2d_position(vid) extra_info["x"].append(position[0]) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 078bdd129..f22c90114 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,13 +2,8 @@ from enum import Enum # tags for different queries -tags = { - "energy": [ - "POWER_DEMAND_MODEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" - ] -} +tags = {"vehicle_energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT @@ -109,6 +104,372 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) +<<<<<<< HEAD {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) +======= + 
{}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) + + FACT_NETWORK_THROUGHPUT_AGG = """ + WITH agg AS ( + SELECT + source_id, + COUNT(DISTINCT id) AS n_vehicles, + MAX(time_step) - MIN(time_step) AS total_time_seconds + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1 + ) + SELECT + source_id, + n_vehicles * 3600 / total_time_seconds AS throughput_per_hour + FROM agg + ;""" + + FACT_VEHICLE_FUEL_EFFICIENCY_AGG = """ + WITH distance AS ( + SELECT + id, + source_id, + MAX(x) AS distance_meters + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND source_id = + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), energy AS ( + SELECT + id, + source_id, + energy_model_id, + (MAX(time_step) - MIN(time_step)) / (COUNT(DISTINCT time_step) - 1) AS time_step_size_seconds, + SUM(power) AS power_watts + FROM fact_energy_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2, 3 + HAVING COUNT(DISTINCT time_step) > 1 + ) + SELECT + d.id, + d.source_id, + e.energy_model_id, + distance_meters, + power_watts * time_step_size_seconds AS energy_joules, + distance_meters / (power_watts * time_step_size_seconds) AS efficiency_meters_per_joules, + 74564 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon + FROM distance d + JOIN energy e ON 1=1 + AND d.id = e.id + AND d.source_id = e.source_id + ; + """ + + FACT_NETWORK_FUEL_EFFICIENCY_AGG = """ + SELECT + source_id, + energy_model_id, + SUM(distance_meters) AS distance_meters, + SUM(energy_joules) AS energy_joules, + SUM(distance_meters) / SUM(energy_joules) AS efficiency_meters_per_joules, + 74564 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon + FROM fact_vehicle_fuel_efficiency_agg + WHERE 1 = 1 + AND date = \'{{date}}\' + AND parititon_name = \'{{partition}}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + GROUP BY 1, 2 + ;""" + + LEADERBOARD_CHART = """ + SELECT + t.source_id, + e.energy_model_id, + e.efficiency_meters_per_joules, + 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon + t.throughput_per_hour + FROM fact_network_throughput_agg t + JOIN fact_network_fuel_efficiency_agg e ON 1 = 1 + AND t.date = \'{{date}}\' + AND t.partition_name = \'{{partition}}_FACT_NETWORK_THROUGHPUT_AGG\' + AND e.date = \'{{date}}\' + AND e.partition_name = \'{{partition}}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' + AND t.source_id = e.source_id + AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + WHERE 1 = 1 + ;""" + + FACT_NETWORK_INFLOWS_OUTFLOWS = """ + WITH min_max_time_step AS ( + SELECT + id, + source_id, + MIN(time_step) AS min_time_step, + MAX(time_step) AS max_time_step + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), inflows AS ( + SELECT + INT(min_time_step / 60) * 60 AS time_step, + source_id, + 60 * COUNT(DISTINCT id) AS inflow_rate + FROM min_max_time_step + GROUP BY 1, 2 + ), outflows AS ( + SELECT + INT(max_time_step / 60) * 60 AS 
time_step, + source_id, + 60 * COUNT(DISTINCT id) AS outflow_rate + FROM min_max_time_step + GROUP BY 1, 2 + ) + SELECT + COALESCE(i.time_step, o.time_step) AS time_step, + COALESCE(i.source_id, o.source_id) AS source_id, + COALESCE(i.inflow_rate, 0) AS inflow_rate, + COALESCE(o.outflow_rate, 0) AS outflow_rate + FROM inflows i + FULL OUTER JOIN outflows o ON 1 = 1 + AND i.time_step = o.time_step + AND i.source_id = o.source_id + ;""" + + FACT_NETWORK_METRICS_BY_DISTANCE_AGG = """ + WITH joined_trace AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + time_step - LAG(time_step, 1) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + SUM(power) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + cumulative_power + FROM fact_vehicle_trace vt + JOIN fact_energy_trace et ON 1 = 1 + AND vt.date = \'{{date}}\' + AND vt.partition_name = \'{{partition}}\' + AND et.date = \'{{date}}\' + AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.id = et.id + AND vt.source_id = et.source_id + AND vt.time_step = et.time_step + AND vt.x BETWEEN 500 AND 2300 + AND vt.time_step >= 600 + AND et.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + WHERE 1 = 1 + ), cumulative_energy AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + cumulative_power * sim_step AS energy_joules + FROM joined_trace + WHERE 1 = 1 + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2, 3 + HAVING COUNT(DISTINCT time_step) > 1 + ), binned_cumulative_energy AS ( + SELECT + source_id, + INT(x/10) * 10 AS distance_meters_bin, + AVG(energy_joules) AS cumulative_energy_avg, + AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + FROM cumulative_energy + GROUP BY 1, 2 + ), binned_speed_accel AS ( + SELECT + source_id, + INT(x/10) * 10 AS distance_meters_bin, + AVG(speed) AS speed_avg, + AVG(speed) + STDEV(speed) AS speed_upper_bound, + AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(accel_without_noise) AS accel_avg, + AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), binned_energy_start_end AS ( + SELECT DISTINCT + source_id, + id, + INT(x/10) * 10 AS distance_meters_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + FROM cumulative_energy + ), binned_energy AS ( + SELECT + source_id, + distance_meters_bin, + AVG(energy_end - energy_start) AS instantaneous_energy_avg, + AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + FROM binned_energy_start_end + GROUP BY 1, 2 + ) + SELECT + COALESCE(bce.source_id, bsa.source_id, be.source_id) AS source_id, + COALESCE(bce.distance_meters_bin, bsa.distance_meters_bin, be.distance_meters_bin) AS distance_meters_bin, + bce.cumulative_energy_avg, + 
bce.cumulative_energy_lower_bound, + bce.cumulative_energy_upper_bound, + bsa.speed_avg, + bsa.speed_upper_bound, + bsa.speed_lower_bound, + bsa.accel_avg, + bsa.accel_upper_bound, + bsa.accel_lower_bound, + be.instantaneous_energy_avg, + be.instantaneous_energy_upper_bound, + be.instantaneous_energy_lower_bound + FROM binned_cumulative_energy bce + FULL OUTER JOIN binned_speed_accel bsa ON 1 = 1 + AND bce.source_id = bsa.source_id + AND bce.distance_meters_bin = bsa.distance_meters_bin + FULL OUTER JOIN binned_energy be ON 1 = 1 + AND COALESCE(bce.source_id, bsa.source_id) = be.source_id + AND COALESCE(bce.distance_meters_bin, bce.distance_meters_bin) = be.distance_meters_bin + ;""" + + FACT_NETWORK_METRICS_BY_TIME_AGG = """ + WITH joined_trace AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + time_step - LAG(time_step, 1) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + SUM(power) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + cumulative_power + FROM fact_vehicle_trace vt + JOIN fact_energy_trace et ON 1 = 1 + AND vt.date = \'{{date}}\' + AND vt.partition_name = \'{{partition}}\' + AND et.date = \'{{date}}\' + AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.id = et.id + AND vt.source_id = et.source_id + AND vt.time_step = et.time_step + AND vt.x BETWEEN 500 AND 2300 + AND vt.time_step >= 600 + AND et.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + WHERE 1 = 1 + ), cumulative_energy AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + cumulative_power * sim_step AS energy_joules + FROM joined_trace + WHERE 1 = 1 + AND date = + AND partition_name = + AND source_id = + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2, 3 + HAVING COUNT(DISTINCT time_step) > 1 + ), binned_cumulative_energy AS ( + SELECT + source_id, + INT(time_step/60) * 60 AS time_seconds_bin, + AVG(energy_joules) AS cumulative_energy_avg, + AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + FROM cumulative_energy + GROUP BY 1, 2 + ), binned_speed_accel AS ( + SELECT + source_id, + INT(time_step/60) * 60 AS time_seconds_bin, + AVG(speed) AS speed_avg, + AVG(speed) + STDEV(speed) AS speed_upper_bound, + AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(accel_without_noise) AS accel_avg, + AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), binned_energy_start_end AS ( + SELECT DISTINCT + source_id, + id, + INT(time_step/60) * 60 AS time_seconds_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + FROM cumulative_energy + ), binned_energy AS ( + SELECT + source_id, + time_seconds_bin, + AVG(energy_end - energy_start) AS instantaneous_energy_avg, + AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS 
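+        -- (energy_start / energy_end come from the FIRST_VALUE / LAST_VALUE
+        --  windows in binned_energy_start_end, so the difference averaged here
+        --  is the energy one vehicle spends inside a single bin)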
instantaneous_energy_lower_bound + FROM binned_energy_start_end + GROUP BY 1, 2 + ) + SELECT + COALESCE(bce.source_id, bsa.source_id, be.source_id) AS source_id, + COALESCE(bce.time_seconds_bin, bsa.time_seconds_bin, be.time_seconds_bin) AS time_seconds_bin, + bce.cumulative_energy_avg, + bce.cumulative_energy_lower_bound, + bce.cumulative_energy_upper_bound, + bsa.speed_avg, + bsa.speed_upper_bound, + bsa.speed_lower_bound, + bsa.accel_avg, + bsa.accel_upper_bound, + bsa.accel_lower_bound, + be.instantaneous_energy_avg, + be.instantaneous_energy_upper_bound, + be.instantaneous_energy_lower_bound + FROM binned_cumulative_energy bce + FULL OUTER JOIN binned_speed_accel bsa ON 1 = 1 + AND bce.source_id = bsa.source_id + AND bce.time_seconds_bin = bsa.time_seconds_bin + FULL OUTER JOIN binned_energy be ON 1 = 1 + AND COALESCE(bce.source_id, bsa.source_id) = be.source_id + AND COALESCE(bce.time_seconds_bin, bce.time_seconds_bin) = be.time_seconds_bin + ;""" +>>>>>>> more quieres added diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 57e72586a..f7b4ff358 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -334,10 +334,9 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # upload to s3 if asked if args.use_s3: - partition_name = source_id[-3:] cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( - cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), + cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(args.only_query)[2:-2]) # print the location of the emission csv file From 7731187279b8c41e421cf6641157fbd6e2de2e40 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 21 May 2020 19:54:59 -0700 Subject: [PATCH 111/438] update the architecture and fix some bugs --- flow/core/experiment.py | 2 +- flow/data_pipeline/data_pipeline.py | 31 +++++++--- flow/data_pipeline/query.py | 96 ++++++++++++++++------------- flow/visualize/i210_replay.py | 2 +- 4 files changed, 75 insertions(+), 56 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 9984692f6..6b0e32589 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -218,7 +218,7 @@ def rl_actions(*_): if to_aws: cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(only_query)[2:-2]) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 0d8f91c39..8f57a29d8 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -134,9 +134,9 @@ def __init__(self): """ self.MAX_WAIT = 60 self.client = boto3.client("athena") - self.existing_partitions = self.get_existing_partitions() + self.existing_partitions = {} - def get_existing_partitions(self): + def get_existing_partitions(self, table): """Return the existing partitions in the S3 bucket. 
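+
+        Parameters
+        ----------
+        table : str
+            name of the table whose partitions are fetched
+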
Returns @@ -144,7 +144,7 @@ def get_existing_partitions(self): partitions: a list of existing partitions on S3 bucket """ response = self.client.start_query_execution( - QueryString='SHOW PARTITIONS trajectory_table', + QueryString='SHOW PARTITIONS {}'.format(table), QueryExecutionContext={ 'Database': 'circles' }, @@ -199,18 +199,21 @@ def wait_for_execution(self, execution_id): return False return True - def update_partition(self, query_date, partition): + def update_partition(self, table, query_date, partition): """Load the given partition to the trajectory_table on Athena. Parameters ---------- + table : str + the name of the table to update query_date : str the new partition date that needs to be loaded partition : str the new partition that needs to be loaded """ response = self.client.start_query_execution( - QueryString=QueryStrings['UPDATE_PARTITION'].value.format(date=query_date, partition=partition), + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(table=table, date=query_date, + partition=partition), QueryExecutionContext={ 'Database': 'circles' }, @@ -218,11 +221,11 @@ def update_partition(self, query_date, partition): ) if self.wait_for_execution(response['QueryExecutionId']): raise RuntimeError("update partition timed out") - self.existing_partitions.append("date={}/partition_name={}".format(query_date, partition)) + self.existing_partitions[table].append("date={}/partition_name={}".format(query_date, partition)) return def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", - query_date="today", partition="default"): + query_date="today", partition="default", primary_table=""): """Start the execution of a query, does not wait for it to finish. Parameters @@ -235,6 +238,8 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu name of the partition date to run this query on partition: str, optional name of the partition to run this query on + primary_table: str + the table whose partition that may need update Returns ------- execution_id: str @@ -249,11 +254,17 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu if query_date == "today": query_date = date.today().isoformat() - if "date={}/partition_name={}".format(query_date, partition) not in self.existing_partitions: - self.update_partition(query_date, partition) + source_id = "flow_{}".format(partition.split('_')[1]) + + if primary_table: + if primary_table not in self.existing_partitions.keys(): + self.existing_partitions[primary_table] = self.get_existing_partitions(primary_table) + if "date={}/partition_name={}".format(query_date, partition) not in \ + self.existing_partitions[primary_table]: + self.update_partition(primary_table, query_date, partition) response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(date=query_date, partition=partition), + QueryString=QueryStrings[query_name].value.format(date=query_date, partition=source_id), QueryExecutionContext={ 'Database': 'circles' }, diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index f22c90114..b010ac2ba 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,8 +2,16 @@ from enum import Enum # tags for different queries -tags = {"vehicle_energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = {"fact_vehicle_trace": {"fact_energy_trace": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", + 
"POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], + "fact_network_throughput_agg": ["FACT_NETWORK_THROUGHPUT_AGG"], + "fact_network_inflows_outflows": ["FACT_NETWORK_INFLOWS_OUTFLOWS"]}, + "fact_energy_trace": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], + "fact_network_metrics_by_distance_agg": ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], + "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, + "fact_vehicle_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"], + "fact_network_fuel_efficiency_agg": ["LEADERBOARD_CHART"] + } VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT @@ -38,7 +46,7 @@ class QueryStrings(Enum): """ UPDATE_PARTITION = """ - ALTER TABLE trajectory_table + ALTER TABLE {table} ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ @@ -51,7 +59,7 @@ class QueryStrings(Enum): acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -69,7 +77,7 @@ class QueryStrings(Enum): accel_without_noise AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -90,7 +98,7 @@ class QueryStrings(Enum): OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -120,8 +128,8 @@ class QueryStrings(Enum): MAX(time_step) - MIN(time_step) AS total_time_seconds FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1 @@ -140,8 +148,8 @@ class QueryStrings(Enum): MAX(x) AS distance_meters FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND source_id = AND x BETWEEN 500 AND 2300 AND time_step >= 600 @@ -155,8 +163,8 @@ class QueryStrings(Enum): SUM(power) AS power_watts FROM fact_energy_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND date = \'{date}\' + AND partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 @@ -188,8 +196,8 @@ class QueryStrings(Enum): 74564 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 - AND date = \'{{date}}\' - AND parititon_name = \'{{partition}}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND date = \'{date}\' + AND parititon_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' GROUP BY 1, 2 ;""" @@ -203,10 +211,10 @@ class QueryStrings(Enum): t.throughput_per_hour FROM fact_network_throughput_agg t JOIN fact_network_fuel_efficiency_agg e ON 1 = 1 - AND t.date = \'{{date}}\' - AND t.partition_name = \'{{partition}}_FACT_NETWORK_THROUGHPUT_AGG\' - AND e.date = \'{{date}}\' - AND e.partition_name = \'{{partition}}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' + AND t.date = \'{date}\' + AND t.partition_name = 
\'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' + AND e.date = \'{date}\' + AND e.partition_name = \'{partition}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' AND t.source_id = e.source_id AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' WHERE 1 = 1 @@ -221,21 +229,21 @@ class QueryStrings(Enum): MAX(time_step) AS max_time_step FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1, 2 ), inflows AS ( SELECT - INT(min_time_step / 60) * 60 AS time_step, + CAST(min_time_step / 60 AS INTEGER) * 60 AS time_step, source_id, 60 * COUNT(DISTINCT id) AS inflow_rate FROM min_max_time_step GROUP BY 1, 2 ), outflows AS ( SELECT - INT(max_time_step / 60) * 60 AS time_step, + CAST(max_time_step / 60 AS INTEGER) * 60 AS time_step, source_id, 60 * COUNT(DISTINCT id) AS outflow_rate FROM min_max_time_step @@ -267,10 +275,10 @@ class QueryStrings(Enum): cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 - AND vt.date = \'{{date}}\' - AND vt.partition_name = \'{{partition}}\' - AND et.date = \'{{date}}\' - AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND et.date = \'{date}\' + AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step @@ -296,7 +304,7 @@ class QueryStrings(Enum): ), binned_cumulative_energy AS ( SELECT source_id, - INT(x/10) * 10 AS distance_meters_bin, + CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(energy_joules) AS cumulative_energy_avg, AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound @@ -305,7 +313,7 @@ class QueryStrings(Enum): ), binned_speed_accel AS ( SELECT source_id, - INT(x/10) * 10 AS distance_meters_bin, + CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(speed) AS speed_avg, AVG(speed) + STDEV(speed) AS speed_upper_bound, AVG(speed) - STDEV(speed) AS speed_lower_bound, @@ -314,8 +322,8 @@ class QueryStrings(Enum): AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1, 2 @@ -323,9 +331,9 @@ class QueryStrings(Enum): SELECT DISTINCT source_id, id, - INT(x/10) * 10 AS distance_meters_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT @@ -376,10 +384,10 @@ class QueryStrings(Enum): cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 - AND vt.date = \'{{date}}\' - AND vt.partition_name = \'{{partition}}\' - AND et.date = \'{{date}}\' - AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.date = \'{date}\' + AND 
vt.partition_name = \'{partition}\' + AND et.date = \'{date}\' + AND et.partition_name = \'{partitio}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step @@ -408,7 +416,7 @@ class QueryStrings(Enum): ), binned_cumulative_energy AS ( SELECT source_id, - INT(time_step/60) * 60 AS time_seconds_bin, + CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(energy_joules) AS cumulative_energy_avg, AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound @@ -417,7 +425,7 @@ class QueryStrings(Enum): ), binned_speed_accel AS ( SELECT source_id, - INT(time_step/60) * 60 AS time_seconds_bin, + CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(speed) AS speed_avg, AVG(speed) + STDEV(speed) AS speed_upper_bound, AVG(speed) - STDEV(speed) AS speed_lower_bound, @@ -426,8 +434,8 @@ class QueryStrings(Enum): AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1, 2 @@ -435,9 +443,9 @@ class QueryStrings(Enum): SELECT DISTINCT source_id, id, - INT(time_step/60) * 60 AS time_seconds_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index f7b4ff358..4faf54385 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -335,7 +335,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # upload to s3 if asked if args.use_s3: cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(args.only_query)[2:-2]) From 71cb4ee9c5f2a2d32e6172620cd3cfb410fad5eb Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 22 May 2020 00:18:53 -0700 Subject: [PATCH 112/438] fix inflow issue --- flow/data_pipeline/query.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index b010ac2ba..47bd9dd81 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -121,17 +121,25 @@ class QueryStrings(Enum): 'denoised_speed_cte')) FACT_NETWORK_THROUGHPUT_AGG = """ - WITH agg AS ( - SELECT + WITH min_time AS ( + SELECT source_id, - COUNT(DISTINCT id) AS n_vehicles, - MAX(time_step) - MIN(time_step) AS total_time_seconds + id, + MIN(time_step) AS enter_time FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 - AND time_step >= 600 + GROUP BY 1, 2 + ), agg AS ( + SELECT + 
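+            -- (per-vehicle entry times into the measured x >= 500 stretch; the
+            --  agg CTE below divides the distinct-vehicle count by the spread
+            --  of these entry times, so throughput is estimated from arrivals)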
source_id, + COUNT(DISTINCT id) AS n_vehicles, + MAX(enter_time) - MIN(enter_time) AS total_time_seconds + FROM min_time + WHERE 1 = 1 + AND enter_time >= 600 GROUP BY 1 ) SELECT @@ -145,7 +153,7 @@ class QueryStrings(Enum): SELECT id, source_id, - MAX(x) AS distance_meters + MAX(x)-MIN(x) AS distance_meters FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' From a90e8d0510a28904177f0591ae2f1b086287742d Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sun, 24 May 2020 20:07:30 -0700 Subject: [PATCH 113/438] the extended new pipeline constructed and works --- flow/core/experiment.py | 8 +- flow/data_pipeline/data_pipeline.py | 6 +- flow/data_pipeline/query.py | 160 +++++++++++++++------------- 3 files changed, 90 insertions(+), 84 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 6b0e32589..c50648746 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -214,15 +214,15 @@ def rl_actions(*_): os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" - upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) + generate_trajectory_from_flow(trajectory_table_path, extra_info) if to_aws: cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( - cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), - upload_file_path, str(only_query)[2:-2]) + cur_date, source_id, source_id), + trajectory_table_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file - os.remove(upload_file_path) + # os.remove(upload_file_path) return info_dict diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 8f57a29d8..72caa5218 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -60,9 +60,9 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) - upload_only_file_path = data_path[:-4] + "_upload" + ".csv" - extra_info.to_csv(upload_only_file_path, index=False, header=False) - return upload_only_file_path + # upload_only_file_path = data_path[:-4] + "_upload" + ".csv" + # extra_info.to_csv(upload_only_file_path, index=False, header=False) + return def upload_to_s3(bucket_name, bucket_key, file_path, only_query): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 47bd9dd81..abc3bcd53 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -6,14 +6,20 @@ "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], "fact_network_throughput_agg": ["FACT_NETWORK_THROUGHPUT_AGG"], "fact_network_inflows_outflows": ["FACT_NETWORK_INFLOWS_OUTFLOWS"]}, - "fact_energy_trace": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], - "fact_network_metrics_by_distance_agg": ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], - "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, - "fact_vehicle_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"], - "fact_network_fuel_efficiency_agg": ["LEADERBOARD_CHART"] + "fact_energy_trace": {}, + "POWER_DEMAND_MODEL_DENOISED_ACCEL": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], + "fact_network_metrics_by_distance_agg": + ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], + "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, + 
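+        # (read tags as a dependency graph: an outer key names the table whose
+        #  S3 upload should trigger queries, each inner key the downstream
+        #  table that the listed queries populate)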
"fact_vehicle_fuel_efficiency_agg": {"fact_network_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"]}, + "fact_network_fuel_efficiency_agg": {"leaderboard_chart": ["LEADERBOARD_CHART"]} } -VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ +tables = ["fact_vehicle_trace", "fact_energy_trace", "fact_network_throughput_agg", "fact_network_inflows_outflows", + "fact_vehicle_fuel_efficiency_agg", "fact_network_metrics_by_distance_agg", + "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart"] + +VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT id, time_step, @@ -56,7 +62,7 @@ class QueryStrings(Enum): id, time_step, speed, - acceleration, + COALESCE (acceleration, realized_accel) AS acceleration, road_grade, source_id FROM fact_vehicle_trace @@ -74,7 +80,7 @@ class QueryStrings(Enum): id, time_step, speed, - accel_without_noise AS acceleration, + COALESCE (accel_without_noise, acceleration, realized_accel) AS acceleration, road_grade, source_id FROM fact_vehicle_trace @@ -91,9 +97,10 @@ class QueryStrings(Enum): SELECT id, time_step, - accel_without_noise, + COALESCE (accel_without_noise, acceleration, realized_accel) AS acceleration, road_grade, source_id, + speed AS cur_speed, time_step - LAG(time_step, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) @@ -106,17 +113,12 @@ class QueryStrings(Enum): SELECT id, time_step, - prev_speed + accel_without_noise * sim_step AS speed, - accel_without_noise AS acceleration, + COALESCE (prev_speed + acceleration * sim_step, cur_speed) AS speed, + acceleration, road_grade, source_id FROM lagged_timestep ) -<<<<<<< HEAD - {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', - 'denoised_speed_cte')) -======= {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) @@ -149,35 +151,46 @@ class QueryStrings(Enum): ;""" FACT_VEHICLE_FUEL_EFFICIENCY_AGG = """ - WITH distance AS ( - SELECT + WITH sub_fact_vehicle_trace AS ( + SELECT id, - source_id, - MAX(x)-MIN(x) AS distance_meters + time_step, + x, + source_id FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' - AND source_id = - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 - GROUP BY 1, 2 - ), energy AS ( + ), distance AS ( SELECT id, source_id, - energy_model_id, - (MAX(time_step) - MIN(time_step)) / (COUNT(DISTINCT time_step) - 1) AS time_step_size_seconds, - SUM(power) AS power_watts - FROM fact_energy_trace + MAX(x)-MIN(x) AS distance_meters + FROM sub_fact_vehicle_trace WHERE 1 = 1 - AND date = \'{date}\' - AND partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' - AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 + GROUP BY 1, 2 + ), energy AS ( + SELECT + e.id, + e.source_id, + e.energy_model_id, + (MAX(e.time_step) - MIN(e.time_step)) / (COUNT(DISTINCT e.time_step) - 1) AS time_step_size_seconds, + SUM(e.power) AS power_watts + FROM fact_energy_trace AS e + JOIN sub_fact_vehicle_trace AS v ON 1 = 1 + AND e.id = v.id + AND e.time_step = v.time_step + AND e.source_id = v.source_id + WHERE 1 = 1 + AND e.date = \'{date}\' + AND e.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND v.x BETWEEN 500 AND 2300 + AND e.time_step >= 600 GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT 
time_step) > 1 + HAVING COUNT(DISTINCT e.time_step) > 1 ) SELECT d.id, @@ -205,7 +218,7 @@ class QueryStrings(Enum): FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 AND date = \'{date}\' - AND parititon_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND partition_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' GROUP BY 1, 2 ;""" @@ -215,10 +228,10 @@ class QueryStrings(Enum): t.source_id, e.energy_model_id, e.efficiency_meters_per_joules, - 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon + 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, t.throughput_per_hour - FROM fact_network_throughput_agg t - JOIN fact_network_fuel_efficiency_agg e ON 1 = 1 + FROM fact_network_throughput_agg AS t + JOIN fact_network_fuel_efficiency_agg AS e ON 1 = 1 AND t.date = \'{date}\' AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' AND e.date = \'{date}\' @@ -271,15 +284,15 @@ class QueryStrings(Enum): FACT_NETWORK_METRICS_BY_DISTANCE_AGG = """ WITH joined_trace AS ( SELECT - id, - source_id, - time_step, - x, + vt.id, + vt.source_id, + vt.time_step, + vt.x, energy_model_id, - time_step - LAG(time_step, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + vt.time_step - LAG(vt.time_step, 1) + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, SUM(power) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 @@ -307,27 +320,26 @@ class QueryStrings(Enum): AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 - GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT time_step) > 1 ), binned_cumulative_energy AS ( SELECT source_id, CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(energy_joules) AS cumulative_energy_avg, - AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, - AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound FROM cumulative_energy GROUP BY 1, 2 + HAVING COUNT(DISTINCT time_step) > 1 ), binned_speed_accel AS ( SELECT source_id, CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(speed) AS speed_avg, - AVG(speed) + STDEV(speed) AS speed_upper_bound, - AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(speed) + STDDEV(speed) AS speed_upper_bound, + AVG(speed) - STDDEV(speed) AS speed_lower_bound, AVG(accel_without_noise) AS accel_avg, - AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, - AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + AVG(accel_without_noise) + STDDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' @@ -348,8 +360,8 @@ class QueryStrings(Enum): source_id, distance_meters_bin, AVG(energy_end - energy_start) AS instantaneous_energy_avg, - AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, - AVG(energy_end - energy_start) - 
STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + AVG(energy_end - energy_start) + STDDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound FROM binned_energy_start_end GROUP BY 1, 2 ) @@ -380,22 +392,22 @@ class QueryStrings(Enum): FACT_NETWORK_METRICS_BY_TIME_AGG = """ WITH joined_trace AS ( SELECT - id, - source_id, - time_step, - x, + vt.id, + vt.source_id, + vt.time_step, + vt.x, energy_model_id, - time_step - LAG(time_step, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + vt.time_step - LAG(vt.time_step, 1) + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, SUM(power) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' AND et.date = \'{date}\' - AND et.partition_name = \'{partitio}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step @@ -413,33 +425,28 @@ class QueryStrings(Enum): cumulative_power * sim_step AS energy_joules FROM joined_trace WHERE 1 = 1 - AND date = - AND partition_name = - AND source_id = - AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 - GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT time_step) > 1 ), binned_cumulative_energy AS ( SELECT source_id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(energy_joules) AS cumulative_energy_avg, - AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, - AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound FROM cumulative_energy GROUP BY 1, 2 + HAVING COUNT(DISTINCT time_step) > 1 ), binned_speed_accel AS ( SELECT source_id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(speed) AS speed_avg, - AVG(speed) + STDEV(speed) AS speed_upper_bound, - AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(speed) + STDDEV(speed) AS speed_upper_bound, + AVG(speed) - STDDEV(speed) AS speed_lower_bound, AVG(accel_without_noise) AS accel_avg, - AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, - AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + AVG(accel_without_noise) + STDDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' @@ -460,8 +467,8 @@ class QueryStrings(Enum): source_id, time_seconds_bin, AVG(energy_end - energy_start) AS instantaneous_energy_avg, - AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, - AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + AVG(energy_end - energy_start) + STDDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - 
STDDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound FROM binned_energy_start_end GROUP BY 1, 2 ) @@ -488,4 +495,3 @@ class QueryStrings(Enum): AND COALESCE(bce.source_id, bsa.source_id) = be.source_id AND COALESCE(bce.time_seconds_bin, bce.time_seconds_bin) = be.time_seconds_bin ;""" ->>>>>>> more quieres added From 94337c20d80cb38e25257b86d06dc9684c6baf4e Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 25 May 2020 15:03:26 -0700 Subject: [PATCH 114/438] updated the lambda function --- flow/data_pipeline/lambda_function.py | 43 +++++++++++++++++++-------- 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index fe8efe3c0..22145af9c 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -2,28 +2,47 @@ import boto3 from urllib.parse import unquote_plus from flow.data_pipeline.data_pipeline import AthenaQuery -from flow.data_pipeline.query import tags +from flow.data_pipeline.query import tags, tables s3 = boto3.client('s3') queryEngine = AthenaQuery() def lambda_handler(event, context): - """Run on AWS Lambda to start query automatically.""" + """Handles S3 put event on AWS Lambda.""" + records = [] + # delete all unwanted metadata for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) + table = key.split('/')[0] + if table not in tables: + continue + if key[-9:] == '.metadata': + s3.delete_object(Bucket=bucket, Key=key) + continue + if table in tags.keys(): + records.append((bucket, key, table)) + # initialize the queries + for bucket, key, table in records: query_date = key.split('/')[-3].split('=')[-1] partition = key.split('/')[-2].split('=')[-1] - response = s3.head_object(Bucket=bucket, Key=key) - required_query = response["Metadata"]["run-query"] + source_id = "flow_{}".format(partition.split('_')[1]) + # response = s3.head_object(Bucket=bucket, Key=key) + # required_query = response["Metadata"]["run-query"] - if bucket == 'circles.data.pipeline' and 'trajectory-output/' in key: - if required_query == "all": - query_list = tags["energy"] - elif not required_query: - break - else: - query_list = required_query.split("\', \'") + query_dict = tags[table] + + # handle different energy models + if table == "fact_energy_trace": + energy_model_id = partition.replace(source_id, "")[1:] + query_dict = tags[energy_model_id] + + # initialize queries and store them at appropriate locations + for table_name, query_list in query_dict.items(): for query_name in query_list: - queryEngine.run_query(query_name, 's3://circles.data.pipeline/result/auto/', query_date, partition) + result_location = 's3://circles.data.pipeline/{}/date={}/partition_name={}_{}'.format(table_name, + query_date, + source_id, + query_name) + queryEngine.run_query(query_name, result_location, query_date, partition, table) From 3a501dab148900d2d5f3a17f7af7232acf1eb059 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 25 May 2020 15:59:26 -0700 Subject: [PATCH 115/438] fix minor string formatting issue in the query --- flow/data_pipeline/query.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 078bdd129..7091997cc 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -22,11 +22,11 @@ WHEN acceleration > 0 THEN 1 WHEN acceleration < 0 THEN 0 ELSE 0.5 - END * (1 - {}) + {}) * acceleration + 9.81 * 
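+            -- a hedged physical reading of this expression, using the constants
+            -- hard-coded below:
+            --     P = m*v*(mask*a + g*sin(grade)) + m*g*Cr*v + 0.5*rho*A*Cd*v^3
+            -- with m = 1200 kg, Cr = 0.005, rho = 1.225, A = 2.6, Cd = 0.3, and
+            -- mask the CASE/{0} blend applied to the acceleration term above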
SIN(road_grade) + END * (1 - {0}) + {0}) * acceleration + 9.81 * SIN(road_grade) ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3)) AS power, - \'{}\' AS energy_model_id, + \'{1}\' AS energy_model_id, source_id - FROM {} + FROM {2} ORDER BY id, time_step """ From 36086521fff4c3e3d0728423e36af15bfa242403 Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Thu, 7 May 2020 23:51:53 -0700 Subject: [PATCH 116/438] get not departed vehicles (#922) * added function to kernel/vehicle to get number of not departed vehiles * fixed over indentation of the docstring * indentation edit * pep8 Co-authored-by: AboudyKreidieh --- flow/core/kernel/vehicle/traci.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index b89e981be..58eddfd1c 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -220,6 +220,10 @@ def update(self, reset): self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the number of not departed vehicles + self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ + sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: try: @@ -543,6 +547,10 @@ def get_fuel_consumption(self, veh_id, error=-1001): return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons + def get_num_not_departed(self): + """See parent class.""" + return self.num_not_departed + def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): From 0a83576b80c2955ac8e09f88e0b159c836a887c4 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 16:43:22 -0700 Subject: [PATCH 117/438] Add an on ramp option --- examples/exp_configs/non_rl/i210_subnetwork.py | 2 +- flow/envs/base.py | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 25565bb49..194da1099 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -117,7 +117,7 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), diff --git a/flow/envs/base.py b/flow/envs/base.py index baf8270b5..f2067d947 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -405,8 +405,18 @@ def step(self, rl_actions): # test if the environment should terminate due to a collision or the # time horizon being met done = (self.time_counter >= self.env_params.sims_per_step * - (self.env_params.warmup_steps + self.env_params.horizon) - or crash) + (self.env_params.warmup_steps + self.env_params.horizon)) + if crash: + print( + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************\n" + "WARNING: There was a crash. 
\n" + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************" + ) + # compute the info for each agent infos = {} From 9b649efbda97a80f3c926ec1fc9838b76f27aa60 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 12:10:07 -0700 Subject: [PATCH 118/438] Upgrade the network to not have keepclear value on the junctions --- .../exp_configs/templates/sumo/test2.net.xml | 78 ++++++++++++++----- 1 file changed, 57 insertions(+), 21 deletions(-) diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 00e3edcd5..16170b917 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,5 +1,41 @@ + + @@ -4680,24 +4716,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4801,10 +4837,10 @@ - + - - + + From c18ec58b8a1e0a036b111649dbd2b0f05bd28c55 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 6 Apr 2020 15:28:57 -0700 Subject: [PATCH 119/438] datapip pipeline implemented --- examples/data_pipeline.py | 179 ++++++++++++++++++++++++++++ examples/query.py | 8 ++ examples/run_query.py | 34 ++++++ examples/simulate.py | 10 +- flow/controllers/base_controller.py | 12 ++ flow/core/experiment.py | 27 ++++- flow/core/kernel/vehicle/base.py | 16 +++ flow/core/kernel/vehicle/traci.py | 15 +++ 8 files changed, 299 insertions(+), 2 deletions(-) create mode 100644 examples/data_pipeline.py create mode 100644 examples/query.py create mode 100644 examples/run_query.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py new file mode 100644 index 000000000..5fdc30cf2 --- /dev/null +++ b/examples/data_pipeline.py @@ -0,0 +1,179 @@ +import pandas as pd +import boto3 +from botocore.exceptions import ClientError +from examples.query import QueryStrings +from time import time + + +def generate_trajectory_table(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based on standard SUMO emission + + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ + raw_output = pd.read_csv(data_path, index_col=["time", "id"]) + required_cols = {"time", "id", "speed", "x", "y"} + raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) + + extra_info = pd.DataFrame.from_dict(extra_info) + extra_info.set_index(["time", "id"]) + raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) + + # add the partition column + raw_output['partition'] = partition_name + + output_file_path = data_path[:-4]+"_trajectory.csv" + raw_output.to_csv(output_file_path, index=False) + return output_file_path + + +def upload_to_s3(bucket_name, bucket_key, file_path): + """ upload a file to S3 bucket + + Parameters + ---------- + bucket_name : str + the bucket to upload to + bucket_key: str + the key within the bucket for the file + file_path: str + the path of the file to be uploaded + """ + s3 = boto3.resource("s3") + s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + return + + +class AthenaQuery: + + def __init__(self): + self.MAX_WAIT = 60 + 
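+        # seconds to poll a query before wait_for_execution reports a time-out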
self.client = boto3.client("athena")
+        self.existing_partitions = self.get_existing_partitions()
+
+    def get_existing_partitions(self):
+        """Return the existing partitions in the S3 bucket."""
+        response = self.client.start_query_execution(
+            QueryString='SHOW PARTITIONS trajectory_table',
+            QueryExecutionContext={
+                'Database': 'simulation'
+            },
+            WorkGroup='primary'
+        )
+        if self.wait_for_execution(response['QueryExecutionId']):
+            raise RuntimeError("get current partitions timed out")
+        response = self.client.get_query_results(
+            QueryExecutionId=response['QueryExecutionId'],
+            MaxResults=1000
+        )
+        return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']]
+
+    def check_status(self, execution_id):
+        """Return the status of the execution with the given id.
+
+        Parameters
+        ----------
+        execution_id : str
+            id of the execution that is checked for
+        Returns
+        -------
+        status: str
+            QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED
+        """
+        response = self.client.get_query_execution(
+            QueryExecutionId=execution_id
+        )
+        return response['QueryExecution']['Status']['State']
+
+    def wait_for_execution(self, execution_id):
+        """Wait for the execution to finish or time out.
+
+        Parameters
+        ----------
+        execution_id : str
+            id of the execution this is waiting for
+        Returns
+        -------
+        time_out: bool
+            True if time-out, False if success
+        Raises
+        ------
+        RuntimeError: if the execution failed or got canceled
+        """
+        start = time()
+        while time() - start < self.MAX_WAIT:
+            state = self.check_status(execution_id)
+            if state == 'FAILED' or state == 'CANCELLED':
+                raise RuntimeError("update partition failed")
+            elif state == 'SUCCEEDED':
+                return False
+        return True
+
+    def update_partition(self, partition):
+        """Load the given partition to the trajectory_table on Athena.
+
+        Parameters
+        ----------
+        partition : str
+            the new partition that needs to be loaded
+        """
+        response = self.client.start_query_execution(
+            QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition),
+            QueryExecutionContext={
+                'Database': 'simulation'
+            },
+            WorkGroup='primary'
+        )
+        if self.wait_for_execution(response['QueryExecutionId']):
+            raise RuntimeError("update partition timed out")
+        self.existing_partitions.append(partition)
+        return
+
+    def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"):
+        """Start the execution of a query; does not wait for it to finish.
+
+        Parameters
+        ----------
+        query_name : str
+            name of the query in the QueryStrings enum that will be run
+        result_location: str, optional
+            location on the S3 bucket where the result will be stored
+        partition: str, optional
+            name of the partition to run this query on
+        Returns
+        -------
+        execution_id: str
+            the execution id of the execution started by this method
+        Raises
+        ------
+        ValueError: if one tries to run a query that does not exist in the QueryStrings enum
+        """
+        if query_name not in QueryStrings.__members__:
+            raise ValueError("query does not exist: please add it to query.py")
+
+        if partition not in self.existing_partitions:
+            self.update_partition(partition)
+
+        response = self.client.start_query_execution(
+            QueryString=QueryStrings[query_name].value.format(partition=partition),
+            QueryExecutionContext={
+                'Database': 'simulation'
+            },
+            ResultConfiguration={
+                'OutputLocation': result_location,
+            },
+            WorkGroup='primary'
+        )
+        return response['QueryExecutionId']
\ No newline at end of file
diff --git a/examples/query.py b/examples/query.py
new file mode 
100644
index 000000000..3fbbe69e1
--- /dev/null
+++ b/examples/query.py
@@ -0,0 +1,8 @@
+from enum import Enum
+
+tags = {}
+
+
+class QueryStrings(Enum):
+    SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;"
+    UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');"
\ No newline at end of file
diff --git a/examples/run_query.py b/examples/run_query.py
new file mode 100644
index 000000000..7b4a5af7d
--- /dev/null
+++ b/examples/run_query.py
@@ -0,0 +1,34 @@
+import argparse
+import sys
+from examples.data_pipeline import AthenaQuery
+from examples.query import QueryStrings
+
+parser = argparse.ArgumentParser(prog="run_query", description="runs a query on AWS Athena and stores the result at "
+                                                               "an S3 location")
+parser.add_argument("--run", type=str, nargs="+")
+parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/")
+parser.add_argument("--partition", type=str, nargs='?', default="default")
+parser.add_argument("--list_partitions", action="store_true")
+parser.add_argument("--check_status", type=str, nargs='+')
+parser.add_argument("--list_queries", action="store_true")
+
+
+if __name__ == "__main__":
+    args = parser.parse_args()
+    queryEngine = AthenaQuery()
+
+    if args.run:
+        execution_ids = []
+        for query_name in args.run:
+            execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition))
+        print(execution_ids)
+    if args.list_partitions:
+        print(queryEngine.existing_partitions)
+    if args.check_status:
+        status = dict()
+        for execution_id in args.check_status:
+            status[execution_id] = queryEngine.check_status(execution_id)
+        print(status)
+    if args.list_queries:
+        for q in QueryStrings:
+            print(q)
diff --git a/examples/simulate.py b/examples/simulate.py
index 848f030a4..f54bb38d9 100644
--- a/examples/simulate.py
+++ b/examples/simulate.py
@@ -48,6 +48,12 @@ def parse_args(args):
         action='store_true',
         help='Specifies whether to generate an emission file from the '
              'simulation.')
+    parser.add_argument(
+        '--to_aws',
+        type=str, nargs='?', default=None, const="default",
+        help='Specifies the name of the partition in which to store the '
+             'output file on S3. Passing a non-None value for this argument '
+             'automatically sets gen_emission to True.')
     return parser.parse_known_args(args)[0]
@@ -55,6 +61,8 @@
 if __name__ == "__main__":
     flags = parse_args(sys.argv[1:])
 
+    flags.gen_emission = flags.gen_emission or flags.to_aws
+
     # Get the flow_params object.
     module = __import__("exp_configs.non_rl", fromlist=[flags.exp_config])
     flow_params = getattr(module, flags.exp_config).flow_params
@@ -83,4 +91,4 @@
     exp = Experiment(flow_params, callables)
 
     # Run for the specified number of rollouts. 
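+    # e.g., a hypothetical invocation exercising the new flag (the config and
+    # partition names here are invented for illustration):
+    #     python simulate.py ring --num_runs 1 --to_aws my_test_partition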
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 4004b1c4d..6e6734764 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -88,6 +88,9 @@ def get_action(self, env): float the modified form of the acceleration """ + # clear the current stored accel_without_noise of this vehicle None + env.k.vehicle.update_accel_without_noise(self.veh_id, None) + # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed if len(env.k.vehicle.get_edge(self.veh_id)) == 0: @@ -105,6 +108,15 @@ def get_action(self, env): if accel is None: return None + # store the acceleration without noise to each vehicle + # run fail safe if requested + accel_without_noice = accel + if self.fail_safe == 'instantaneous': + accel_without_noice = self.get_safe_action_instantaneous(env, accel_without_noice) + elif self.fail_safe == 'safe_velocity': + accel_without_noice = self.get_safe_velocity_action(env, accel_without_noice) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noice) + # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index a0497b595..1f0cce355 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,6 +1,7 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env +from examples.data_pipeline import generate_trajectory_table, upload_to_s3 import datetime import logging import time @@ -85,7 +86,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, convert_to_csv=False): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters @@ -98,6 +99,10 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False): convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file + partition_name: str + Specifies the S3 partition you want to store the output file, + will be used to later for query. If NONE, won't upload output + to S3. 
Returns ------- @@ -136,6 +141,8 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] + extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], + "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} for i in range(num_runs): ret = 0 @@ -153,6 +160,18 @@ def rl_actions(*_): vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) @@ -195,4 +214,10 @@ def rl_actions(*_): # Delete the .xml version of the emission file. os.remove(emission_path) + output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + output_file.split('/')[-1], output_file) + return info_dict diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 706504027..0c992503c 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -684,3 +684,19 @@ def get_max_speed(self, veh_id, error): float """ raise NotImplementedError + + ########################################################################### + # Methods for Datapipeline # + ########################################################################### + + def get_accel(self, veh_id): + """ see traci class """ + raise NotImplementedError + + def update_accel_without_noise(self, veh_id, accel_without_noise): + """ see traci class """ + raise NotImplementedError + + def get_accel_without_noise(self, veh_id): + """ see traci class """ + raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 58eddfd1c..b06ab112b 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,6 +113,7 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] + self.__vehicles[veh_id]["accel_without_noise"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1128,3 +1129,17 @@ def get_max_speed(self, veh_id, error=-1001): def set_max_speed(self, veh_id, max_speed): """See parent class.""" self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) + + # add for data pipeline + def get_accel(self, veh_id): + return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + + def update_accel_without_noise(self, veh_id, accel_without_noise): + 
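+        """Store the accel_without_noise computed by this vehicle's controller."""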
self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + + def get_accel_without_noise(self, veh_id): + return self.__vehicles[veh_id]["accel_without_noise"] + + def get_road_grade(self, veh_id): + # TODO + return 0 From 5878eae7cc27e766085362b478beb2abe1f51933 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 15:56:53 -0700 Subject: [PATCH 120/438] get up to date with i210_dev --- .../exp_configs/non_rl/i210_subnetwork.py | 2 +- .../exp_configs/templates/sumo/test2.net.xml | 78 +++++-------------- 2 files changed, 22 insertions(+), 58 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 194da1099..25565bb49 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -117,7 +117,7 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=False, + color_by_speed=True, use_ballistic=True ), diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 16170b917..00e3edcd5 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,41 +1,5 @@ - - @@ -4716,24 +4680,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4837,10 +4801,10 @@ - + - - + + From 34cecff822badd955a79ee8c773640875e6bea2b Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:45:08 -0700 Subject: [PATCH 121/438] remove dupe imports --- examples/train.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/examples/train.py b/examples/train.py index e34b2935c..7cb84d361 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,8 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv - from stable_baselines import PPO2 if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() @@ -175,12 +173,7 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ - from ray import tune from ray.tune.registry import register_env - try: - from ray.rllib.agents.agent import get_agent_class - except ImportError: - from ray.rllib.agents.registry import get_agent_class horizon = flow_params['env'].horizon @@ -263,8 +256,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - import ray - from ray.tune import run_experiments flow_params = submodule.flow_params flow_params['sim'].render = flags.render @@ -413,8 +404,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - from stable_baselines.common.vec_env import DummyVecEnv - from stable_baselines import PPO2 flow_params = submodule.flow_params # Path to the saved files From fc9983631ec172b624ae6dfef65eeed1eb8dce4c Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:51:14 -0700 Subject: [PATCH 122/438] remove blank lines after docstrings --- examples/train.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/train.py b/examples/train.py index 7cb84d361..5a9ab5903 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,7 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - if num_cpus == 1: constructor = env_constructor(params=flow_params, 
version=0)() # The algorithms require a vectorized environment to run @@ -256,7 +255,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) @@ -404,7 +402,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] From 6c11a70281ba4673edad160ffdcf68fc4372c13a Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:59:00 -0700 Subject: [PATCH 123/438] add back ray import --- examples/train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/train.py b/examples/train.py index 5a9ab5903..50720b756 100644 --- a/examples/train.py +++ b/examples/train.py @@ -255,6 +255,8 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" + import ray + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From d6ffaa6bb0783fe0aaf0feb09a7b2b1f9591d0b5 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 21:04:56 -0700 Subject: [PATCH 124/438] remove whitespace --- examples/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index 50720b756..1689d846f 100644 --- a/examples/train.py +++ b/examples/train.py @@ -256,7 +256,7 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" import ray - + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From 151e3b2195de3d6f9079593d0acf684489633e81 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 16:57:52 -0700 Subject: [PATCH 125/438] fix rebase errors --- examples/data_pipeline.py | 179 ---------------------------- examples/query.py | 8 -- examples/run_query.py | 34 ------ flow/controllers/base_controller.py | 1 + flow/core/kernel/vehicle/traci.py | 8 -- flow/data_pipeline/data_pipeline.py | 10 -- 6 files changed, 1 insertion(+), 239 deletions(-) delete mode 100644 examples/data_pipeline.py delete mode 100644 examples/query.py delete mode 100644 examples/run_query.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py deleted file mode 100644 index 5fdc30cf2..000000000 --- a/examples/data_pipeline.py +++ /dev/null @@ -1,179 +0,0 @@ -import pandas as pd -import boto3 -from botocore.exceptions import ClientError -from examples.query import QueryStrings -from time import time - - -def generate_trajectory_table(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based on standard SUMO emission - - Parameters - ---------- - data_path : str - path to the standard SUMO emission - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file - """ - raw_output = pd.read_csv(data_path, index_col=["time", "id"]) - required_cols = {"time", "id", "speed", "x", "y"} - raw_output = raw_output.drop(set(raw_output.columns) - 
required_cols, axis=1) - - extra_info = pd.DataFrame.from_dict(extra_info) - extra_info.set_index(["time", "id"]) - raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) - - # add the partition column - raw_output['partition'] = partition_name - - output_file_path = data_path[:-4]+"_trajectory.csv" - raw_output.to_csv(output_file_path, index=False) - return output_file_path - - -def upload_to_s3(bucket_name, bucket_key, file_path): - """ upload a file to S3 bucket - - Parameters - ---------- - bucket_name : str - the bucket to upload to - bucket_key: str - the key within the bucket for the file - file_path: str - the path of the file to be uploaded - """ - s3 = boto3.resource("s3") - s3.Bucket(bucket_name).upload_file(file_path, bucket_key) - return - - -class AthenaQuery: - - def __init__(self): - self.MAX_WAIT = 60 - self.client = boto3.client("athena") - self.existing_partitions = self.get_existing_partitions() - - def get_existing_partitions(self): - """prints the existing partitions in the S3 bucket""" - - response = self.client.start_query_execution( - QueryString='SHOW PARTITIONS trajectory_table', - QueryExecutionContext={ - 'Database': 'simulation' - }, - WorkGroup='primary' - ) - if self.wait_for_execution(response['QueryExecutionId']): - raise RuntimeError("get current partitions timed out") - response = self.client.get_query_results( - QueryExecutionId=response['QueryExecutionId'], - MaxResults=1000 - ) - return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] - - def check_status(self, execution_id): - """ Return the status of the execution with given id - - Parameters - ---------- - execution_id : string - id of the execution that is checked for - Returns - ------- - status: str - QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED - """ - - response = self.client.get_query_execution( - QueryExecutionId=execution_id - ) - return response['QueryExecution']['Status']['State'] - - def wait_for_execution(self, execution_id): - """ wait for the execution to finish or time-out - - Parameters - ---------- - execution_id : str - id of the execution this is watiing for - Returns - ------- - time_out: bool - True if time-out, False if success - Raises - ------ - RuntimeError: if execution failed or get canceled - """ - start = time() - while time() - start < self.MAX_WAIT: - state = self.check_status(execution_id) - if state == 'FAILED' or state == 'CANCELLED': - raise RuntimeError("update partition failed") - elif state == 'SUCCEEDED': - return False - return True - - def update_partition(self, partition): - """ load the given partition to the trajectory_table on Athena - - Parameters - ---------- - partition : str - the new partition that needs to be loaded - """ - response = self.client.start_query_execution( - QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), - QueryExecutionContext={ - 'Database': 'simulation' - }, - WorkGroup='primary' - ) - if self.wait_for_execution(response['QueryExecutionId']): - raise RuntimeError("update partition timed out") - self.existing_partitions.append(partition) - return - - def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): - """ start the execution of a query, does not wait for it to finish - - Parameters - ---------- - query_name : str - name of the query in QueryStrings enum that will be run - result_location: str, optional - location on the S3 bucket where the result will be 
stored - partition: str, optional - name of the partition to run this query on - Returns - ------- - execution_id: str - the execution id of the execution started by this method - Raises - ------ - ValueError: if tries to run a query not existed in QueryStrings enum - """ - if query_name not in QueryStrings.__members__: - raise ValueError("query not existed: please add it to query.py") - - if partition not in self.existing_partitions: - self.update_partition(partition) - - response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(partition=partition), - QueryExecutionContext={ - 'Database': 'simulation' - }, - ResultConfiguration={ - 'OutputLocation': result_location, - }, - WorkGroup='primary' - ) - return response['QueryExecutionId'] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py deleted file mode 100644 index 3fbbe69e1..000000000 --- a/examples/query.py +++ /dev/null @@ -1,8 +0,0 @@ -from enum import Enum - -tags = {} - - -class QueryStrings(Enum): - SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py deleted file mode 100644 index 7b4a5af7d..000000000 --- a/examples/run_query.py +++ /dev/null @@ -1,34 +0,0 @@ -import argparse -import sys -from examples.data_pipeline import AthenaQuery -from examples.query import QueryStrings - -parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" - "a S3 location") -parser.add_argument("--run", type=str, nargs="+") -parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") -parser.add_argument("--partition", type=str, nargs='?', default="default") -parser.add_argument("--list_partitions", action="store_true") -parser.add_argument("--check_status", type=str, nargs='+') -parser.add_argument("--list_queries", action="store_true") - - -if __name__ == "__main__": - args = parser.parse_args() - queryEngine = AthenaQuery() - - if args.run: - execution_ids = [] - for query_name in args.run: - execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) - print(execution_ids) - if args.list_partitions: - print(queryEngine.existing_partitions) - if args.check_status: - status = dict() - for execution_id in args.check_status: - status[execution_id] = queryEngine.check_status(execution_id) - print(status) - if args.list_queries: - for q in QueryStrings: - print(q) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 1169ce5b8..ac29bca2e 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -92,6 +92,7 @@ def get_action(self, env): env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, None) env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, None) env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, None) + env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, None) # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 5f3821a01..c1e614fe5 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -224,10 +224,6 @@ 
def update(self, reset): self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] - # update the number of not departed vehicles - self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ - sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] - # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: try: @@ -551,10 +547,6 @@ def get_fuel_consumption(self, veh_id, error=-1001): return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons - def get_num_not_departed(self): - """See parent class.""" - return self.num_not_departed - def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 11d85cb0d..aea9b349c 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -88,16 +88,6 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): return -def extra_init(): - """Return the dictionary with all the field pre-populated with empty list.""" - extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], - "target_accel_with_noise_with_failsafe": [], "target_accel_no_noise_no_failsafe": [], - "target_accel_with_noise_no_failsafe": [], "target_accel_no_noise_with_failsafe": [], - "realized_accel": [], "leader_id": [], "follower_id": [], - "leader_rel_speed": [], "road_grade": [], "source_id": []} - return extra_info - - def get_extra_info(veh_kernel, extra_info, veh_ids): """Get all the necessary information for the trajectory output from flow.""" for vid in veh_ids: From 9d2026e6a3635f756417f632abd30bb8891310a9 Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Thu, 7 May 2020 23:51:53 -0700 Subject: [PATCH 126/438] get not departed vehicles (#922) * added function to kernel/vehicle to get number of not departed vehiles * fixed over indentation of the docstring * indentation edit * pep8 Co-authored-by: AboudyKreidieh --- flow/core/kernel/vehicle/traci.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index b89e981be..58eddfd1c 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -220,6 +220,10 @@ def update(self, reset): self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the number of not departed vehicles + self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ + sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: try: @@ -543,6 +547,10 @@ def get_fuel_consumption(self, veh_id, error=-1001): return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons + def get_num_not_departed(self): + """See parent class.""" + return self.num_not_departed + def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): From 43eeee0193d92e10ef76c9436dac903a52060157 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 16:43:22 -0700 Subject: [PATCH 127/438] Add an on ramp option --- 
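Note: the flow/envs/base.py hunk in this patch makes collisions non-terminal; `done` now reflects only the warmup/horizon condition, and a crash merely prints a warning banner. A minimal sketch, assuming the standard gym-style rollout loop, of how a caller can still stop on collisions (check_collision is the simulation-kernel method that base.py itself uses to set `crash`):

    # hedged sketch, not part of this patch: abort a rollout on collision
    obs = env.reset()
    for _ in range(env.env_params.horizon):
        obs, reward, done, info = env.step(rl_actions=None)
        if done or env.k.simulation.check_collision():
            break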
examples/exp_configs/non_rl/i210_subnetwork.py | 2 +- flow/envs/base.py | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 25565bb49..194da1099 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -117,7 +117,7 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), diff --git a/flow/envs/base.py b/flow/envs/base.py index baf8270b5..f2067d947 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -405,8 +405,18 @@ def step(self, rl_actions): # test if the environment should terminate due to a collision or the # time horizon being met done = (self.time_counter >= self.env_params.sims_per_step * - (self.env_params.warmup_steps + self.env_params.horizon) - or crash) + (self.env_params.warmup_steps + self.env_params.horizon)) + if crash: + print( + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************\n" + "WARNING: There was a crash. \n" + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************" + ) + # compute the info for each agent infos = {} From 8eed7e16ef8793914761a48cc6c0af30756b89d0 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 12:10:07 -0700 Subject: [PATCH 128/438] Upgrade the network to not have keepclear value on the junctions --- .../exp_configs/templates/sumo/test2.net.xml | 78 ++++++++++++++----- 1 file changed, 57 insertions(+), 21 deletions(-) diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 00e3edcd5..16170b917 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,5 +1,41 @@ + + @@ -4680,24 +4716,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4801,10 +4837,10 @@ - + - - + + From c373e94388e8fa4399a95e377c1ba95bbdb282c3 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 6 Apr 2020 15:28:57 -0700 Subject: [PATCH 129/438] datapip pipeline implemented --- examples/data_pipeline.py | 179 ++++++++++++++++++++++++++++ examples/query.py | 8 ++ examples/run_query.py | 34 ++++++ examples/simulate.py | 10 +- flow/controllers/base_controller.py | 12 ++ flow/core/experiment.py | 27 ++++- flow/core/kernel/vehicle/base.py | 16 +++ flow/core/kernel/vehicle/traci.py | 15 +++ 8 files changed, 299 insertions(+), 2 deletions(-) create mode 100644 examples/data_pipeline.py create mode 100644 examples/query.py create mode 100644 examples/run_query.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py new file mode 100644 index 000000000..5fdc30cf2 --- /dev/null +++ b/examples/data_pipeline.py @@ -0,0 +1,179 @@ +import pandas as pd +import boto3 +from botocore.exceptions import ClientError +from examples.query import QueryStrings +from time import time + + +def generate_trajectory_table(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based on standard SUMO emission + + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, 
collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ + raw_output = pd.read_csv(data_path, index_col=["time", "id"]) + required_cols = {"time", "id", "speed", "x", "y"} + raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) + + extra_info = pd.DataFrame.from_dict(extra_info) + extra_info.set_index(["time", "id"]) + raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) + + # add the partition column + raw_output['partition'] = partition_name + + output_file_path = data_path[:-4]+"_trajectory.csv" + raw_output.to_csv(output_file_path, index=False) + return output_file_path + + +def upload_to_s3(bucket_name, bucket_key, file_path): + """ upload a file to S3 bucket + + Parameters + ---------- + bucket_name : str + the bucket to upload to + bucket_key: str + the key within the bucket for the file + file_path: str + the path of the file to be uploaded + """ + s3 = boto3.resource("s3") + s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + return + + +class AthenaQuery: + + def __init__(self): + self.MAX_WAIT = 60 + self.client = boto3.client("athena") + self.existing_partitions = self.get_existing_partitions() + + def get_existing_partitions(self): + """prints the existing partitions in the S3 bucket""" + + response = self.client.start_query_execution( + QueryString='SHOW PARTITIONS trajectory_table', + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise RuntimeError("get current partitions timed out") + response = self.client.get_query_results( + QueryExecutionId=response['QueryExecutionId'], + MaxResults=1000 + ) + return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] + + def check_status(self, execution_id): + """ Return the status of the execution with given id + + Parameters + ---------- + execution_id : string + id of the execution that is checked for + Returns + ------- + status: str + QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED + """ + + response = self.client.get_query_execution( + QueryExecutionId=execution_id + ) + return response['QueryExecution']['Status']['State'] + + def wait_for_execution(self, execution_id): + """ wait for the execution to finish or time-out + + Parameters + ---------- + execution_id : str + id of the execution this is watiing for + Returns + ------- + time_out: bool + True if time-out, False if success + Raises + ------ + RuntimeError: if execution failed or get canceled + """ + start = time() + while time() - start < self.MAX_WAIT: + state = self.check_status(execution_id) + if state == 'FAILED' or state == 'CANCELLED': + raise RuntimeError("update partition failed") + elif state == 'SUCCEEDED': + return False + return True + + def update_partition(self, partition): + """ load the given partition to the trajectory_table on Athena + + Parameters + ---------- + partition : str + the new partition that needs to be loaded + """ + response = self.client.start_query_execution( + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise RuntimeError("update partition timed out") + self.existing_partitions.append(partition) + return + + def 
run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): + """ Start the execution of a query; does not wait for it to finish + + Parameters + ---------- + query_name : str + name of the query in the QueryStrings enum that will be run + result_location: str, optional + location on the S3 bucket where the result will be stored + partition: str, optional + name of the partition to run this query on + Returns + ------- + execution_id: str + the execution id of the execution started by this method + Raises + ------ + ValueError: if the query does not exist in the QueryStrings enum + """ + if query_name not in QueryStrings.__members__: + raise ValueError("query does not exist: please add it to query.py") + + if partition not in self.existing_partitions: + self.update_partition(partition) + + response = self.client.start_query_execution( + QueryString=QueryStrings[query_name].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + ResultConfiguration={ + 'OutputLocation': result_location, + }, + WorkGroup='primary' + ) + return response['QueryExecutionId'] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py new file mode 100644 index 000000000..3fbbe69e1 --- /dev/null +++ b/examples/query.py @@ -0,0 +1,8 @@ +from enum import Enum + +tags = {} + + +class QueryStrings(Enum): + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" + UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py new file mode 100644 index 000000000..7b4a5af7d --- /dev/null +++ b/examples/run_query.py @@ -0,0 +1,34 @@ +import argparse +import sys +from examples.data_pipeline import AthenaQuery +from examples.query import QueryStrings + +parser = argparse.ArgumentParser(prog="run_query", description="runs a query on AWS Athena and stores the result in " + "an S3 location") +parser.add_argument("--run", type=str, nargs="+") +parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") +parser.add_argument("--partition", type=str, nargs='?', default="default") +parser.add_argument("--list_partitions", action="store_true") +parser.add_argument("--check_status", type=str, nargs='+') +parser.add_argument("--list_queries", action="store_true") + + +if __name__ == "__main__": + args = parser.parse_args() + queryEngine = AthenaQuery() + + if args.run: + execution_ids = [] + for query_name in args.run: + execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) + print(execution_ids) + if args.list_partitions: + print(queryEngine.existing_partitions) + if args.check_status: + status = dict() + for execution_id in args.check_status: + status[execution_id] = queryEngine.check_status(execution_id) + print(status) + if args.list_queries: + for q in QueryStrings: + print(q)
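A minimal usage sketch of the query utilities added above (the query name, partition, and default result location are the ones defined in examples/data_pipeline.py and examples/query.py):

    # hedged sketch: run a registered query by hand and poll its status
    from examples.data_pipeline import AthenaQuery

    engine = AthenaQuery()
    execution_id = engine.run_query("SAMPLE", partition="default")
    print(engine.check_status(execution_id))  # QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED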
diff --git a/examples/simulate.py b/examples/simulate.py index 848f030a4..f54bb38d9 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -48,6 +48,12 @@ def parse_args(args): action='store_true', help='Specifies whether to generate an emission file from the ' 'simulation.') + parser.add_argument( + '--to_aws', + type=str, nargs='?', default=None, const="default", + help='Specifies the name of the S3 partition in which to store the ' 'output file. Passing a non-None value for this argument ' 'automatically sets gen_emission to True.') return parser.parse_known_args(args)[0] @@ -55,6 +61,8 @@ if __name__ == "__main__": flags = parse_args(sys.argv[1:]) + flags.gen_emission = flags.gen_emission or flags.to_aws + # Get the flow_params object. module = __import__("exp_configs.non_rl", fromlist=[flags.exp_config]) flow_params = getattr(module, flags.exp_config).flow_params @@ -83,4 +91,4 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. - exp.run(flags.num_runs, convert_to_csv=flags.gen_emission) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 4004b1c4d..6e6734764 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -88,6 +88,9 @@ def get_action(self, env): float the modified form of the acceleration """ + # clear the currently stored accel_without_noise of this vehicle by setting it to None + env.k.vehicle.update_accel_without_noise(self.veh_id, None) + # this is to avoid abrupt decelerations when a vehicle has just entered # a network and its data is still not subscribed if len(env.k.vehicle.get_edge(self.veh_id)) == 0: @@ -105,6 +108,15 @@ def get_action(self, env): if accel is None: return None + # store the acceleration without noise for this vehicle, + # running the fail safe first if one is requested + accel_without_noise = accel + if self.fail_safe == 'instantaneous': + accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) + elif self.fail_safe == 'safe_velocity': + accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) + # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index a0497b595..1f0cce355 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,6 +1,7 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env +from examples.data_pipeline import generate_trajectory_table, upload_to_s3 import datetime import logging import time @@ -85,7 +86,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, convert_to_csv=False): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters @@ -98,6 +99,10 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False): convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file + partition_name : str + Specifies the S3 partition in which to store the output file; + it will be used later for queries. If None, the output will not + be uploaded to S3.
Returns ------- @@ -136,6 +141,8 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] + extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], + "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} for i in range(num_runs): ret = 0 @@ -153,6 +160,18 @@ def rl_actions(*_): vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) @@ -195,4 +214,10 @@ def rl_actions(*_): # Delete the .xml version of the emission file. os.remove(emission_path) + output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + output_file.split('/')[-1], output_file) + return info_dict diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 706504027..0c992503c 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -684,3 +684,19 @@ def get_max_speed(self, veh_id, error): float """ raise NotImplementedError + + ########################################################################### + # Methods for the data pipeline # + ########################################################################### + + def get_accel(self, veh_id): + """See traci class.""" + raise NotImplementedError + + def update_accel_without_noise(self, veh_id, accel_without_noise): + """See traci class.""" + raise NotImplementedError + + def get_accel_without_noise(self, veh_id): + """See traci class.""" + raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 58eddfd1c..b06ab112b 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,6 +113,7 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] + self.__vehicles[veh_id]["accel_without_noise"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1128,3 +1129,17 @@ def get_max_speed(self, veh_id, error=-1001): def set_max_speed(self, veh_id, max_speed): """See parent class.""" self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) + + # added for the data pipeline + def get_accel(self, veh_id): + return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + + def update_accel_without_noise(self, veh_id, accel_without_noise): +
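"""Store the latest acceleration this vehicle computed before noise was applied; None clears the stored value.""" +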
self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + + def get_accel_without_noise(self, veh_id): + return self.__vehicles[veh_id]["accel_without_noise"] + + def get_road_grade(self, veh_id): + # TODO + return 0 From a88c209f5fa6eb057c978c6583ab040cd11a8aa0 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 15:56:53 -0700 Subject: [PATCH 130/438] get up to date with i210_dev --- .../exp_configs/non_rl/i210_subnetwork.py | 2 +- .../exp_configs/templates/sumo/test2.net.xml | 78 +++++-------------- 2 files changed, 22 insertions(+), 58 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 194da1099..25565bb49 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -117,7 +117,7 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=False, + color_by_speed=True, use_ballistic=True ), diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 16170b917..00e3edcd5 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,41 +1,5 @@ - - @@ -4716,24 +4680,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4837,10 +4801,10 @@ - + - - + + From 89f8d1d504a4e4c98bc564967c1490f0718774cd Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:45:08 -0700 Subject: [PATCH 131/438] remove dupe imports --- examples/train.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/examples/train.py b/examples/train.py index e34b2935c..7cb84d361 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,8 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv - from stable_baselines import PPO2 if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() @@ -175,12 +173,7 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ - from ray import tune from ray.tune.registry import register_env - try: - from ray.rllib.agents.agent import get_agent_class - except ImportError: - from ray.rllib.agents.registry import get_agent_class horizon = flow_params['env'].horizon @@ -263,8 +256,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - import ray - from ray.tune import run_experiments flow_params = submodule.flow_params flow_params['sim'].render = flags.render @@ -413,8 +404,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - from stable_baselines.common.vec_env import DummyVecEnv - from stable_baselines import PPO2 flow_params = submodule.flow_params # Path to the saved files From 306a01fe55f3e756931098e306d03872602b88b2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:51:14 -0700 Subject: [PATCH 132/438] remove blank lines after docstrings --- examples/train.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/train.py b/examples/train.py index 7cb84d361..5a9ab5903 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,7 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - if num_cpus == 1: constructor = env_constructor(params=flow_params, 
version=0)() # The algorithms require a vectorized environment to run @@ -256,7 +255,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) @@ -404,7 +402,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] From 0d5fa6bda67aca96014b8be335cde547b47d7f7b Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:59:00 -0700 Subject: [PATCH 133/438] add back ray import --- examples/train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/train.py b/examples/train.py index 5a9ab5903..50720b756 100644 --- a/examples/train.py +++ b/examples/train.py @@ -255,6 +255,8 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" + import ray + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From 0ade197b74f7ec0a5a4890e419d605ff3933f824 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 21:04:56 -0700 Subject: [PATCH 134/438] remove whitespace --- examples/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index 50720b756..1689d846f 100644 --- a/examples/train.py +++ b/examples/train.py @@ -256,7 +256,7 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" import ray - + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From 1111e9aa34a4ce46058ec282255c43d03b117123 Mon Sep 17 00:00:00 2001 From: chendiw <31671291+chendiw@users.noreply.github.com> Date: Tue, 21 Apr 2020 15:14:31 -0700 Subject: [PATCH 135/438] moved imports under functions in train.py (#903) * deleting unworking params from SumoChangeLaneParams * deleted unworking params, sublane working in highway : * moved imports inside functions * Apply suggestions from code review * bug fixes * bug fix Co-authored-by: Aboudy Kreidieh --- examples/train.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/examples/train.py b/examples/train.py index 1689d846f..d9e7dde07 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,6 +124,9 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ + from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv + from stable_baselines import PPO2 + if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -172,7 +175,12 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ + from ray import tune from ray.tune.registry import register_env + try: + from ray.rllib.agents.agent import get_agent_class + except ImportError: + from ray.rllib.agents.registry import get_agent_class horizon = flow_params['env'].horizon @@ -404,6 +412,9 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" + from stable_baselines.common.vec_env import 
DummyVecEnv + from stable_baselines import PPO2 + flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] From a4c7d67758bd4187f176e1b5f1f63bc12a10af81 Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Thu, 7 May 2020 23:51:53 -0700 Subject: [PATCH 136/438] get not departed vehicles (#922) * added function to kernel/vehicle to get number of not departed vehiles * fixed over indentation of the docstring * indentation edit * pep8 Co-authored-by: AboudyKreidieh --- flow/core/kernel/vehicle/traci.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index b06ab112b..1c814b7b4 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -225,6 +225,10 @@ def update(self, reset): self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the number of not departed vehicles + self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ + sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: try: @@ -552,6 +556,10 @@ def get_num_not_departed(self): """See parent class.""" return self.num_not_departed + def get_num_not_departed(self): + """See parent class.""" + return self.num_not_departed + def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): From 36e8851f7f7ae71a25b2d5ca5a927396b9e1e41a Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Sat, 9 May 2020 15:31:44 -0700 Subject: [PATCH 137/438] changed _departed_ids, and _arrived_ids in the update function (#926) * changed _departed_ids, and _arrived_ids in the update function * fixed bug in get_departed_ids and get_arrived_ids --- flow/core/kernel/vehicle/traci.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 1c814b7b4..bdf94579a 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -552,14 +552,6 @@ def get_fuel_consumption(self, veh_id, error=-1001): return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons - def get_num_not_departed(self): - """See parent class.""" - return self.num_not_departed - - def get_num_not_departed(self): - """See parent class.""" - return self.num_not_departed - def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): From ebb29215ad82c0b2a6b89625ea1b899b5587420a Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 16:43:22 -0700 Subject: [PATCH 138/438] Add an on ramp option --- .../exp_configs/non_rl/i210_subnetwork.py | 64 ++++++++++++++++++- 1 file changed, 63 insertions(+), 1 deletion(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 25565bb49..474d7335e 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -2,8 +2,13 @@ import os import numpy as np +<<<<<<< HEAD from flow.controllers import IDMController from flow.controllers import I210Router +======= +from flow.controllers.car_following_models import IDMController +from 
flow.controllers.routing_controllers import I210Router +>>>>>>> Add an on ramp option from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -15,6 +20,7 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION +<<<<<<< HEAD # =========================================================================== # # Specify some configurable constants. # # =========================================================================== # @@ -72,6 +78,37 @@ }), routing_controller=(I210Router, {}) if ON_RAMP else None, ) +======= +ON_RAMP = True + +if ON_RAMP: + vehicles = VehicleParams() + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + routing_controller=(I210Router, {}) + ) + +else: + # create the base vehicle type that will be used for inflows + vehicles = VehicleParams() + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + ) +>>>>>>> Add an on ramp option inflow = InFlows() # main highway @@ -86,6 +123,7 @@ inflow.add( veh_type="human", edge="27414345", +<<<<<<< HEAD vehs_per_hour=500, departLane="random", departSpeed=10) @@ -99,6 +137,21 @@ # =========================================================================== # # Generate the flow_params dict with all relevant simulation information. # # =========================================================================== # +======= + vehs_per_hour=321, + departLane="random", + departSpeed=20) + inflow.add( + veh_type="human", + edge="27414342#0", + vehs_per_hour=421, + departLane="random", + departSpeed=20) + +NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/test2.net.xml") +>>>>>>> Add an on ramp option flow_params = dict( # name of the experiment @@ -117,24 +170,33 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( +<<<<<<< HEAD horizon=10000, +======= + horizon=7200, +>>>>>>> Add an on ramp option ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, +<<<<<<< HEAD template=net_template, additional_params={ "on_ramp": ON_RAMP, "ghost_edge": WANT_GHOST_CELL, } +======= + template=NET_TEMPLATE, + additional_params={"use_on_ramp": ON_RAMP} +>>>>>>> Add an on ramp option ), # vehicles to be placed in the network at the start of a rollout (see From e4c02bb1f5513e905f2ea0c5e635d3946fe4d38a Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 11:32:12 -0700 Subject: [PATCH 139/438] Increased inflows to 10800 to match density in Bennis ring --- .../exp_configs/non_rl/i210_subnetwork.py | 66 +------------------ 1 file changed, 2 insertions(+), 64 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 474d7335e..3704a7a1c 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -2,13 +2,8 @@ import os import numpy as np -<<<<<<< HEAD from flow.controllers import IDMController from 
flow.controllers import I210Router -======= -from flow.controllers.car_following_models import IDMController -from flow.controllers.routing_controllers import I210Router ->>>>>>> Add an on ramp option from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -20,7 +15,6 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -<<<<<<< HEAD # =========================================================================== # # Specify some configurable constants. # # =========================================================================== # @@ -78,37 +72,6 @@ }), routing_controller=(I210Router, {}) if ON_RAMP else None, ) -======= -ON_RAMP = True - -if ON_RAMP: - vehicles = VehicleParams() - vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 - }), - routing_controller=(I210Router, {}) - ) - -else: - # create the base vehicle type that will be used for inflows - vehicles = VehicleParams() - vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 - }), - ) ->>>>>>> Add an on ramp option inflow = InFlows() # main highway @@ -123,7 +86,6 @@ inflow.add( veh_type="human", edge="27414345", -<<<<<<< HEAD vehs_per_hour=500, departLane="random", departSpeed=10) @@ -137,21 +99,6 @@ # =========================================================================== # # Generate the flow_params dict with all relevant simulation information. # # =========================================================================== # -======= - vehs_per_hour=321, - departLane="random", - departSpeed=20) - inflow.add( - veh_type="human", - edge="27414342#0", - vehs_per_hour=421, - departLane="random", - departSpeed=20) - -NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") ->>>>>>> Add an on ramp option flow_params = dict( # name of the experiment @@ -170,33 +117,24 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=False, + color_by_speed=True, use_ballistic=True ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( -<<<<<<< HEAD horizon=10000, -======= - horizon=7200, ->>>>>>> Add an on ramp option ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, -<<<<<<< HEAD template=net_template, additional_params={ "on_ramp": ON_RAMP, "ghost_edge": WANT_GHOST_CELL, } -======= - template=NET_TEMPLATE, - additional_params={"use_on_ramp": ON_RAMP} ->>>>>>> Add an on ramp option ), # vehicles to be placed in the network at the start of a rollout (see @@ -225,4 +163,4 @@ "avg_density": lambda env: 5 * 1000 * len(env.k.vehicle.get_ids_by_edge( edge_id)) / (env.k.network.edge_length(edge_id) * env.k.network.num_lanes(edge_id)), -} +} \ No newline at end of file From 505d646beb9814daaa527f417740f8309a9f1c85 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 12:10:07 -0700 Subject: [PATCH 140/438] Upgrade the network to not have keepclear value on the junctions --- .../exp_configs/templates/sumo/test2.net.xml | 78 ++++++++++++++----- 1 file changed, 57 insertions(+), 21 deletions(-) diff --git 
a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 00e3edcd5..16170b917 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,5 +1,41 @@ + + @@ -4680,24 +4716,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4801,10 +4837,10 @@ - + - - + + From 7d52445fdaa2f6ef358bad6cd58f6b26775a4f36 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Tue, 24 Mar 2020 22:49:17 -0700 Subject: [PATCH 141/438] Add 1 lane highway network for Benni --- examples/exp_configs/non_rl/highway.py | 40 +++++++++----------------- flow/networks/highway.py | 2 +- 2 files changed, 15 insertions(+), 27 deletions(-) diff --git a/examples/exp_configs/non_rl/highway.py b/examples/exp_configs/non_rl/highway.py index e7505f2d7..1905e2f7f 100644 --- a/examples/exp_configs/non_rl/highway.py +++ b/examples/exp_configs/non_rl/highway.py @@ -5,25 +5,19 @@ from flow.core.params import VehicleParams, InFlows from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS -from flow.envs import LaneChangeAccelEnv +from flow.envs import TestEnv vehicles = VehicleParams() vehicles.add( - veh_id="human", - acceleration_controller=(IDMController, {}), - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), - num_vehicles=20) -vehicles.add( - veh_id="human2", - acceleration_controller=(IDMController, {}), - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), - num_vehicles=20) + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + ) env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) @@ -31,13 +25,7 @@ inflow.add( veh_type="human", edge="highway_0", - probability=0.25, - departLane="free", - departSpeed=20) -inflow.add( - veh_type="human2", - edge="highway_0", - probability=0.25, + vehs_per_hour=10800 / 5.0, departLane="free", departSpeed=20) @@ -47,7 +35,7 @@ exp_tag='highway', # name of the flow environment the experiment is running on - env_name=LaneChangeAccelEnv, + env_name=TestEnv, # name of the network class the experiment is running on network=HighwayNetwork, @@ -58,12 +46,12 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( render=True, - lateral_resolution=1.0, + sim_step=0.5 ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=1500, + horizon=4000, additional_params=ADDITIONAL_ENV_PARAMS.copy(), ), diff --git a/flow/networks/highway.py b/flow/networks/highway.py index 6f10d3279..871e7f415 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -9,7 +9,7 @@ # length of the highway "length": 1000, # number of lanes - "lanes": 4, + "lanes": 1, # speed limit for all edges "speed_limit": 30, # number of edges to divide the highway into From c3b2a51aa3fcf2c60c0678e7e3c385febf11d867 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 10 Apr 2020 19:54:30 -0700 Subject: [PATCH 142/438] multiple runs issue solved, testing added --- examples/data_pipeline.py | 55 +++++++- examples/datapipeline_test.py | 33 +++++ examples/query.py | 13 +- examples/run_query.py | 6 +- flow/core/experiment.py | 224 +----------------------------- flow/core/kernel/vehicle/base.py | 4 + flow/core/kernel/vehicle/traci.py | 3 + 7 files 
changed, 107 insertions(+), 231 deletions(-)
 create mode 100644 examples/datapipeline_test.py

diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py
index 5fdc30cf2..9d56548c2 100644
--- a/examples/data_pipeline.py
+++ b/examples/data_pipeline.py
@@ -1,7 +1,8 @@
 import pandas as pd
+import numpy as np
 import boto3
 from botocore.exceptions import ClientError
-from examples.query import QueryStrings
+from examples.query import QueryStrings, testing_functions
 from time import time
 
 
@@ -30,13 +31,22 @@ def generate_trajectory_table(data_path, extra_info, partition_name):
     raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"])
 
     # add the partition column
-    raw_output['partition'] = partition_name
-
+    # raw_output['partition'] = partition_name
+    raw_output = raw_output.sort_values(by=["time", "id"])
     output_file_path = data_path[:-4]+"_trajectory.csv"
     raw_output.to_csv(output_file_path, index=False)
     return output_file_path
 
 
+def generate_trajectory_from_flow(data_path, extra_info, partition_name):
+    extra_info = pd.DataFrame.from_dict(extra_info)
+    # extra_info["partition"] = partition_name
+    extra_info.to_csv(data_path, index=False)
+    upload_only_file_path = data_path[:-4] + "_upload" + ".csv"
+    extra_info.to_csv(upload_only_file_path, index=False, header=False)
+    return upload_only_file_path
+
+
 def upload_to_s3(bucket_name, bucket_key, file_path):
     """ upload a file to S3 bucket
 
@@ -176,4 +186,41 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re
             },
             WorkGroup='primary'
         )
-        return response['QueryExecutionId']
\ No newline at end of file
+        return response['QueryExecutionId']
+
+###########################################################################
+#                 Helpers for testing the SQL Queries                     #
+###########################################################################
+
+
+def test_sql_query(query_name):
+    if query_name not in testing_functions:
+        raise ValueError("no tests supported for this query")
+
+    # Run the respective sql query
+    queryEngine = AthenaQuery()
+    execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test",
+                                         partition="test")
+    if queryEngine.wait_for_execution(execution_id):
+        raise RuntimeError("execution timed out")
+
+    # get the Athena query result from S3
+    s3 = boto3.resource("s3")
+    s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv",
+                                                 "data/athena_result.csv")
+    athena_result = pd.read_csv("data/athena_result.csv")
+    athena_result = athena_result.sort_values(by=["time", "id"])
+
+    # get the python expected result
+    expected_result = pd.read_csv("data/test_data.csv")
+    expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand")
+    expected_result.columns = ["time", "id", "power"]
+    expected_result = expected_result.sort_values(by=["time", "id"])
+
+    difference = athena_result["power"] - expected_result["power"]
+    print("average difference is: " + str(np.mean(difference)))
+    print("std of difference is: " + str(np.std(difference)))
+    print("average ratio of difference to expected is: " +
+          str(np.mean(np.divide(difference, expected_result["power"]))))
+    difference = pd.DataFrame(difference)
+    difference.to_csv("./difference.csv")
diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py
new file mode 100644
index 000000000..564060d3b
--- /dev/null
+++ b/examples/datapipeline_test.py
@@ -0,0 +1,33 @@
+import math
+
+# Vehicle Mass
+M = 1200
+# Gravity
+g = 9.81
+# Density of Air
+ro_air = 1.225
+# Rolling resistance coefficient
+C_r = .005
+# Aerodynamic drag coefficient
+C_a = 0.3
+# Vehicle Cross sectional Area
+A = 2.6
+# Road grade
+theta = 0
+
+
+def heavyside(inp):
+    return 0 if inp <= 0 else 1
+
+
+def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a):
+    acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration)
+    accel_and_slope = M * mu * (acceleration + g * math.sin(theta))
+    rolling_friction = M * g * C_r * mu
+    air_drag = .5 * ro_air * A * C_a * mu**3
+    power = accel_and_slope + rolling_friction + air_drag
+    return power
+
+
+def apply_energy_one(row):
+    return [row[0], row[1], calculate_power(row[4], row[6])]
\ No newline at end of file
diff --git a/examples/query.py b/examples/query.py
index 3fbbe69e1..6354cec3b 100644
--- a/examples/query.py
+++ b/examples/query.py
@@ -1,8 +1,17 @@
 from enum import Enum
+from examples.datapipeline_test import apply_energy_one
 
-tags = {}
+tags = {"energy": ["ENERGY_ONE"]}
+
+testing_functions = {"ENERGY_ONE": apply_energy_one}
 
 
 class QueryStrings(Enum):
     SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;"
-    UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');"
\ No newline at end of file
+    UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');"
+    ENERGY_ONE = "SELECT id, time, 1200 * speed * " \
+                 "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \
+                 "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \
+                 "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \
+                 "FROM trajectory_table " \
+                 "WHERE partition_name=\'{partition}\'"
\ No newline at end of file
diff --git a/examples/run_query.py b/examples/run_query.py
index 7b4a5af7d..ea8839b09 100644
--- a/examples/run_query.py
+++ b/examples/run_query.py
@@ -1,6 +1,5 @@
 import argparse
-import sys
-from examples.data_pipeline import AthenaQuery
+from examples.data_pipeline import AthenaQuery, test_sql_query
 from examples.query import QueryStrings
 
 parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to"
@@ -11,6 +10,7 @@
 parser.add_argument("--list_partitions", action="store_true")
 parser.add_argument("--check_status", type=str, nargs='+')
 parser.add_argument("--list_queries", action="store_true")
+parser.add_argument("--test_query", nargs=1)
 
 
 if __name__ == "__main__":
@@ -32,3 +32,5 @@
     if args.list_queries:
         for q in QueryStrings:
             print(q)
+    if args.test_query:
+        test_sql_query(args.test_query[0])
\ No newline at end of file
diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index 1f0cce355..63c92e798 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -1,223 +1 @@
-"""Contains an experiment class for running simulations."""
-from flow.core.util import emission_to_csv
-from flow.utils.registry import make_create_env
-from examples.data_pipeline import generate_trajectory_table, upload_to_s3
-import datetime
-import logging
-import time
-import os
-import numpy as np
-
-
-class Experiment:
-    """
-    Class for systematically running simulations in any supported simulator.
-
-    This class acts as a runner for a network and environment.
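[Editorial aside, not part of the patch: the heavyside helper above is a (misspelled) Heaviside step function, and calculate_power is meant to compute the same quantity as the ENERGY_ONE SQL string in query.py above. A minimal consistency sketch, with hypothetical sample numbers:]

# Sketch: the ENERGY_ONE SQL expression, transcribed literally, equals
# calculate_power(mu=speed, acceleration) when theta == road_grade == 0.
import math

def sql_energy_one(speed, acceleration, road_grade=0.0):
    case = 1 if acceleration > 0 else 0  # CASE WHEN acceleration > 0 THEN 1 ELSE 0 END
    return (1200 * speed * ((case * (1 - 0.8) * acceleration) + 0.8
                            + 9.81 * math.sin(road_grade))
            + 1200 * 9.81 * 0.005 * speed
            + 0.5 * 1.225 * 2.6 * 0.3 * speed ** 3)

# calculate_power(20.0, 1.5) reduces to the same closed form:
# 1200*20*(0.8 + 0.2*1.5) + 1200*9.81*0.005*20 + 0.5*1.225*2.6*0.3*20**3 = 31399.2
assert abs(sql_energy_one(20.0, 1.5) - 31399.2) < 1e-6

[So in test_sql_query above, the Athena result and the local pandas result should differ only by floating-point noise.]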
In order to use - it to run an network and environment in the absence of a method specifying - the actions of RL agents in the network, type the following: - - >>> from flow.envs import Env - >>> flow_params = dict(...) # see the examples in exp_config - >>> exp = Experiment(flow_params) # for some experiment configuration - >>> exp.run(num_runs=1) - - If you wish to specify the actions of RL agents in the network, this may be - done as follows: - - >>> rl_actions = lambda state: 0 # replace with something appropriate - >>> exp.run(num_runs=1, rl_actions=rl_actions) - - Finally, if you would like to like to plot and visualize your results, this - class can generate csv files from emission files produced by sumo. These - files will contain the speeds, positions, edges, etc... of every vehicle - in the network at every time step. - - In order to ensure that the simulator constructs an emission file, set the - ``emission_path`` attribute in ``SimParams`` to some path. - - >>> from flow.core.params import SimParams - >>> flow_params['sim'] = SimParams(emission_path="./data") - - Once you have included this in your environment, run your Experiment object - as follows: - - >>> exp.run(num_runs=1, convert_to_csv=True) - - After the experiment is complete, look at the "./data" directory. There - will be two files, one with the suffix .xml and another with the suffix - .csv. The latter should be easily interpretable from any csv reader (e.g. - Excel), and can be parsed using tools such as numpy and pandas. - - Attributes - ---------- - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we want - to extract from the environment. The lambda will be called at each step - to extract information from the env and it will be stored in a dict - keyed by the str. - env : flow.envs.Env - the environment object the simulator will run - """ - - def __init__(self, flow_params, custom_callables=None): - """Instantiate the Experiment class. - - Parameters - ---------- - flow_params : dict - flow-specific parameters - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we - want to extract from the environment. The lambda will be called at - each step to extract information from the env and it will be stored - in a dict keyed by the str. - """ - self.custom_callables = custom_callables or {} - - # Get the env name and a creator for the environment. - create_env, _ = make_create_env(flow_params) - - # Create the environment. - self.env = create_env() - - logging.info(" Starting experiment {} at {}".format( - self.env.network.name, str(datetime.datetime.utcnow()))) - - logging.info("Initializing environment.") - - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): - """Run the given network for a set number of runs. - - Parameters - ---------- - num_runs : int - number of runs the experiment should perform - rl_actions : method, optional - maps states to actions to be performed by the RL agents (if - there are any) - convert_to_csv : bool - Specifies whether to convert the emission file created by sumo - into a csv file - partition_name: str - Specifies the S3 partition you want to store the output file, - will be used to later for query. If NONE, won't upload output - to S3. 
- - Returns - ------- - info_dict : dict < str, Any > - contains returns, average speed per step - """ - num_steps = self.env.env_params.horizon - - # raise an error if convert_to_csv is set to True but no emission - # file will be generated, to avoid getting an error at the end of the - # simulation - if convert_to_csv and self.env.sim_params.emission_path is None: - raise ValueError( - 'The experiment was run with convert_to_csv set ' - 'to True, but no emission file will be generated. If you wish ' - 'to generate an emission file, you should set the parameter ' - 'emission_path in the simulation parameters (SumoParams or ' - 'AimsunParams) to the path of the folder where emissions ' - 'output should be generated. If you do not wish to generate ' - 'emissions, set the convert_to_csv parameter to False.') - - # used to store - info_dict = { - "returns": [], - "velocities": [], - "outflows": [], - } - info_dict.update({ - key: [] for key in self.custom_callables.keys() - }) - - if rl_actions is None: - def rl_actions(*_): - return None - - # time profiling information - t = time.time() - times = [] - extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], - "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} - - for i in range(num_runs): - ret = 0 - vel = [] - custom_vals = {key: [] for key in self.custom_callables.keys()} - state = self.env.reset() - for j in range(num_steps): - t0 = time.time() - state, reward, done, _ = self.env.step(rl_actions(state)) - t1 = time.time() - times.append(1 / (t1 - t0)) - - # Compute the velocity speeds and cumulative returns. - veh_ids = self.env.k.vehicle.get_ids() - vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) - ret += reward - - # collect additional information for the data pipeline - for vid in veh_ids: - extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) - extra_info["id"].append(vid) - extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) - extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) - extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) - extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) - extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) - extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) - extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - - # Compute the results for the custom callables. - for (key, lambda_func) in self.custom_callables.items(): - custom_vals[key].append(lambda_func(self.env)) - - if type(done) is dict and done['__all__'] or type(done) is not dict and done: - break - - # Store the information from the run in info_dict. - outflow = self.env.k.vehicle.get_outflow_rate(int(500)) - info_dict["returns"].append(ret) - info_dict["velocities"].append(np.mean(vel)) - info_dict["outflows"].append(outflow) - for key in custom_vals.keys(): - info_dict[key].append(np.mean(custom_vals[key])) - - print("Round {0}, return: {1}".format(i, ret)) - - # Print the averages/std for all variables in the info_dict. 
- for key in info_dict.keys(): - print("Average, std {}: {}, {}".format( - key, np.mean(info_dict[key]), np.std(info_dict[key]))) - - print("Total time:", time.time() - t) - print("steps/second:", np.mean(times)) - self.env.terminate() - - if convert_to_csv and self.env.simulator == "traci": - # wait a short period of time to ensure the xml file is readable - time.sleep(0.1) - - # collect the location of the emission file - dir_path = self.env.sim_params.emission_path - emission_filename = \ - "{0}-emission.xml".format(self.env.network.name) - emission_path = os.path.join(dir_path, emission_filename) - - # convert the emission file into a csv - emission_to_csv(emission_path) - - # Delete the .xml version of the emission file. - os.remove(emission_path) - - output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) - - if partition_name: - upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + output_file.split('/')[-1], output_file) - - return info_dict +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. 
Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = self.env.network.name for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. 
veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = emission_path[:-4] + "_trajectory.csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1], upload_file_path) return info_dict
\ No newline at end of file
diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py
index 0c992503c..3c285697f 100644
--- a/flow/core/kernel/vehicle/base.py
+++ b/flow/core/kernel/vehicle/base.py
@@ -697,6 +697,10 @@ def update_accel_without_noise(self, veh_id, accel_without_noise):
         """ see traci class """
         raise NotImplementedError
 
+    def get_2D_position(self, veh_id, error=-1001):
+        """ see traci class """
+        raise NotImplementedError
+
     def get_accel_without_noise(self, veh_id):
         """ see traci class """
         raise NotImplementedError
diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py
index bdf94579a..889528b36 100644
--- a/flow/core/kernel/vehicle/traci.py
+++ b/flow/core/kernel/vehicle/traci.py
@@ -1140,6 +1140,9 @@ def update_accel_without_noise(self, veh_id, accel_without_noise):
     def get_accel_without_noise(self, veh_id):
         return self.__vehicles[veh_id]["accel_without_noise"]
 
+    def get_2D_position(self, veh_id, error=-1001):
+        return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error)
+
     def get_road_grade(self, veh_id):
         # TODO
         return 0

From dc881e06442f642538320c1792dec529abad6086 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Wed, 22 Apr 2020 05:22:01 -0700
Subject: [PATCH 143/438] added more support for lambda function

---
 examples/data_pipeline.py   | 28 ++++++++++++++++++++++++++--
 examples/lambda_function.py | 26 ++++++++++++++++++++++++++
 examples/simulate.py        |  8 +++++++-
 flow/core/experiment.py     |  2 +-
 4 files changed, 60 insertions(+), 4 deletions(-)
 create mode 100644 examples/lambda_function.py

diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py
index 9d56548c2..28d3b5e73 100644
--- a/examples/data_pipeline.py
+++ b/examples/data_pipeline.py
@@ -39,6 +39,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name):
 
 
 def generate_trajectory_from_flow(data_path, extra_info, partition_name):
+    """ generate desired output for the trajectory_table based only on flow output
+
+    Parameters
+    ----------
+    data_path : str
+        output file path
+    extra_info: dict
+        extra information needed in the trajectory table, collected from flow
+    partition_name: str
+        the name of the partition to put this output to
+    Returns
+    -------
+    output_file_path: str
+        the local path of the outputted csv file that should be used for
+        upload to s3 only, it does not have the human readable column names and
+        will be deleted after uploading to s3. A copy of this file with all
+        the column names will remain in the ./data folder
+    """
     extra_info = pd.DataFrame.from_dict(extra_info)
     # extra_info["partition"] = partition_name
     extra_info.to_csv(data_path, index=False)
@@ -47,7 +65,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name):
     return upload_only_file_path
 
 
-def upload_to_s3(bucket_name, bucket_key, file_path):
+def upload_to_s3(bucket_name, bucket_key, file_path, only_query):
     """ upload a file to S3 bucket
 
     Parameters
@@ -58,9 +76,15 @@ def upload_to_s3(bucket_name, bucket_key, file_path):
         the key within the bucket for the file
     file_path: str
         the path of the file to be uploaded
+    only_query: str
+        specify which queries should be run on this file by lambda:
+        if empty: run none of them
+        if "all": run all available analysis queries
+        if a string of a list of queries: run only those mentioned in the list
     """
     s3 = boto3.resource("s3")
-    s3.Bucket(bucket_name).upload_file(file_path, bucket_key)
+    s3.Bucket(bucket_name).upload_file(file_path, bucket_key,
+                                       ExtraArgs={"Metadata": {"run-query": only_query}})
     return
diff --git a/examples/lambda_function.py b/examples/lambda_function.py
new file mode 100644
index 000000000..01ce1512a
--- /dev/null
+++ b/examples/lambda_function.py
@@ -0,0 +1,26 @@
+import boto3
+from urllib.parse import unquote_plus
+from examples.data_pipeline import AthenaQuery
+from examples.query import tags
+
+s3 = boto3.client('s3')
+queryEngine = AthenaQuery()
+
+
+def lambda_handler(event, context):
+    for record in event['Records']:
+        bucket = record['s3']['bucket']['name']
+        key = unquote_plus(record['s3']['object']['key'])
+        partition = key.split('/')[-2].split('=')[-1]
+        response = s3.head_object(Bucket=bucket, Key=key)
+        run_query = response["Metadata"]["run-query"]
+
+        if bucket == 'brent.experiments' and 'trajectory-output/' in key:
+            if run_query == "all":
+                query_list = tags["analysis"]
+            elif not run_query:
+                break
+            else:
+                query_list = run_query.split("\', \'")
+            for query_name in query_list:
+                queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition)
\ No newline at end of file
diff --git a/examples/simulate.py b/examples/simulate.py
index f54bb38d9..69e11b2fb 100644
--- a/examples/simulate.py
+++ b/examples/simulate.py
@@ -54,6 +54,12 @@ def parse_args(args):
         help='Specifies the name of the partition to store the output'
              'file on S3. Putting not None value for this argument'
              'automatically set gen_emission to True.')
+    parser.add_argument(
+        '--only_query',
+        nargs='*', default="[\'all\']",
+        help='specify which query should be run by lambda'
+             'for detail, see upload_to_s3 in data_pipeline.py'
+    )
 
     return parser.parse_known_args(args)[0]
 
@@ -91,4 +97,4 @@ def parse_args(args):
     exp = Experiment(flow_params, callables)
 
     # Run for the specified number of rollouts.
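[Editorial aside, not part of the patch: the "run-query" object metadata written by upload_to_s3() above is exactly what lambda_handler() reads back via head_object, so the uploader controls which Athena queries the Lambda fires. A minimal round-trip sketch; bucket and key names are illustrative:]

import boto3

# Upload with the metadata, as upload_to_s3() does:
s3 = boto3.resource("s3")
s3.Bucket("brent.experiments").upload_file(
    "data/example_upload.csv",
    "trajectory-output/partition_name=test/example.csv",
    ExtraArgs={"Metadata": {"run-query": "all"}})

# Inside the Lambda, the same value comes back (S3 lowercases metadata keys,
# and "run-query" is already lowercase):
head = boto3.client("s3").head_object(
    Bucket="brent.experiments",
    Key="trajectory-output/partition_name=test/example.csv")
assert head["Metadata"]["run-query"] == "all"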
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, only_query=flags.only_query) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 63c92e798..12391f9ae 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. 
self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = self.env.network.name for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. 
for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. os.remove(emission_path) trajectory_table_path = emission_path[:-4] + "_trajectory.csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1], upload_file_path) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. 
The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file From ee1188ec7b5796aeb96bc7de89c5d9bfd10168de Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 02:54:33 -0700 Subject: [PATCH 144/438] fix windoes line ending issue with experiment.py --- flow/core/experiment.py | 240 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 239 insertions(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 12391f9ae..80d607e7d 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1,239 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. 
Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. 
veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" +from flow.core.util import emission_to_csv +from flow.utils.registry import make_create_env +from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 +import datetime +import logging +import time +import os +import numpy as np +import uuid + + +class Experiment: + """ + Class for systematically running simulations in any supported simulator. + + This class acts as a runner for a network and environment. 
In order to use + it to run an network and environment in the absence of a method specifying + the actions of RL agents in the network, type the following: + + >>> from flow.envs import Env + >>> flow_params = dict(...) # see the examples in exp_config + >>> exp = Experiment(flow_params) # for some experiment configuration + >>> exp.run(num_runs=1) + + If you wish to specify the actions of RL agents in the network, this may be + done as follows: + + >>> rl_actions = lambda state: 0 # replace with something appropriate + >>> exp.run(num_runs=1, rl_actions=rl_actions) + + Finally, if you would like to like to plot and visualize your results, this + class can generate csv files from emission files produced by sumo. These + files will contain the speeds, positions, edges, etc... of every vehicle + in the network at every time step. + + In order to ensure that the simulator constructs an emission file, set the + ``emission_path`` attribute in ``SimParams`` to some path. + + >>> from flow.core.params import SimParams + >>> flow_params['sim'] = SimParams(emission_path="./data") + + Once you have included this in your environment, run your Experiment object + as follows: + + >>> exp.run(num_runs=1, convert_to_csv=True) + + After the experiment is complete, look at the "./data" directory. There + will be two files, one with the suffix .xml and another with the suffix + .csv. The latter should be easily interpretable from any csv reader (e.g. + Excel), and can be parsed using tools such as numpy and pandas. + + Attributes + ---------- + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we want + to extract from the environment. The lambda will be called at each step + to extract information from the env and it will be stored in a dict + keyed by the str. + env : flow.envs.Env + the environment object the simulator will run + """ + + def __init__(self, flow_params, custom_callables=None): + """Instantiate the Experiment class. + + Parameters + ---------- + flow_params : dict + flow-specific parameters + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we + want to extract from the environment. The lambda will be called at + each step to extract information from the env and it will be stored + in a dict keyed by the str. + """ + self.custom_callables = custom_callables or {} + + # Get the env name and a creator for the environment. + create_env, _ = make_create_env(flow_params) + + # Create the environment. + self.env = create_env() + + logging.info(" Starting experiment {} at {}".format( + self.env.network.name, str(datetime.datetime.utcnow()))) + + logging.info("Initializing environment.") + + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): + """Run the given network for a set number of runs. + + Parameters + ---------- + num_runs : int + number of runs the experiment should perform + rl_actions : method, optional + maps states to actions to be performed by the RL agents (if + there are any) + convert_to_csv : bool + Specifies whether to convert the emission file created by sumo + into a csv file + partition_name: str + Specifies the S3 partition you want to store the output file, + will be used to later for query. If NONE, won't upload output + to S3. 
+ only_query: str + Specifies whether queries should be automatically run the + simulation data when it gets uploaded to s3 + + Returns + ------- + info_dict : dict < str, Any > + contains returns, average speed per step + """ + num_steps = self.env.env_params.horizon + + # raise an error if convert_to_csv is set to True but no emission + # file will be generated, to avoid getting an error at the end of the + # simulation + if convert_to_csv and self.env.sim_params.emission_path is None: + raise ValueError( + 'The experiment was run with convert_to_csv set ' + 'to True, but no emission file will be generated. If you wish ' + 'to generate an emission file, you should set the parameter ' + 'emission_path in the simulation parameters (SumoParams or ' + 'AimsunParams) to the path of the folder where emissions ' + 'output should be generated. If you do not wish to generate ' + 'emissions, set the convert_to_csv parameter to False.') + + # used to store + info_dict = { + "returns": [], + "velocities": [], + "outflows": [], + } + info_dict.update({ + key: [] for key in self.custom_callables.keys() + }) + + if rl_actions is None: + def rl_actions(*_): + return None + + # time profiling information + t = time.time() + times = [] + extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], + "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], + "road_grade": [], "source_id": []} + source_id = uuid.uuid4().hex + + for i in range(num_runs): + ret = 0 + vel = [] + custom_vals = {key: [] for key in self.custom_callables.keys()} + state = self.env.reset() + for j in range(num_steps): + t0 = time.time() + state, reward, done, _ = self.env.step(rl_actions(state)) + t1 = time.time() + times.append(1 / (t1 - t0)) + + # Compute the velocity speeds and cumulative returns. + veh_ids = self.env.k.vehicle.get_ids() + vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) + ret += reward + + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( + self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + position = self.env.k.vehicle.get_2D_position(vid) + extra_info["x"].append(position[0]) + extra_info["y"].append(position[1]) + extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) + extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) + + # Compute the results for the custom callables. + for (key, lambda_func) in self.custom_callables.items(): + custom_vals[key].append(lambda_func(self.env)) + + if done: + break + + # Store the information from the run in info_dict. 
+ outflow = self.env.k.vehicle.get_outflow_rate(int(500)) + info_dict["returns"].append(ret) + info_dict["velocities"].append(np.mean(vel)) + info_dict["outflows"].append(outflow) + for key in custom_vals.keys(): + info_dict[key].append(np.mean(custom_vals[key])) + + print("Round {0}, return: {1}".format(i, ret)) + + # Print the averages/std for all variables in the info_dict. + for key in info_dict.keys(): + print("Average, std {}: {}, {}".format( + key, np.mean(info_dict[key]), np.std(info_dict[key]))) + + print("Total time:", time.time() - t) + print("steps/second:", np.mean(times)) + self.env.terminate() + + if convert_to_csv and self.env.simulator == "traci": + # wait a short period of time to ensure the xml file is readable + time.sleep(0.1) + + # collect the location of the emission file + dir_path = self.env.sim_params.emission_path + emission_filename = \ + "{0}-emission.xml".format(self.env.network.name) + emission_path = os.path.join(dir_path, emission_filename) + + # convert the emission file into a csv + emission_to_csv(emission_path) + + # Delete the .xml version of the emission file. + os.remove(emission_path) + + trajectory_table_path = './data/' + source_id + ".csv" + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) + + # delete the S3-only version of the trajectory file + os.remove(upload_file_path) + + return info_dict From 65c9ee061541b4e9660bf54d241a603dabf77e95 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:35:54 -0700 Subject: [PATCH 145/438] fix style issue --- examples/data_pipeline.py | 113 +++++++++++++++++++----------- examples/datapipeline_test.py | 4 ++ examples/lambda_function.py | 10 +++ examples/query.py | 11 ++- examples/run_query.py | 1 + flow/core/kernel/vehicle/base.py | 12 ++-- flow/core/kernel/vehicle/traci.py | 5 ++ 7 files changed, 110 insertions(+), 46 deletions(-) diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 28d3b5e73..03b0f87e5 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,3 +1,4 @@ +"""contains class and helper functions for the data pipeline.""" import pandas as pd import numpy as np import boto3 @@ -7,21 +8,21 @@ def generate_trajectory_table(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based on standard SUMO emission + """Generate desired output for the trajectory_table based on standard SUMO emission. 
-
-        Parameters
-        ----------
-        data_path : str
-            path to the standard SUMO emission
-        extra_info: dict
-            extra information needed in the trajectory table, collected from flow
-        partition_name: str
-            the name of the partition to put this output to
-        Returns
-        -------
-        output_file_path: str
-            the local path of the outputted csv file
-    """
+
+    Parameters
+    ----------
+    data_path : str
+        path to the standard SUMO emission
+    extra_info: dict
+        extra information needed in the trajectory table, collected from flow
+    partition_name: str
+        the name of the partition to put this output to
+    Returns
+    -------
+    output_file_path: str
+        the local path of the output csv file
+    """
     raw_output = pd.read_csv(data_path, index_col=["time", "id"])
     required_cols = {"time", "id", "speed", "x", "y"}
     raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1)
@@ -39,24 +40,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name):
 
 
 def generate_trajectory_from_flow(data_path, extra_info, partition_name):
-    """ generate desired output for the trajectory_table based only on flow output
-
-        Parameters
-        ----------
-        data_path : str
-            output file path
-        extra_info: dict
-            extra information needed in the trajectory table, collected from flow
-        partition_name: str
-            the name of the partition to put this output to
-        Returns
-        -------
-        output_file_path: str
-            the local path of the outputted csv file that should be used for
-            upload to s3 only, it does not the human readable column names and
-            will be deleted after uploading to s3. A copy of this file with all
-            the column name will remain in the ./data folder
-    """
+    """Generate desired output for the trajectory_table based only on flow output.
+
+    Parameters
+    ----------
+    data_path : str
+        output file path
+    extra_info: dict
+        extra information needed in the trajectory table, collected from flow
+    partition_name: str
+        the name of the partition to put this output to
+    Returns
+    -------
+    output_file_path: str
+        the local path of the output csv file; it should be used for the
+        upload to S3 only, since it does not have the human-readable column
+        names and will be deleted after uploading to S3. A copy of this file
+        with all the column names will remain in the ./data folder
+    """
     extra_info = pd.DataFrame.from_dict(extra_info)
     # extra_info["partition"] = partition_name
     extra_info.to_csv(data_path, index=False)
@@ -66,7 +67,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name):
 
 
 def upload_to_s3(bucket_name, bucket_key, file_path, only_query):
-    """ upload a file to S3 bucket
+    """Upload a file to an S3 bucket.
 
     Parameters
     ----------
@@ -89,15 +90,40 @@
 
 
 class AthenaQuery:
+    """
+    Class used to run queries.
+
+    Acts as a query engine; it maintains an open session with AWS Athena.
+
+    Attributes
+    ----------
+    MAX_WAIT: int
+        maximum number of seconds to wait before declaring a time-out
+    client: boto3.client
+        the athena client that is used to run the query
+    existing_partitions: list
+        a list of partitions already recorded in Athena's data catalog;
+        this is obtained through a query at the initialization of this
+        class instance.
+    """
 
     def __init__(self):
+        """Initialize the AthenaQuery instance.
+
+        Initialize a client session with AWS Athena and query Athena to
+        obtain existing_partitions.
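+
+        A minimal usage sketch (illustrative, not part of this patch; it
+        assumes AWS credentials for boto3 are configured and that the
+        trajectory_table exists on Athena):
+
+        >>> engine = AthenaQuery()
+        >>> query_id = engine.run_query("SAMPLE", partition="test")
+        >>> engine.check_status(query_id)  # QUEUED|RUNNING|SUCCEEDED|...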
+ """ self.MAX_WAIT = 60 self.client = boto3.client("athena") self.existing_partitions = self.get_existing_partitions() def get_existing_partitions(self): - """prints the existing partitions in the S3 bucket""" + """Return the existing partitions in the S3 bucket. + Returns + ------- + partitions: a list of existing partitions on S3 bucket + """ response = self.client.start_query_execution( QueryString='SHOW PARTITIONS trajectory_table', QueryExecutionContext={ @@ -114,7 +140,7 @@ def get_existing_partitions(self): return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] def check_status(self, execution_id): - """ Return the status of the execution with given id + """Return the status of the execution with given id. Parameters ---------- @@ -125,14 +151,13 @@ def check_status(self, execution_id): status: str QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED """ - response = self.client.get_query_execution( QueryExecutionId=execution_id ) return response['QueryExecution']['Status']['State'] def wait_for_execution(self, execution_id): - """ wait for the execution to finish or time-out + """Wait for the execution to finish or time-out. Parameters ---------- @@ -156,7 +181,7 @@ def wait_for_execution(self, execution_id): return True def update_partition(self, partition): - """ load the given partition to the trajectory_table on Athena + """Load the given partition to the trajectory_table on Athena. Parameters ---------- @@ -176,7 +201,7 @@ def update_partition(self, partition): return def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): - """ start the execution of a query, does not wait for it to finish + """Start the execution of a query, does not wait for it to finish. Parameters ---------- @@ -218,6 +243,16 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re def test_sql_query(query_name): + """Start the execution of a query, does not wait for it to finish. 
+
+    Parameters
+    ----------
+    query_name : str
+        name of the query in QueryStrings enum that will be tested
+
+    Raises
+    ------
+    RuntimeError: if timeout
+    """
     if query_name not in testing_functions:
         raise ValueError("no tests supported for this query")
 
diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py
index 564060d3b..ae0ea382f 100644
--- a/examples/datapipeline_test.py
+++ b/examples/datapipeline_test.py
@@ -1,3 +1,4 @@
+"""functions that calculate the expected result for testing."""
 import math
 
 # Vehicle Mass
@@ -17,10 +18,12 @@
 
 
 def heavyside(inp):
+    """Return 1 if input is positive."""
     return 0 if inp <= 0 else 1
 
 
 def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a):
+    """Calculate the expected power for the POWER_DEMAND_MODEL query."""
     acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration)
     accel_and_slope = M * mu * (acceleration + g * math.sin(theta))
     rolling_friction = M * g * C_r * mu
@@ -30,4 +33,5 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_
 
 
 def apply_energy_one(row):
+    """Apply the power calculation to a row of the dataframe."""
     return [row[0], row[1], calculate_power(row[4], row[6])]
\ No newline at end of file
diff --git a/examples/lambda_function.py b/examples/lambda_function.py
index 01ce1512a..4f7937c85 100644
--- a/examples/lambda_function.py
+++ b/examples/lambda_function.py
@@ -1,3 +1,4 @@
+"""lambda function on AWS Lambda."""
 import boto3
 from urllib.parse import unquote_plus
 from examples.data_pipeline import AthenaQuery
@@ -8,6 +9,15 @@
 
 
 def lambda_handler(event, context):
+    """Invoked by AWS Lambda when triggered by an S3 event.
+
+    Parameters
+    ----------
+    event : dict < str, dict >
+        an S3 event
+    context:
+        not used
+    """
     for record in event['Records']:
         bucket = record['s3']['bucket']['name']
         key = unquote_plus(record['s3']['object']['key'])
diff --git a/examples/query.py b/examples/query.py
index 6354cec3b..0f0ee13b4 100644
--- a/examples/query.py
+++ b/examples/query.py
@@ -1,15 +1,20 @@
+"""stores all the pre-defined query strings."""
 from enum import Enum
 from examples.datapipeline_test import apply_energy_one
 
-tags = {"energy": ["ENERGY_ONE"]}
+# tags for different queries
+tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]}
 
-testing_functions = {"ENERGY_ONE": apply_energy_one}
+# specify the function to calculate the expected result of each query
+testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one}
 
 
 class QueryStrings(Enum):
+    """An enumeration of all the pre-defined query strings."""
+
     SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;"
     UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');"
-    ENERGY_ONE = "SELECT id, time, 1200 * speed * " \
+    POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \
                  "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \
                  "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \
                  "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \
                  "FROM trajectory_table " \
                  "WHERE partition_name=\'{partition}\'"
\ No newline at end of file
diff --git a/examples/run_query.py b/examples/run_query.py
index ea8839b09..64baa6656 100644
--- a/examples/run_query.py
+++ b/examples/run_query.py
@@ -1,3 +1,4 @@
+"""runner script for invoking queries manually."""
 import argparse
 from examples.data_pipeline import AthenaQuery, test_sql_query
 from examples.query import QueryStrings
diff --git a/flow/core/kernel/vehicle/base.py
b/flow/core/kernel/vehicle/base.py index 3c285697f..080162c7b 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -690,17 +690,21 @@ def get_max_speed(self, veh_id, error): ########################################################################### def get_accel(self, veh_id): - """ see traci class """ + """Return the acceleration of vehicle with veh_id.""" raise NotImplementedError def update_accel_without_noise(self, veh_id, accel_without_noise): - """ see traci class """ + """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError def get_2D_position(self, veh_id, error=-1001): - """ see traci class """ + """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError def get_accel_without_noise(self, veh_id): - """ see traci class """ + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_road_grade(self, veh_id): + """Return the road-grade of the vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 889528b36..b473a1fa7 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1132,17 +1132,22 @@ def set_max_speed(self, veh_id, max_speed): # add for data pipeline def get_accel(self, veh_id): + """See parent class.""" return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step def update_accel_without_noise(self, veh_id, accel_without_noise): + """See parent class.""" self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise def get_accel_without_noise(self, veh_id): + """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] def get_2D_position(self, veh_id, error=-1001): + """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) def get_road_grade(self, veh_id): + """See parent class.""" # TODO return 0 From 5a3ff57fb2d70f2736a9f1ba091aa5730d7006d4 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:38:47 -0700 Subject: [PATCH 146/438] reorganized file locations --- {examples => flow/data_pipeline}/data_pipeline.py | 0 {examples => flow/data_pipeline}/datapipeline_test.py | 0 {examples => flow/data_pipeline}/lambda_function.py | 0 {examples => flow/data_pipeline}/query.py | 0 {examples => flow/data_pipeline}/run_query.py | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename {examples => flow/data_pipeline}/data_pipeline.py (100%) rename {examples => flow/data_pipeline}/datapipeline_test.py (100%) rename {examples => flow/data_pipeline}/lambda_function.py (100%) rename {examples => flow/data_pipeline}/query.py (100%) rename {examples => flow/data_pipeline}/run_query.py (100%) diff --git a/examples/data_pipeline.py b/flow/data_pipeline/data_pipeline.py similarity index 100% rename from examples/data_pipeline.py rename to flow/data_pipeline/data_pipeline.py diff --git a/examples/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py similarity index 100% rename from examples/datapipeline_test.py rename to flow/data_pipeline/datapipeline_test.py diff --git a/examples/lambda_function.py b/flow/data_pipeline/lambda_function.py similarity index 100% rename from examples/lambda_function.py rename to flow/data_pipeline/lambda_function.py diff --git a/examples/query.py b/flow/data_pipeline/query.py similarity index 100% rename from examples/query.py rename to flow/data_pipeline/query.py diff --git a/examples/run_query.py 
b/flow/data_pipeline/run_query.py similarity index 100% rename from examples/run_query.py rename to flow/data_pipeline/run_query.py From ddc53fb03ae5474c6c2faf2627feb11a6bdac7da Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:58:44 -0700 Subject: [PATCH 147/438] fix some more style issues --- examples/simulate.py | 3 ++- flow/controllers/base_controller.py | 10 +++++----- flow/core/experiment.py | 3 ++- flow/core/kernel/vehicle/base.py | 2 +- flow/core/kernel/vehicle/traci.py | 2 +- flow/data_pipeline/__init__.py | 1 + flow/data_pipeline/data_pipeline.py | 3 +-- flow/data_pipeline/datapipeline_test.py | 2 +- flow/data_pipeline/lambda_function.py | 2 +- flow/data_pipeline/query.py | 12 ++++++------ flow/data_pipeline/run_query.py | 8 ++++---- flow/envs/base.py | 1 - 12 files changed, 25 insertions(+), 24 deletions(-) create mode 100644 flow/data_pipeline/__init__.py diff --git a/examples/simulate.py b/examples/simulate.py index 69e11b2fb..86d14aa14 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -97,4 +97,5 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. - exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, only_query=flags.only_query) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, + only_query=flags.only_query) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 6e6734764..7adcdf310 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -110,18 +110,18 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested - accel_without_noice = accel + accel_without_noise = accel if self.fail_safe == 'instantaneous': - accel_without_noice = self.get_safe_action_instantaneous(env, accel_without_noice) + accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) elif self.fail_safe == 'safe_velocity': - accel_without_noice = self.get_safe_velocity_action(env, accel_without_noice) - env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noice) + accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) - # run the failsafes, if requested + # run the fail-safes, if requested if self.fail_safe == 'instantaneous': accel = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 80d607e7d..aa5028836 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -231,7 +231,8 @@ def rl_actions(*_): if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 080162c7b..1b729d159 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -697,7 +697,7 @@ def 
update_accel_without_noise(self, veh_id, accel_without_noise): """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError - def get_2D_position(self, veh_id, error=-1001): + def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index b473a1fa7..81d759988 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1143,7 +1143,7 @@ def get_accel_without_noise(self, veh_id): """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] - def get_2D_position(self, veh_id, error=-1001): + def get_2d_position(self, veh_id, error=-1001): """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) diff --git a/flow/data_pipeline/__init__.py b/flow/data_pipeline/__init__.py new file mode 100644 index 000000000..622e09b06 --- /dev/null +++ b/flow/data_pipeline/__init__.py @@ -0,0 +1 @@ +"""Empty init file to ensure that data_pipeline is recognized as a package""" diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 03b0f87e5..afbc09f92 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -2,8 +2,7 @@ import pandas as pd import numpy as np import boto3 -from botocore.exceptions import ClientError -from examples.query import QueryStrings, testing_functions +from flow.data_pipeline.query import QueryStrings, testing_functions from time import time diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py index ae0ea382f..0e1a50518 100644 --- a/flow/data_pipeline/datapipeline_test.py +++ b/flow/data_pipeline/datapipeline_test.py @@ -34,4 +34,4 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): """Apply the power calculation to a row of the dataframe.""" - return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file + return [row[0], row[1], calculate_power(row[4], row[6])] diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 4f7937c85..afef55a4b 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 0f0ee13b4..af1b51ce7 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -1,6 +1,6 @@ """stores all the pre-defined query strings.""" from enum import Enum -from examples.datapipeline_test import apply_energy_one +from flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} @@ -15,8 +15,8 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ - "((CASE WHEN acceleration > 0 THEN 1 ELSE 
0 END * (1-0.8) * acceleration) + 0.8 " \
-                 "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \
-                 "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \
-                 "FROM trajectory_table " \
-                 "WHERE partition_name=\'{partition}\'"
\ No newline at end of file
+                         "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \
+                         "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \
+                         "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \
+                         "FROM trajectory_table " \
+                         "WHERE partition_name=\'{partition}\'"
diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py
index 64baa6656..f065a726e 100644
--- a/flow/data_pipeline/run_query.py
+++ b/flow/data_pipeline/run_query.py
@@ -1,10 +1,10 @@
 """runner script for invoking queries manually."""
 import argparse
-from examples.data_pipeline import AthenaQuery, test_sql_query
-from examples.query import QueryStrings
+from flow.data_pipeline.data_pipeline import AthenaQuery, test_sql_query
+from flow.data_pipeline.query import QueryStrings
 
 parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to"
-                                             "a S3 location")
+                                                          " an S3 location")
 parser.add_argument("--run", type=str, nargs="+")
 parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/")
 parser.add_argument("--partition", type=str, nargs='?', default="default")
@@ -34,4 +34,4 @@
     for q in QueryStrings:
         print(q)
     if args.test_query:
-        test_sql_query(args.test_query[0])
\ No newline at end of file
+        test_sql_query(args.test_query[0])
diff --git a/flow/envs/base.py b/flow/envs/base.py
index f2067d947..cf1674355 100644
--- a/flow/envs/base.py
+++ b/flow/envs/base.py
@@ -417,7 +417,6 @@ def step(self, rl_actions):
                 "**********************************************************"
             )
-
         # compute the info for each agent
         infos = {}
 
From e7ac1a9afa6513f0cb425a2e37c3db26b259f6f0 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Thu, 23 Apr 2020 13:02:33 -0700
Subject: [PATCH 148/438] fix one more style issue
---
 flow/data_pipeline/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/flow/data_pipeline/__init__.py b/flow/data_pipeline/__init__.py
index 622e09b06..d9d6a6573 100644
--- a/flow/data_pipeline/__init__.py
+++ b/flow/data_pipeline/__init__.py
@@ -1 +1 @@
-"""Empty init file to ensure that data_pipeline is recognized as a package"""
+"""Empty init file to ensure that data_pipeline is recognized as a package."""
From c97021992460a6d628ad769c289975f83bdf9628 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Sat, 9 May 2020 22:06:30 -0700
Subject: [PATCH 149/438] added two new queries
---
 flow/core/experiment.py | 4 ++--
 flow/core/kernel/vehicle/base.py | 4 ++++
 flow/core/kernel/vehicle/traci.py | 4 ++++
 flow/data_pipeline/query.py | 38 ++++++++++++++++++++++++++++++-
 4 files changed, 47 insertions(+), 3 deletions(-)

diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index aa5028836..37fcb03af 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -1,7 +1,7 @@
 """Contains an experiment class for running simulations."""
 from flow.core.util import emission_to_csv
 from flow.utils.registry import make_create_env
-from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3
+from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3
 import datetime
 import logging
 import time
@@ -178,7 +178,7 @@ def
rl_actions(*_): self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - position = self.env.k.vehicle.get_2D_position(vid) + position = self.env.k.vehicle.get_2d_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 1b729d159..7609cf252 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -705,6 +705,10 @@ def get_accel_without_noise(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError + def get_velocity_without_noise(self, veh_id): + """Return the velocity without noise of vehicle with veh_id.""" + raise NotImplementedError + def get_road_grade(self, veh_id): """Return the road-grade of the vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 81d759988..1f697f046 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1143,6 +1143,10 @@ def get_accel_without_noise(self, veh_id): """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] + def get_velocity_without_noise(self, veh_id): + """See parent class.""" + return max([self.get_speed(veh_id) + self.get_accel_without_noise(veh_id) * self.sim_step, 0]) + def get_2d_position(self, veh_id, error=-1001): """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index af1b51ce7..0c87b3dcc 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -3,7 +3,8 @@ from flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} +tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], + "analysis": ["POWER_DEMAND_MODEL"]} # specify the function to calculate the expected result of each query testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} @@ -20,3 +21,38 @@ class QueryStrings(Enum): "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ "FROM trajectory_table " \ "WHERE partition_name=\'{partition}\'" + POWER_DEMAND_MODEL_DENOISED_ACCEL = \ + "SELECT id, time, 1200 * speed * " \ + "((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" + POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = \ + "WITH sub1 AS ( " \ + "SELECT" \ + "time, id, speed, acceleration, accel_without_noise, road_grade, source_id," \ + "time - LAG(time, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step," \ + "LAG(speed, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed," \ + "LAG(acceleration, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel," \ + "LAG(accel_without_noise, 1) 
" \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised" \ + "FROM trajectory_table" \ + "WHERE partition_name=\'{partition}\'" \ + ")," \ + "sub2 AS (" \ + "SELECT time, id, speed, acceleration, accel_without_noise, " \ + "road_grade, source_id, " \ + "speed-prev_accel*sim_step+prev_accel_denoised*sim_step AS speed_denoised" \ + "FROM sub1" \ + ") " \ + "SELECT id, time, speed_denoised, accel_without_noise," \ + "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ + "THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 " \ + "* SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 " \ + "* 2.6 * 0.3 * POW(speed_denoised,3) AS power, " \ + "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id" \ + "FROM sub2 " From 3b10524a6830986f3ec446907a9655a08c3f85dd Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sun, 10 May 2020 23:03:35 -0700 Subject: [PATCH 150/438] including next_V for testing only --- flow/core/experiment.py | 1 + flow/core/kernel/vehicle/traci.py | 15 ++++++++++- flow/data_pipeline/query.py | 41 ++++++++++++++++--------------- 3 files changed, 36 insertions(+), 21 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 37fcb03af..8b5cbac02 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -182,6 +182,7 @@ def rl_actions(*_): extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) + #extra_info["next_v"].append(self.env.k.vehicle.get_next_v(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 1f697f046..13ca8efa6 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -967,8 +967,10 @@ def apply_acceleration(self, veh_ids, acc): for i, vid in enumerate(veh_ids): if acc[i] is not None and vid in self.get_ids(): + self.__vehicles[vid]["accel"] = acc[i] this_vel = self.get_speed(vid) next_vel = max([this_vel + acc[i] * self.sim_step, 0]) + #self.__vehicles[vid]["next_v"] = next_vel self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3) def apply_lane_change(self, veh_ids, direction): @@ -1131,9 +1133,18 @@ def set_max_speed(self, veh_id, max_speed): self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) # add for data pipeline + def get_next_v(self, veh_id): + """See parent class.""" + if not "next_v" in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["next_v"] = None + return self.__vehicles[veh_id]["next_v"] + #return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + def get_accel(self, veh_id): """See parent class.""" - return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + if not "accel" in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel"] = None + return self.__vehicles[veh_id]["accel"] def update_accel_without_noise(self, veh_id, accel_without_noise): """See parent class.""" @@ -1141,6 +1152,8 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): """See parent class.""" + if not "accel_without_noise" in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_without_noise"] = None return self.__vehicles[veh_id]["accel_without_noise"] def get_velocity_without_noise(self, veh_id): diff --git a/flow/data_pipeline/query.py 
b/flow/data_pipeline/query.py index 0c87b3dcc..9054364e6 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -15,44 +15,45 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ + POWER_DEMAND_MODEL = "SELECT id, time, speed, acceleration, 1200 * speed * " \ "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL' AS energy_model_id, source_id " \ "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" + "WHERE partition_name=\'{partition}\' " \ + "ORDER BY id, time " POWER_DEMAND_MODEL_DENOISED_ACCEL = \ - "SELECT id, time, 1200 * speed * " \ + "SELECT id, time, speed, accel_without_noise, 1200 * speed * " \ "((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id " \ "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" + "WHERE partition_name=\'{partition}\' " \ + "ORDER BY id, time " POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = \ "WITH sub1 AS ( " \ - "SELECT" \ - "time, id, speed, acceleration, accel_without_noise, road_grade, source_id," \ + "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ "time - LAG(time, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step," \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, " \ "LAG(speed, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed," \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, " \ "LAG(acceleration, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel," \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, " \ "LAG(accel_without_noise, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised" \ - "FROM trajectory_table" \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised "\ + "FROM trajectory_table " \ "WHERE partition_name=\'{partition}\'" \ ")," \ "sub2 AS (" \ - "SELECT time, id, speed, acceleration, accel_without_noise, " \ - "road_grade, source_id, " \ - "speed-prev_accel*sim_step+prev_accel_denoised*sim_step AS speed_denoised" \ + "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ + "prev_speed+accel_without_noise*sim_step AS speed_denoised " \ "FROM sub1" \ ") " \ - "SELECT id, time, speed_denoised, accel_without_noise," \ - "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ + "SELECT id, time, speed_denoised, accel_without_noise, " \ + "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ "THEN 1 ELSE 0 END * (1-0.8) * 
accel_without_noise) + 0.8 + 9.81 " \ "* SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 " \ "* 2.6 * 0.3 * POW(speed_denoised,3) AS power, " \ - "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id" \ - "FROM sub2 " + "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id " \ + "FROM sub2 " \ + "ORDER BY id, time " From 638f9b4ff1a7baec698264f2f2cdbb35d507b669 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 18 May 2020 12:25:00 -0700 Subject: [PATCH 151/438] change the bucket to a common bucket --- flow/core/experiment.py | 29 +++++++---------------- flow/core/kernel/vehicle/base.py | 4 ++-- flow/core/kernel/vehicle/traci.py | 13 +++------- flow/data_pipeline/data_pipeline.py | 34 ++++++++++++++++++++++++--- flow/data_pipeline/lambda_function.py | 4 ++-- flow/visualize/i210_replay.py | 14 ++++++++--- 6 files changed, 57 insertions(+), 41 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 8b5cbac02..2296ef635 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,10 +1,11 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3 +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info import datetime import logging import time +from datetime import date import os import numpy as np import uuid @@ -145,9 +146,7 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] - extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], - "road_grade": [], "source_id": []} + extra_info = extra_init() source_id = uuid.uuid4().hex for i in range(num_runs): @@ -167,22 +166,7 @@ def rl_actions(*_): ret += reward # collect additional information for the data pipeline - for vid in veh_ids: - extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) - extra_info["id"].append(vid) - extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) - extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) - extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) - extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) - extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( - self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) - extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) - extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - position = self.env.k.vehicle.get_2d_position(vid) - extra_info["x"].append(position[0]) - extra_info["y"].append(position[1]) - extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) - #extra_info["next_v"].append(self.env.k.vehicle.get_next_v(vid)) + get_extra_info(self.env.k.vehicle, extra_info, veh_ids) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. 
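As a sketch of the collection pattern these hunks introduce (the helper names
come from flow/data_pipeline/data_pipeline.py below; env is assumed to be an
already initialized flow environment):

    >>> from flow.data_pipeline.data_pipeline import extra_init, get_extra_info
    >>> extra_info = extra_init()
    >>> get_extra_info(env.k.vehicle, extra_info, env.k.vehicle.get_ids())

Each call appends one entry per vehicle to every column list in extra_info.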
@@ -231,7 +215,10 @@ def rl_actions(*_):
             upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name)
 
             if partition_name:
-                upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/'
+                if partition_name == "default":
+                    partition_name = source_id[0:3]
+                partition_name = date.today().isoformat() + " " + partition_name
+                upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/'
                              + upload_file_path.split('/')[-1].split('_')[0] + '.csv',
                              upload_file_path, str(only_query)[2:-2])
 
diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py
index 7609cf252..647ef37fe 100644
--- a/flow/core/kernel/vehicle/base.py
+++ b/flow/core/kernel/vehicle/base.py
@@ -705,8 +705,8 @@ def get_accel_without_noise(self, veh_id):
         """Return the acceleration without noise of vehicle with veh_id."""
         raise NotImplementedError
 
-    def get_velocity_without_noise(self, veh_id):
-        """Return the velocity without noise of vehicle with veh_id."""
+    def get_realized_accel(self, veh_id):
+        """Return the acceleration that the vehicle actually makes."""
         raise NotImplementedError
 
     def get_road_grade(self, veh_id):
diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py
index 13ca8efa6..f40eed99c 100644
--- a/flow/core/kernel/vehicle/traci.py
+++ b/flow/core/kernel/vehicle/traci.py
@@ -1133,13 +1133,6 @@ def set_max_speed(self, veh_id, max_speed):
         self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed)
 
     # add for data pipeline
-    def get_next_v(self, veh_id):
-        """See parent class."""
-        if not "next_v" in self.__vehicles[veh_id]:
-            self.__vehicles[veh_id]["next_v"] = None
-        return self.__vehicles[veh_id]["next_v"]
-        #return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step
-
     def get_accel(self, veh_id):
         """See parent class."""
         if not "accel" in self.__vehicles[veh_id]:
@@ -1156,9 +1149,9 @@ def get_accel_without_noise(self, veh_id):
         """See parent class."""
         return self.__vehicles[veh_id]["accel_without_noise"]
 
-    def get_velocity_without_noise(self, veh_id):
+    def get_realized_accel(self, veh_id):
         """See parent class."""
-        return max([self.get_speed(veh_id) + self.get_accel_without_noise(veh_id) * self.sim_step, 0])
+        return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step
 
     def get_2d_position(self, veh_id, error=-1001):
diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py
index afbc09f92..0cd0cbc79 100644
--- a/flow/data_pipeline/data_pipeline.py
+++ b/flow/data_pipeline/data_pipeline.py
@@ -88,6 +88,34 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query):
     return
 
 
+def extra_init():
+    """Return the dictionary with all the fields pre-populated with empty lists."""
+    extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [],
+                  "accel_without_noise": [], "realized_accel": [], "leader_id": [], "follower_id": [],
+                  "leader_rel_speed": [], "road_grade": [], "source_id": []}
+    return extra_info
+
+
+def get_extra_info(veh_kernel, extra_info, veh_ids):
+    """Get all the necessary information for the trajectory output from flow."""
+    for vid in veh_ids:
+        extra_info["time"].append(veh_kernel.get_timestep(vid) / 1000)
extra_info["id"].append(vid) + extra_info["headway"].append(veh_kernel.get_headway(vid)) + extra_info["acceleration"].append(veh_kernel.get_accel(vid)) + extra_info["leader_id"].append(veh_kernel.get_leader(vid)) + extra_info["follower_id"].append(veh_kernel.get_follower(vid)) + extra_info["leader_rel_speed"].append(veh_kernel.get_speed( + veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) + extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) + extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid)) + extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) + position = veh_kernel.get_2d_position(vid) + extra_info["x"].append(position[0]) + extra_info["y"].append(position[1]) + extra_info["speed"].append(veh_kernel.get_speed(vid)) + + class AthenaQuery: """ Class used to run query. @@ -199,7 +227,7 @@ def update_partition(self, partition): self.existing_partitions.append(partition) return - def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): + def run_query(self, query_name, result_location="s3://circles.data/query-result/", partition="default"): """Start the execution of a query, does not wait for it to finish. Parameters @@ -257,14 +285,14 @@ def test_sql_query(query_name): # Run the respective sql query queryEngine = AthenaQuery() - execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test", + execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data/query-result/query-test", partition="test") if queryEngine.wait_for_execution(execution_id): raise RuntimeError("execution timed out") # get the Athena query result from S3 s3 = boto3.resource("s3") - s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv", + s3.Bucket("circles.data").download_file("query-result/query-test/"+execution_id+".csv", "data/athena_result.csv") athena_result = pd.read_csv("data/athena_result.csv") athena_result = athena_result.sort_values(by=["time", "id"]) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index afef55a4b..3f0abb8a1 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -25,7 +25,7 @@ def lambda_handler(event, context): response = s3.head_object(Bucket=bucket, Key=key) run_query = response["Metadata"]["run-query"] - if bucket == 'brent.experiments' and 'trajectory-output/' in key: + if bucket == 'circles.data' and 'trajectory-output/' in key: if run_query == "all": query_list = tags["analysis"] elif not run_query: @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) + queryEngine.run_query(query_name, 's3://circles.data/query-result/auto/', partition) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index a37bac95b..c50f12a05 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -32,6 +32,9 @@ from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info +import uuid + EXAMPLE_USAGE = """ example usage: python i210_replay.py -r 
/ray_results/experiment_dir/result_dir -c 1
@@ -205,9 +208,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
         key: [] for key in custom_callables.keys()
     })
 
-    i = 0
-    while i < args.num_rollouts:
-        print("Rollout iter", i)
+    extra_info = extra_init()
+    source_id = uuid.uuid4().hex
+
+    for i in range(args.num_rollouts):
         vel = []
         per_vehicle_energy_trace = defaultdict(lambda: [])
         completed_veh_types = {}
@@ -243,6 +247,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
             veh_ids = env.k.vehicle.get_ids()
             vel.append(np.mean(env.k.vehicle.get_speed(veh_ids)))
 
+            # Collect information from flow for the trajectory output
+            get_extra_info(env.k.vehicle, extra_info, veh_ids)
+            extra_info["source_id"].extend([source_id + "run" + str(i)] * len(veh_ids))
+
             # Compute the results for the custom callables.
             for (key, lambda_func) in custom_callables.items():
                 custom_vals[key].append(lambda_func(env))
 
From bc8584a30d3736169d9c0f985ddc677d34144dfd Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Mon, 18 May 2020 12:28:17 -0700
Subject: [PATCH 152/438] removed the old tests
---
 flow/data_pipeline/datapipeline_test.py | 37 -------
 1 file changed, 37 deletions(-)
 delete mode 100644 flow/data_pipeline/datapipeline_test.py

diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py
deleted file mode 100644
index 0e1a50518..000000000

From 0ee66469dcb5f21d542a57b464b3ad5fe7b11008 Mon Sep 17 00:00:00 2001
From: Eugene Vinitsky
Date: Wed, 18 Mar 2020 16:43:22 -0700
Subject: [PATCH 153/438] Add an on ramp option
---
 examples/exp_configs/non_rl/i210_subnetwork.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py
index 3704a7a1c..8970e6165 100644
--- a/examples/exp_configs/non_rl/i210_subnetwork.py
+++ b/examples/exp_configs/non_rl/i210_subnetwork.py
@@ -117,7 +117,7 @@
     sim=SumoParams(
         sim_step=0.4,
         render=False,
-        color_by_speed=True,
+        color_by_speed=False,
         use_ballistic=True
     ),
 
From 3af559503e36d69c4f1481ee405778aab01c6840 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Mon, 6 Apr 2020 15:28:57 -0700
Subject: [PATCH 154/438] data pipeline implemented
---
 examples/data_pipeline.py | 179 ++++++++++++++++++++++++++++++++++++++
 examples/query.py | 8 ++
 examples/run_query.py | 34 ++++++++
 3 files changed, 221 insertions(+)
 create mode 100644 examples/data_pipeline.py
 create mode 100644 examples/query.py
 create mode 100644 examples/run_query.py

diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py
new file mode 100644
index 000000000..5fdc30cf2
--- /dev/null
+++ b/examples/data_pipeline.py
@@ -0,0 +1,179 @@
+import pandas as pd
+import boto3
+from botocore.exceptions import ClientError
+from examples.query import QueryStrings
+from time import time
+
+
+def generate_trajectory_table(data_path, extra_info, partition_name):
+    """ generate desired output for the trajectory_table based on standard SUMO emission
+
+        Parameters
+        ----------
+        data_path : str
+            path to the standard SUMO emission
+        extra_info: dict
+            extra information needed in the trajectory table, collected from flow
+        partition_name: str
+            the name of the partition to put this output to
+        Returns
+        -------
+        output_file_path: str
+            the local path of the outputted csv file
+    """
+    raw_output = pd.read_csv(data_path, index_col=["time", "id"])
+    required_cols = {"time", "id", "speed", "x", "y"}
+    raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1)
+
+    extra_info = pd.DataFrame.from_dict(extra_info)
+    extra_info.set_index(["time", "id"])
+    raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"])
+
+    # add the partition column
+    raw_output['partition'] = partition_name
+
+    output_file_path = data_path[:-4]+"_trajectory.csv"
+    raw_output.to_csv(output_file_path, index=False)
+    return output_file_path
+
+
+def upload_to_s3(bucket_name, bucket_key, file_path):
+    """ upload a file to S3 bucket
+
+    Parameters
+    ----------
+    bucket_name : str
+        the bucket to upload to
+    bucket_key: str
+        the key within the bucket for the file
+    file_path: str
+        the path of the file to be uploaded
+    """
+    s3 = boto3.resource("s3")
+    s3.Bucket(bucket_name).upload_file(file_path, bucket_key)
+    return
+
+
+class AthenaQuery:
+
+    def __init__(self):
+        self.MAX_WAIT = 60
+        self.client = boto3.client("athena")
+        self.existing_partitions = self.get_existing_partitions()
+
+    def get_existing_partitions(self):
+        """prints the existing partitions in the S3 bucket"""
+
+        response = self.client.start_query_execution(
+            QueryString='SHOW PARTITIONS trajectory_table',
+            QueryExecutionContext={
+                'Database': 'simulation'
+            },
+            WorkGroup='primary'
+        )
+        if self.wait_for_execution(response['QueryExecutionId']):
+            raise RuntimeError("get current partitions timed out")
+        response = self.client.get_query_results(
+            QueryExecutionId=response['QueryExecutionId'],
+            MaxResults=1000
+        )
+        return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']]
+
+    def check_status(self, execution_id):
+        """ Return the status of the execution with given id
+
+        Parameters
+        ----------
+        execution_id : string
+            id of the execution that is checked for
+        Returns
+        -------
+        status: str
+            QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED
+        """
+
+        response = self.client.get_query_execution(
+            QueryExecutionId=execution_id
+        )
+        return response['QueryExecution']['Status']['State']
+
+    def wait_for_execution(self, execution_id):
+        """ wait for the execution to finish or time-out
+
+        Parameters
+        ----------
+        execution_id : str
+            id of the execution this is waiting for
+        Returns
+        -------
+        time_out: bool
+            True if time-out, False if success
+        Raises
+        ------
+        RuntimeError: if execution failed or gets canceled
+        """
+        start = time()
+        while time() - start < self.MAX_WAIT:
+            state
= self.check_status(execution_id) + if state == 'FAILED' or state == 'CANCELLED': + raise RuntimeError("update partition failed") + elif state == 'SUCCEEDED': + return False + return True + + def update_partition(self, partition): + """ load the given partition to the trajectory_table on Athena + + Parameters + ---------- + partition : str + the new partition that needs to be loaded + """ + response = self.client.start_query_execution( + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise RuntimeError("update partition timed out") + self.existing_partitions.append(partition) + return + + def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): + """ start the execution of a query, does not wait for it to finish + + Parameters + ---------- + query_name : str + name of the query in QueryStrings enum that will be run + result_location: str, optional + location on the S3 bucket where the result will be stored + partition: str, optional + name of the partition to run this query on + Returns + ------- + execution_id: str + the execution id of the execution started by this method + Raises + ------ + ValueError: if tries to run a query not existed in QueryStrings enum + """ + if query_name not in QueryStrings.__members__: + raise ValueError("query not existed: please add it to query.py") + + if partition not in self.existing_partitions: + self.update_partition(partition) + + response = self.client.start_query_execution( + QueryString=QueryStrings[query_name].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + ResultConfiguration={ + 'OutputLocation': result_location, + }, + WorkGroup='primary' + ) + return response['QueryExecutionId'] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py new file mode 100644 index 000000000..3fbbe69e1 --- /dev/null +++ b/examples/query.py @@ -0,0 +1,8 @@ +from enum import Enum + +tags = {} + + +class QueryStrings(Enum): + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" + UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py new file mode 100644 index 000000000..7b4a5af7d --- /dev/null +++ b/examples/run_query.py @@ -0,0 +1,34 @@ +import argparse +import sys +from examples.data_pipeline import AthenaQuery +from examples.query import QueryStrings + +parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" + "a S3 location") +parser.add_argument("--run", type=str, nargs="+") +parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") +parser.add_argument("--partition", type=str, nargs='?', default="default") +parser.add_argument("--list_partitions", action="store_true") +parser.add_argument("--check_status", type=str, nargs='+') +parser.add_argument("--list_queries", action="store_true") + + +if __name__ == "__main__": + args = parser.parse_args() + queryEngine = AthenaQuery() + + if args.run: + execution_ids = [] + for query_name in args.run: + execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) + print(execution_ids) + if 
args.list_partitions: + print(queryEngine.existing_partitions) + if args.check_status: + status = dict() + for execution_id in args.check_status: + status[execution_id] = queryEngine.check_status(execution_id) + print(status) + if args.list_queries: + for q in QueryStrings: + print(q) From 8d4ad2904bb76afeb6c03cd8d90d8ea1e038df15 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 10 Apr 2020 19:54:30 -0700 Subject: [PATCH 155/438] multiple runs issue solved, testing added --- examples/data_pipeline.py | 55 +++++++- examples/datapipeline_test.py | 33 +++++ examples/query.py | 13 +- examples/run_query.py | 6 +- flow/core/experiment.py | 229 +------------------------------ flow/core/kernel/vehicle/base.py | 4 + 6 files changed, 104 insertions(+), 236 deletions(-) create mode 100644 examples/datapipeline_test.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 5fdc30cf2..9d56548c2 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,7 +1,8 @@ import pandas as pd +import numpy as np import boto3 from botocore.exceptions import ClientError -from examples.query import QueryStrings +from examples.query import QueryStrings, testing_functions from time import time @@ -30,13 +31,22 @@ def generate_trajectory_table(data_path, extra_info, partition_name): raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) # add the partition column - raw_output['partition'] = partition_name - + # raw_output['partition'] = partition_name + raw_output = raw_output.sort_values(by=["time", "id"]) output_file_path = data_path[:-4]+"_trajectory.csv" raw_output.to_csv(output_file_path, index=False) return output_file_path +def generate_trajectory_from_flow(data_path, extra_info, partition_name): + extra_info = pd.DataFrame.from_dict(extra_info) + # extra_info["partition"] = partition_name + extra_info.to_csv(data_path, index=False) + upload_only_file_path = data_path[:-4] + "_upload" + ".csv" + extra_info.to_csv(upload_only_file_path, index=False, header=False) + return upload_only_file_path + + def upload_to_s3(bucket_name, bucket_key, file_path): """ upload a file to S3 bucket @@ -176,4 +186,41 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re }, WorkGroup='primary' ) - return response['QueryExecutionId'] \ No newline at end of file + return response['QueryExecutionId'] + +########################################################################### +# Helpers for testing the SQL Queries # +########################################################################### + + +def test_sql_query(query_name): + if query_name not in testing_functions: + raise ValueError("no tests supported for this query") + + # Run the respective sql query + queryEngine = AthenaQuery() + execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test", + partition="test") + if queryEngine.wait_for_execution(execution_id): + raise RuntimeError("execution timed out") + + # get the Athena query result from S3 + s3 = boto3.resource("s3") + s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv", + "data/athena_result.csv") + athena_result = pd.read_csv("data/athena_result.csv") + athena_result = athena_result.sort_values(by=["time", "id"]) + + # get the python expected result + expected_result = pd.read_csv("data/test_data.csv") + expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand") + 
expected_result.columns = ["time", "id", "power"] + expected_result = expected_result.sort_values(by=["time", "id"]) + + difference = athena_result["power"] - expected_result["power"] + print("average difference is: " + str(np.mean(difference))) + print("std of difference is: " + str(np.std(difference))) + print("average ratio of difference to expected is: " + + str(np.mean(np.divide(difference, expected_result["power"])))) + difference = pd.DataFrame(difference) + difference.to_csv("./difference.csv") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py new file mode 100644 index 000000000..564060d3b --- /dev/null +++ b/examples/datapipeline_test.py @@ -0,0 +1,33 @@ +import math + +# Vehicle Mass +M = 1200 +# Gravity +g = 9.81 +# Density of Air +ro_air = 1.225 +# Rolling resistance coefficient +C_r = .005 +# Aerodynamic drag coefficient +C_a = 0.3 +# Vehicle Cross sectional Area +A = 2.6 +# Road grade +theta = 0 + + +def heavyside(inp): + return 0 if inp <= 0 else 1 + + +def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) + accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) + rolling_friction = M * g * C_r * mu + air_drag = .5 * ro_air * A * C_a * mu**3 + power = accel_and_slope + rolling_friction + air_drag + return power + + +def apply_energy_one(row): + return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py index 3fbbe69e1..6354cec3b 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,8 +1,17 @@ from enum import Enum +from examples.datapipeline_test import apply_energy_one -tags = {} +tags = {"energy": ["ENERGY_ONE"]} + +testing_functions = {"ENERGY_ONE": apply_energy_one} class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file + UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" + ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py index 7b4a5af7d..ea8839b09 100644 --- a/examples/run_query.py +++ b/examples/run_query.py @@ -1,6 +1,5 @@ import argparse -import sys -from examples.data_pipeline import AthenaQuery +from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" @@ -11,6 +10,7 @@ parser.add_argument("--list_partitions", action="store_true") parser.add_argument("--check_status", type=str, nargs='+') parser.add_argument("--list_queries", action="store_true") +parser.add_argument("--test_query", nargs=1) if __name__ == "__main__": @@ -32,3 +32,5 @@ if args.list_queries: for q in QueryStrings: print(q) + if args.test_query: + test_sql_query(args.test_query[0]) \ No newline at end of file diff --git a/flow/core/experiment.py 
b/flow/core/experiment.py index 2296ef635..97467adb5 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,228 +1 @@ -"""Contains an experiment class for running simulations.""" -from flow.core.util import emission_to_csv -from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info -import datetime -import logging -import time -from datetime import date -import os -import numpy as np -import uuid - - -class Experiment: - """ - Class for systematically running simulations in any supported simulator. - - This class acts as a runner for a network and environment. In order to use - it to run an network and environment in the absence of a method specifying - the actions of RL agents in the network, type the following: - - >>> from flow.envs import Env - >>> flow_params = dict(...) # see the examples in exp_config - >>> exp = Experiment(flow_params) # for some experiment configuration - >>> exp.run(num_runs=1) - - If you wish to specify the actions of RL agents in the network, this may be - done as follows: - - >>> rl_actions = lambda state: 0 # replace with something appropriate - >>> exp.run(num_runs=1, rl_actions=rl_actions) - - Finally, if you would like to like to plot and visualize your results, this - class can generate csv files from emission files produced by sumo. These - files will contain the speeds, positions, edges, etc... of every vehicle - in the network at every time step. - - In order to ensure that the simulator constructs an emission file, set the - ``emission_path`` attribute in ``SimParams`` to some path. - - >>> from flow.core.params import SimParams - >>> flow_params['sim'] = SimParams(emission_path="./data") - - Once you have included this in your environment, run your Experiment object - as follows: - - >>> exp.run(num_runs=1, convert_to_csv=True) - - After the experiment is complete, look at the "./data" directory. There - will be two files, one with the suffix .xml and another with the suffix - .csv. The latter should be easily interpretable from any csv reader (e.g. - Excel), and can be parsed using tools such as numpy and pandas. - - Attributes - ---------- - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we want - to extract from the environment. The lambda will be called at each step - to extract information from the env and it will be stored in a dict - keyed by the str. - env : flow.envs.Env - the environment object the simulator will run - """ - - def __init__(self, flow_params, custom_callables=None): - """Instantiate the Experiment class. - - Parameters - ---------- - flow_params : dict - flow-specific parameters - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we - want to extract from the environment. The lambda will be called at - each step to extract information from the env and it will be stored - in a dict keyed by the str. - """ - self.custom_callables = custom_callables or {} - - # Get the env name and a creator for the environment. - create_env, _ = make_create_env(flow_params) - - # Create the environment. 
- self.env = create_env() - - logging.info(" Starting experiment {} at {}".format( - self.env.network.name, str(datetime.datetime.utcnow()))) - - logging.info("Initializing environment.") - - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): - """Run the given network for a set number of runs. - - Parameters - ---------- - num_runs : int - number of runs the experiment should perform - rl_actions : method, optional - maps states to actions to be performed by the RL agents (if - there are any) - convert_to_csv : bool - Specifies whether to convert the emission file created by sumo - into a csv file - partition_name: str - Specifies the S3 partition you want to store the output file, - will be used to later for query. If NONE, won't upload output - to S3. - only_query: str - Specifies whether queries should be automatically run the - simulation data when it gets uploaded to s3 - - Returns - ------- - info_dict : dict < str, Any > - contains returns, average speed per step - """ - num_steps = self.env.env_params.horizon - - # raise an error if convert_to_csv is set to True but no emission - # file will be generated, to avoid getting an error at the end of the - # simulation - if convert_to_csv and self.env.sim_params.emission_path is None: - raise ValueError( - 'The experiment was run with convert_to_csv set ' - 'to True, but no emission file will be generated. If you wish ' - 'to generate an emission file, you should set the parameter ' - 'emission_path in the simulation parameters (SumoParams or ' - 'AimsunParams) to the path of the folder where emissions ' - 'output should be generated. If you do not wish to generate ' - 'emissions, set the convert_to_csv parameter to False.') - - # used to store - info_dict = { - "returns": [], - "velocities": [], - "outflows": [], - } - info_dict.update({ - key: [] for key in self.custom_callables.keys() - }) - - if rl_actions is None: - def rl_actions(*_): - return None - - # time profiling information - t = time.time() - times = [] - extra_info = extra_init() - source_id = uuid.uuid4().hex - - for i in range(num_runs): - ret = 0 - vel = [] - custom_vals = {key: [] for key in self.custom_callables.keys()} - state = self.env.reset() - for j in range(num_steps): - t0 = time.time() - state, reward, done, _ = self.env.step(rl_actions(state)) - t1 = time.time() - times.append(1 / (t1 - t0)) - - # Compute the velocity speeds and cumulative returns. - veh_ids = self.env.k.vehicle.get_ids() - vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) - ret += reward - - # collect additional information for the data pipeline - get_extra_info(self.env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) - - # Compute the results for the custom callables. - for (key, lambda_func) in self.custom_callables.items(): - custom_vals[key].append(lambda_func(self.env)) - - if done: - break - - # Store the information from the run in info_dict. - outflow = self.env.k.vehicle.get_outflow_rate(int(500)) - info_dict["returns"].append(ret) - info_dict["velocities"].append(np.mean(vel)) - info_dict["outflows"].append(outflow) - for key in custom_vals.keys(): - info_dict[key].append(np.mean(custom_vals[key])) - - print("Round {0}, return: {1}".format(i, ret)) - - # Print the averages/std for all variables in the info_dict. 
- for key in info_dict.keys(): - print("Average, std {}: {}, {}".format( - key, np.mean(info_dict[key]), np.std(info_dict[key]))) - - print("Total time:", time.time() - t) - print("steps/second:", np.mean(times)) - self.env.terminate() - - if convert_to_csv and self.env.simulator == "traci": - # wait a short period of time to ensure the xml file is readable - time.sleep(0.1) - - # collect the location of the emission file - dir_path = self.env.sim_params.emission_path - emission_filename = \ - "{0}-emission.xml".format(self.env.network.name) - emission_path = os.path.join(dir_path, emission_filename) - - # convert the emission file into a csv - emission_to_csv(emission_path) - - # Delete the .xml version of the emission file. - os.remove(emission_path) - - trajectory_table_path = './data/' + source_id + ".csv" - upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) - - if partition_name: - if partition_name == "default": - partition_name = source_id[0:3] - partition_name = date.today().isoformat() + " " + partition_name - upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', - upload_file_path, str(only_query)[2:-2]) - - # delete the S3-only version of the trajectory file - os.remove(upload_file_path) - - return info_dict +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info import datetime import logging import time from datetime import date import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. 
The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = extra_init() source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline get_extra_info(self.env.k.vehicle, extra_info, veh_ids) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. 
for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: if partition_name == "default": partition_name = source_id[0:3] partition_name = date.today().isoformat() + " " + partition_name upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 647ef37fe..16331ad08 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -701,6 +701,10 @@ def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError + def get_2D_position(self, veh_id, error=-1001): + """ see traci class """ + raise NotImplementedError + def get_accel_without_noise(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError From aa14dbf247bbe5610d4f3741ed81581152596293 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 22 Apr 2020 05:22:01 -0700 Subject: [PATCH 156/438] added more support for lambda function --- examples/data_pipeline.py | 28 ++++++++++++++++++++++++++-- examples/lambda_function.py | 26 ++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 examples/lambda_function.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 9d56548c2..28d3b5e73 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -39,6 +39,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based only on flow output + + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local 
path of the outputted csv file that should be used for + upload to s3 only, it does not the human readable column names and + will be deleted after uploading to s3. A copy of this file with all + the column name will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -47,7 +65,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): return upload_only_file_path -def upload_to_s3(bucket_name, bucket_key, file_path): +def upload_to_s3(bucket_name, bucket_key, file_path, only_query): """ upload a file to S3 bucket Parameters @@ -58,9 +76,15 @@ def upload_to_s3(bucket_name, bucket_key, file_path): the key within the bucket for the file file_path: str the path of the file to be uploaded + only_query: str + specify which query should be run on this file by lambda: + if empty: run none of them + if "all": run all available analysis query + if a string of list of queries: run only those mentioned in the list """ s3 = boto3.resource("s3") - s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + s3.Bucket(bucket_name).upload_file(file_path, bucket_key, + ExtraArgs={"Metadata": {"run-query": only_query}}) return diff --git a/examples/lambda_function.py b/examples/lambda_function.py new file mode 100644 index 000000000..01ce1512a --- /dev/null +++ b/examples/lambda_function.py @@ -0,0 +1,26 @@ +import boto3 +from urllib.parse import unquote_plus +from examples.data_pipeline import AthenaQuery +from examples.query import tags + +s3 = boto3.client('s3') +queryEngine = AthenaQuery() + + +def lambda_handler(event, context): + for record in event['Records']: + bucket = record['s3']['bucket']['name'] + key = unquote_plus(record['s3']['object']['key']) + partition = key.split('/')[-2].split('=')[-1] + response = s3.head_object(Bucket=bucket, Key=key) + run_query = response["Metadata"]["run-query"] + + if bucket == 'brent.experiments' and 'trajectory-output/' in key: + if run_query == "all": + query_list = tags["analysis"] + elif not run_query: + break + else: + query_list = run_query.split("\', \'") + for query_name in query_list: + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file From 00a526b43f8ee069c768b27629233b074ca60260 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 02:54:33 -0700 Subject: [PATCH 157/438] fix windoes line ending issue with experiment.py --- flow/core/experiment.py | 229 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 228 insertions(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 97467adb5..2296ef635 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1,228 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info import datetime import logging import time from datetime import date import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) 
# see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. 
only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = extra_init() source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline get_extra_info(self.env.k.vehicle, extra_info, veh_ids) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: if partition_name == "default": partition_name = source_id[0:3] partition_name = date.today().isoformat() + " " + partition_name upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" +from flow.core.util import emission_to_csv +from flow.utils.registry import make_create_env +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info +import datetime +import logging +import time +from datetime import date +import os +import numpy as np +import uuid + + +class Experiment: + """ + Class for systematically running simulations in any supported simulator. + + This class acts as a runner for a network and environment. In order to use + it to run an network and environment in the absence of a method specifying + the actions of RL agents in the network, type the following: + + >>> from flow.envs import Env + >>> flow_params = dict(...) # see the examples in exp_config + >>> exp = Experiment(flow_params) # for some experiment configuration + >>> exp.run(num_runs=1) + + If you wish to specify the actions of RL agents in the network, this may be + done as follows: + + >>> rl_actions = lambda state: 0 # replace with something appropriate + >>> exp.run(num_runs=1, rl_actions=rl_actions) + + Finally, if you would like to like to plot and visualize your results, this + class can generate csv files from emission files produced by sumo. These + files will contain the speeds, positions, edges, etc... of every vehicle + in the network at every time step. + + In order to ensure that the simulator constructs an emission file, set the + ``emission_path`` attribute in ``SimParams`` to some path. + + >>> from flow.core.params import SimParams + >>> flow_params['sim'] = SimParams(emission_path="./data") + + Once you have included this in your environment, run your Experiment object + as follows: + + >>> exp.run(num_runs=1, convert_to_csv=True) + + After the experiment is complete, look at the "./data" directory. There + will be two files, one with the suffix .xml and another with the suffix + .csv. The latter should be easily interpretable from any csv reader (e.g. + Excel), and can be parsed using tools such as numpy and pandas. + + Attributes + ---------- + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we want + to extract from the environment. The lambda will be called at each step + to extract information from the env and it will be stored in a dict + keyed by the str. + env : flow.envs.Env + the environment object the simulator will run + """ + + def __init__(self, flow_params, custom_callables=None): + """Instantiate the Experiment class. + + Parameters + ---------- + flow_params : dict + flow-specific parameters + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we + want to extract from the environment. 
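The custom_callables hook documented above is the intended extension point for pulling per-step metrics out of the environment without modifying Experiment itself. As a minimal usage sketch, assuming a flow_params dict has already been built as in the exp_configs (the metric name "avg_speed" is illustrative):

    import numpy as np
    from flow.core.experiment import Experiment

    custom_callables = {
        # called once per step; run() averages the per-step values over
        # each run and stores them in the returned info_dict
        "avg_speed": lambda env: np.mean(
            env.k.vehicle.get_speed(env.k.vehicle.get_ids())),
    }

    exp = Experiment(flow_params, custom_callables=custom_callables)
    info_dict = exp.run(num_runs=1)
    print(info_dict["avg_speed"])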
The lambda will be called at + each step to extract information from the env and it will be stored + in a dict keyed by the str. + """ + self.custom_callables = custom_callables or {} + + # Get the env name and a creator for the environment. + create_env, _ = make_create_env(flow_params) + + # Create the environment. + self.env = create_env() + + logging.info(" Starting experiment {} at {}".format( + self.env.network.name, str(datetime.datetime.utcnow()))) + + logging.info("Initializing environment.") + + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): + """Run the given network for a set number of runs. + + Parameters + ---------- + num_runs : int + number of runs the experiment should perform + rl_actions : method, optional + maps states to actions to be performed by the RL agents (if + there are any) + convert_to_csv : bool + Specifies whether to convert the emission file created by sumo + into a csv file + partition_name: str + Specifies the S3 partition you want to store the output file, + will be used to later for query. If NONE, won't upload output + to S3. + only_query: str + Specifies whether queries should be automatically run the + simulation data when it gets uploaded to s3 + + Returns + ------- + info_dict : dict < str, Any > + contains returns, average speed per step + """ + num_steps = self.env.env_params.horizon + + # raise an error if convert_to_csv is set to True but no emission + # file will be generated, to avoid getting an error at the end of the + # simulation + if convert_to_csv and self.env.sim_params.emission_path is None: + raise ValueError( + 'The experiment was run with convert_to_csv set ' + 'to True, but no emission file will be generated. If you wish ' + 'to generate an emission file, you should set the parameter ' + 'emission_path in the simulation parameters (SumoParams or ' + 'AimsunParams) to the path of the folder where emissions ' + 'output should be generated. If you do not wish to generate ' + 'emissions, set the convert_to_csv parameter to False.') + + # used to store + info_dict = { + "returns": [], + "velocities": [], + "outflows": [], + } + info_dict.update({ + key: [] for key in self.custom_callables.keys() + }) + + if rl_actions is None: + def rl_actions(*_): + return None + + # time profiling information + t = time.time() + times = [] + extra_info = extra_init() + source_id = uuid.uuid4().hex + + for i in range(num_runs): + ret = 0 + vel = [] + custom_vals = {key: [] for key in self.custom_callables.keys()} + state = self.env.reset() + for j in range(num_steps): + t0 = time.time() + state, reward, done, _ = self.env.step(rl_actions(state)) + t1 = time.time() + times.append(1 / (t1 - t0)) + + # Compute the velocity speeds and cumulative returns. + veh_ids = self.env.k.vehicle.get_ids() + vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) + ret += reward + + # collect additional information for the data pipeline + get_extra_info(self.env.k.vehicle, extra_info, veh_ids) + extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) + + # Compute the results for the custom callables. + for (key, lambda_func) in self.custom_callables.items(): + custom_vals[key].append(lambda_func(self.env)) + + if done: + break + + # Store the information from the run in info_dict. 
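extra_init and get_extra_info are imported from flow.data_pipeline.data_pipeline, but their bodies are not part of this series. From the call sites in the loop above (a fresh column store created before the runs, one call per step with the vehicle kernel and the current veh_ids, and a source_id column extended by the caller), a plausible sketch is the following; the column set is an assumption, and only kernel methods that appear elsewhere in these patches are used:

    def extra_init():
        # empty column store for the trajectory table; the real helper
        # presumably also tracks a "time" column, whose source is not
        # shown in this series
        return {"id": [], "x": [], "y": [], "speed": [],
                "accel_without_noise": [], "source_id": []}


    def get_extra_info(vehicle_kernel, extra_info, veh_ids):
        # append one row per vehicle for the current step
        for veh_id in veh_ids:
            x, y = vehicle_kernel.get_2d_position(veh_id)
            extra_info["id"].append(veh_id)
            extra_info["x"].append(x)
            extra_info["y"].append(y)
            extra_info["speed"].append(vehicle_kernel.get_speed(veh_id))
            extra_info["accel_without_noise"].append(
                vehicle_kernel.get_accel_without_noise(veh_id))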
+ outflow = self.env.k.vehicle.get_outflow_rate(int(500)) + info_dict["returns"].append(ret) + info_dict["velocities"].append(np.mean(vel)) + info_dict["outflows"].append(outflow) + for key in custom_vals.keys(): + info_dict[key].append(np.mean(custom_vals[key])) + + print("Round {0}, return: {1}".format(i, ret)) + + # Print the averages/std for all variables in the info_dict. + for key in info_dict.keys(): + print("Average, std {}: {}, {}".format( + key, np.mean(info_dict[key]), np.std(info_dict[key]))) + + print("Total time:", time.time() - t) + print("steps/second:", np.mean(times)) + self.env.terminate() + + if convert_to_csv and self.env.simulator == "traci": + # wait a short period of time to ensure the xml file is readable + time.sleep(0.1) + + # collect the location of the emission file + dir_path = self.env.sim_params.emission_path + emission_filename = \ + "{0}-emission.xml".format(self.env.network.name) + emission_path = os.path.join(dir_path, emission_filename) + + # convert the emission file into a csv + emission_to_csv(emission_path) + + # Delete the .xml version of the emission file. + os.remove(emission_path) + + trajectory_table_path = './data/' + source_id + ".csv" + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) + + if partition_name: + if partition_name == "default": + partition_name = source_id[0:3] + partition_name = date.today().isoformat() + " " + partition_name + upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_file_path, str(only_query)[2:-2]) + + # delete the S3-only version of the trajectory file + os.remove(upload_file_path) + + return info_dict From de35f9009e9de0c75de7ba4c1eccdccac794e877 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:35:54 -0700 Subject: [PATCH 158/438] fix style issue --- examples/data_pipeline.py | 113 ++++++++++++++++++++----------- examples/datapipeline_test.py | 4 ++ examples/lambda_function.py | 10 +++ examples/query.py | 11 ++- examples/run_query.py | 1 + flow/core/kernel/vehicle/base.py | 2 +- 6 files changed, 98 insertions(+), 43 deletions(-) diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 28d3b5e73..03b0f87e5 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,3 +1,4 @@ +"""contains class and helper functions for the data pipeline.""" import pandas as pd import numpy as np import boto3 @@ -7,21 +8,21 @@ def generate_trajectory_table(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based on standard SUMO emission + """Generate desired output for the trajectory_table based on standard SUMO emission. 
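Putting the pieces of run() together, a typical invocation that exercises the whole pipeline (emission csv, trajectory table, S3 upload) might look as follows. flow_params is assumed to be defined as in the exp_configs; note that although the docstring types only_query as a str, the str(only_query)[2:-2] slicing in the upload call expects a list of query names:

    from flow.core.params import SumoParams
    from flow.core.experiment import Experiment

    # an emission path is required whenever convert_to_csv is True
    flow_params['sim'] = SumoParams(sim_step=0.5, emission_path="./data")

    exp = Experiment(flow_params)
    # partition_name="default" is rewritten before upload to
    # "<today's ISO date> <first three chars of the source id>"
    info_dict = exp.run(
        num_runs=1,
        convert_to_csv=True,
        partition_name="default",
        only_query=["POWER_DEMAND_MODEL"],
    )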
- Parameters - ---------- - data_path : str - path to the standard SUMO emission - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file - """ + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ raw_output = pd.read_csv(data_path, index_col=["time", "id"]) required_cols = {"time", "id", "speed", "x", "y"} raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) @@ -39,24 +40,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based only on flow output - - Parameters - ---------- - data_path : str - output file path - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file that should be used for - upload to s3 only, it does not the human readable column names and - will be deleted after uploading to s3. A copy of this file with all - the column name will remain in the ./data folder - """ + """Generate desired output for the trajectory_table based only on flow output. + + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file that should be used for + upload to s3 only, it does not the human readable column names and + will be deleted after uploading to s3. A copy of this file with all + the column name will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -66,7 +67,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): def upload_to_s3(bucket_name, bucket_key, file_path, only_query): - """ upload a file to S3 bucket + """Upload a file to S3 bucket. Parameters ---------- @@ -89,15 +90,40 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): class AthenaQuery: + """ + Class used to run query. + + Act as a query engine, maintains an open session with AWS Athena. + + Attributes + ---------- + MAX_WAIT: int + maximum number of seconds to wait before declares time-out + client: boto3.client + the athena client that is used to run the query + existing_partitions: list + a list of partitions that is already recorded in Athena's datalog, + this is obtained through query at the initialization of this class + instance. + """ def __init__(self): + """Initialize AthenaQuery instance. + + initialize a client session with AWS Athena, + query Athena to obtain extisting_partition. 
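The ExtraArgs metadata attached in upload_to_s3 is what couples the uploader to the lambda function: the query list travels with the object and is read back with head_object on the other side. A self-contained sketch of that round trip, with placeholder bucket, key and file names:

    import boto3

    s3 = boto3.client("s3")
    bucket = "circles.data"
    key = "trajectory-output/partition_name=demo/demo.csv"

    # attach the run-query metadata at upload time ...
    s3.upload_file("local.csv", bucket, key,
                   ExtraArgs={"Metadata": {"run-query": "POWER_DEMAND_MODEL"}})

    # ... and read it back the way the lambda handler does
    response = s3.head_object(Bucket=bucket, Key=key)
    print(response["Metadata"]["run-query"])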
+ """ self.MAX_WAIT = 60 self.client = boto3.client("athena") self.existing_partitions = self.get_existing_partitions() def get_existing_partitions(self): - """prints the existing partitions in the S3 bucket""" + """Return the existing partitions in the S3 bucket. + Returns + ------- + partitions: a list of existing partitions on S3 bucket + """ response = self.client.start_query_execution( QueryString='SHOW PARTITIONS trajectory_table', QueryExecutionContext={ @@ -114,7 +140,7 @@ def get_existing_partitions(self): return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] def check_status(self, execution_id): - """ Return the status of the execution with given id + """Return the status of the execution with given id. Parameters ---------- @@ -125,14 +151,13 @@ def check_status(self, execution_id): status: str QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED """ - response = self.client.get_query_execution( QueryExecutionId=execution_id ) return response['QueryExecution']['Status']['State'] def wait_for_execution(self, execution_id): - """ wait for the execution to finish or time-out + """Wait for the execution to finish or time-out. Parameters ---------- @@ -156,7 +181,7 @@ def wait_for_execution(self, execution_id): return True def update_partition(self, partition): - """ load the given partition to the trajectory_table on Athena + """Load the given partition to the trajectory_table on Athena. Parameters ---------- @@ -176,7 +201,7 @@ def update_partition(self, partition): return def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): - """ start the execution of a query, does not wait for it to finish + """Start the execution of a query, does not wait for it to finish. Parameters ---------- @@ -218,6 +243,16 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re def test_sql_query(query_name): + """Start the execution of a query, does not wait for it to finish. 
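Taken together, the methods above support a small synchronous workflow on top of the fire-and-forget run_query. A sketch, using the query, result location and partition values that appear elsewhere in this series:

    engine = AthenaQuery()

    execution_id = engine.run_query(
        "POWER_DEMAND_MODEL",
        result_location="s3://brent.experiments/query-result/",
        partition="default",
    )

    # run_query returns immediately; block until Athena finishes, or
    # give up after engine.MAX_WAIT seconds
    if engine.wait_for_execution(execution_id):
        raise RuntimeError("query timed out")
    print(engine.check_status(execution_id))  # SUCCEEDED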
+ + Parameters + ---------- + query_name : str + name of the query in QueryStrings enum that will be tested + Raises + ------ + RuntimeError: if timeout + """ if query_name not in testing_functions: raise ValueError("no tests supported for this query") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py index 564060d3b..ae0ea382f 100644 --- a/examples/datapipeline_test.py +++ b/examples/datapipeline_test.py @@ -1,3 +1,4 @@ +"""functions that calculates the expected result for testing.""" import math # Vehicle Mass @@ -17,10 +18,12 @@ def heavyside(inp): + """Return 1 if input is positive.""" return 0 if inp <= 0 else 1 def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + """Calculate the expected power for POWER_DEMAND_MODEL query.""" acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) rolling_friction = M * g * C_r * mu @@ -30,4 +33,5 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): + """Apply the power calculation to a row of the dataframe.""" return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/lambda_function.py b/examples/lambda_function.py index 01ce1512a..4f7937c85 100644 --- a/examples/lambda_function.py +++ b/examples/lambda_function.py @@ -1,3 +1,4 @@ +"""lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus from examples.data_pipeline import AthenaQuery @@ -8,6 +9,15 @@ def lambda_handler(event, context): + """Invoke by AWS Lambda upon triggered by an event. + + Parameters + ---------- + event : dic < str: dic > + an S3 event + context: + not used + """ for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) diff --git a/examples/query.py b/examples/query.py index 6354cec3b..0f0ee13b4 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,15 +1,20 @@ +"""stores all the pre-defined query strings.""" from enum import Enum from examples.datapipeline_test import apply_energy_one -tags = {"energy": ["ENERGY_ONE"]} +# tags for different queries +tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} -testing_functions = {"ENERGY_ONE": apply_energy_one} +# specify the function to calculate the expected result of each query +testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} class QueryStrings(Enum): + """An enumeration of all the pre-defined query strings.""" + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ diff --git a/examples/run_query.py b/examples/run_query.py index ea8839b09..64baa6656 100644 --- a/examples/run_query.py +++ b/examples/run_query.py @@ -1,3 +1,4 @@ +"""runner script for invoking query manually.""" import argparse from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings diff --git a/flow/core/kernel/vehicle/base.py 
b/flow/core/kernel/vehicle/base.py index 16331ad08..20a11cf99 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -702,7 +702,7 @@ def get_2d_position(self, veh_id, error=-1001): raise NotImplementedError def get_2D_position(self, veh_id, error=-1001): - """ see traci class """ + """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError def get_accel_without_noise(self, veh_id): From 979d0476fbd2e3308d4bc75f0fc3576306ae6ad5 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:38:47 -0700 Subject: [PATCH 159/438] reorganized file locations --- examples/data_pipeline.py | 285 ------------------ examples/lambda_function.py | 36 --- examples/query.py | 22 -- examples/run_query.py | 37 --- .../data_pipeline}/datapipeline_test.py | 0 5 files changed, 380 deletions(-) delete mode 100644 examples/data_pipeline.py delete mode 100644 examples/lambda_function.py delete mode 100644 examples/query.py delete mode 100644 examples/run_query.py rename {examples => flow/data_pipeline}/datapipeline_test.py (100%) diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py deleted file mode 100644 index 03b0f87e5..000000000 --- a/examples/data_pipeline.py +++ /dev/null @@ -1,285 +0,0 @@ -"""contains class and helper functions for the data pipeline.""" -import pandas as pd -import numpy as np -import boto3 -from botocore.exceptions import ClientError -from examples.query import QueryStrings, testing_functions -from time import time - - -def generate_trajectory_table(data_path, extra_info, partition_name): - """Generate desired output for the trajectory_table based on standard SUMO emission. - - Parameters - ---------- - data_path : str - path to the standard SUMO emission - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file - """ - raw_output = pd.read_csv(data_path, index_col=["time", "id"]) - required_cols = {"time", "id", "speed", "x", "y"} - raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) - - extra_info = pd.DataFrame.from_dict(extra_info) - extra_info.set_index(["time", "id"]) - raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) - - # add the partition column - # raw_output['partition'] = partition_name - raw_output = raw_output.sort_values(by=["time", "id"]) - output_file_path = data_path[:-4]+"_trajectory.csv" - raw_output.to_csv(output_file_path, index=False) - return output_file_path - - -def generate_trajectory_from_flow(data_path, extra_info, partition_name): - """Generate desired output for the trajectory_table based only on flow output. - - Parameters - ---------- - data_path : str - output file path - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file that should be used for - upload to s3 only, it does not the human readable column names and - will be deleted after uploading to s3. 
A copy of this file with all - the column name will remain in the ./data folder - """ - extra_info = pd.DataFrame.from_dict(extra_info) - # extra_info["partition"] = partition_name - extra_info.to_csv(data_path, index=False) - upload_only_file_path = data_path[:-4] + "_upload" + ".csv" - extra_info.to_csv(upload_only_file_path, index=False, header=False) - return upload_only_file_path - - -def upload_to_s3(bucket_name, bucket_key, file_path, only_query): - """Upload a file to S3 bucket. - - Parameters - ---------- - bucket_name : str - the bucket to upload to - bucket_key: str - the key within the bucket for the file - file_path: str - the path of the file to be uploaded - only_query: str - specify which query should be run on this file by lambda: - if empty: run none of them - if "all": run all available analysis query - if a string of list of queries: run only those mentioned in the list - """ - s3 = boto3.resource("s3") - s3.Bucket(bucket_name).upload_file(file_path, bucket_key, - ExtraArgs={"Metadata": {"run-query": only_query}}) - return - - -class AthenaQuery: - """ - Class used to run query. - - Act as a query engine, maintains an open session with AWS Athena. - - Attributes - ---------- - MAX_WAIT: int - maximum number of seconds to wait before declares time-out - client: boto3.client - the athena client that is used to run the query - existing_partitions: list - a list of partitions that is already recorded in Athena's datalog, - this is obtained through query at the initialization of this class - instance. - """ - - def __init__(self): - """Initialize AthenaQuery instance. - - initialize a client session with AWS Athena, - query Athena to obtain extisting_partition. - """ - self.MAX_WAIT = 60 - self.client = boto3.client("athena") - self.existing_partitions = self.get_existing_partitions() - - def get_existing_partitions(self): - """Return the existing partitions in the S3 bucket. - - Returns - ------- - partitions: a list of existing partitions on S3 bucket - """ - response = self.client.start_query_execution( - QueryString='SHOW PARTITIONS trajectory_table', - QueryExecutionContext={ - 'Database': 'simulation' - }, - WorkGroup='primary' - ) - if self.wait_for_execution(response['QueryExecutionId']): - raise RuntimeError("get current partitions timed out") - response = self.client.get_query_results( - QueryExecutionId=response['QueryExecutionId'], - MaxResults=1000 - ) - return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] - - def check_status(self, execution_id): - """Return the status of the execution with given id. - - Parameters - ---------- - execution_id : string - id of the execution that is checked for - Returns - ------- - status: str - QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED - """ - response = self.client.get_query_execution( - QueryExecutionId=execution_id - ) - return response['QueryExecution']['Status']['State'] - - def wait_for_execution(self, execution_id): - """Wait for the execution to finish or time-out. 
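Further down in this relocated module, update_partition wraps the same start_query_execution call around a Hive DDL statement, so the whole partition bookkeeping reduces to SHOW PARTITIONS at start-up plus an idempotent ALTER TABLE before any query that touches an unseen partition. Stated on its own with plain boto3 (same database, table and work group as above; the partition value is a placeholder):

    import boto3

    client = boto3.client("athena")

    # ADD IF NOT EXISTS makes the registration idempotent, so repeated
    # uploads to the same partition stay harmless
    response = client.start_query_execution(
        QueryString="ALTER TABLE trajectory_table ADD IF NOT EXISTS "
                    "PARTITION (partition_name='demo')",
        QueryExecutionContext={"Database": "simulation"},
        WorkGroup="primary",
    )
    print(response["QueryExecutionId"])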
diff --git a/examples/lambda_function.py b/examples/lambda_function.py
deleted file mode 100644
index 4f7937c85..000000000
--- a/examples/lambda_function.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""lambda function on AWS Lambda."""
-import boto3
-from urllib.parse import unquote_plus
-from examples.data_pipeline import AthenaQuery
-from examples.query import tags
-
-s3 = boto3.client('s3')
-queryEngine = AthenaQuery()
-
-
-def lambda_handler(event, context):
-    """Invoked by AWS Lambda when triggered by an S3 event.
-
-    Parameters
-    ----------
-    event : dict < str: dict >
-        an S3 event
-    context:
-        not used
-    """
-    for record in event['Records']:
-        bucket = record['s3']['bucket']['name']
-        key = unquote_plus(record['s3']['object']['key'])
-        partition = key.split('/')[-2].split('=')[-1]
-        response = s3.head_object(Bucket=bucket, Key=key)
-        run_query = response["Metadata"]["run-query"]
-
-        if bucket == 'brent.experiments' and 'trajectory-output/' in key:
-            if run_query == "all":
-                query_list = tags["analysis"]
-            elif not run_query:
-                break
-            else:
-                query_list = run_query.split("\', \'")
-            for query_name in query_list:
-                queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition)
\ No newline at end of file
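The handler above reads only a few fields of the standard S3 put-event payload. A minimal sketch of
that shape (bucket and key values are hypothetical) showing where the partition name comes from:

    # minimal S3 event containing only the fields lambda_handler actually reads
    event = {
        "Records": [{
            "s3": {
                "bucket": {"name": "brent.experiments"},
                "object": {"key": "trajectory-output/partition_name=test/output.csv"}
            }
        }]
    }
    # key.split('/')[-2].split('=')[-1] then yields the partition name, "test"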
diff --git a/examples/query.py b/examples/query.py
deleted file mode 100644
index 0f0ee13b4..000000000
--- a/examples/query.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""stores all the pre-defined query strings."""
-from enum import Enum
-from examples.datapipeline_test import apply_energy_one
-
-# tags for different queries
-tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]}
-
-# specify the function to calculate the expected result of each query
-testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one}
-
-
-class QueryStrings(Enum):
-    """An enumeration of all the pre-defined query strings."""
-
-    SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;"
-    UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');"
-    POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \
-                         "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \
-                         "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \
-                         "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \
-                         "FROM trajectory_table " \
-                         "WHERE partition_name=\'{partition}\'"
\ No newline at end of file
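These enum members are plain format strings; elsewhere in the pipeline they are expanded with
QueryStrings[...].value.format(...), as in update_partition above. A small illustration (the
partition name is hypothetical):

    from examples.query import QueryStrings  # flow.data_pipeline.query after this move

    sql = QueryStrings['UPDATE_PARTITION'].value.format(partition='test')
    # -> "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name='test');"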
diff --git a/examples/run_query.py b/examples/run_query.py
deleted file mode 100644
index 64baa6656..000000000
--- a/examples/run_query.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""runner script for invoking query manually."""
-import argparse
-from examples.data_pipeline import AthenaQuery, test_sql_query
-from examples.query import QueryStrings
-
-parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to "
-                                                               "a S3 location")
-parser.add_argument("--run", type=str, nargs="+")
-parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/")
-parser.add_argument("--partition", type=str, nargs='?', default="default")
-parser.add_argument("--list_partitions", action="store_true")
-parser.add_argument("--check_status", type=str, nargs='+')
-parser.add_argument("--list_queries", action="store_true")
-parser.add_argument("--test_query", nargs=1)
-
-
-if __name__ == "__main__":
-    args = parser.parse_args()
-    queryEngine = AthenaQuery()
-
-    if args.run:
-        execution_ids = []
-        for query_name in args.run:
-            execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition))
-        print(execution_ids)
-    if args.list_partitions:
-        print(queryEngine.existing_partitions)
-    if args.check_status:
-        status = dict()
-        for execution_id in args.check_status:
-            status[execution_id] = queryEngine.check_status(execution_id)
-        print(status)
-    if args.list_queries:
-        for q in QueryStrings:
-            print(q)
-    if args.test_query:
-        test_sql_query(args.test_query[0])
\ No newline at end of file
diff --git a/examples/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py
similarity index 100%
rename from examples/datapipeline_test.py
rename to flow/data_pipeline/datapipeline_test.py
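After this reorganization the pipeline modules live under flow.data_pipeline, so callers switch
their imports accordingly. A sketch of the before/after (the exact symbols imported vary by caller;
these paths match the ones the later patches below use):

    # before: from examples.data_pipeline import AthenaQuery, upload_to_s3
    from flow.data_pipeline.data_pipeline import AthenaQuery, upload_to_s3
    from flow.data_pipeline.query import QueryStrings, tags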
From fdd983eb19b7a4acd75b9101568dfa8441c86294 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Thu, 23 Apr 2020 12:58:44 -0700
Subject: [PATCH 160/438] fix some more style issues

---
 flow/data_pipeline/datapipeline_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py
index ae0ea382f..0e1a50518 100644
--- a/flow/data_pipeline/datapipeline_test.py
+++ b/flow/data_pipeline/datapipeline_test.py
@@ -34,4 +34,4 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_

 def apply_energy_one(row):
     """Apply the power calculation to a row of the dataframe."""
-    return [row[0], row[1], calculate_power(row[4], row[6])]
\ No newline at end of file
+    return [row[0], row[1], calculate_power(row[4], row[6])]

From 6af7e02c86ddfbce78851d2c85a2042ae3b9ea6c Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Tue, 19 May 2020 04:01:41 -0700
Subject: [PATCH 161/438] added auto upload to s3 feature for the replay
 script and fixed some other minor issues

---
 flow/core/experiment.py               | 15 ++---
 flow/core/kernel/vehicle/traci.py     |  5 +-
 flow/data_pipeline/data_pipeline.py   | 12 ++--
 flow/data_pipeline/lambda_function.py |  4 +-
 flow/data_pipeline/query.py           | 86 ++++++++++++++------------
 flow/data_pipeline/run_query.py       |  2 +-
 flow/visualize/i210_replay.py         | 21 ++++++-
 7 files changed, 82 insertions(+), 63 deletions(-)

diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index 2296ef635..84a34d0e3 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -88,7 +88,7 @@ def __init__(self, flow_params, custom_callables=None):

         logging.info("Initializing environment.")

-    def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None):
+    def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=""):
         """Run the given network for a set number of runs.

         Parameters
@@ -106,8 +106,9 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=No
             will be used to later for query. If NONE, won't upload output to S3.
         only_query: str
-            Specifies whether queries should be automatically run the
-            simulation data when it gets uploaded to s3
+            Specifies which queries should be automatically run when the
+            simulation data gets uploaded to S3. If an empty str is passed in,
+            then no queries will be run on this data.

         Returns
         -------
@@ -147,7 +148,7 @@ def rl_actions(*_):
         t = time.time()
         times = []
         extra_info = extra_init()
-        source_id = uuid.uuid4().hex
+        source_id = 'flow_{}'.format(uuid.uuid4().hex)

         for i in range(num_runs):
             ret = 0
@@ -167,7 +168,7 @@
                 # collect additional information for the data pipeline
                 get_extra_info(self.env.k.vehicle, extra_info, veh_ids)
-                extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids))
+                extra_info["source_id"].extend(['{}_run_{}'.format(source_id, i)] * len(veh_ids))

                 # Compute the results for the custom callables.
for (key, lambda_func) in self.custom_callables.items(): @@ -218,8 +219,8 @@ def rl_actions(*_): if partition_name == "default": partition_name = source_id[0:3] partition_name = date.today().isoformat() + " " + partition_name - upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_to_s3('circles.data.pipeline', 'trajectory-output/partition_name={}/{}.csv'.format( + partition_name, upload_file_path.split('/')[-1].split('_')[0]), upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index f40eed99c..824ec4b0c 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -970,7 +970,6 @@ def apply_acceleration(self, veh_ids, acc): self.__vehicles[vid]["accel"] = acc[i] this_vel = self.get_speed(vid) next_vel = max([this_vel + acc[i] * self.sim_step, 0]) - #self.__vehicles[vid]["next_v"] = next_vel self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3) def apply_lane_change(self, veh_ids, direction): @@ -1135,7 +1134,7 @@ def set_max_speed(self, veh_id, max_speed): # add for data pipeline def get_accel(self, veh_id): """See parent class.""" - if not "accel" in self.__vehicles[veh_id]: + if "accel" not in self.__vehicles[veh_id]: self.__vehicles[veh_id]["accel"] = None return self.__vehicles[veh_id]["accel"] @@ -1145,7 +1144,7 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): """See parent class.""" - if not "accel_without_noise" in self.__vehicles[veh_id]: + if "accel_without_noise" not in self.__vehicles[veh_id]: self.__vehicles[veh_id]["accel_without_noise"] = None return self.__vehicles[veh_id]["accel_without_noise"] diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 0cd0cbc79..fbd975c5e 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -38,7 +38,7 @@ def generate_trajectory_table(data_path, extra_info, partition_name): return output_file_path -def generate_trajectory_from_flow(data_path, extra_info, partition_name): +def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): """Generate desired output for the trajectory_table based only on flow output. Parameters @@ -227,7 +227,7 @@ def update_partition(self, partition): self.existing_partitions.append(partition) return - def run_query(self, query_name, result_location="s3://circles.data/query-result/", partition="default"): + def run_query(self, query_name, result_location="s3://circles.data.pipeline/query-result/", partition="default"): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -285,15 +285,15 @@ def test_sql_query(query_name): # Run the respective sql query queryEngine = AthenaQuery() - execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data/query-result/query-test", - partition="test") + execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data.pipeline/" + "query-result/query-test", partition="test") if queryEngine.wait_for_execution(execution_id): raise RuntimeError("execution timed out") # get the Athena query result from S3 s3 = boto3.resource("s3") - s3.Bucket("circles.data").download_file("query-result/query-test/"+execution_id+".csv", - "data/athena_result.csv") + s3.Bucket("circles.data.pipeline").download_file("query-result/query-test/"+execution_id+".csv", + "data/athena_result.csv") athena_result = pd.read_csv("data/athena_result.csv") athena_result = athena_result.sort_values(by=["time", "id"]) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 3f0abb8a1..fd50ba8f5 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -25,7 +25,7 @@ def lambda_handler(event, context): response = s3.head_object(Bucket=bucket, Key=key) run_query = response["Metadata"]["run-query"] - if bucket == 'circles.data' and 'trajectory-output/' in key: + if bucket == 'circles.data.pipeline' and 'trajectory-output/' in key: if run_query == "all": query_list = tags["analysis"] elif not run_query: @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://circles.data/query-result/auto/', partition) + queryEngine.run_query(query_name, 's3://circles.data.pipeline/query-result/auto/', partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 9054364e6..7b8cf70c8 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -15,45 +15,47 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = "SELECT id, time, speed, acceleration, 1200 * speed * " \ - "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL' AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\' " \ - "ORDER BY id, time " - POWER_DEMAND_MODEL_DENOISED_ACCEL = \ - "SELECT id, time, speed, accel_without_noise, 1200 * speed * " \ - "((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\' " \ - "ORDER BY id, time " - POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = \ - "WITH sub1 AS ( " \ - "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ - "time - LAG(time, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, " \ - "LAG(speed, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, " \ - 
"LAG(acceleration, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, " \ - "LAG(accel_without_noise, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised "\ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" \ - ")," \ - "sub2 AS (" \ - "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ - "prev_speed+accel_without_noise*sim_step AS speed_denoised " \ - "FROM sub1" \ - ") " \ - "SELECT id, time, speed_denoised, accel_without_noise, " \ - "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ - "THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 " \ - "* SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 " \ - "* 2.6 * 0.3 * POW(speed_denoised,3) AS power, " \ - "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id " \ - "FROM sub2 " \ - "ORDER BY id, time " + POWER_DEMAND_MODEL = """ + SELECT id, time, speed, acceleration, 1200 * speed * + ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 + + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 + * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time """ + POWER_DEMAND_MODEL_DENOISED_ACCEL = """ + SELECT id, time, speed, accel_without_noise, + 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) + * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) + + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time """ + POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ + WITH lagged_timestep AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + time - LAG(time, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + LAG(speed, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, + LAG(acceleration, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, + LAG(accel_without_noise, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised + FROM trajectory_table + WHERE partition_name=\'{partition}\'), + speed_denoised_table AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + prev_speed+accel_without_noise*sim_step AS speed_denoised + FROM lagged_timestep + ) + SELECT id, time, speed_denoised, accel_without_noise, + 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 + THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 + * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 + * 2.6 * 0.3 * POW(speed_denoised,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id + FROM speed_denoised_table + ORDER BY id, time """ diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index f065a726e..ac927c749 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -6,7 +6,7 @@ parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" "a S3 location") 
parser.add_argument("--run", type=str, nargs="+") -parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") +parser.add_argument("--result_location", type=str, nargs='?', default="s3://circles.data.pipeline/query-result/") parser.add_argument("--partition", type=str, nargs='?', default="default") parser.add_argument("--list_partitions", action="store_true") parser.add_argument("--check_status", type=str, nargs='+') diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index c50f12a05..0df23942e 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -209,7 +209,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= }) extra_info = extra_init() - source_id = uuid.uuid4().hex + source_id = 'flow_{}'.format(uuid.uuid4().hex) for i in range(args.num_rollouts): vel = [] @@ -249,7 +249,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # Collect information from flow for the trajectory output get_extra_info(env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend([source_id + "run" + str(i)] * len(veh_ids)) + extra_info["source_id"].extend(['{}_run_{}'.format(source_id, i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in custom_callables.items(): @@ -326,6 +326,17 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # convert the emission file into a csv file emission_to_csv(emission_path, output_path=output_path) + # generate the trajectory output file + trajectory_table_path = './data/' + source_id + ".csv" + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) + + # upload to s3 if asked + if args.use_s3: + partition_name = date.today().isoformat() + " " + source_id[0:3] + upload_to_s3('circles.data.pipeline', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_file_path, str(args.only_query)[2:-2]) + # print the location of the emission csv file print("\nGenerated emission file at " + output_path) @@ -435,6 +446,12 @@ def create_parser(): 'be run in cluster mode') parser.add_argument('--exp_title', type=str, required=False, default=None, help='Informative experiment title to help distinguish results') + parser.add_argument( + '--only_query', + nargs='*', default="[\'all\']", + help='specify which query should be run by lambda' + 'for detail, see upload_to_s3 in data_pipeline.py' + ) return parser From 72d4733f07458a2863bb2c95cb7ef75c89935d33 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 04:07:29 -0700 Subject: [PATCH 162/438] fix trailing white space style issue --- flow/data_pipeline/query.py | 79 ++++++++++++++++++------------------- 1 file changed, 39 insertions(+), 40 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 7b8cf70c8..c915d44bf 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -16,46 +16,45 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" POWER_DEMAND_MODEL = """ - SELECT id, time, speed, acceleration, 1200 * speed * - ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 - + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 
* 1.225 * 2.6 * 0.3 - * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time """ + SELECT id, time, speed, acceleration, 1200 * speed * + ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 + + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 + * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time""" POWER_DEMAND_MODEL_DENOISED_ACCEL = """ - SELECT id, time, speed, accel_without_noise, - 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) - * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) - + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time """ + SELECT id, time, speed, accel_without_noise, + 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) + * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) + + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time""" POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ - WITH lagged_timestep AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - time - LAG(time, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, - LAG(speed, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, - LAG(acceleration, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, - LAG(accel_without_noise, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised - FROM trajectory_table + WITH lagged_timestep AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + time - LAG(time, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + LAG(speed, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, + LAG(acceleration, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, + LAG(accel_without_noise, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised + FROM trajectory_table WHERE partition_name=\'{partition}\'), - speed_denoised_table AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - prev_speed+accel_without_noise*sim_step AS speed_denoised - FROM lagged_timestep - ) - SELECT id, time, speed_denoised, accel_without_noise, - 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 - THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 - * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 - * 2.6 * 0.3 * POW(speed_denoised,3) AS power, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id - FROM speed_denoised_table - ORDER BY id, time """ + speed_denoised_table AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + 
prev_speed+accel_without_noise*sim_step AS speed_denoised + FROM lagged_timestep) + SELECT id, time, speed_denoised, accel_without_noise, + 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 + THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 + * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 + * 2.6 * 0.3 * POW(speed_denoised,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id + FROM speed_denoised_table + ORDER BY id, time""" From 420ea3f798d00e2a79260b82b79092f304ee9b72 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 04:10:43 -0700 Subject: [PATCH 163/438] some minor issue fixed --- flow/data_pipeline/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index c915d44bf..d40e14c45 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -26,7 +26,7 @@ class QueryStrings(Enum): ORDER BY id, time""" POWER_DEMAND_MODEL_DENOISED_ACCEL = """ SELECT id, time, speed, accel_without_noise, - 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) + 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id From e45eb92cc420836fa297c0ccceb2d93d88d06359 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 08:42:29 -0700 Subject: [PATCH 164/438] reformatting energy queries --- flow/data_pipeline/query.py | 94 ++++++++++++++++++++----------------- 1 file changed, 51 insertions(+), 43 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d40e14c45..c6be5efe4 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -13,48 +13,56 @@ class QueryStrings(Enum): """An enumeration of all the pre-defined query strings.""" - SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = """ - SELECT id, time, speed, acceleration, 1200 * speed * - ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 - + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 - * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time""" + SAMPLE = """ + SELECT * + FROM trajectory_table + WHERE partition_name=\'{partition}\' + LIMIT 15; + """ + + UPDATE_PARTITION = """ + ALTER TABLE trajectory_table + ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); + """ + + POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_SUBQUERY.format('trajectory_table') + POWER_DEMAND_MODEL_DENOISED_ACCEL = """ - SELECT id, time, speed, accel_without_noise, - 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) - * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) - + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time""" + WITH denoised_accel_cte AS ( + SELECT + id, + "time", + speed, + accel_without_noise AS acceleration, + road_grade, + 
source_id + FROM trajectory_table + ) + {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_accel_cte')) + POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ - WITH lagged_timestep AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - time - LAG(time, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, - LAG(speed, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, - LAG(acceleration, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, - LAG(accel_without_noise, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised - FROM trajectory_table - WHERE partition_name=\'{partition}\'), - speed_denoised_table AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - prev_speed+accel_without_noise*sim_step AS speed_denoised - FROM lagged_timestep) - SELECT id, time, speed_denoised, accel_without_noise, - 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 - THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 - * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 - * 2.6 * 0.3 * POW(speed_denoised,3) AS power, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id - FROM speed_denoised_table - ORDER BY id, time""" + WITH lagged_timestep AS ( + SELECT + "time", + id, + accel_without_noise, + road_grade, + source_id, + "time" - LAG("time", 1) + OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + LAG(speed, 1) + OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed + FROM trajectory_table + WHERE 1 = 1 + AND partition_name=\'{partition}\' + ), denoised_speed_cte AS ( + SELECT + id, + "time", + prev_speed + accel_without_noise * sim_step AS speed, + accel_without_noise AS acceleration, + road_grade, + source_id + FROM lagged_timestep + ) + {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_speed_cte')) From d578e6337b117316ce9d0633c7e18070ec27d6dc Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 08:52:17 -0700 Subject: [PATCH 165/438] rename vehicle power demand query --- flow/data_pipeline/query.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index c6be5efe4..826c28242 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -6,9 +6,24 @@ tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], "analysis": ["POWER_DEMAND_MODEL"]} -# specify the function to calculate the expected result of each query -testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} - +VEHICLE_POWER_DEMAND_FINAL_SELECT = """ + SELECT + id, + "time", + speed, + acceleration, + road_grade, + 1200 * speed * ( + (CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + + 0.8 + 9.81 * SIN(road_grade) + ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL' AS energy_model_id, + source_id + FROM {} + WHERE 1 = 1 + AND partition_name=\'{partition}\' + ORDER BY id, "time" + """ class QueryStrings(Enum): """An enumeration of all the pre-defined query strings.""" @@ -25,7 +40,7 @@ class QueryStrings(Enum): ADD IF NOT EXISTS PARTITION 
(partition_name=\'{partition}\'); """ - POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_SUBQUERY.format('trajectory_table') + POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_FINAL_SELECT.format('trajectory_table') POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -38,7 +53,7 @@ class QueryStrings(Enum): source_id FROM trajectory_table ) - {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -65,4 +80,4 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_speed_cte')) From 32c052866e2d750fb4e4911c06320cb89ccd3157 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 10:44:06 -0700 Subject: [PATCH 166/438] move partition condition to cte's --- flow/data_pipeline/query.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 826c28242..2eb2146f2 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -20,8 +20,6 @@ 'POWER_DEMAND_MODEL' AS energy_model_id, source_id FROM {} - WHERE 1 = 1 - AND partition_name=\'{partition}\' ORDER BY id, "time" """ @@ -40,7 +38,20 @@ class QueryStrings(Enum): ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); """ - POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_FINAL_SELECT.format('trajectory_table') + POWER_DEMAND_MODEL = """ + WITH regular_cte AS ( + SELECT + id, + "time", + speed, + acceleration, + road_grade, + source_id + FROM trajectory_table + WHERE 1 = 1 + AND partition_name=\'{partition}\' + ) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -52,14 +63,16 @@ class QueryStrings(Enum): road_grade, source_id FROM trajectory_table + WHERE 1 = 1 + AND partition_name=\'{partition}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( SELECT - "time", id, + "time", accel_without_noise, road_grade, source_id, From c7cd96303620e97530bceb9507a085d6e4089cc9 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 13:41:17 -0700 Subject: [PATCH 167/438] fix some query string formatting issue --- flow/data_pipeline/query.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2eb2146f2..ca59a12b1 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -49,7 +49,7 @@ class QueryStrings(Enum): source_id FROM trajectory_table WHERE 1 = 1 - AND partition_name=\'{partition}\' + AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) @@ -64,7 +64,7 @@ class QueryStrings(Enum): source_id FROM trajectory_table WHERE 1 = 1 - AND partition_name=\'{partition}\' + AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) @@ -82,7 +82,7 @@ class QueryStrings(Enum): OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed FROM trajectory_table WHERE 1 = 1 - AND partition_name=\'{partition}\' + AND partition_name=\'{{partition}}\' ), denoised_speed_cte AS ( SELECT id, 
From b5be92ac038b118b4055ef6489612a9836cf00f2 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 15:28:54 -0700 Subject: [PATCH 168/438] fix some style issue --- flow/data_pipeline/query.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index ca59a12b1..e1f98aaf1 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -23,6 +23,7 @@ ORDER BY id, "time" """ + class QueryStrings(Enum): """An enumeration of all the pre-defined query strings.""" From 6884960aecf8adb4704143b17383fcddd2aa0ffa Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 15:56:53 -0700 Subject: [PATCH 169/438] get up to date with i210_dev --- examples/exp_configs/non_rl/highway.py | 40 ++++++---- .../exp_configs/non_rl/i210_subnetwork.py | 2 +- .../exp_configs/templates/sumo/test2.net.xml | 78 +++++-------------- 3 files changed, 48 insertions(+), 72 deletions(-) diff --git a/examples/exp_configs/non_rl/highway.py b/examples/exp_configs/non_rl/highway.py index 1905e2f7f..e7505f2d7 100644 --- a/examples/exp_configs/non_rl/highway.py +++ b/examples/exp_configs/non_rl/highway.py @@ -5,19 +5,25 @@ from flow.core.params import VehicleParams, InFlows from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS -from flow.envs import TestEnv +from flow.envs import LaneChangeAccelEnv vehicles = VehicleParams() vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 - }), - ) + veh_id="human", + acceleration_controller=(IDMController, {}), + lane_change_params=SumoLaneChangeParams( + model="SL2015", + lc_sublane=2.0, + ), + num_vehicles=20) +vehicles.add( + veh_id="human2", + acceleration_controller=(IDMController, {}), + lane_change_params=SumoLaneChangeParams( + model="SL2015", + lc_sublane=2.0, + ), + num_vehicles=20) env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) @@ -25,7 +31,13 @@ inflow.add( veh_type="human", edge="highway_0", - vehs_per_hour=10800 / 5.0, + probability=0.25, + departLane="free", + departSpeed=20) +inflow.add( + veh_type="human2", + edge="highway_0", + probability=0.25, departLane="free", departSpeed=20) @@ -35,7 +47,7 @@ exp_tag='highway', # name of the flow environment the experiment is running on - env_name=TestEnv, + env_name=LaneChangeAccelEnv, # name of the network class the experiment is running on network=HighwayNetwork, @@ -46,12 +58,12 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( render=True, - sim_step=0.5 + lateral_resolution=1.0, ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=4000, + horizon=1500, additional_params=ADDITIONAL_ENV_PARAMS.copy(), ), diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 8970e6165..3704a7a1c 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -117,7 +117,7 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=False, + color_by_speed=True, use_ballistic=True ), diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 16170b917..00e3edcd5 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ 
b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,41 +1,5 @@ - - @@ -4716,24 +4680,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4837,10 +4801,10 @@ - + - - + + From 7e549be514a427b1877f19cab3ecb603a02c4f50 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 21:18:43 -0700 Subject: [PATCH 170/438] update lambda function, change partition into multi-column --- flow/core/experiment.py | 8 +-- flow/data_pipeline/data_pipeline.py | 84 ++++++++------------------- flow/data_pipeline/lambda_function.py | 26 +++------ flow/data_pipeline/query.py | 29 ++++----- flow/data_pipeline/run_query.py | 6 +- flow/visualize/i210_replay.py | 7 ++- 6 files changed, 58 insertions(+), 102 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 84a34d0e3..01f732379 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -217,10 +217,10 @@ def rl_actions(*_): if partition_name: if partition_name == "default": - partition_name = source_id[0:3] - partition_name = date.today().isoformat() + " " + partition_name - upload_to_s3('circles.data.pipeline', 'trajectory-output/partition_name={}/{}.csv'.format( - partition_name, upload_file_path.split('/')[-1].split('_')[0]), + partition_name = source_id[-3:] + cur_date = date.today().isoformat() + upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index fbd975c5e..111c41994 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -2,8 +2,9 @@ import pandas as pd import numpy as np import boto3 -from flow.data_pipeline.query import QueryStrings, testing_functions +from flow.data_pipeline.query import QueryStrings from time import time +from datetime import date def generate_trajectory_table(data_path, extra_info, partition_name): @@ -90,7 +91,7 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): def extra_init(): """Return the dictionary with all the feild pre-populated with empty list.""" - extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], + extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "road_grade": [], "source_id": []} return extra_info @@ -99,7 +100,7 @@ def extra_init(): def get_extra_info(veh_kernel, extra_info, veh_ids): """Get all the necessary information for the trajectory output from flow.""" for vid in veh_ids: - extra_info["time"].append(veh_kernel.get_timestep(vid) / 1000) + extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(veh_kernel.get_headway(vid)) extra_info["acceleration"].append(veh_kernel.get_accel(vid)) @@ -154,7 +155,7 @@ def get_existing_partitions(self): response = self.client.start_query_execution( QueryString='SHOW PARTITIONS trajectory_table', QueryExecutionContext={ - 'Database': 'simulation' + 'Database': 'circles' }, WorkGroup='primary' ) @@ -164,7 +165,7 @@ def get_existing_partitions(self): QueryExecutionId=response['QueryExecutionId'], MaxResults=1000 ) - return 
[data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] + return [data['Data'][0]['VarCharValue'] for data in response['ResultSet']['Rows']] def check_status(self, execution_id): """Return the status of the execution with given id. @@ -207,27 +208,30 @@ def wait_for_execution(self, execution_id): return False return True - def update_partition(self, partition): + def update_partition(self, query_date, partition): """Load the given partition to the trajectory_table on Athena. Parameters ---------- + query_date : str + the new partition date that needs to be loaded partition : str the new partition that needs to be loaded """ response = self.client.start_query_execution( - QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(date=query_date, partition=partition), QueryExecutionContext={ - 'Database': 'simulation' + 'Database': 'circles' }, WorkGroup='primary' ) if self.wait_for_execution(response['QueryExecutionId']): raise RuntimeError("update partition timed out") - self.existing_partitions.append(partition) + self.existing_partitions.append("date={}/partition_name={}".format(query_date, partition)) return - def run_query(self, query_name, result_location="s3://circles.data.pipeline/query-result/", partition="default"): + def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", + query_date="today", partition="default"): """Start the execution of a query, does not wait for it to finish. Parameters @@ -236,6 +240,8 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/quer name of the query in QueryStrings enum that will be run result_location: str, optional location on the S3 bucket where the result will be stored + query_date : str + name of the partition date to run this query on partition: str, optional name of the partition to run this query on Returns @@ -249,13 +255,16 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/quer if query_name not in QueryStrings.__members__: raise ValueError("query not existed: please add it to query.py") - if partition not in self.existing_partitions: - self.update_partition(partition) + if query_date == "today": + query_date = date.today().isoformat() + + if "date={}/partition_name={}".format(query_date, partition) not in self.existing_partitions: + self.update_partition(query_date, partition) response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(partition=partition), + QueryString=QueryStrings[query_name].value.format(date=query_date, partition=partition), QueryExecutionContext={ - 'Database': 'simulation' + 'Database': 'circles' }, ResultConfiguration={ 'OutputLocation': result_location, @@ -263,50 +272,3 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/quer WorkGroup='primary' ) return response['QueryExecutionId'] - -########################################################################### -# Helpers for testing the SQL Queries # -########################################################################### - - -def test_sql_query(query_name): - """Start the execution of a query, does not wait for it to finish. 
- - Parameters - ---------- - query_name : str - name of the query in QueryStrings enum that will be tested - Raises - ------ - RuntimeError: if timeout - """ - if query_name not in testing_functions: - raise ValueError("no tests supported for this query") - - # Run the respective sql query - queryEngine = AthenaQuery() - execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data.pipeline/" - "query-result/query-test", partition="test") - if queryEngine.wait_for_execution(execution_id): - raise RuntimeError("execution timed out") - - # get the Athena query result from S3 - s3 = boto3.resource("s3") - s3.Bucket("circles.data.pipeline").download_file("query-result/query-test/"+execution_id+".csv", - "data/athena_result.csv") - athena_result = pd.read_csv("data/athena_result.csv") - athena_result = athena_result.sort_values(by=["time", "id"]) - - # get the python expected result - expected_result = pd.read_csv("data/test_data.csv") - expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand") - expected_result.columns = ["time", "id", "power"] - expected_result = expected_result.sort_values(by=["time", "id"]) - - difference = athena_result["power"] - expected_result["power"] - print("average difference is: " + str(np.mean(difference))) - print("std of difference is: " + str(np.std(difference))) - print("average ratio of difference to expected is: " + - str(np.mean(np.divide(difference, expected_result["power"])))) - difference = pd.DataFrame(difference) - difference.to_csv("./difference.csv") diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index fd50ba8f5..35dcbfba8 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -1,36 +1,28 @@ """lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus -from examples.data_pipeline import AthenaQuery -from examples.query import tags +from flow.data_pipeline.data_pipeline import AthenaQuery +from flow.data_pipeline.query import tags s3 = boto3.client('s3') queryEngine = AthenaQuery() def lambda_handler(event, context): - """Invoke by AWS Lambda upon triggered by an event. 
- - Parameters - ---------- - event : dic < str: dic > - an S3 event - context: - not used - """ for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) + query_date = key.split('/')[-3].split('=')[-1] partition = key.split('/')[-2].split('=')[-1] response = s3.head_object(Bucket=bucket, Key=key) - run_query = response["Metadata"]["run-query"] + required_query = response["Metadata"]["run-query"] if bucket == 'circles.data.pipeline' and 'trajectory-output/' in key: - if run_query == "all": - query_list = tags["analysis"] - elif not run_query: + if required_query == "all": + query_list = tags["energy"] + elif not required_query: break else: - query_list = run_query.split("\', \'") + query_list = required_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://circles.data.pipeline/query-result/auto/', partition) + queryEngine.run_query(query_name, 's3://circles.data.pipeline/result/auto/', query_date, partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index e1f98aaf1..1d805279b 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -3,13 +3,12 @@ from flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], - "analysis": ["POWER_DEMAND_MODEL"]} +tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT id, - "time", + time_step, speed, acceleration, road_grade, @@ -20,7 +19,7 @@ 'POWER_DEMAND_MODEL' AS energy_model_id, source_id FROM {} - ORDER BY id, "time" + ORDER BY id, time_step """ @@ -30,26 +29,28 @@ class QueryStrings(Enum): SAMPLE = """ SELECT * FROM trajectory_table - WHERE partition_name=\'{partition}\' + WHERE date = \'{date}\' + AND partition_name=\'{partition}\' LIMIT 15; """ UPDATE_PARTITION = """ ALTER TABLE trajectory_table - ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); + ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ POWER_DEMAND_MODEL = """ WITH regular_cte AS ( SELECT id, - "time", + time_step, speed, acceleration, road_grade, source_id FROM trajectory_table WHERE 1 = 1 + AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) @@ -58,13 +59,14 @@ class QueryStrings(Enum): WITH denoised_accel_cte AS ( SELECT id, - "time", + time_step, speed, accel_without_noise AS acceleration, road_grade, source_id FROM trajectory_table WHERE 1 = 1 + AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) @@ -73,21 +75,22 @@ class QueryStrings(Enum): WITH lagged_timestep AS ( SELECT id, - "time", + time_step, accel_without_noise, road_grade, source_id, - "time" - LAG("time", 1) - OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + time_step - LAG(time_step, 1) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) - OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed FROM trajectory_table WHERE 1 = 1 + AND date = 
\'{{date}}\' AND partition_name=\'{{partition}}\' ), denoised_speed_cte AS ( SELECT id, - "time", + time_step, prev_speed + accel_without_noise * sim_step AS speed, accel_without_noise AS acceleration, road_grade, diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index ac927c749..1eb802205 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -1,6 +1,6 @@ """runner script for invoking query manually.""" import argparse -from flow.data_pipeline.data_pipeline import AthenaQuery, test_sql_query +from flow.data_pipeline.data_pipeline import AthenaQuery from flow.data_pipeline.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" @@ -21,7 +21,7 @@ if args.run: execution_ids = [] for query_name in args.run: - execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) + execution_ids.append(queryEngine.run_query(query_name, args.result_location, partition=args.partition)) print(execution_ids) if args.list_partitions: print(queryEngine.existing_partitions) @@ -33,5 +33,3 @@ if args.list_queries: for q in QueryStrings: print(q) - if args.test_query: - test_sql_query(args.test_query[0]) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 0df23942e..f21808705 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -332,9 +332,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # upload to s3 if asked if args.use_s3: - partition_name = date.today().isoformat() + " " + source_id[0:3] - upload_to_s3('circles.data.pipeline', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + partition_name = source_id[-3:] + cur_date = date.today().isoformat() + upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(args.only_query)[2:-2]) # print the location of the emission csv file From a799abda655a821b66828b44669210c8a8dd35ea Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:45:08 -0700 Subject: [PATCH 171/438] remove dupe imports --- examples/train.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/examples/train.py b/examples/train.py index d9e7dde07..7cb84d361 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,8 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv - from stable_baselines import PPO2 if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() @@ -175,12 +173,7 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ - from ray import tune from ray.tune.registry import register_env - try: - from ray.rllib.agents.agent import get_agent_class - except ImportError: - from ray.rllib.agents.registry import get_agent_class horizon = flow_params['env'].horizon @@ -263,7 +256,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - import ray flow_params = submodule.flow_params flow_params['sim'].render = flags.render @@ -412,8 +404,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies 
using the PPO algorithm in stable-baselines.""" - from stable_baselines.common.vec_env import DummyVecEnv - from stable_baselines import PPO2 flow_params = submodule.flow_params # Path to the saved files From f4fa42632a13c17b76ba49e73d67d13559f19062 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:51:14 -0700 Subject: [PATCH 172/438] remove blank lines after docstrings --- examples/train.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/train.py b/examples/train.py index 7cb84d361..5a9ab5903 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,7 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -256,7 +255,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) @@ -404,7 +402,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] From 2563818e4e31cf61606f53955a7b7aed35557a7b Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:59:00 -0700 Subject: [PATCH 173/438] add back ray import --- examples/train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/train.py b/examples/train.py index 5a9ab5903..50720b756 100644 --- a/examples/train.py +++ b/examples/train.py @@ -255,6 +255,8 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" + import ray + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From 498e08aa1f35d2c37bb1551b35b5d8c98635afa4 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 21:04:56 -0700 Subject: [PATCH 174/438] remove whitespace --- examples/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index 50720b756..1689d846f 100644 --- a/examples/train.py +++ b/examples/train.py @@ -256,7 +256,7 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" import ray - + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From d7da535e81b50dd7b14b2cbb5c72d8cd65fa1825 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 21:47:19 -0700 Subject: [PATCH 175/438] style fixed --- flow/data_pipeline/data_pipeline.py | 1 - flow/data_pipeline/query.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 111c41994..a999b6eb1 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -1,6 +1,5 @@ """contains class and helper functions for the data pipeline.""" import pandas as pd -import numpy as np import boto3 from flow.data_pipeline.query import QueryStrings from time import time diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 1d805279b..c2a64013c 100644 --- a/flow/data_pipeline/query.py 
+++ b/flow/data_pipeline/query.py @@ -80,9 +80,9 @@ class QueryStrings(Enum): road_grade, source_id, time_step - LAG(time_step, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed FROM trajectory_table WHERE 1 = 1 AND date = \'{{date}}\' From 3df23123743b183a737adb0c7f29516771f2d353 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 11:49:38 -0700 Subject: [PATCH 176/438] specify power demand model names --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index c2a64013c..a319550e2 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -16,7 +16,7 @@ (CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 + 9.81 * SIN(road_grade) ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL' AS energy_model_id, + \'{}\' AS energy_model_id, source_id FROM {} ORDER BY id, time_step @@ -53,7 +53,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -69,7 +69,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', 'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -97,4 +97,4 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) From 28d4f73c4170c05b8fde403d8a6148347d2d1351 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 24 May 2020 21:29:11 -0700 Subject: [PATCH 177/438] fix bug in vehicle power demand --- flow/data_pipeline/query.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index a319550e2..bbc0b9709 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -12,9 +12,12 @@ speed, acceleration, road_grade, - 1200 * speed * ( - (CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) - + 0.8 + 9.81 * SIN(road_grade) + 1200 * speed * MAX(0, ( + CASE + WHEN acceleration > 0 THEN 1 + WHEN acceleration < 0 THEN 0 + ELSE 0.5 + END * (1 - 0.8) + 0.8) * acceleration + 9.81 * SIN(road_grade) ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, \'{}\' AS energy_model_id, source_id From 077983206ea4454190ffa98987dc81e4ba5d2954 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:31:20 -0700 Subject: [PATCH 178/438] Add several accelerations (with/without noise, with/without failsafes) to custom output --- 
flow/controllers/base_controller.py | 18 +++++++++------ flow/core/kernel/vehicle/base.py | 20 ++++++++++++-- flow/core/kernel/vehicle/traci.py | 36 +++++++++++++++++++++++------ flow/data_pipeline/data_pipeline.py | 19 ++++++++++----- 4 files changed, 71 insertions(+), 22 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 7adcdf310..c417bb73a 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -88,8 +88,10 @@ def get_action(self, env): float the modified form of the acceleration """ - # clear the current stored accel_without_noise of this vehicle None - env.k.vehicle.update_accel_without_noise(self.veh_id, None) + # reset the stored acceleration readings of this vehicle to None + env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, None) + env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, None) + env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, None) # this is to avoid abrupt decelerations when a vehicle has just entered # a network and its data is still not subscribed @@ -110,23 +112,25 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested - accel_without_noise = accel + env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel) if self.fail_safe == 'instantaneous': - accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) + accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': - accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) - env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) + accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel) + env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, accel_no_noise_with_failsafe) # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) + env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, accel) # run the fail-safes, if requested if self.fail_safe == 'instantaneous': accel = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': accel = self.get_safe_velocity_action(env, accel) - + env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) + return accel def get_safe_action_instantaneous(self, env, action): diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 20a11cf99..eb88ff397 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -693,7 +693,15 @@ def get_accel(self, veh_id): """Return the acceleration of vehicle with veh_id.""" raise NotImplementedError - def update_accel_without_noise(self, veh_id, accel_without_noise): + def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): + """Update stored acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): + """Update stored acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError @@ -705,7 +713,15 @@ def get_2D_position(self, veh_id, error=-1001): """Return (x, y)
position of vehicle with veh_id.""" raise NotImplementedError - def get_accel_without_noise(self, veh_id): + def get_accel_no_noise_no_failsafe(self, veh_id): + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_no_noise_with_failsafe(self, veh_id): + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_with_noise_no_failsafe(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 824ec4b0c..344bcfde2 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,7 +113,9 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] - self.__vehicles[veh_id]["accel_without_noise"] = None + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1138,15 +1140,35 @@ def get_accel(self, veh_id): self.__vehicles[veh_id]["accel"] = None return self.__vehicles[veh_id]["accel"] - def update_accel_without_noise(self, veh_id, accel_without_noise): + def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): """See parent class.""" - self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = accel_no_noise_no_failsafe - def get_accel_without_noise(self, veh_id): + def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): """See parent class.""" - if "accel_without_noise" not in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["accel_without_noise"] = None - return self.__vehicles[veh_id]["accel_without_noise"] + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = accel_no_noise_with_failsafe + + def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): + """See parent class.""" + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = accel_with_noise_no_failsafe + + def get_accel_no_noise_no_failsafe(self, veh_id): + """See parent class.""" + if "accel_no_noise_no_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None + return self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] + + def get_accel_no_noise_with_failsafe(self, veh_id): + """See parent class.""" + if "accel_no_noise_with_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None + return self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] + + def get_accel_with_noise_no_failsafe(self, veh_id): + """See parent class.""" + if "accel_with_noise_no_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None + return self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] def get_realized_accel(self, veh_id): """See parent class.""" diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index a999b6eb1..11d85cb0d 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -89,9 +89,11 @@ def upload_to_s3(bucket_name, bucket_key, 
file_path, only_query): def extra_init(): - """Return the dictionary with all the feild pre-populated with empty list.""" - extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], + """Return the dictionary with all the field pre-populated with empty list.""" + extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], + "target_accel_with_noise_with_failsafe": [], "target_accel_no_noise_no_failsafe": [], + "target_accel_with_noise_no_failsafe": [], "target_accel_no_noise_with_failsafe": [], + "realized_accel": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "road_grade": [], "source_id": []} return extra_info @@ -102,13 +104,18 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(veh_kernel.get_headway(vid)) - extra_info["acceleration"].append(veh_kernel.get_accel(vid)) + extra_info["target_accel_with_noise_with_failsafe"].append(veh_kernel.get_accel(vid)) extra_info["leader_id"].append(veh_kernel.get_leader(vid)) extra_info["follower_id"].append(veh_kernel.get_follower(vid)) extra_info["leader_rel_speed"].append(veh_kernel.get_speed( veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) - extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) - extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid)) + extra_info["target_accel_no_noise_no_failsafe"].append( + veh_kernel.get_accel_no_noise_no_failsafe(vid)) + extra_info["target_accel_with_noise_no_failsafe"].append( + veh_kernel.get_accel_with_noise_no_failsafe(vid)) + extra_info["target_accel_no_noise_with_failsafe"].append( + veh_kernel.get_accel_no_noise_with_failsafe(vid)) + extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) position = veh_kernel.get_2d_position(vid) extra_info["x"].append(position[0]) From b3f15a3c2a4527b59139ed1d9198f68110c93270 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:44:15 -0700 Subject: [PATCH 179/438] update queries with new column names --- flow/data_pipeline/query.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index bbc0b9709..d3f136a72 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -31,14 +31,14 @@ class QueryStrings(Enum): SAMPLE = """ SELECT * - FROM trajectory_table + FROM fact_vehicle_trace WHERE date = \'{date}\' AND partition_name=\'{partition}\' LIMIT 15; """ UPDATE_PARTITION = """ - ALTER TABLE trajectory_table + ALTER TABLE fact_vehicle_trace ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ @@ -48,10 +48,10 @@ class QueryStrings(Enum): id, time_step, speed, - acceleration, + target_accel_with_noise_with_failsafe AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -64,10 +64,10 @@ class QueryStrings(Enum): id, time_step, speed, - accel_without_noise AS acceleration, + target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND 
partition_name=\'{{partition}}\' @@ -79,14 +79,14 @@ class QueryStrings(Enum): SELECT id, time_step, - accel_without_noise, + target_accel_no_noise_with_failsafe, road_grade, source_id, time_step - LAG(time_step, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -94,8 +94,8 @@ class QueryStrings(Enum): SELECT id, time_step, - prev_speed + accel_without_noise * sim_step AS speed, - accel_without_noise AS acceleration, + prev_speed + target_accel_no_noise_with_failsafe * sim_step AS speed, + target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id FROM lagged_timestep From d66a0ab6542a6075ce9495991790afadf8a4d3e4 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:47:44 -0700 Subject: [PATCH 180/438] fix flake8 issues --- flow/controllers/base_controller.py | 2 +- flow/data_pipeline/query.py | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index c417bb73a..3f6a0f4ae 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -130,7 +130,7 @@ def get_action(self, env): elif self.fail_safe == 'safe_velocity': accel = self.get_safe_velocity_action(env, accel) env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) - + return accel def get_safe_action_instantaneous(self, env, action): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d3f136a72..b8cd24b55 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -3,7 +3,13 @@ from flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = { + "energy": [ + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" + ] + } VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT @@ -56,7 +62,8 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', + 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -72,7 +79,8 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', 'denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', + 'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -100,4 +108,5 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) From 38af177a02bd47cc691201083f4192f61fa2dedc Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:51:46 -0700 Subject: [PATCH 181/438] remove trailing 
whitespaces --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index b8cd24b55..57def52de 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -5,8 +5,8 @@ # tags for different queries tags = { "energy": [ - "POWER_DEMAND_MODEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" ] } @@ -62,7 +62,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ @@ -108,5 +108,5 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) From fceedf874599c68852aa8feb016921b12abd358e Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:31:20 -0700 Subject: [PATCH 182/438] Add several accelerations (with/without noise, with/without failsafes) to custom output --- flow/core/kernel/vehicle/base.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index eb88ff397..ed53773cb 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -709,10 +709,6 @@ def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError - def get_2D_position(self, veh_id, error=-1001): - """Return (x, y) position of vehicle with veh_id.""" - raise NotImplementedError - def get_accel_no_noise_no_failsafe(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError From df182ad6c820b1fd2b05db9ce6a305aee248cec5 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 24 May 2020 23:20:29 -0700 Subject: [PATCH 183/438] fix accel with noise with failsafe output --- flow/controllers/base_controller.py | 1 + flow/core/kernel/vehicle/base.py | 20 ++++++++++++++------ flow/core/kernel/vehicle/traci.py | 10 ++++++++++ 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 3f6a0f4ae..1169ce5b8 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -113,6 +113,7 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel) + accel_no_noise_with_failsafe = accel if self.fail_safe == 'instantaneous': accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index ed53773cb..f6f8ee382 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -694,15 +694,19 @@ def get_accel(self, veh_id): raise NotImplementedError def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): - """Update stored acceleration without noise of vehicle with veh_id.""" + """Update stored acceleration without noise without failsafe 
of vehicle with veh_id.""" raise NotImplementedError def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): - """Update stored acceleration without noise of vehicle with veh_id.""" + """Update stored acceleration without noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): - """Update stored acceleration without noise of vehicle with veh_id.""" + """Update stored acceleration with noise without failsafe of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_with_noise_with_failsafe(self, veh_id, accel_with_noise_with_failsafe): + """Update stored acceleration with noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def get_2d_position(self, veh_id, error=-1001): @@ -710,15 +714,19 @@ def get_2d_position(self, veh_id, error=-1001): raise NotImplementedError def get_accel_no_noise_no_failsafe(self, veh_id): - """Return the acceleration without noise of vehicle with veh_id.""" + """Return the acceleration without noise without failsafe of vehicle with veh_id.""" raise NotImplementedError def get_accel_no_noise_with_failsafe(self, veh_id): - """Return the acceleration without noise of vehicle with veh_id.""" + """Return the acceleration without noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def get_accel_with_noise_no_failsafe(self, veh_id): - """Return the acceleration without noise of vehicle with veh_id.""" + """Return the acceleration with noise without failsafe of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_with_noise_with_failsafe(self, veh_id): + """Return the acceleration with noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def get_realized_accel(self, veh_id): diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 344bcfde2..5de35956f 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1152,6 +1152,10 @@ def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsa """See parent class.""" self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = accel_with_noise_no_failsafe + def update_accel_with_noise_with_failsafe(self, veh_id, accel_with_noise_with_failsafe): + """See parent class.""" + self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = accel_with_noise_with_failsafe + def get_accel_no_noise_no_failsafe(self, veh_id): """See parent class.""" if "accel_no_noise_no_failsafe" not in self.__vehicles[veh_id]: @@ -1170,6 +1174,12 @@ def get_accel_with_noise_no_failsafe(self, veh_id): self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None return self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] + def get_accel_with_noise_with_failsafe(self, veh_id): + """See parent class.""" + if "accel_with_noise_with_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = None + return self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] + def get_realized_accel(self, veh_id): """See parent class.""" return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step From d88840578f88c70da428d829b7b9d22024d6bf52 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 16:57:52 -0700 Subject: [PATCH 184/438] fix rebase errors --- flow/controllers/base_controller.py | 1 + flow/core/kernel/vehicle/traci.py | 8 -------- flow/data_pipeline/data_pipeline.py | 10 
---------- 3 files changed, 1 insertion(+), 18 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 1169ce5b8..ac29bca2e 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -92,6 +92,7 @@ def get_action(self, env): env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, None) env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, None) env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, None) + env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, None) # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 5de35956f..1c0b5f19b 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -223,14 +223,6 @@ def update(self, reset): self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] - # update the number of not departed vehicles - self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ - sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] - - # update the number of not departed vehicles - self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ - sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] - # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: try: diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 11d85cb0d..aea9b349c 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -88,16 +88,6 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): return -def extra_init(): - """Return the dictionary with all the field pre-populated with empty list.""" - extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], - "target_accel_with_noise_with_failsafe": [], "target_accel_no_noise_no_failsafe": [], - "target_accel_with_noise_no_failsafe": [], "target_accel_no_noise_with_failsafe": [], - "realized_accel": [], "leader_id": [], "follower_id": [], - "leader_rel_speed": [], "road_grade": [], "source_id": []} - return extra_info - - def get_extra_info(veh_kernel, extra_info, veh_ids): """Get all the necessary information for the trajectory output from flow.""" for vid in veh_ids: From 69f6f5536a3be4d885652471c3008da258e58416 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 17:57:41 -0700 Subject: [PATCH 185/438] rm deleted file --- flow/data_pipeline/datapipeline_test.py | 37 ------------------------- 1 file changed, 37 deletions(-) delete mode 100644 flow/data_pipeline/datapipeline_test.py diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py deleted file mode 100644 index 0e1a50518..000000000 --- a/flow/data_pipeline/datapipeline_test.py +++ /dev/null @@ -1,37 +0,0 @@ -"""functions that calculates the expected result for testing.""" -import math - -# Vehicle Mass -M = 1200 -# Gravity -g = 9.81 -# Density of Air -ro_air = 1.225 -# Rolling resistance coefficient -C_r = .005 -# Aerodynamic drag coefficient -C_a = 0.3 -# Vehicle Cross sectional Area -A = 2.6 -# Road grade -theta = 0 - - -def heavyside(inp): - """Return 1 if input is positive.""" - return 0 if inp <= 0 else 1 - - -def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): - """Calculate the expected power for 
POWER_DEMAND_MODEL query.""" - acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) - accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) - rolling_friction = M * g * C_r * mu - air_drag = .5 * ro_air * A * C_a * mu**3 - power = accel_and_slope + rolling_friction + air_drag - return power - - -def apply_energy_one(row): - """Apply the power calculation to a row of the dataframe.""" - return [row[0], row[1], calculate_power(row[4], row[6])] From 4f2f23ec7d47bff699baeac9bf8810af68f2f465 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 17:58:58 -0700 Subject: [PATCH 186/438] add carriage return to eof --- examples/exp_configs/non_rl/i210_subnetwork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 3704a7a1c..25565bb49 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -163,4 +163,4 @@ "avg_density": lambda env: 5 * 1000 * len(env.k.vehicle.get_ids_by_edge( edge_id)) / (env.k.network.edge_length(edge_id) * env.k.network.num_lanes(edge_id)), -} \ No newline at end of file +} From d2ba0694ef7cf0e4f6c913d4e855011fbcdc76e2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 18:00:46 -0700 Subject: [PATCH 187/438] revert accidental change --- flow/core/experiment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index f25a9fcac..779fdb0f4 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -175,7 +175,7 @@ def rl_actions(*_): for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) - if done: + if type(done) is dict and done['__all__'] or type(done) is not dict and done: break # Store the information from the run in info_dict.
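With the Python reference implementation in datapipeline_test.py removed above, the combustion power-demand expression that the POWER_DEMAND_MODEL queries in this series evaluate in SQL (in the form fixed by the "fix bug in vehicle power demand" patch) can be sketched in plain Python roughly as follows. This is a minimal illustrative sketch only; the function name, signature, and default road grade are assumptions of this sketch and are not part of the Flow codebase:

    import math

    def vehicle_power_demand(speed, acceleration, road_grade=0.0):
        """Sketch of the POWER_DEMAND_MODEL SQL expression (illustrative only).

        Constants mirror the queries in this series: 1200 kg vehicle mass,
        9.81 m/s^2 gravity, 0.005 rolling resistance coefficient,
        1.225 kg/m^3 air density, 2.6 m^2 frontal area, and 0.3
        aerodynamic drag coefficient.
        """
        # the CASE expression weights the acceleration term by 1.0 when
        # accelerating, 0.8 when decelerating, and 0.9 at zero acceleration
        if acceleration > 0:
            case = 1.0
        elif acceleration < 0:
            case = 0.0
        else:
            case = 0.5
        traction = (case * (1 - 0.8) + 0.8) * acceleration + 9.81 * math.sin(road_grade)
        # the traction term is clipped at zero, as in MAX(0, ...) in the SQL
        power = 1200 * speed * max(0.0, traction)
        # rolling resistance and aerodynamic drag terms
        power += 1200 * 9.81 * 0.005 * speed
        power += 0.5 * 1.225 * 2.6 * 0.3 * speed ** 3
        return power

For a vehicle cruising at a steady 25 m/s on flat ground, this evaluates to roughly 1471 W of rolling resistance plus 7465 W of aerodynamic drag, or about 8.9 kW of demanded power.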
From 8eee7722bc28ae05ac330e741e33ee9b659391a2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 18:03:02 -0700 Subject: [PATCH 188/438] rename trajectory table --- flow/data_pipeline/query.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index e8ac34abc..b6e66fcec 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -36,14 +36,14 @@ class QueryStrings(Enum): SAMPLE = """ SELECT * - FROM fact_vehicle_trace + FROM trajectory_table WHERE date = \'{date}\' AND partition_name=\'{partition}\' LIMIT 15; """ UPDATE_PARTITION = """ - ALTER TABLE fact_vehicle_trace + ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ @@ -56,7 +56,7 @@ class QueryStrings(Enum): id, time_step, speed, target_accel_with_noise_with_failsafe AS acceleration, road_grade, source_id - FROM fact_vehicle_trace + FROM trajectory_table WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -73,7 +73,7 @@ class QueryStrings(Enum): target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id - FROM fact_vehicle_trace + FROM trajectory_table WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -93,7 +93,7 @@ class QueryStrings(Enum): OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed - FROM fact_vehicle_trace + FROM trajectory_table WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' From 3c6dcf71c0ac4219e13da3a3a58471a69dfc88d1 Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Tue, 26 May 2020 13:38:08 -0700 Subject: [PATCH 189/438] added apply acceleration function which uses setSpeed() method instead of slowDown() --- flow/core/kernel/network/flow_params.json | 2 ++ flow/core/kernel/vehicle/base.py | 17 +++++++++++++++ flow/core/kernel/vehicle/traci.py | 14 ++++++++++++ 3 files changed, 33 insertions(+) create mode 100644 flow/core/kernel/network/flow_params.json diff --git a/flow/core/kernel/network/flow_params.json b/flow/core/kernel/network/flow_params.json new file mode 100644 index 000000000..c70a22e63 --- /dev/null +++ b/flow/core/kernel/network/flow_params.json @@ -0,0 +1,2 @@ +{ + "env": \ No newline at end of file diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 647ef37fe..21edd8d4d 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -124,6 +124,23 @@ def remove(self, veh_id): def apply_acceleration(self, veh_id, acc): """Apply the acceleration requested by a vehicle in the simulator. + In SUMO, this function applies the slowDown method, which applies smoothing. + + Parameters + ---------- + veh_id : str or list of str + list of vehicle identifiers + acc : float or array_like + requested accelerations from the vehicles + """ + raise NotImplementedError + + def apply_acceleration_not_smooth(self, veh_id, acc): + """Apply the acceleration requested by a vehicle in the simulator. + + In SUMO, this function applies the setSpeed method, which does not apply + smoothing.
+ Parameters + ---------- + veh_id : str or list of str + list of vehicle identifiers + acc : float or array_like + requested accelerations from the vehicles + """ + raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 2a4e06257..b56e36ae0 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -964,6 +964,20 @@ def apply_acceleration(self, veh_ids, acc): next_vel = max([this_vel + acc[i] * self.sim_step, 0]) self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3) + def apply_acceleration_not_smooth(self, veh_ids, acc): + """See parent class.""" + # to hand the case of a single vehicle + if type(veh_ids) == str: + veh_ids = [veh_ids] + acc = [acc] + + for i, vid in enumerate(veh_ids): + if acc[i] is not None and vid in self.get_ids(): + self.__vehicles[vid]["accel"] = acc[i] + this_vel = self.get_speed(vid) + next_vel = max([this_vel + acc[i] * self.sim_step, 0]) + self.kernel_api.vehicle.setSpeed(vid, next_vel) + def apply_lane_change(self, veh_ids, direction): """See parent class.""" # to hand the case of a single vehicle From ddf6a2435d0c2ca7eafe0dd6292ec574626bd397 Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Tue, 26 May 2020 14:12:23 -0700 Subject: [PATCH 190/438] added failsafe methods for max accel/decel and speed limit, and all --- flow/controllers/base_controller.py | 78 +++++++++++++++++++++++++++-- 1 file changed, 75 insertions(+), 3 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 7adcdf310..0984349d3 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -34,7 +34,7 @@ class BaseController: delay : int delay in applying the action (time) fail_safe : str - Should be either "instantaneous" or "safe_velocity" + Should be "instantaneous", "safe_velocity", "feasible_accel", or "all" noise : double variance of the gaussian from which to sample a noisy acceleration """ @@ -75,8 +75,10 @@ def get_action(self, env): time step. This method also augments the controller with the desired level of - stochastic noise, and utilizes the "instantaneous" or "safe_velocity" - failsafes if requested. + stochastic noise, and utilizes the "instantaneous", "safe_velocity", + "feasible_accel", or "all" failsafes if requested. The "all" failsafe + performs all three failsafes with this order: 1)"safe_velocity", + 2) "feasible_accel", 3) "instantaneous".
Parameters ---------- @@ -115,6 +117,13 @@ def get_action(self, env): accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) elif self.fail_safe == 'safe_velocity': accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) + elif self.fail_safe == 'feasible_accel': + accel_without_noise = self.get_feasible_action(accel_without_noise) + elif self.fail_safe == 'all': + accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) + accel_without_noise = self.get_feasible_action(accel_without_noise) + accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) # add noise to the accelerations, if requested @@ -126,6 +135,12 @@ def get_action(self, env): accel = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': accel = self.get_safe_velocity_action(env, accel) + elif self.fail_safe == 'feasible_accel': + accel = self.get_feasible_action(accel) + elif self.fail_safe == 'all': + accel = self.get_safe_velocity_action(env, accel) + accel = self.get_feasible_action(accel) + accel = self.get_safe_action_instantaneous(env, accel) return accel @@ -172,6 +187,14 @@ def get_safe_action_instantaneous(self, env, action): # if the vehicle will crash into the vehicle ahead of it in the # next time step (assuming the vehicle ahead of it is not # moving), then stop immediately + print( + "=====================================\n" + "Vehicle {} is about to crash. Instantaneous acceleration " + "clipping applied.\n" + "=====================================".format(self.veh_id)) + + print("Vehicle {} is about to crash. Instantaneous acceleration" + "clipping applied.".format(self.veh_id)) return -this_vel / sim_step else: # if the vehicle is not in danger of crashing, continue with @@ -245,4 +268,53 @@ def safe_velocity(self, env): v_safe = 2 * h / env.sim_step + dv - this_vel * (2 * self.delay) + # check for speed limit + this_edge = env.k.vehicle.get_edge(self.veh_id) + edge_speed_limit = env.k.network.speed_limit(this_edge) + + if v_safe > edge_speed_limit: + v_safe = edge_speed_limit + print( + "=====================================\n" + "Speed of vehicle {} is greater than speed limit. Safe " + "velocity clipping applied.\n" + "=====================================".format(self.veh_id)) + return v_safe + + def get_feasible_action(self, action): + """Perform the "feasible_accel" failsafe action. + + Checks if the computed acceleration would put us above maximum + acceleration or deceleration. If it would, output the acceleration + equal to maximum acceleration or deceleration. + + Parameters + ---------- + action : float + requested acceleration action + + Returns + ------- + float + the requested action clipped by the safe velocity + """ + if action > self.max_accel: + action = self.max_accel + + print( + "=====================================\n" + "Acceleration of vehicle {} is greater than the max " + "acceleration. Feasible acceleration clipping applied.\n" + "=====================================".format(self.veh_id)) + + if action < -self.max_deaccel: + action = -self.max_deaccel + + print( + "=====================================\n" + "Deceleration of vehicle {} is greater than the max " + "deceleration. 
Feasible acceleration clipping applied.\n" + "=====================================".format(self.veh_id)) + + return action From 53cf035684b02668fa2116942c75b02cd4398d29 Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Tue, 26 May 2020 14:22:16 -0700 Subject: [PATCH 191/438] removed json file which was added by mistake --- flow/core/kernel/network/flow_params.json | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 flow/core/kernel/network/flow_params.json diff --git a/flow/core/kernel/network/flow_params.json b/flow/core/kernel/network/flow_params.json deleted file mode 100644 index c70a22e63..000000000 --- a/flow/core/kernel/network/flow_params.json +++ /dev/null @@ -1,2 +0,0 @@ -{ - "env": \ No newline at end of file From b16d949c32b0ef2ef6d1a9ad400fe514769418a0 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 26 May 2020 14:28:40 -0700 Subject: [PATCH 192/438] leader utils added --- flow/data_pipeline/data_pipeline.py | 12 +-- flow/data_pipeline/leaderboard_utils.py | 116 ++++++++++++++++++++++++ flow/data_pipeline/query.py | 13 +-- 3 files changed, 129 insertions(+), 12 deletions(-) create mode 100644 flow/data_pipeline/leaderboard_utils.py diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 72caa5218..d414aa420 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -93,19 +93,19 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): for vid in veh_ids: extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) extra_info["id"].append(vid) + position = veh_kernel.get_2d_position(vid) + extra_info["x"].append(position[0]) + extra_info["y"].append(position[1]) + extra_info["speed"].append(veh_kernel.get_speed(vid)) extra_info["headway"].append(veh_kernel.get_headway(vid)) extra_info["acceleration"].append(veh_kernel.get_accel(vid)) + extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) + extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["leader_id"].append(veh_kernel.get_leader(vid)) extra_info["follower_id"].append(veh_kernel.get_follower(vid)) extra_info["leader_rel_speed"].append(veh_kernel.get_speed( veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) - extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) - extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) - position = veh_kernel.get_2d_position(vid) - extra_info["x"].append(position[0]) - extra_info["y"].append(position[1]) - extra_info["speed"].append(veh_kernel.get_speed(vid)) class AthenaQuery: diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py new file mode 100644 index 000000000..5eb5a7be6 --- /dev/null +++ b/flow/data_pipeline/leaderboard_utils.py @@ -0,0 +1,116 @@ +import os +import boto3 +import pandas as pd +from io import StringIO + + +def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipeline"): + """Fetch tables from s3 and store in ./result directory. 
+ + Parameters + ---------- + table_name: str + The name of the table to retrieve from S3; the currently available tables are: + fact_vehicle_trace + fact_energy_trace + fact_network_throughput_agg + fact_network_inflows_outflows + fact_vehicle_fuel_efficiency_agg + fact_network_metrics_by_distance_agg + fact_network_metrics_by_time_agg + fact_network_fuel_efficiency_agg + leaderboard_chart + bucket: str + the S3 bucket that holds these tables + """ + try: + os.makedirs("result/{}".format(table_name)) + except FileExistsError: + pass + s3 = boto3.client("s3") + response = s3.list_objects_v2(Bucket=bucket) + keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] + names = ["{}_{}.csv".format(e.split("/")[1].replace("date=", ""), + e.split("/")[2].replace("partition_name=", ""))for e in keys] + existing_results = os.listdir("./result/{}".format(table_name)) + for index in range(len(keys)): + if names[index] not in existing_results: + s3.download_file(bucket, keys[index], "./result/{}/{}".format(table_name, names[index])) + + +def get_table_memory(table_name="fact_vehicle_trace", bucket="circles.data.pipeline", existing_results=()): + """Fetch tables from s3 and return them as in-memory pandas dataframe objects. + + Parameters + ---------- + bucket: str + the S3 bucket that holds the tables + table_name: str + the name of the table to retrieve from S3, for detail see get_table_disk + existing_results: list + tables that should not be fetched, + the names must follow the convention: + {source_id(no run number)}_{query_name}.csv + + Returns + ------- + file_list: dict + a dictionary of pandas dataframes, each contains a table from S3 + The dataframes are keyed by their name: {source_id(no run number)}_{query_name}.csv + + """ + s3 = boto3.client("s3") + response = s3.list_objects_v2(Bucket=bucket) + keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] + names = ["{}_{}.csv".format(e.split("/")[1].replace("date=", ""), + e.split("/")[2].replace("partition_name=", ""))for e in keys] + results = dict() + for index in range(len(keys)): + if names[index] not in existing_results: + obj = s3.get_object(Bucket=bucket, Key=keys[index])["Body"] + obj_str = obj.read().decode("utf-8") + results[names[index]] = pd.read_csv(StringIO(obj_str)) + return results + + +def get_table_url(table_name="fact_vehicle_trace", bucket="circles.data.pipeline", existing_results=()): + """Fetch tables from s3 and return as urls; requires the bucket to have public access.
+ + Parameters + ---------- + bucket: str + the S3 bucket that holds the tables + table_name: str + the name of the table to retrieve from S3, for detail see get_table_disk + existing_results: list + tables that should not be fetched, + the names must follow the convention: + {source_id(no run number)}_{query_name}.csv + + Returns + ------- + file_list: dict + a dictionary of urls, each contains a table from S3 + The urls are keyed by their name: {source_id(no run number)}_{query_name}.csv + + """ + s3 = boto3.client("s3") + response = s3.list_objects_v2(Bucket=bucket) + keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] + names = ["{}_{}.csv".format(e.split("/")[1].replace("date=", ""), + e.split("/")[2].replace("partition_name=", "")) for e in keys] + results = dict() + for index in range(len(keys)): + if names[index] not in existing_results: + results[names[index]] = "https://{}.s3.{}.amazonaws.com/{}".format(bucket, "us-west-2", keys[index]) + return results + + +def get_metadata(name, bucket="circles.data.pipeline"): + s3 = boto3.client("s3") + name_list = name.split('_') + source_id = "flow_{}".format(name_list[2]) + response = s3.head_object(Bucket=bucket, + Key="vehicle_trace_table/date={0}/partition_name={1}/{1}.csv".format(name_list[0], + source_id)) + return response["Metadata"] diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index abc3bcd53..5e9aa27b2 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -19,7 +19,7 @@ "fact_vehicle_fuel_efficiency_agg", "fact_network_metrics_by_distance_agg", "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart"] -VEHICLE_POWER_DEMAND_FINAL_SELECT = """ +VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT id, time_step, @@ -31,11 +31,11 @@ WHEN acceleration > 0 THEN 1 WHEN acceleration < 0 THEN 0 ELSE 0.5 - END * (1 - {}) + {}) * acceleration + 9.81 * SIN(road_grade) + END * (1 - {0}) + {0}) * acceleration + 9.81 * SIN(road_grade) ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3)) AS power, - \'{}\' AS energy_model_id, + \'{1}\' AS energy_model_id, source_id - FROM {} + FROM {2} ORDER BY id, time_step """ @@ -119,8 +119,9 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', - 'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) From 528f0aace706fc8a3de99aba720bda7c0eb309b4 Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Tue, 26 May 2020 15:14:00 -0700 Subject: [PATCH 193/438] fixed docstrings --- flow/controllers/base_controller.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 2f01faef2..7cbbef6db 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -261,8 +261,8 @@ def safe_velocity(self, env): Returns ------- float - maximum safe velocity given a maximum deceleration and delay in - performing the braking action + maximum safe velocity given a maximum deceleration, delay in + performing the braking action, and speed limit """ lead_id = env.k.vehicle.get_leader(self.veh_id) lead_vel = env.k.vehicle.get_speed(lead_id) @@ -302,7 +302,8 @@ def get_feasible_action(self,
action): Returns ------- float - the requested action clipped by the safe velocity + the requested action clipped by the feasible acceleration or + deceleration. """ if action > self.max_accel: action = self.max_accel From cbf6a420b727f5bf1d60a9bf8ff7cef92bbfe5ae Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Tue, 26 May 2020 15:15:05 -0700 Subject: [PATCH 194/438] removed duplicated print --- flow/controllers/base_controller.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 7cbbef6db..95ecd1737 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -198,8 +198,6 @@ def get_safe_action_instantaneous(self, env, action): "clipping applied.\n" "=====================================".format(self.veh_id)) - print("Vehicle {} is about to crash. Instantaneous acceleration" - "clipping applied.".format(self.veh_id)) return -this_vel / sim_step else: # if the vehicle is not in danger of crashing, continue with From 86458115b1f2f4d1f59755fae484daa8af4b00dc Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 26 May 2020 17:07:41 -0700 Subject: [PATCH 195/438] minor docstring formatting --- flow/controllers/base_controller.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 95ecd1737..2fdb2f399 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -77,8 +77,10 @@ def get_action(self, env): This method also augments the controller with the desired level of stochastic noise, and utilizes the "instantaneous", "safe_velocity", "feasible_accel", or "all" failsafes if requested. The "all" failsafe - performs all three failsafes with this order: 1)"safe_velocity", - 2) "feasible_accel", 3) "instantaneous". + performs all three failsafes with this order: + 1) "safe_velocity", + 2) "feasible_accel", + 3) "instantaneous". Parameters ---------- From 6f8d878c68ba6d6ef4413af6d8e1d7c4d86f32dc Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 26 May 2020 19:07:14 -0700 Subject: [PATCH 196/438] fixed a minor error in energy query, added network in metadata --- flow/core/experiment.py | 9 +++++++-- flow/data_pipeline/data_pipeline.py | 11 ++++------- flow/data_pipeline/leaderboard_utils.py | 17 ++++++++++++++++- flow/data_pipeline/query.py | 4 ++-- 4 files changed, 28 insertions(+), 11 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index c50648746..16f2f04e3 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -148,8 +148,12 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] + + # data pipeline extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) + metadate = defaultdict(lambda: "") + metadate['network'] = self.env.network.name.split('_')[0] for i in range(num_runs): ret = 0 @@ -169,7 +173,8 @@ def rl_actions(*_): # collect additional information for the data pipeline get_extra_info(self.env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend(['{}_run_{}'.format(source_id, i)] * len(veh_ids)) + extra_info["source_id"].extend([source_id] * len(veh_ids)) + extra_info["run_id"].extend(['run_{}'.format(i)] * len(veh_ids)) # Compute the results for the custom callables.
for (key, lambda_func) in self.custom_callables.items(): @@ -220,7 +225,7 @@ def rl_actions(*_): cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, source_id), - trajectory_table_path, str(only_query)[2:-2]) + trajectory_table_path, metadate) # delete the S3-only version of the trajectory file # os.remove(upload_file_path) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index d414aa420..6649273a6 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -65,7 +65,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): return -def upload_to_s3(bucket_name, bucket_key, file_path, only_query): +def upload_to_s3(bucket_name, bucket_key, file_path, metadata): """Upload a file to S3 bucket. Parameters @@ -76,15 +76,12 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): the key within the bucket for the file file_path: str the path of the file to be uploaded - only_query: str - specify which query should be run on this file by lambda: - if empty: run none of them - if "all": run all available analysis query - if a string of list of queries: run only those mentioned in the list + metadata: dict + all the metadata that should be attached to this simulation """ s3 = boto3.resource("s3") s3.Bucket(bucket_name).upload_file(file_path, bucket_key, - ExtraArgs={"Metadata": {"run-query": only_query}}) + ExtraArgs={"Metadata": metadata}) return diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 5eb5a7be6..c227a50dc 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -107,10 +107,25 @@ def get_table_url(table_name="fact_vehicle_trace", bucket="circles.data.pipeline def get_metadata(name, bucket="circles.data.pipeline"): + """Get the metadata by name. 
+
+    Parameters
+    ----------
+    name: str
+        the name of the table whose metadata will be returned
+    bucket: str
+        the bucket that holds the table
+
+    Returns
+    -------
+    metadata: dict
+        a dictionary of all the metadata; there is no guarantee
+        as to which keys are included
+    """
     s3 = boto3.client("s3")
     name_list = name.split('_')
     source_id = "flow_{}".format(name_list[2])
     response = s3.head_object(Bucket=bucket,
-                              Key="vehicle_trace_table/date={0}/partition_name={1}/{1}.csv".format(name_list[0],
+                              Key="fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv".format(name_list[0],
                                                                                                    source_id))
     return response["Metadata"]

diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index 5e9aa27b2..3242cae96 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -26,7 +26,7 @@
             speed,
             acceleration,
             road_grade,
-            MAX(0, 1200 * speed * (
+            GREATEST(0, 1200 * speed * ((
                 CASE
                     WHEN acceleration > 0 THEN 1
                     WHEN acceleration < 0 THEN 0

From db33f7c084c9b5bc40283a335899859cf919d785 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Tue, 26 May 2020 19:12:54 -0700
Subject: [PATCH 197/438] fix a minor mistake in docstring

---
 flow/data_pipeline/leaderboard_utils.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py
index c227a50dc..3c86e3090 100644
--- a/flow/data_pipeline/leaderboard_utils.py
+++ b/flow/data_pipeline/leaderboard_utils.py
@@ -50,7 +50,7 @@ def get_table_memory(table_name="fact_vehicle_trace", bucket="circles.data.pipel
     existing_results: list
         tables that should not be fetched,
         the names must follow the convention:
-        {source_id(no run number)}_{query_name}.csv
+        {date}_{source_id(no run number)}_{query_name}.csv

     Returns
     -------
@@ -85,7 +85,7 @@ def get_table_url(table_name="fact_vehicle_trace", bucket="circles.data.pipeline
     existing_results: list
         tables that should not be fetched,
         the names must follow the convention:
-        {source_id(no run number)}_{query_name}.csv
+        {date}_{source_id(no run number)}_{query_name}.csv

     Returns
     -------

From 089822a82b5d95b35480f0fd8f53b3ab7c0585dd Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Tue, 26 May 2020 19:22:09 -0700
Subject: [PATCH 198/438] flake8 fix

---
 flow/data_pipeline/lambda_function.py   | 2 +-
 flow/data_pipeline/leaderboard_utils.py | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py
index 22145af9c..483439eb5 100644
--- a/flow/data_pipeline/lambda_function.py
+++ b/flow/data_pipeline/lambda_function.py
@@ -9,7 +9,7 @@

 def lambda_handler(event, context):
-    """Handles S3 put event on AWS Lambda."""
+    """Handle S3 put event on AWS Lambda."""
     records = []
     # delete all unwanted metadata
     for record in event['Records']:

diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py
index 3c86e3090..49083ce3e 100644
--- a/flow/data_pipeline/leaderboard_utils.py
+++ b/flow/data_pipeline/leaderboard_utils.py
@@ -1,3 +1,4 @@
+"""APIs for the leaderboard front end."""
 import os
 import boto3
 import pandas as pd

From c3756f8745cac215ec7f53845aa439d5aac4ef74 Mon Sep 17 00:00:00 2001
From: akashvelu
Date: Wed, 27 May 2020 09:07:46 -0700
Subject: [PATCH 199/438] Fixed trajectory_table_path

---
 flow/core/experiment.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index 779fdb0f4..82cdcd943 100755
--- a/flow/core/experiment.py
+++
b/flow/core/experiment.py @@ -213,7 +213,7 @@ def rl_actions(*_): # Delete the .xml version of the emission file. os.remove(emission_path) - trajectory_table_path = './data/' + source_id + ".csv" + trajectory_table_path = dir_path + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: From 7f68c503945c14eec9ca81fab228759d50668b39 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 27 May 2020 09:21:09 -0700 Subject: [PATCH 200/438] Fixed trajectory_table_path --- flow/visualize/i210_replay.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 57e72586a..af19111dc 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -329,7 +329,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= emission_to_csv(emission_path, output_path=output_path) # generate the trajectory output file - trajectory_table_path = './data/' + source_id + ".csv" + trajectory_table_path = dir_path + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) # upload to s3 if asked From 16697871d63f735a90aad66ace14b5d757ce73e0 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 27 May 2020 10:05:09 -0700 Subject: [PATCH 201/438] addressing comments --- flow/core/kernel/vehicle/traci.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 2166709b6..9485572b2 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,10 +113,6 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] - self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None - self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None - self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None - self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -955,7 +951,7 @@ def _prev_edge_followers(self, veh_id, edge_dict, lane, num_edges): def apply_acceleration(self, veh_ids, acc): """See parent class.""" - # to hand the case of a single vehicle + # to handle the case of a single vehicle if type(veh_ids) == str: veh_ids = [veh_ids] acc = [acc] @@ -969,7 +965,7 @@ def apply_acceleration(self, veh_ids, acc): def apply_acceleration_not_smooth(self, veh_ids, acc): """See parent class.""" - # to hand the case of a single vehicle + # to handle the case of a single vehicle if type(veh_ids) == str: veh_ids = [veh_ids] acc = [acc] From 1857f834e8187495dc0985c0fb18c330409c2750 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 27 May 2020 21:46:11 -0700 Subject: [PATCH 202/438] fixed naming convention --- flow/data_pipeline/leaderboard_utils.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 49083ce3e..cc60f0dd2 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -5,6 +5,20 @@ from io import StringIO +def key_to_name(key): + """Return the standard formatted file name from object key.""" + k_list = key.split("/") + date = k_list[1].replace("date=", 
"") + name = k_list[2].replace("partition_name=", "") + index = name.find("_", 5) + source_id = name + query_name = "" + if index != -1: + source_id = name[0:index] + query_name = "_" + name[index+1:].replace("_", "-") + return "{}_{}{}.csv".format(date, source_id.replace("_", "-"), query_name) + + def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipeline"): """Fetch tables from s3 and store in ./result directory. @@ -31,8 +45,7 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin s3 = boto3.client("s3") response = s3.list_objects_v2(Bucket=bucket) keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] - names = ["{}_{}.csv".format(e.split("/")[1].replace("date=", ""), - e.split("/")[2].replace("partition_name=", ""))for e in keys] + names = [key_to_name(k) for k in keys] existing_results = os.listdir("./result/{}".format(table_name)) for index in range(len(keys)): if names[index] not in existing_results: @@ -63,8 +76,7 @@ def get_table_memory(table_name="fact_vehicle_trace", bucket="circles.data.pipel s3 = boto3.client("s3") response = s3.list_objects_v2(Bucket=bucket) keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] - names = ["{}_{}.csv".format(e.split("/")[1].replace("date=", ""), - e.split("/")[2].replace("partition_name=", ""))for e in keys] + names = [key_to_name(k) for k in keys] results = dict() for index in range(len(keys)): if names[index] not in existing_results: @@ -98,8 +110,7 @@ def get_table_url(table_name="fact_vehicle_trace", bucket="circles.data.pipeline s3 = boto3.client("s3") response = s3.list_objects_v2(Bucket=bucket) keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] - names = ["{}_{}.csv".format(e.split("/")[1].replace("date=", ""), - e.split("/")[2].replace("partition_name=", "")) for e in keys] + names = [key_to_name(k) for k in keys] results = dict() for index in range(len(keys)): if names[index] not in existing_results: From 87ebf59836869ac45ae96017acc02e08d086ac4b Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 27 May 2020 23:06:08 -0700 Subject: [PATCH 203/438] do repair partition for all new data upon arrival --- flow/data_pipeline/data_pipeline.py | 19 +++++++++---------- flow/data_pipeline/query.py | 3 +++ 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 6649273a6..630782e6d 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -221,8 +221,16 @@ def update_partition(self, table, query_date, partition): self.existing_partitions[table].append("date={}/partition_name={}".format(query_date, partition)) return + def repair_partition(self, table, query_date, partition): + """Load the missing partitions.""" + if table not in self.existing_partitions.keys(): + self.existing_partitions[table] = self.get_existing_partitions(table) + if "date={}/partition_name={}".format(query_date, partition) not in \ + self.existing_partitions[table]: + self.update_partition(table, query_date, partition) + def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", - query_date="today", partition="default", primary_table=""): + query_date="today", partition="default"): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -235,8 +243,6 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu name of the partition date to run this query on partition: str, optional name of the partition to run this query on - primary_table: str - the table whose partition that may need update Returns ------- execution_id: str @@ -253,13 +259,6 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu source_id = "flow_{}".format(partition.split('_')[1]) - if primary_table: - if primary_table not in self.existing_partitions.keys(): - self.existing_partitions[primary_table] = self.get_existing_partitions(primary_table) - if "date={}/partition_name={}".format(query_date, partition) not in \ - self.existing_partitions[primary_table]: - self.update_partition(primary_table, query_date, partition) - response = self.client.start_query_execution( QueryString=QueryStrings[query_name].value.format(date=query_date, partition=source_id), QueryExecutionContext={ diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 3242cae96..0d026eb3c 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -280,6 +280,7 @@ class QueryStrings(Enum): FULL OUTER JOIN outflows o ON 1 = 1 AND i.time_step = o.time_step AND i.source_id = o.source_id + ORDER BY time_step ;""" FACT_NETWORK_METRICS_BY_DISTANCE_AGG = """ @@ -388,6 +389,7 @@ class QueryStrings(Enum): FULL OUTER JOIN binned_energy be ON 1 = 1 AND COALESCE(bce.source_id, bsa.source_id) = be.source_id AND COALESCE(bce.distance_meters_bin, bce.distance_meters_bin) = be.distance_meters_bin + ORDER BY distance_meters_bin ASC ;""" FACT_NETWORK_METRICS_BY_TIME_AGG = """ @@ -495,4 +497,5 @@ class QueryStrings(Enum): FULL OUTER JOIN binned_energy be ON 1 = 1 AND COALESCE(bce.source_id, bsa.source_id) = be.source_id AND COALESCE(bce.time_seconds_bin, bce.time_seconds_bin) = be.time_seconds_bin + ORDER BY time_seconds_bin ASC ;""" From 05e793a37a8d792d1d373d6d68097e00a8881592 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 28 May 2020 01:23:13 -0700 Subject: [PATCH 204/438] added leaderboard chart aggregation --- flow/core/experiment.py | 2 +- flow/data_pipeline/data_pipeline.py | 9 ++++++ flow/data_pipeline/query.py | 44 ++++++++++++++++++++--------- flow/visualize/i210_replay.py | 2 +- 4 files changed, 42 insertions(+), 15 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 20154977a..e9668d9db 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -218,7 +218,7 @@ def rl_actions(*_): # Delete the .xml version of the emission file. 
os.remove(emission_path) - trajectory_table_path = dir_path + source_id + ".csv" + trajectory_table_path = "{}/{}.csv".format(dir_path, source_id) generate_trajectory_from_flow(trajectory_table_path, extra_info) if to_aws: diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index a8ffb81c4..5c9346c40 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -110,6 +110,15 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) +def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): + """Delete the obsolete data on S3""" + response = s3.list_objects_v2(Bucket=bucket) + keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table) == 0 and e["Key"][-4:] == ".csv"] + keys.remove(latest_key) + for key in keys: + s3.delete_object(Bucket=bucket, Key=key) + + class AthenaQuery: """ Class used to run query. diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 0a75eb382..04793cc73 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -12,12 +12,14 @@ ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, "fact_vehicle_fuel_efficiency_agg": {"fact_network_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"]}, - "fact_network_fuel_efficiency_agg": {"leaderboard_chart": ["LEADERBOARD_CHART"]} + "fact_network_fuel_efficiency_agg": {"leaderboard_chart": ["LEADERBOARD_CHART"]}, + "leaderboard_chart": {"leaderboard_chart_agg": ["LEADERBOARD_CHART_AGG"]} } tables = ["fact_vehicle_trace", "fact_energy_trace", "fact_network_throughput_agg", "fact_network_inflows_outflows", "fact_vehicle_fuel_efficiency_agg", "fact_network_metrics_by_distance_agg", - "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart"] + "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart", + "leaderboard_chart_agg"] VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT @@ -343,9 +345,9 @@ class QueryStrings(Enum): AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, - AVG(accel_without_noise) AS accel_avg, - AVG(accel_without_noise) + STDDEV(accel_without_noise) AS accel_upper_bound, - AVG(accel_without_noise) - STDDEV(accel_without_noise) AS accel_lower_bound + AVG(target_accel_no_noise_with_failsafe) AS accel_avg, + AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe) AS accel_upper_bound, + AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' @@ -405,10 +407,11 @@ class QueryStrings(Enum): vt.x, energy_model_id, vt.time_step - LAG(vt.time_step, 1) - OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) + AS sim_step, SUM(power) - OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS - cumulative_power + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) + AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 AND vt.date = \'{date}\' @@ -451,9 +454,11 @@ class QueryStrings(Enum): AVG(speed) AS 
speed_avg,
            AVG(speed) + STDDEV(speed) AS speed_upper_bound,
            AVG(speed) - STDDEV(speed) AS speed_lower_bound,
-            AVG(accel_without_noise) AS accel_avg,
-            AVG(accel_without_noise) + STDDEV(accel_without_noise) AS accel_upper_bound,
-            AVG(accel_without_noise) - STDDEV(accel_without_noise) AS accel_lower_bound
+            AVG(target_accel_no_noise_with_failsafe) AS accel_avg,
+            AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe)
+                AS accel_upper_bound,
+            AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe)
+                AS accel_lower_bound
        FROM fact_vehicle_trace
        WHERE 1 = 1
            AND date = \'{date}\'
@@ -466,8 +471,10 @@
            source_id,
            id,
            CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin,
-            FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start,
-            LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end
+            FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC)
+                AS energy_start,
+            LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC)
+                AS energy_end
        FROM cumulative_energy
    ), binned_energy AS (
        SELECT
@@ -503,3 +510,14 @@
        AND COALESCE(bce.time_seconds_bin, bce.time_seconds_bin) = be.time_seconds_bin
    ORDER BY time_seconds_bin ASC
    ;"""
+
+    LEADERBOARD_CHART_AGG = """
+        SELECT
+            source_id,
+            energy_model_id,
+            efficiency_meters_per_joules,
+            efficiency_miles_per_gallon,
+            throughput_per_hour
+        FROM leaderboard_chart
+        ORDER BY date, source_id ASC
+        ;"""

diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py
index 2ac8873d5..0f1d54eb5 100644
--- a/flow/visualize/i210_replay.py
+++ b/flow/visualize/i210_replay.py
@@ -329,7 +329,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
             emission_to_csv(emission_path, output_path=output_path)

         # generate the trajectory output file
-        trajectory_table_path = dir_path + source_id + ".csv"
+        trajectory_table_path = "{}/{}.csv".format(dir_path, source_id)
         upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info)

         # upload to s3 if asked

From 7a75c6e36f267c441a7f1c4c3b1a3347a5cc5ac5 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Thu, 28 May 2020 01:38:30 -0700
Subject: [PATCH 205/438] update lambda function, added some comments

---
 flow/data_pipeline/lambda_function.py   | 28 ++++++++++++++++++-------
 flow/data_pipeline/leaderboard_utils.py |  6 ++++++
 2 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py
index 483439eb5..3a9f55ded 100644
--- a/flow/data_pipeline/lambda_function.py
+++ b/flow/data_pipeline/lambda_function.py
@@ -1,7 +1,7 @@
 """lambda function on AWS Lambda."""
 import boto3
 from urllib.parse import unquote_plus
-from flow.data_pipeline.data_pipeline import AthenaQuery
+from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data
 from flow.data_pipeline.query import tags, tables

 s3 = boto3.client('s3')
@@ -11,22 +11,34 @@ def lambda_handler(event, context):
     """Handle S3 put event on AWS Lambda."""
     records = []
-    # delete all unwanted metadata
+    # do a pre-sweep to handle tasks other than initializing a query
     for record in event['Records']:
         bucket = record['s3']['bucket']['name']
         key = unquote_plus(record['s3']['object']['key'])
         table = key.split('/')[0]
         if table not in tables:
             continue
-        if key[-9:] == '.metadata':
-            s3.delete_object(Bucket=bucket, Key=key)
-            continue
-        if table in tags.keys():
-            records.append((bucket, key, table))
-    # initialize the queries
-    for bucket, key, table in records:
+
+        # delete unwanted metadata files
+        if (key[-9:] == '.metadata'):
+            s3.delete_object(Bucket=bucket, Key=key)
+            continue
+
+        # load the partition for newly added table
         query_date = key.split('/')[-3].split('=')[-1]
         partition = key.split('/')[-2].split('=')[-1]
+        queryEngine.repair_partition(table, query_date, partition)
+
+        # delete obsolete data
+        if table == "leaderboard_chart_agg":
+            delete_obsolete_data(s3, key, table)
+
+        # add tables that need to start a query to the list
+        if table in tags.keys():
+            records.append((bucket, key, table, query_date, partition))
+
+    # initialize the queries
+    for bucket, key, table, query_date, partition in records:
         source_id = "flow_{}".format(partition.split('_')[1])
         # response = s3.head_object(Bucket=bucket, Key=key)
         # required_query = response["Metadata"]["run-query"]
@@ -45,4 +57,4 @@
                                                             query_date,
                                                             source_id,
                                                             query_name)
-        queryEngine.run_query(query_name, result_location, query_date, partition, table)
+        queryEngine.run_query(query_name, result_location, query_date, partition)

diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py
index cc60f0dd2..7a6eb6b5b 100644
--- a/flow/data_pipeline/leaderboard_utils.py
+++ b/flow/data_pipeline/leaderboard_utils.py
@@ -35,6 +35,12 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin
         fact_network_metrics_by_time_agg
         fact_network_fuel_efficiency_agg
         leaderboard_chart
+        leaderboard_chart_agg
+        Note that leaderboard_chart_agg is a combination of all previous
+        leaderboard_chart entries in one CSV file. It's only used to
+        avoid burdening the web server with extra computation. The date
+        and source_id in its name are always going to reflect the latest
+        leaderboard_chart entry.
    bucket: str
        the S3 bucket that holds these tables
    """

From 8fac7208565f22df9063da3df23000a3aaff68b3 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Thu, 28 May 2020 11:15:55 -0700
Subject: [PATCH 206/438] minor change to get_table_disk

---
 flow/data_pipeline/leaderboard_utils.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py
index 7a6eb6b5b..58366d4d6 100644
--- a/flow/data_pipeline/leaderboard_utils.py
+++ b/flow/data_pipeline/leaderboard_utils.py
@@ -53,6 +53,9 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin
     keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"]
     names = [key_to_name(k) for k in keys]
     existing_results = os.listdir("./result/{}".format(table_name))
+    if table_name == "leaderboard_chart_agg":
+        for p in existing_results:
+            os.remove(p)
     for index in range(len(keys)):
         if names[index] not in existing_results:
             s3.download_file(bucket, keys[index], "./result/{}/{}".format(table_name, names[index]))

From 9537bb4b0266995290a578ff16d0af63a4ee1fc7 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Thu, 28 May 2020 11:17:58 -0700
Subject: [PATCH 207/438] fix minor path issue

---
 flow/data_pipeline/leaderboard_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py
index 58366d4d6..f2c26d01b 100644
--- a/flow/data_pipeline/leaderboard_utils.py
+++ b/flow/data_pipeline/leaderboard_utils.py
@@ -55,7 +55,7 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin
     existing_results = os.listdir("./result/{}".format(table_name))
     if table_name == "leaderboard_chart_agg":
         for p in existing_results:
-            os.remove(p)
+            os.remove("./result/{}/{}".format(table_name, p))
     for index in range(len(keys)):
         if names[index] not in existing_results:
             s3.download_file(bucket, keys[index], "./result/{}/{}".format(table_name, names[index]))

From ff90e8d1b7548deeeeae665f2d3016868e976aa8 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Thu, 28 May 2020 11:20:46 -0700
Subject: [PATCH 208/438] move deleting leaderboard_chart_agg to after downloading

---
 flow/data_pipeline/leaderboard_utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py
index f2c26d01b..f4476b2eb 100644
--- a/flow/data_pipeline/leaderboard_utils.py
+++ b/flow/data_pipeline/leaderboard_utils.py
@@ -53,12 +53,12 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin
     keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"]
     names = [key_to_name(k) for k in keys]
     existing_results = os.listdir("./result/{}".format(table_name))
-    if table_name == "leaderboard_chart_agg":
-        for p in existing_results:
-            os.remove("./result/{}/{}".format(table_name, p))
     for index in range(len(keys)):
         if names[index] not in existing_results:
             s3.download_file(bucket, keys[index], "./result/{}/{}".format(table_name, names[index]))
+    if table_name == "leaderboard_chart_agg":
+        for p in existing_results:
+            os.remove("./result/{}/{}".format(table_name, p))
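To make the file-naming convention these data-pipeline patches converge on concrete: key_to_name (introduced in [PATCH 202/438] above) maps an S3 object key of the form {table}/date={date}/partition_name={partition}/{partition}.csv to a flat file name {date}_{source-id}_{query-name}.csv, with the underscores inside each id replaced by hyphens. A minimal sketch of the expected behavior; the source id "abc42" in the key below is hypothetical, made up for illustration:

    from flow.data_pipeline.leaderboard_utils import key_to_name

    # hypothetical key following the fact-table layout used in these patches
    key = ("fact_vehicle_fuel_efficiency_agg/"
           "date=2020-05-28/"
           "partition_name=flow_abc42_fact_vehicle_fuel_efficiency_agg/"
           "flow_abc42_fact_vehicle_fuel_efficiency_agg.csv")

    # the date comes from the second path segment, the source id and query
    # name from the third; name.find("_", 5) starts searching after the
    # underscore in "flow_", so the split lands on the underscore that
    # separates the source id from the query name
    print(key_to_name(key))
    # -> 2020-05-28_flow-abc42_fact-vehicle-fuel-efficiency-agg.csv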
From f7a278c585e0076e12ce9436a340e4d3d87aa1c6 Mon Sep 17 00:00:00 2001
From: Eugene Vinitsky
Date: Thu, 28 May 2020 15:27:18 -0400
Subject: [PATCH 209/438] Network update (#953)

Add new I210 models and envs.
---
 docs/source/flow_setup.rst                    |   16 +
 examples/exp_configs/non_rl/highway_single.py |   12 +-
 .../exp_configs/non_rl/i210_subnetwork.py     |  249 +-
 examples/exp_configs/non_rl/straight_road.py  |    7 +-
 .../rl/multiagent/multiagent_i210.py          |  273 +-
 .../rl/multiagent/multiagent_straight_road.py |   58 +-
 .../i210_with_ghost_cell_with_downstream.xml  |   10 +-
 ...0_with_ghost_cell_with_downstream_test.xml | 5719 +++++++++++++++++
 examples/train.py                             |   94 +-
 flow/algorithms/centralized_PPO.py            |  547 ++
 flow/algorithms/custom_ppo.py                 |  318 +
 flow/controllers/car_following_models.py      |    1 +
 flow/controllers/velocity_controllers.py      |   84 +-
 flow/core/kernel/vehicle/base.py              |    1 +
 flow/core/rewards.py                          |    4 +-
 flow/envs/base.py                             |    2 +
 flow/envs/multiagent/__init__.py              |    1 +
 flow/envs/multiagent/base.py                  |    3 +
 flow/envs/multiagent/i210.py                  |  201 +-
 flow/networks/i210_subnetwork_ghost_cell.py   |  162 +
 flow/visualize/time_space_diagram.py          |   29 +-
 flow/visualize/visualizer_rllib.py            |   27 +-
 scripts/ray_autoscale.yaml                    |   16 +-
 23 files changed, 7550 insertions(+), 284 deletions(-)
 create mode 100644 examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream_test.xml
 create mode 100644 flow/algorithms/centralized_PPO.py
 create mode 100644 flow/algorithms/custom_ppo.py
 create mode 100644 flow/networks/i210_subnetwork_ghost_cell.py

diff --git a/docs/source/flow_setup.rst b/docs/source/flow_setup.rst
index 60734b7b1..cbe585d36 100644
--- a/docs/source/flow_setup.rst
+++ b/docs/source/flow_setup.rst
@@ -112,6 +112,22 @@ If you are a Mac user and the above command gives you the error
 ``FXApp:openDisplay: unable to open display :0.0``, make sure to open the
 application XQuartz.

+*Troubleshooting*:
+If you are a Mac user and the above command gives you the error
+``Segmentation fault: 11``, make sure to reinstall ``fox`` using brew.
+:: + + # Uninstall Catalina bottle of fox: + $ brew uninstall --ignore-dependencies fox + + # Edit brew Formula of fox: + $ brew edit fox + + # Comment out or delete the following line: sha256 "c6697be294c9a0458580564d59f8db32791beb5e67a05a6246e0b969ffc068bc" => :catalina + # Install Mojave bottle of fox: + $ brew install fox + + Testing your SUMO and Flow installation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py index 7e0a5eb49..0a9a6774b 100644 --- a/examples/exp_configs/non_rl/highway_single.py +++ b/examples/exp_configs/non_rl/highway_single.py @@ -1,4 +1,5 @@ """Example of an open network with human-driven vehicles and a wave.""" + import numpy as np from flow.controllers import IDMController @@ -10,8 +11,8 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams +from flow.core.rewards import miles_per_gallon, miles_per_megajoule from flow.core.params import SumoCarFollowingParams -from flow.core.rewards import miles_per_megajoule from flow.networks import HighwayNetwork from flow.envs import TestEnv from flow.networks.highway import ADDITIONAL_NET_PARAMS @@ -23,7 +24,7 @@ # the inflow rate of vehicles TRAFFIC_FLOW = 2215 # the simulation time horizon (in steps) -HORIZON = 1500 +HORIZON = 1000 # whether to include noise in the car-following models INCLUDE_NOISE = True @@ -64,13 +65,13 @@ ), ) -inflows = InFlows() if PENETRATION_RATE > 0.0: vehicles.add( "av", + color='red', num_vehicles=0, - acceleration_controller=(FollowerStopper, {"v_des": 6.0}), + acceleration_controller=(FollowerStopper, {"v_des": 5.0, "control_length": [500, 2300]}), ) inflows = InFlows() @@ -145,5 +146,8 @@ env.k.vehicle.get_outflow_rate(120)), "miles_per_megajoule": lambda env: np.nan_to_num( miles_per_megajoule(env, env.k.vehicle.get_ids(), gain=1.0) + ), + "miles_per_gallon": lambda env: np.nan_to_num( + miles_per_gallon(env, env.k.vehicle.get_ids(), gain=1.0) ) } diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 25565bb49..65131a6bd 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -2,8 +2,9 @@ import os import numpy as np -from flow.controllers import IDMController -from flow.controllers import I210Router +from flow.controllers.car_following_models import IDMController +from flow.controllers.velocity_controllers import FollowerStopper +from flow.controllers.routing_controllers import I210Router from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -11,94 +12,181 @@ from flow.core.params import VehicleParams from flow.core.params import InitialConfig from flow.core.params import InFlows +from flow.core.rewards import miles_per_gallon, miles_per_megajoule + import flow.config as config from flow.envs import TestEnv -from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# =========================================================================== # -# Specify some configurable constants. 
# -# =========================================================================== # +# Instantiate which conditions we want to be true about the network -# whether to include the upstream ghost edge in the network +# whether to include a ghost cell at the entrance WANT_GHOST_CELL = True -# whether to include the downstream slow-down edge in the network -WANT_DOWNSTREAM_BOUNDARY = True # whether to include vehicles on the on-ramp -ON_RAMP = True -# the inflow rate of vehicles (in veh/hr) -INFLOW_RATE = 5 * 2215 +ON_RAMP = False +# fraction of vehicles that are follower-stoppers. 0.10 corresponds to 10% +PENETRATION_RATE = 0.0 +# desired speed of the follower stopper vehicles +V_DES = 5.0 +# horizon over which to run the env +HORIZON = 1000 +# steps to run before follower-stopper is allowed to take control +WARMUP_STEPS = 600 + +# Number of vehicles/hour/lane +inflow_rate = 2050 # the speed of inflowing vehicles from the main edge (in m/s) -INFLOW_SPEED = 24.1 +inflow_speed = 25.5 -# =========================================================================== # -# Specify the path to the network template. # -# =========================================================================== # +accel_data = (IDMController, {'a': 1.3, 'b': 2.0, 'noise': 0.3}) + +if WANT_GHOST_CELL: + from flow.networks.i210_subnetwork_ghost_cell import I210SubNetworkGhostCell, EDGES_DISTRIBUTION -if WANT_DOWNSTREAM_BOUNDARY: - net_template = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_" - "downstream.xml") -elif WANT_GHOST_CELL: - net_template = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml") + highway_start_edge = 'ghost0' else: - net_template = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") + from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# If the ghost cell is not being used, remove it from the initial edges that -# vehicles can be placed on. -edges_distribution = EDGES_DISTRIBUTION.copy() -if not WANT_GHOST_CELL: - edges_distribution.remove("ghost0") - -# =========================================================================== # -# Specify vehicle-specific information and inflows. 
# -# =========================================================================== # + highway_start_edge = "119257914" vehicles = VehicleParams() -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 1.3, - "b": 2.0, - "noise": 0.3, - }), - routing_controller=(I210Router, {}) if ON_RAMP else None, -) inflow = InFlows() -# main highway -inflow.add( - veh_type="human", - edge="ghost0" if WANT_GHOST_CELL else "119257914", - vehs_per_hour=INFLOW_RATE, - departLane="best", - departSpeed=INFLOW_SPEED) -# on ramp + if ON_RAMP: + vehicles.add( + "human", + num_vehicles=0, + color="white", + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=accel_data, + routing_controller=(I210Router, {}) + ) + if PENETRATION_RATE > 0.0: + vehicles.add( + "av", + num_vehicles=0, + color="red", + acceleration_controller=(FollowerStopper, {"v_des": V_DES, + "no_control_edges": ["ghost0", "119257908#3"] + }), + routing_controller=(I210Router, {}) + ) + + # inflow.add( + # veh_type="human", + # edge=highway_start_edge, + # vehs_per_hour=inflow_rate, + # departLane="best", + # departSpeed=inflow_speed) + + lane_list = ['0', '1', '2', '3', '4'] + + for lane in lane_list: + inflow.add( + veh_type="human", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=inflow_speed) + inflow.add( veh_type="human", edge="27414345", - vehs_per_hour=500, + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), departLane="random", departSpeed=10) inflow.add( veh_type="human", edge="27414342#0", - vehs_per_hour=500, + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), departLane="random", departSpeed=10) -# =========================================================================== # -# Generate the flow_params dict with all relevant simulation information. 
# -# =========================================================================== # + if PENETRATION_RATE > 0.0: + for lane in lane_list: + inflow.add( + veh_type="av", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * PENETRATION_RATE), + departLane=lane, + departSpeed=inflow_speed) + + inflow.add( + veh_type="av", + edge="27414345", + vehs_per_hour=int(500 * PENETRATION_RATE), + departLane="random", + departSpeed=10) + inflow.add( + veh_type="av", + edge="27414342#0", + vehs_per_hour=int(500 * PENETRATION_RATE), + departLane="random", + departSpeed=10) + +else: + # create the base vehicle type that will be used for inflows + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=accel_data, + ) + if PENETRATION_RATE > 0.0: + vehicles.add( + "av", + color="red", + num_vehicles=0, + acceleration_controller=(FollowerStopper, {"v_des": V_DES, + "no_control_edges": ["ghost0", "119257908#3"] + }), + ) + + # If you want to turn off the fail safes uncomment this: + + # vehicles.add( + # 'human', + # num_vehicles=0, + # lane_change_params=SumoLaneChangeParams( + # lane_change_mode='strategic', + # ), + # acceleration_controller=accel_data, + # car_following_params=SumoCarFollowingParams(speed_mode='19') + # ) + + lane_list = ['0', '1', '2', '3', '4'] + + for lane in lane_list: + inflow.add( + veh_type="human", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=inflow_speed) + + if PENETRATION_RATE > 0.0: + for lane in lane_list: + inflow.add( + veh_type="av", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * PENETRATION_RATE), + departLane=lane, + departSpeed=inflow_speed) + +network_xml_file = "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream_test.xml" + +# network_xml_file = "examples/exp_configs/templates/sumo/i210_with_congestion.xml" + +NET_TEMPLATE = os.path.join(config.PROJECT_PATH, network_xml_file) + +if WANT_GHOST_CELL: + network = I210SubNetworkGhostCell +else: + network = I210SubNetwork flow_params = dict( # name of the experiment @@ -108,7 +196,7 @@ env_name=TestEnv, # name of the network class the experiment is running on - network=I210SubNetwork, + network=network, # simulator that is used by the experiment simulator='traci', @@ -117,24 +205,23 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=10000, + horizon=HORIZON, + warmup_steps=WARMUP_STEPS, + sims_per_step=3 ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, - template=net_template, - additional_params={ - "on_ramp": ON_RAMP, - "ghost_edge": WANT_GHOST_CELL, - } + template=NET_TEMPLATE, + additional_params={"on_ramp": ON_RAMP, "ghost_edge": WANT_GHOST_CELL} ), # vehicles to be placed in the network at the start of a rollout (see @@ -144,7 +231,7 @@ # parameters specifying the positioning of vehicles upon initialization/ # reset (see flow.core.params.InitialConfig) initial=InitialConfig( - edges_distribution=edges_distribution, + edges_distribution=EDGES_DISTRIBUTION, ), ) @@ -153,14 +240,20 @@ # =========================================================================== # edge_id = "119257908#1-AddedOnRampEdge" + +def valid_ids(env, veh_ids): + return 
[veh_id for veh_id in veh_ids if env.k.vehicle.get_edge(veh_id) not in ["ghost0", "119257908#3"]] + custom_callables = { "avg_merge_speed": lambda env: np.nan_to_num(np.mean( - env.k.vehicle.get_speed(env.k.vehicle.get_ids()))), + env.k.vehicle.get_speed(valid_ids(env, env.k.vehicle.get_ids())))), "avg_outflow": lambda env: np.nan_to_num( env.k.vehicle.get_outflow_rate(120)), - # we multiply by 5 to account for the vehicle length and by 1000 to convert - # into veh/km - "avg_density": lambda env: 5 * 1000 * len(env.k.vehicle.get_ids_by_edge( - edge_id)) / (env.k.network.edge_length(edge_id) - * env.k.network.num_lanes(edge_id)), + # # we multiply by 5 to account for the vehicle length and by 1000 to convert + # # into veh/km + # "avg_density": lambda env: 5 * 1000 * len(env.k.vehicle.get_ids_by_edge( + # edge_id)) / (env.k.network.edge_length(edge_id) + # * env.k.network.num_lanes(edge_id)), + "mpg": lambda env: miles_per_gallon(env, valid_ids(env, env.k.vehicle.get_ids()), gain=1.0), + "mpj": lambda env: miles_per_megajoule(env, valid_ids(env, env.k.vehicle.get_ids()), gain=1.0), } diff --git a/examples/exp_configs/non_rl/straight_road.py b/examples/exp_configs/non_rl/straight_road.py index c557ce836..1669bb896 100644 --- a/examples/exp_configs/non_rl/straight_road.py +++ b/examples/exp_configs/non_rl/straight_road.py @@ -9,6 +9,7 @@ from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ VehicleParams, SumoParams, SumoLaneChangeParams +from flow.core.rewards import miles_per_gallon from flow.networks import HighwayNetwork from flow.envs import TestEnv from flow.networks.highway import ADDITIONAL_NET_PARAMS @@ -58,7 +59,7 @@ vehicles.add( "av", num_vehicles=0, - acceleration_controller=(FollowerStopper, {"v_des": 18.0}), + acceleration_controller=(FollowerStopper, {"v_des": 12.0}), ) # add human vehicles on the highway @@ -98,7 +99,7 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, - warmup_steps=0, + warmup_steps=400, sims_per_step=1, ), @@ -128,4 +129,6 @@ custom_callables = { "avg_speed": lambda env: np.nan_to_num(np.mean( env.k.vehicle.get_speed(env.k.vehicle.get_ids_by_edge(['highway_0', 'highway_1'])))), + "mpg": lambda env: miles_per_gallon(env, env.k.vehicle.get_ids(), gain=1.0) + } diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 01b9e6082..f55917e49 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -9,6 +9,7 @@ from ray.tune.registry import register_env from flow.controllers import RLController +from flow.controllers.routing_controllers import I210Router from flow.controllers.car_following_models import IDMController import flow.config as config from flow.core.params import EnvParams @@ -25,20 +26,32 @@ from flow.utils.registry import make_create_env # SET UP PARAMETERS FOR THE SIMULATION +WANT_GHOST_CELL = True +# WANT_DOWNSTREAM_BOUNDARY = True +ON_RAMP = False +PENETRATION_RATE = 0.10 +V_DES = 7.0 +HORIZON = 1000 +WARMUP_STEPS = 600 -# number of steps per rollout -HORIZON = 2000 +inflow_rate = 2050 +inflow_speed = 25.5 + +accel_data = (IDMController, {'a': 1.3, 'b': 2.0, 'noise': 0.3}) VEH_PER_HOUR_BASE_119257914 = 10800 VEH_PER_HOUR_BASE_27414345 = 321 VEH_PER_HOUR_BASE_27414342 = 421 -# percentage of autonomous vehicles compared to human vehicles on highway -PENETRATION_RATE = 10 
+if WANT_GHOST_CELL: + from flow.networks.i210_subnetwork_ghost_cell import I210SubNetworkGhostCell, EDGES_DISTRIBUTION -# TODO: temporary fix -edges_distribution = EDGES_DISTRIBUTION.copy() -edges_distribution.remove("ghost0") + edges_distribution = EDGES_DISTRIBUTION + highway_start_edge = 'ghost0' +else: + from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION + edges_distribution = EDGES_DISTRIBUTION + highway_start_edge = "119257914" # SET UP PARAMETERS FOR THE ENVIRONMENT additional_env_params = ADDITIONAL_ENV_PARAMS.copy() @@ -49,84 +62,180 @@ 'lead_obs': True, # whether to add in a reward for the speed of nearby vehicles "local_reward": True, + # whether to use the MPG reward. Otherwise, defaults to a target velocity reward + "mpg_reward": False, + # whether to use the MPJ reward. Otherwise, defaults to a target velocity reward + "mpj_reward": False, + # how many vehicles to look back for the MPG reward + "look_back_length": 1, # whether to reroute vehicles once they have exited "reroute_on_exit": True, - 'target_velocity': 18, + 'target_velocity': 8.0, + # how many AVs there can be at once (this is only for centralized critics) + "max_num_agents": 10, + # which edges we shouldn't apply control on + "no_control_edges": ["ghost0", "119257908#3"], + + # whether to add a slight reward for opening up a gap that will be annealed out N iterations in + "headway_curriculum": False, + # how many timesteps to anneal the headway curriculum over + "headway_curriculum_iters": 100, + # weight of the headway reward + "headway_reward_gain": 2.0, + # desired time headway + "min_time_headway": 2.0, + + # whether to add a slight reward for traveling at a desired speed + "speed_curriculum": True, + # how many timesteps to anneal the headway curriculum over + "speed_curriculum_iters": 20, + # weight of the headway reward + "speed_reward_gain": 0.5, + # penalize stopped vehicles + "penalize_stops": True, + + # penalize accels + "penalize_accel": True }) # CREATE VEHICLE TYPES AND INFLOWS # no vehicles in the network vehicles = VehicleParams() -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic"), - acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), - car_following_params=SumoCarFollowingParams(speed_mode="no_collide"), -) -vehicles.add( - "av", - acceleration_controller=(RLController, {}), - num_vehicles=0, - color='red' -) inflow = InFlows() -# main highway -pen_rate = PENETRATION_RATE / 100 -assert pen_rate < 1.0, "your penetration rate is over 100%" -assert pen_rate > 0.0, "your penetration rate should be above zero" -inflow.add( - veh_type="human", - edge="119257914", - vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * (1 - pen_rate)), - # probability=1.0, - depart_lane="random", - departSpeed=20) -# # on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321 * pen_rate, -# depart_lane="random", -# depart_speed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421 * pen_rate, -# depart_lane="random", -# depart_speed=20) - -# Now add the AVs -# main highway -inflow.add( - veh_type="av", - edge="119257914", - vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * pen_rate), - # probability=1.0, - depart_lane="random", - depart_speed=20) -# # on ramp -# inflow.add( -# veh_type="av", -# edge="27414345", -# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414345 * pen_rate), -# depart_lane="random", -# depart_speed=20) -# inflow.add( -# veh_type="av", -# 
edge="27414342#0", -# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414342 * pen_rate), -# depart_lane="random", -# depart_speed=20) - -NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") - -warmup_steps = 0 -if additional_env_params['reroute_on_exit']: - warmup_steps = 400 + +if ON_RAMP: + vehicles.add( + "human", + num_vehicles=0, + color="white", + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=accel_data, + routing_controller=(I210Router, {}) + ) + if PENETRATION_RATE > 0.0: + vehicles.add( + "av", + num_vehicles=0, + color="red", + acceleration_controller=(RLController, {}), + routing_controller=(I210Router, {}) + ) + + # inflow.add( + # veh_type="human", + # edge=highway_start_edge, + # vehs_per_hour=inflow_rate, + # departLane="best", + # departSpeed=inflow_speed) + + lane_list = ['0', '1', '2', '3', '4'] + + for lane in lane_list: + inflow.add( + veh_type="human", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=inflow_speed) + + inflow.add( + veh_type="human", + edge="27414345", + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), + departLane="random", + departSpeed=10) + inflow.add( + veh_type="human", + edge="27414342#0", + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), + departLane="random", + departSpeed=10) + + if PENETRATION_RATE > 0.0: + for lane in lane_list: + inflow.add( + veh_type="av", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * PENETRATION_RATE), + departLane=lane, + departSpeed=inflow_speed) + + inflow.add( + veh_type="av", + edge="27414345", + vehs_per_hour=int(500 * PENETRATION_RATE), + departLane="random", + departSpeed=10) + inflow.add( + veh_type="av", + edge="27414342#0", + vehs_per_hour=int(500 * PENETRATION_RATE), + departLane="random", + departSpeed=10) + +else: + # create the base vehicle type that will be used for inflows + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=accel_data, + ) + if PENETRATION_RATE > 0.0: + vehicles.add( + "av", + color="red", + num_vehicles=0, + acceleration_controller=(RLController, {}), + ) + + # If you want to turn off the fail safes uncomment this: + + # vehicles.add( + # 'human', + # num_vehicles=0, + # lane_change_params=SumoLaneChangeParams( + # lane_change_mode='strategic', + # ), + # acceleration_controller=accel_data, + # car_following_params=SumoCarFollowingParams(speed_mode='19') + # ) + + lane_list = ['0', '1', '2', '3', '4'] + + for lane in lane_list: + inflow.add( + veh_type="human", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=inflow_speed) + + if PENETRATION_RATE > 0.0: + for lane in lane_list: + inflow.add( + veh_type="av", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * PENETRATION_RATE), + departLane=lane, + departSpeed=inflow_speed) + + +network_xml_file = "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream_test.xml" + +# network_xml_file = "examples/exp_configs/templates/sumo/i210_with_congestion.xml" + +NET_TEMPLATE = os.path.join(config.PROJECT_PATH, network_xml_file) + +if WANT_GHOST_CELL: + network = I210SubNetworkGhostCell +else: + network = I210SubNetwork flow_params = dict( # name of the experiment @@ -136,14 +245,14 @@ env_name=I210MultiEnv, # name of the network class the experiment is running on - 
network=I210SubNetwork, + network=network, # simulator that is used by the experiment simulator='traci', # simulation-related parameters sim=SumoParams( - sim_step=0.5, + sim_step=0.4, render=False, color_by_speed=False, restart_instance=True, @@ -154,8 +263,8 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, - sims_per_step=1, - warmup_steps=warmup_steps, + sims_per_step=3, + warmup_steps=WARMUP_STEPS, additional_params=additional_env_params, done_at_exit=False ), @@ -166,8 +275,8 @@ inflows=inflow, template=NET_TEMPLATE, additional_params={ - "on_ramp": False, - "ghost_edge": False + "on_ramp": ON_RAMP, + "ghost_edge": WANT_GHOST_CELL } ), diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py index ec71a2f42..5816d3fe7 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py +++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py @@ -6,14 +6,13 @@ from flow.controllers import RLController, IDMController from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ VehicleParams, SumoParams, SumoLaneChangeParams, SumoCarFollowingParams -from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS from flow.networks import HighwayNetwork +from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS from flow.envs.multiagent import MultiStraightRoad from flow.networks.highway import ADDITIONAL_NET_PARAMS from flow.utils.registry import make_create_env from ray.tune.registry import register_env - # SET UP PARAMETERS FOR THE SIMULATION # the speed of vehicles entering the network @@ -23,7 +22,7 @@ # the inflow rate of vehicles HIGHWAY_INFLOW_RATE = 2215 # the simulation time horizon (in steps) -HORIZON = 1500 +HORIZON = 1000 # whether to include noise in the car-following models INCLUDE_NOISE = True @@ -54,11 +53,42 @@ additional_env_params.update({ 'max_accel': 2.6, 'max_decel': 4.5, - 'target_velocity': 18, + 'target_velocity': 6.0, 'local_reward': True, 'lead_obs': True, + 'control_range': [500, 2300], # whether to reroute vehicles once they have exited - "reroute_on_exit": True + "reroute_on_exit": True, + # whether to use the MPG reward. Otherwise, defaults to a target velocity reward + "mpg_reward": False, + # whether to use the joules reward. 
Otherwise, defaults to a target velocity reward
+    "mpj_reward": False,
+    # how many vehicles to look back for the MPG reward
+    "look_back_length": 3,
+    # how many AVs there can be at once (this is only for centralized critics)
+    "max_num_agents": 10,
+
+    # whether to add a slight reward for opening up a gap that will be annealed out N iterations in
+    "headway_curriculum": False,
+    # how many timesteps to anneal the headway curriculum over
+    "headway_curriculum_iters": 100,
+    # weight of the headway reward
+    "headway_reward_gain": 2.0,
+    # desired time headway
+    "min_time_headway": 2.0,
+
+    # whether to add a slight reward for traveling at a desired speed
+    "speed_curriculum": True,
+    # how many timesteps to anneal the headway curriculum over
+    "speed_curriculum_iters": 20,
+    # weight of the headway reward
+    "speed_reward_gain": 1.0,
+
+    # penalize stopped vehicles
+    "penalize_stops": True,
+
+    # penalize accels
+    "penalize_accel": True
 })

@@ -66,8 +96,6 @@
 vehicles = VehicleParams()
 inflows = InFlows()
-
-# human vehicles
 vehicles.add(
     "human",
     acceleration_controller=(IDMController, {
@@ -96,7 +124,7 @@
     edge="highway_0",
     vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)),
     depart_lane="free",
-    depart_speed="23.0",
+    depart_speed=TRAFFIC_SPEED,
     name="idm_highway_inflow")

 # add autonomous vehicles on the highway
@@ -106,13 +134,13 @@
     edge="highway_0",
     vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)),
     depart_lane="free",
-    depart_speed="23.0",
+    depart_speed=TRAFFIC_SPEED,
     name="rl_highway_inflow")

 # SET UP FLOW PARAMETERS
 warmup_steps = 0
 if additional_env_params['reroute_on_exit']:
-    warmup_steps = 400
+    warmup_steps = 500

 flow_params = dict(
     # name of the experiment
@@ -131,16 +159,16 @@
     env=EnvParams(
         horizon=HORIZON,
         warmup_steps=warmup_steps,
-        sims_per_step=1,  # do not put more than one
-        additional_params=additional_env_params,
+        sims_per_step=3,
+        additional_params=additional_env_params
     ),

     # sumo-related parameters (see flow.core.params.SumoParams)
     sim=SumoParams(
-        sim_step=0.5,
+        sim_step=0.4,
         render=False,
-        use_ballistic=True,
-        restart_instance=True
+        restart_instance=True,
+        use_ballistic=True
     ),

     # network-related parameters (see flow.core.params.NetParams and the

diff --git a/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml
index 10d4d8d45..b9b2db479 100644
--- a/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml
+++ b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml
@@ -3501,11 +3501,11 @@
[hunk body lost in extraction: five XML lines replaced by five updated ones; only the -/+ markers survived]

diff --git a/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream_test.xml b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream_test.xml
new file mode 100644
index 000000000..ee508b730
--- /dev/null
+++ b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream_test.xml
@@ -0,0 +1,5719 @@
[new file body lost in extraction: 5,719 lines of SUMO network XML, of which only the leading + markers survived]
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/examples/train.py b/examples/train.py
index 1689d846f..74a6cd71a 100644
--- a/examples/train.py
+++ b/examples/train.py
@@ -21,17 +21,19 @@
     from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv
     from stable_baselines import PPO2
 except ImportError:
-    print("Stable-baselines not installed")
+    print("Stable-baselines not installed. Please install it if you need it.")
 
+import ray
 from ray import tune
 from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper
 try:
     from ray.rllib.agents.agent import get_agent_class
 except ImportError:
     from ray.rllib.agents.registry import get_agent_class
+from ray.tune.registry import register_env
 
 from flow.core.util import ensure_dir
-from flow.core.rewards import energy_consumption
+from flow.core.rewards import miles_per_gallon, miles_per_megajoule
 from flow.utils.registry import env_constructor
 from flow.utils.rllib import FlowParamsEncoder, get_flow_params
 from flow.utils.registry import make_create_env
@@ -58,7 +60,7 @@ def parse_args(args):
     parser.add_argument(
         'exp_title', type=str,
-        help='Title to give the run.')
+        help='Name of experiment that results will be stored in')
 
     # optional input parameters
     parser.add_argument(
@@ -66,7 +68,8 @@ def parse_args(args):
         '--rl_trainer', type=str, default="rllib",
         help='the RL trainer to use. either rllib or Stable-Baselines')
     parser.add_argument(
         '--algorithm', type=str, default="PPO",
-        help='RL algorithm to use. Options are PPO, TD3, MATD3 (MADDPG w/ TD3) right now.'
+        help='RL algorithm to use. Options are PPO, TD3, and CENTRALIZEDPPO (which uses a centralized value function)'
+        ' right now.'
     )
     parser.add_argument(
         '--num_cpus', type=int, default=1,
@@ -172,37 +175,68 @@ def setup_exps_rllib(flow_params,
     dict
         training configuration parameters
     """
-    from ray.tune.registry import register_env
     horizon = flow_params['env'].horizon
 
     alg_run = flags.algorithm.upper()
 
     if alg_run == "PPO":
-        agent_cls = get_agent_class(alg_run)
-        config = deepcopy(agent_cls._default_config)
+        from flow.algorithms.custom_ppo import CustomPPOTrainer
+        from ray.rllib.agents.ppo import DEFAULT_CONFIG
+        alg_run = CustomPPOTrainer
+        config = deepcopy(DEFAULT_CONFIG)
 
         config["num_workers"] = n_cpus
         config["horizon"] = horizon
-        config["model"].update({"fcnet_hiddens": [32, 32, 32]})
+        config["model"].update({"fcnet_hiddens": [32, 32]})
         config["train_batch_size"] = horizon * n_rollouts
-        config["gamma"] = 0.999  # discount rate
+        config["gamma"] = 0.995  # discount rate
         config["use_gae"] = True
         config["lambda"] = 0.97
         config["kl_target"] = 0.02
         config["num_sgd_iter"] = 10
+        if flags.grid_search:
+            config["lambda"] = tune.grid_search([0.5, 0.9])
+            config["lr"] = tune.grid_search([5e-4, 5e-5])
+    elif alg_run == "CENTRALIZEDPPO":
+        from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel
+        from ray.rllib.agents.ppo import DEFAULT_CONFIG
+        from ray.rllib.models import ModelCatalog
+        alg_run = CCTrainer
+        config = deepcopy(DEFAULT_CONFIG)
+        config['model']['custom_model'] = "cc_model"
+        config["model"]["custom_options"]["max_num_agents"] = flow_params['env'].additional_params['max_num_agents']
+        config["model"]["custom_options"]["central_vf_size"] = 100
+
+        ModelCatalog.register_custom_model("cc_model", CentralizedCriticModel)
+
+        config["num_workers"] = n_cpus
+        config["horizon"] = horizon
+        config["model"].update({"fcnet_hiddens": [32, 32]})
+        config["train_batch_size"] = horizon * n_rollouts
+        config["gamma"] = 0.995  # discount rate
+        config["use_gae"] = True
+        config["lambda"] = 0.97
+        config["kl_target"] = 0.02
+        config["num_sgd_iter"] = 10
+        if flags.grid_search:
+            config["lambda"] = tune.grid_search([0.5, 0.9])
+            config["lr"] = tune.grid_search([5e-4, 5e-5])
+
     elif alg_run == "TD3":
         agent_cls = get_agent_class(alg_run)
         config = deepcopy(agent_cls._default_config)
 
         config["num_workers"] = n_cpus
         config["horizon"] = horizon
+        config["learning_starts"] = 10000
         config["buffer_size"] = 20000  # reduced to test if this is the source of memory problems
         if flags.grid_search:
             config["prioritized_replay"] = tune.grid_search(['True', 'False'])
             config["actor_lr"] = tune.grid_search([1e-3, 1e-4])
             config["critic_lr"] = tune.grid_search([1e-3, 1e-4])
             config["n_step"] = tune.grid_search([1, 10])
+
     else:
         sys.exit("We only support PPO, TD3, right now.")
@@ -210,27 +244,59 @@ def setup_exps_rllib(flow_params,
     def on_episode_start(info):
         episode = info["episode"]
         episode.user_data["avg_speed"] = []
+        episode.user_data["avg_speed_avs"] = []
         episode.user_data["avg_energy"] = []
+        episode.user_data["avg_mpg"] = []
+        episode.user_data["avg_mpj"] = []
 
     def on_episode_step(info):
         episode = info["episode"]
         env = info["env"].get_unwrapped()[0]
         if isinstance(env, _GroupAgentsWrapper):
             env = env.env
-        speed = np.mean([speed for speed in env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0])
+        if hasattr(env, 'no_control_edges'):
+            veh_ids = [veh_id for veh_id in env.k.vehicle.get_ids() if (env.k.vehicle.get_speed(veh_id) >= 0
+                       and env.k.vehicle.get_edge(veh_id)
+                       not in env.no_control_edges)]
+            rl_ids = [veh_id for veh_id in env.k.vehicle.get_rl_ids() if (env.k.vehicle.get_speed(veh_id) >= 0
+                      and env.k.vehicle.get_edge(veh_id)
+                      not in env.no_control_edges)]
+        else:
+            veh_ids = [veh_id for veh_id in env.k.vehicle.get_ids() if env.k.vehicle.get_speed(veh_id) >= 0]
+            rl_ids = [veh_id for veh_id in env.k.vehicle.get_rl_ids() if env.k.vehicle.get_speed(veh_id) >= 0]
+
+        speed = np.mean([speed for speed in env.k.vehicle.get_speed(veh_ids)])
         if not np.isnan(speed):
             episode.user_data["avg_speed"].append(speed)
-        episode.user_data["avg_energy"].append(energy_consumption(env))
+        av_speed = np.mean([speed for speed in env.k.vehicle.get_speed(rl_ids) if speed >= 0])
+        if not np.isnan(av_speed):
+            episode.user_data["avg_speed_avs"].append(av_speed)
+        episode.user_data["avg_mpg"].append(miles_per_gallon(env, veh_ids, gain=1.0))
+        episode.user_data["avg_mpj"].append(miles_per_megajoule(env, veh_ids, gain=1.0))
 
     def on_episode_end(info):
         episode = info["episode"]
         avg_speed = np.mean(episode.user_data["avg_speed"])
         episode.custom_metrics["avg_speed"] = avg_speed
+        avg_speed_avs = np.mean(episode.user_data["avg_speed_avs"])
+        episode.custom_metrics["avg_speed_avs"] = avg_speed_avs
         episode.custom_metrics["avg_energy_per_veh"] = np.mean(episode.user_data["avg_energy"])
+        episode.custom_metrics["avg_mpg_per_veh"] = np.mean(episode.user_data["avg_mpg"])
+        episode.custom_metrics["avg_mpj_per_veh"] = np.mean(episode.user_data["avg_mpj"])
+
+    def on_train_result(info):
+        """Increment the training-iteration counter stored in every env copy."""
+        trainer = info["trainer"]
+        trainer.workers.foreach_worker(
+            lambda ev: ev.foreach_env(
+                lambda env: env.set_iteration_num()))
 
     config["callbacks"] = {"on_episode_start": tune.function(on_episode_start),
                            "on_episode_step": tune.function(on_episode_step),
-                           "on_episode_end": tune.function(on_episode_end)}
+                           "on_episode_end": tune.function(on_episode_end),
+                           "on_train_result": tune.function(on_train_result)}
 
     # save the flow params for replay
     flow_json = json.dumps(
@@ -240,7 +306,6 @@ def setup_exps_rllib(flow_params,
 
     # multiagent configuration
     if policy_graphs is not None:
-        print("policy_graphs", policy_graphs)
         config['multiagent'].update({'policies': policy_graphs})
     if policy_mapping_fn is not None:
         config['multiagent'].update({'policy_mapping_fn': tune.function(policy_mapping_fn)})
@@ -255,7 +320,6 @@ def setup_exps_rllib(flow_params,
 
 def train_rllib(submodule, flags):
     """Train policies using the PPO algorithm in RLlib."""
-    import ray
     flow_params = submodule.flow_params
     flow_params['sim'].render = flags.render
@@ -280,7 +344,7 @@ def trial_str_creator(trial):
     ray.init()
     exp_dict = {
         "run_or_experiment": alg_run,
-        "name": gym_name,
+        "name": flags.exp_title,
         "config": config,
         "checkpoint_freq": flags.checkpoint_freq,
         "checkpoint_at_end": True,
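
As a quick aside, a sketch of what the tune.grid_search entries above expand to (illustrative snippet, not part of the patch): Tune takes the Cartesian product over all gridded keys, so each PPO sweep launches four trials.

    from ray import tune

    # Cartesian expansion: 2 lambdas x 2 learning rates -> 4 trials:
    # (0.5, 5e-4), (0.5, 5e-5), (0.9, 5e-4), (0.9, 5e-5)
    sweep = {
        "lambda": tune.grid_search([0.5, 0.9]),
        "lr": tune.grid_search([5e-4, 5e-5]),
    }
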
diff --git a/flow/algorithms/centralized_PPO.py b/flow/algorithms/centralized_PPO.py
new file mode 100644
index 000000000..8f3b9f261
--- /dev/null
+++ b/flow/algorithms/centralized_PPO.py
@@ -0,0 +1,547 @@
+"""An example of customizing PPO to leverage a centralized critic."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import numpy as np
+
+from gym.spaces import Dict
+
+from ray import tune
+from ray.rllib.agents.ppo.ppo import PPOTrainer
+from flow.algorithms.custom_ppo import CustomPPOTFPolicy
+from ray.rllib.evaluation.postprocessing import compute_advantages, \
+    Postprocessing
+from ray.rllib.policy.sample_batch import SampleBatch
+from ray.rllib.policy.tf_policy import LearningRateSchedule, \
+    EntropyCoeffSchedule, ACTION_LOGP
+from ray.rllib.models.modelv2 import ModelV2
+from ray.rllib.models.tf.tf_modelv2 import TFModelV2
+from ray.rllib.models.tf.recurrent_tf_modelv2 import RecurrentTFModelV2
+from ray.rllib.models.model import restore_original_dimensions
+from ray.rllib.utils.annotations import override
+from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork
+from ray.rllib.utils.explained_variance import explained_variance
+from ray.rllib.utils import try_import_tf
+
+tf = try_import_tf()
+
+# Frozen logits of the policy that computed the action
+BEHAVIOUR_LOGITS = "behaviour_logits"
+
+CENTRAL_OBS = "central_obs"
+OPPONENT_ACTION = "opponent_action"
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--stop", type=int, default=100000)
+
+
+class CentralizedCriticModel(TFModelV2):
+    """Multi-agent model that implements a centralized VF."""
+
+    # TODO(@evinitsky) make this work with more than boxes
+    def __init__(self, obs_space, action_space, num_outputs, model_config,
+                 name):
+        super(CentralizedCriticModel, self).__init__(
+            obs_space, action_space, num_outputs, model_config, name)
+        # Base of the model
+        self.model = FullyConnectedNetwork(obs_space, action_space,
+                                           num_outputs, model_config, name)
+        self.register_variables(self.model.variables())
+
+        # Central VF maps (obs, opp_obs, opp_act) -> vf_pred
+        self.max_num_agents = model_config['custom_options']['max_num_agents']
+        self.obs_space_shape = obs_space.shape[0]
+        self.obs_space = obs_space
+        other_obs = tf.keras.layers.Input(
+            shape=(obs_space.shape[0] * self.max_num_agents, ), name="central_obs")
+        central_vf_dense = tf.keras.layers.Dense(
+            model_config['custom_options']['central_vf_size'],
+            activation=tf.nn.tanh, name="c_vf_dense")(other_obs)
+        central_vf_out = tf.keras.layers.Dense(
+            1, activation=None, name="c_vf_out")(central_vf_dense)
+        self.central_vf = tf.keras.Model(
+            inputs=[other_obs], outputs=central_vf_out)
+        self.register_variables(self.central_vf.variables)
+
+    def forward(self, input_dict, state, seq_lens):
+        return self.model.forward(input_dict, state, seq_lens)
+
+    def central_value_function(self, central_obs):
+        return tf.reshape(
+            self.central_vf([central_obs]), [-1])
+
+    def value_function(self):
+        return self.model.value_function()  # not used
+
+
+# TODO(@evinitsky) support recurrence
+class CentralizedCriticModelRNN(RecurrentTFModelV2):
+    """Example of using the Keras functional API to define an RNN model."""
+
+    def __init__(self,
+                 obs_space,
+                 action_space,
+                 num_outputs,
+                 model_config,
+                 name,
+                 hiddens_size=64,
+                 cell_size=64):
+        super(CentralizedCriticModelRNN, self).__init__(
+            obs_space, action_space, num_outputs, model_config, name)
+        self.cell_size = cell_size
+
+        # Define input layers
+        input_layer = tf.keras.layers.Input(
+            shape=(None, obs_space.shape[0]), name="inputs")
+        state_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")
+        state_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")
+        seq_in = tf.keras.layers.Input(shape=(), name="seq_in")
+
+        # Preprocess observation with a hidden layer and send to LSTM cell
+        dense1 = tf.keras.layers.Dense(
+            hiddens_size, activation=tf.nn.relu, name="dense1")(input_layer)
+        lstm_out, state_h, state_c = tf.keras.layers.LSTM(
+            cell_size, return_sequences=True, return_state=True, name="lstm")(
+                inputs=dense1,
+                mask=tf.sequence_mask(seq_in),
+                initial_state=[state_in_h, state_in_c])
+
+        # Postprocess LSTM output with another hidden layer and compute values
+        logits = tf.keras.layers.Dense(
+            self.num_outputs,
+            activation=tf.keras.activations.linear,
+            name="logits")(lstm_out)
+        values = tf.keras.layers.Dense(
+            1, activation=None, name="values")(lstm_out)
+
+        # Create the RNN model
+        self.model = tf.keras.Model(
+            inputs=[input_layer, seq_in, state_in_h, state_in_c],
+            outputs=[logits, values, state_h, state_c])
+        self.register_variables(self.model.variables)
+        self.model.summary()
+
+        # TODO(@evinitsky) add layer sharing to the VF
+        # Create the centralized VF
+        # Central VF maps (obs, opp_obs, opp_act) -> vf_pred
+        self.max_num_agents = model_config.get("max_num_agents", 120)
+        self.obs_space_shape = obs_space.shape[0]
+        other_obs = tf.keras.layers.Input(
+            shape=(obs_space.shape[0] * self.max_num_agents, ), name="all_agent_obs")
+        central_vf_dense = tf.keras.layers.Dense(
+            model_config.get("central_vf_size", 64), activation=tf.nn.tanh,
+            name="c_vf_dense")(other_obs)
+        # note: this layer needs its own name; reusing "c_vf_dense" would make
+        # Keras raise a duplicate-layer-name error when the model is built
+        central_vf_dense2 = tf.keras.layers.Dense(
+            model_config.get("central_vf_size", 64), activation=tf.nn.tanh,
+            name="c_vf_dense2")(central_vf_dense)
+        central_vf_out = tf.keras.layers.Dense(
+            1, activation=None, name="c_vf_out")(central_vf_dense2)
+        self.central_vf = tf.keras.Model(
+            inputs=[other_obs], outputs=central_vf_out)
+        self.register_variables(self.central_vf.variables)
+
+    @override(RecurrentTFModelV2)
+    def forward_rnn(self, inputs, state, seq_lens):
+        model_out, self._value_out, h, c = self.model([inputs, seq_lens] +
+                                                      state)
+        return model_out, [h, c]
+
+    @override(ModelV2)
+    def get_initial_state(self):
+        return [
+            np.zeros(self.cell_size, np.float32),
+            np.zeros(self.cell_size, np.float32),
+        ]
+
+    def central_value_function(self, central_obs):
+        return tf.reshape(
+            self.central_vf([central_obs]), [-1])
+
+    def value_function(self):
+        return tf.reshape(self._value_out, [-1])  # not used
+
+
+class CentralizedValueMixin(object):
+    """Add methods to evaluate the central value function from the model."""
+
+    def __init__(self):
+        # TODO(@evinitsky) clean up naming
+        self.central_value_function = self.model.central_value_function(
+            self.get_placeholder(CENTRAL_OBS))
+
+    def compute_central_vf(self, central_obs):
+        feed_dict = {
+            self.get_placeholder(CENTRAL_OBS): central_obs,
+        }
+        return self.get_session().run(self.central_value_function, feed_dict)
+
+
+# Grabs the opponent obs/act and includes it in the experience train_batch,
+# and computes GAE using the central vf predictions.
+def centralized_critic_postprocessing(policy,
+                                      sample_batch,
+                                      other_agent_batches=None,
+                                      episode=None):
+    if policy.loss_initialized():
+        assert other_agent_batches is not None
+
+        other_agent_times = {agent_id: other_agent_batches[agent_id][1]["t"]
+                             for agent_id in other_agent_batches.keys()}
+        agent_time = sample_batch['t']
+        # find agents whose time overlaps with the current agent
+        rel_agents = {agent_id: other_agent_time for agent_id,
+                      other_agent_time in other_agent_times.items()}
+        other_obs = {agent_id:
+                     other_agent_batches[agent_id][1]["obs"].copy()
+                     for agent_id in other_agent_batches.keys()}
+        padded_agent_obs = {agent_id:
+                            fill_missing(
+                                agent_time,
+                                other_agent_times[agent_id],
+                                other_obs[agent_id])
+                            for agent_id,
+                            rel_agent_time in rel_agents.items()}
+        # okay, now we need to stack and sort
+        central_obs_list = [padded_obs for padded_obs in padded_agent_obs.values()]
+        try:
+            central_obs_batch = np.hstack(
+                (sample_batch["obs"], np.hstack(central_obs_list)))
+        except Exception:
+            # TODO(@ev) this is a bug and needs to be fixed
+            central_obs_batch = sample_batch["obs"]
+        max_vf_agents = policy.model.max_num_agents
+        num_agents = len(rel_agents) + 1
+        if num_agents < max_vf_agents:
+            diff = max_vf_agents - num_agents
+            zero_pad = np.zeros((central_obs_batch.shape[0],
+                                 policy.model.obs_space_shape * diff))
+            central_obs_batch = np.hstack((central_obs_batch, zero_pad))
+        elif num_agents > max_vf_agents:
+            print("Too many agents!")
+
+        # also record the opponent obs and actions in the trajectory
+        sample_batch[CENTRAL_OBS] = central_obs_batch
+
+        # overwrite default VF prediction with the central VF
+        sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_vf(
+            sample_batch[CENTRAL_OBS])
+    else:
+        # policy hasn't initialized yet, use zeros
+        # TODO(evinitsky) put in the right shape
+        obs_shape = sample_batch[SampleBatch.CUR_OBS].shape[1]
+        obs_shape = (1, obs_shape * policy.model.max_num_agents)
+        sample_batch[CENTRAL_OBS] = np.zeros(obs_shape)
+        # TODO(evinitsky) put in the right shape. Will break if actions aren't 1
+        sample_batch[SampleBatch.VF_PREDS] = np.zeros(1, dtype=np.float32)
+
+    completed = sample_batch["dones"][-1]
+
+    batch = compute_advantages(
+        sample_batch,
+        0.0,
+        policy.config["gamma"],
+        policy.config["lambda"],
+        use_gae=policy.config["use_gae"])
+    return batch
+
+
+def time_overlap(time_span, agent_time):
+    """Check if agent_time overlaps with time_span."""
+    if agent_time[0] <= time_span[1] and agent_time[1] >= time_span[0]:
+        return True
+    else:
+        return False
+
+
+def fill_missing(agent_time, other_agent_time, obs):
+    """Pad obs to the length of agent_time, zero-filling absent steps."""
+    # shortcut, the two overlap perfectly
+    if np.sum(agent_time == other_agent_time) == agent_time.shape[0]:
+        return obs
+    new_obs = np.zeros((agent_time.shape[0], obs.shape[1]))
+    other_agent_time_set = set(other_agent_time)
+    for i, time in enumerate(agent_time):
+        if time in other_agent_time_set:
+            new_obs[i] = obs[np.where(other_agent_time == time)]
+    return new_obs
+
+
+def overlap_and_pad_agent(time_span, agent_time, obs):
+    """Take the part of obs that overlaps, pad to length time_span.
+
+    Arguments:
+        time_span (tuple): tuple of the first and last time that the agent
+            of interest is in the system
+        agent_time (tuple): tuple of the first and last time that the
+            agent whose obs we are padding is in the system
+        obs (np.ndarray): observations of the agent whose time is
+            agent_time
+    """
+    assert time_overlap(time_span, agent_time)
+    # FIXME(ev) some of these conditions can be combined
+    # no padding needed
+    if agent_time[0] == time_span[0] and agent_time[1] == time_span[1]:
+        return obs
+    # agent enters before time_span starts and exits before time_span ends
+    if agent_time[0] < time_span[0] and agent_time[1] < time_span[1]:
+        non_overlap_time = time_span[0] - agent_time[0]
+        missing_time = time_span[1] - agent_time[1]
+        overlap_obs = obs[non_overlap_time:]
+        padding = np.zeros((missing_time, obs.shape[1]))
+        return np.concatenate((overlap_obs, padding))
+    # agent enters after time_span starts and exits after time_span ends
+    elif agent_time[0] > time_span[0] and agent_time[1] > time_span[1]:
+        non_overlap_time = agent_time[1] - time_span[1]
+        overlap_obs = obs[:-non_overlap_time]
+        missing_time = agent_time[0] - time_span[0]
+        padding = np.zeros((missing_time, obs.shape[1]))
+        return np.concatenate((padding, overlap_obs))
+    # agent time is entirely contained in time_span
+    elif agent_time[0] >= time_span[0] and agent_time[1] <= time_span[1]:
+        missing_left = agent_time[0] - time_span[0]
+        missing_right = time_span[1] - agent_time[1]
+        obs_concat = obs
+        if missing_left > 0:
+            padding = np.zeros((missing_left, obs.shape[1]))
+            obs_concat = np.concatenate((padding, obs_concat))
+        if missing_right > 0:
+            padding = np.zeros((missing_right, obs.shape[1]))
+            obs_concat = np.concatenate((obs_concat, padding))
+        return obs_concat
+    # agent time totally contains time_span
+    elif agent_time[0] <= time_span[0] and agent_time[1] >= time_span[1]:
+        non_overlap_left = time_span[0] - agent_time[0]
+        non_overlap_right = agent_time[1] - time_span[1]
+        overlap_obs = obs
+        if non_overlap_left > 0:
+            overlap_obs = overlap_obs[non_overlap_left:]
+        if non_overlap_right > 0:
+            overlap_obs = overlap_obs[:-non_overlap_right]
+        return overlap_obs
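
For intuition, a worked example of fill_missing under hypothetical timestamps: the other agent is present only at t = 1 and 2, so its rows for t = 0 and 3 stay zero-padded.

    import numpy as np

    agent_time = np.array([0, 1, 2, 3])   # steps the ego agent was present
    other_time = np.array([1, 2])         # steps the other agent was present
    obs = np.array([[10.], [20.]])        # the other agent's observations
    # fill_missing(agent_time, other_time, obs) returns
    # array([[ 0.], [10.], [20.], [ 0.]])
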
+
+
+# Copied from PPO but optimizing the central value function
+def loss_with_central_critic(policy, model, dist_class, train_batch):
+    CentralizedValueMixin.__init__(policy)
+
+    logits, state = model.from_batch(train_batch)
+    action_dist = dist_class(logits, model)
+
+    policy.loss_obj = PPOLoss(
+        policy.action_space,
+        dist_class,
+        model,
+        train_batch[Postprocessing.VALUE_TARGETS],
+        train_batch[Postprocessing.ADVANTAGES],
+        train_batch[SampleBatch.ACTIONS],
+        train_batch[BEHAVIOUR_LOGITS],
+        train_batch[ACTION_LOGP],
+        train_batch[SampleBatch.VF_PREDS],
+        action_dist,
+        policy.central_value_function,
+        policy.kl_coeff,
+        tf.ones_like(train_batch[Postprocessing.ADVANTAGES], dtype=tf.bool),
+        entropy_coeff=policy.entropy_coeff,
+        clip_param=policy.config["clip_param"],
+        vf_clip_param=policy.config["vf_clip_param"],
+        vf_loss_coeff=policy.config["vf_loss_coeff"],
+        use_gae=policy.config["use_gae"],
+        model_config=policy.config["model"])
+
+    return policy.loss_obj.loss
+
+
+class PPOLoss(object):
+    def __init__(self,
+                 action_space,
+                 dist_class,
+                 model,
+                 value_targets,
+                 advantages,
+                 actions,
+                 prev_logits,
+                 prev_actions_logp,
+                 vf_preds,
+                 curr_action_dist,
+                 value_fn,
+                 cur_kl_coeff,
+                 valid_mask,
+                 entropy_coeff=0,
+                 clip_param=0.1,
+                 vf_clip_param=0.1,
+                 vf_loss_coeff=1.0,
+                 use_gae=True,
+                 model_config=None):
+        """Constructs the loss for Proximal Policy Objective.
+
+        Arguments:
+            action_space: Environment action space specification.
+            dist_class: action distribution class for logits.
+            value_targets (Placeholder): Placeholder for target values; used
+                for GAE.
+            actions (Placeholder): Placeholder for actions taken
+                from previous model evaluation.
+            advantages (Placeholder): Placeholder for calculated advantages
+                from previous model evaluation.
+            prev_logits (Placeholder): Placeholder for logits output from
+                previous model evaluation.
+            prev_actions_logp (Placeholder): Placeholder for prob output from
+                previous model evaluation.
+            vf_preds (Placeholder): Placeholder for value function output
+                from previous model evaluation.
+            curr_action_dist (ActionDistribution): ActionDistribution
+                of the current model.
+            value_fn (Tensor): Current value function output Tensor.
+            cur_kl_coeff (Variable): Variable holding the current PPO KL
+                coefficient.
+            valid_mask (Tensor): A bool mask of valid input elements (#2992).
+            entropy_coeff (float): Coefficient of the entropy regularizer.
+            clip_param (float): Clip parameter
+            vf_clip_param (float): Clip parameter for the value function
+            vf_loss_coeff (float): Coefficient of the value function loss
+            use_gae (bool): If true, use the Generalized Advantage Estimator.
+            model_config (dict): (Optional) model config for use in specifying
+                action distributions.
+        """
+
+        def reduce_mean_valid(t):
+            return tf.reduce_mean(tf.boolean_mask(t, valid_mask))
+
+        prev_dist = dist_class(prev_logits, model)
+        # Make loss functions.
+        logp_ratio = tf.exp(curr_action_dist.logp(actions) - prev_actions_logp)
+        action_kl = prev_dist.kl(curr_action_dist)
+        self.mean_kl = reduce_mean_valid(action_kl)
+
+        curr_entropy = curr_action_dist.entropy()
+        self.mean_entropy = reduce_mean_valid(curr_entropy)
+
+        surrogate_loss = tf.minimum(
+            advantages * logp_ratio,
+            advantages * tf.clip_by_value(logp_ratio, 1 - clip_param,
+                                          1 + clip_param))
+        self.mean_policy_loss = reduce_mean_valid(-surrogate_loss)
+
+        if use_gae:
+            vf_loss1 = tf.square(value_fn - value_targets)
+            vf_clipped = vf_preds + tf.clip_by_value(
+                value_fn - vf_preds, -vf_clip_param, vf_clip_param)
+            vf_loss2 = tf.square(vf_clipped - value_targets)
+            vf_loss = tf.maximum(vf_loss1, vf_loss2)
+            self.mean_vf_loss = reduce_mean_valid(vf_loss)
+            loss = reduce_mean_valid(
+                -surrogate_loss +
+                vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy)
+        else:
+            self.mean_vf_loss = tf.constant(0.0)
+            loss = reduce_mean_valid(-surrogate_loss -
+                                     entropy_coeff * curr_entropy)
+        self.loss = loss
+
+
+def new_ppo_surrogate_loss(policy, model, dist_class, train_batch):
+    loss = loss_with_central_critic(policy, model, dist_class, train_batch)
+    return loss
+
+
+class KLCoeffMixin(object):
+    def __init__(self, config):
+        # KL Coefficient
+        self.kl_coeff_val = config["kl_coeff"]
+        self.kl_target = config["kl_target"]
+        self.kl_coeff = tf.get_variable(
+            initializer=tf.constant_initializer(self.kl_coeff_val),
+            name="kl_coeff",
+            shape=(),
+            trainable=False,
+            dtype=tf.float32)
+
+    def update_kl(self, blah):
+        # intentionally a no-op: the KL coefficient is held fixed here
+        pass
+
+
+def setup_mixins(policy, obs_space, action_space, config):
+    # copied from PPO
+    KLCoeffMixin.__init__(policy, config)
+
+    EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"],
+                                  config["entropy_coeff_schedule"])
+    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
+    # hack: put in a noop VF so some of the inherited PPO code runs
+    policy.value_function = tf.zeros(
+        tf.shape(policy.get_placeholder(SampleBatch.CUR_OBS))[0])
+
+
+def central_vf_stats(policy, train_batch, grads):
+    # Report the explained variance of the central value function.
+    return {
+        "vf_explained_var": explained_variance(
+            train_batch[Postprocessing.VALUE_TARGETS],
+            policy.central_value_function),
+    }
+
+
+def kl_and_loss_stats(policy, train_batch):
+    return {
+        "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64),
+        "cur_lr": tf.cast(policy.cur_lr, tf.float64),
+        "total_loss": policy.loss_obj.loss,
+        "policy_loss": policy.loss_obj.mean_policy_loss,
+        "vf_loss": policy.loss_obj.mean_vf_loss,
+        "vf_explained_var": explained_variance(
+            train_batch[Postprocessing.VALUE_TARGETS],
+            policy.model.value_function()),
+        "vf_preds": train_batch[Postprocessing.VALUE_TARGETS],
+        "kl": policy.loss_obj.mean_kl,
+        "entropy": policy.loss_obj.mean_entropy,
+        "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64),
+    }
+
+
+CCPPO = CustomPPOTFPolicy.with_updates(
+    name="CCPPO",
+    postprocess_fn=centralized_critic_postprocessing,
+    loss_fn=new_ppo_surrogate_loss,
+    stats_fn=kl_and_loss_stats,
+    before_loss_init=setup_mixins,
+    grad_stats_fn=central_vf_stats,
+    mixins=[
+        LearningRateSchedule, EntropyCoeffSchedule,
+        CentralizedValueMixin, KLCoeffMixin
+    ])
+
+CCTrainer = PPOTrainer.with_updates(name="CCPPOTrainer", default_policy=CCPPO)
\ No newline at end of file
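
The shape bookkeeping in centralized_critic_postprocessing is easiest to see with concrete numbers. A hedged sketch (all dimensions here are hypothetical; the real ones come from the observation space and max_num_agents):

    import numpy as np

    T, obs_dim, max_num_agents = 200, 5, 10   # illustrative only
    own = np.zeros((T, obs_dim))              # this agent's trajectory
    others = [np.zeros((T, obs_dim))] * 2     # two teammates, padded by fill_missing
    central = np.hstack([own] + others)       # (T, 15)
    pad = np.zeros((T, obs_dim * (max_num_agents - 3)))
    central = np.hstack((central, pad))       # (T, 50) == (T, obs_dim * max_num_agents)
    assert central.shape == (T, obs_dim * max_num_agents)
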
diff --git a/flow/algorithms/custom_ppo.py b/flow/algorithms/custom_ppo.py
new file mode 100644
index 000000000..a98af6c2d
--- /dev/null
+++ b/flow/algorithms/custom_ppo.py
@@ -0,0 +1,318 @@
+"""PPO, modified so that the outflow can be added to the final reward."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import logging
+
+import numpy as np
+import ray
+from ray.rllib.agents.ppo.ppo import PPOTrainer
+from ray.rllib.evaluation.postprocessing import compute_advantages, \
+    Postprocessing
+from ray.rllib.policy.sample_batch import SampleBatch
+from ray.rllib.policy.tf_policy import LearningRateSchedule, \
+    EntropyCoeffSchedule, ACTION_LOGP
+from ray.rllib.policy.tf_policy_template import build_tf_policy
+from ray.rllib.utils.explained_variance import explained_variance
+from ray.rllib.utils.tf_ops import make_tf_callable
+from ray.rllib.utils import try_import_tf
+
+tf = try_import_tf()
+
+logger = logging.getLogger(__name__)
+
+# Frozen logits of the policy that computed the action
+BEHAVIOUR_LOGITS = "behaviour_logits"
+
+
+class PPOLoss(object):
+    def __init__(self,
+                 action_space,
+                 dist_class,
+                 model,
+                 value_targets,
+                 advantages,
+                 actions,
+                 prev_logits,
+                 prev_actions_logp,
+                 vf_preds,
+                 curr_action_dist,
+                 value_fn,
+                 cur_kl_coeff,
+                 valid_mask,
+                 entropy_coeff=0,
+                 clip_param=0.1,
+                 vf_clip_param=0.1,
+                 vf_loss_coeff=1.0,
+                 use_gae=True,
+                 model_config=None):
+        """Constructs the loss for Proximal Policy Objective.
+
+        Arguments:
+            action_space: Environment action space specification.
+            dist_class: action distribution class for logits.
+            value_targets (Placeholder): Placeholder for target values; used
+                for GAE.
+            actions (Placeholder): Placeholder for actions taken
+                from previous model evaluation.
+            advantages (Placeholder): Placeholder for calculated advantages
+                from previous model evaluation.
+            prev_logits (Placeholder): Placeholder for logits output from
+                previous model evaluation.
+            prev_actions_logp (Placeholder): Placeholder for prob output from
+                previous model evaluation.
+            vf_preds (Placeholder): Placeholder for value function output
+                from previous model evaluation.
+            curr_action_dist (ActionDistribution): ActionDistribution
+                of the current model.
+            value_fn (Tensor): Current value function output Tensor.
+            cur_kl_coeff (Variable): Variable holding the current PPO KL
+                coefficient.
+            valid_mask (Tensor): A bool mask of valid input elements (#2992).
+            entropy_coeff (float): Coefficient of the entropy regularizer.
+            clip_param (float): Clip parameter
+            vf_clip_param (float): Clip parameter for the value function
+            vf_loss_coeff (float): Coefficient of the value function loss
+            use_gae (bool): If true, use the Generalized Advantage Estimator.
+            model_config (dict): (Optional) model config for use in specifying
+                action distributions.
+        """
+
+        def reduce_mean_valid(t):
+            return tf.reduce_mean(tf.boolean_mask(t, valid_mask))
+
+        prev_dist = dist_class(prev_logits, model)
+        # Make loss functions.
+        logp_ratio = tf.exp(curr_action_dist.logp(actions) - prev_actions_logp)
+        action_kl = prev_dist.kl(curr_action_dist)
+        self.mean_kl = reduce_mean_valid(action_kl)
+
+        curr_entropy = curr_action_dist.entropy()
+        self.mean_entropy = reduce_mean_valid(curr_entropy)
+
+        surrogate_loss = tf.minimum(
+            advantages * logp_ratio,
+            advantages * tf.clip_by_value(logp_ratio, 1 - clip_param,
+                                          1 + clip_param))
+        self.mean_policy_loss = reduce_mean_valid(-surrogate_loss)
+
+        if use_gae:
+            vf_loss1 = tf.square(value_fn - value_targets)
+            vf_clipped = vf_preds + tf.clip_by_value(
+                value_fn - vf_preds, -vf_clip_param, vf_clip_param)
+            vf_loss2 = tf.square(vf_clipped - value_targets)
+            vf_loss = tf.maximum(vf_loss1, vf_loss2)
+            self.mean_vf_loss = reduce_mean_valid(vf_loss)
+            loss = reduce_mean_valid(
+                -surrogate_loss +
+                vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy)
+        else:
+            self.mean_vf_loss = tf.constant(0.0)
+            loss = reduce_mean_valid(-surrogate_loss -
+                                     entropy_coeff * curr_entropy)
+        self.loss = loss
+
+
+def ppo_surrogate_loss(policy, model, dist_class, train_batch):
+    logits, state = model.from_batch(train_batch)
+    action_dist = dist_class(logits, model)
+
+    if state:
+        max_seq_len = tf.reduce_max(train_batch["seq_lens"])
+        mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len)
+        mask = tf.reshape(mask, [-1])
+    else:
+        mask = tf.ones_like(
+            train_batch[Postprocessing.ADVANTAGES], dtype=tf.bool)
+
+    policy.loss_obj = PPOLoss(
+        policy.action_space,
+        dist_class,
+        model,
+        train_batch[Postprocessing.VALUE_TARGETS],
+        train_batch[Postprocessing.ADVANTAGES],
+        train_batch[SampleBatch.ACTIONS],
+        train_batch[BEHAVIOUR_LOGITS],
+        train_batch[ACTION_LOGP],
+        train_batch[SampleBatch.VF_PREDS],
+        action_dist,
+        model.value_function(),
+        policy.kl_coeff,
+        mask,
+        entropy_coeff=policy.entropy_coeff,
+        clip_param=policy.config["clip_param"],
+        vf_clip_param=policy.config["vf_clip_param"],
+        vf_loss_coeff=policy.config["vf_loss_coeff"],
+        use_gae=policy.config["use_gae"],
+        model_config=policy.config["model"])
+
+    return policy.loss_obj.loss
+
+
+def kl_and_loss_stats(policy, train_batch):
+    return {
+        "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64),
+        "cur_lr": tf.cast(policy.cur_lr, tf.float64),
+        "total_loss": policy.loss_obj.loss,
+        "policy_loss": policy.loss_obj.mean_policy_loss,
+        "vf_loss": policy.loss_obj.mean_vf_loss,
+        "vf_explained_var": explained_variance(
+            train_batch[Postprocessing.VALUE_TARGETS],
+            policy.model.value_function()),
+        "vf_preds": train_batch[Postprocessing.VALUE_TARGETS],
+        "kl": policy.loss_obj.mean_kl,
+        "entropy": policy.loss_obj.mean_entropy,
+        "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64),
+        "advantages": train_batch[Postprocessing.ADVANTAGES],
+        "rewards": train_batch["rewards"]
+    }
+
+
+def vf_preds_and_logits_fetches(policy):
+    """Adds value function and logits outputs to experience train_batches."""
+    return {
+        SampleBatch.VF_PREDS: policy.model.value_function(),
+        BEHAVIOUR_LOGITS: policy.model.last_output(),
+    }
+
+
+def postprocess_ppo_gae(policy,
+                        sample_batch,
+                        other_agent_batches=None,
+                        episode=None):
+    """Adds the policy logits, VF preds, and advantages to the trajectory."""
+    completed = sample_batch["dones"][-1]
+    if completed:
+        last_r = 0.0
+    else:
+        next_state = []
+        for i in range(policy.num_state_tensors()):
+            next_state.append([sample_batch["state_out_{}".format(i)][-1]])
+        last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1],
+                               sample_batch[SampleBatch.ACTIONS][-1],
+                               sample_batch[SampleBatch.REWARDS][-1],
+                               *next_state)
+
+    batch = compute_advantages(
+        sample_batch,
+        last_r,
+        policy.config["gamma"],
+        policy.config["lambda"],
+        use_gae=policy.config["use_gae"])
+    return batch
+
+
+def clip_gradients(policy, optimizer, loss):
+    variables = policy.model.trainable_variables()
+    if policy.config["grad_clip"] is not None:
+        grads_and_vars = optimizer.compute_gradients(loss, variables)
+        grads = [g for (g, v) in grads_and_vars]
+        policy.grads, _ = tf.clip_by_global_norm(grads,
+                                                 policy.config["grad_clip"])
+        clipped_grads = list(zip(policy.grads, variables))
+        return clipped_grads
+    else:
+        return optimizer.compute_gradients(loss, variables)
+
+
+class ValueNetworkMixin(object):
+    def __init__(self, obs_space, action_space, config):
+        if config["use_gae"]:
+
+            @make_tf_callable(self.get_session())
+            def value(ob, prev_action, prev_reward, *state):
+                model_out, _ = self.model({
+                    SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]),
+                    SampleBatch.PREV_ACTIONS: tf.convert_to_tensor(
+                        [prev_action]),
+                    SampleBatch.PREV_REWARDS: tf.convert_to_tensor(
+                        [prev_reward]),
+                    "is_training": tf.convert_to_tensor(False),
+                }, [tf.convert_to_tensor([s]) for s in state],
+                    tf.convert_to_tensor([1]))
+                return self.model.value_function()[0]
+
+        else:
+
+            @make_tf_callable(self.get_session())
+            def value(ob, prev_action, prev_reward, *state):
+                return tf.constant(0.0)
+
+        self._value = value
+
+
+def setup_config(policy, obs_space, action_space, config):
+    # auto set the model option for layer sharing
+    config["model"]["vf_share_layers"] = config["vf_share_layers"]
+
+
+def setup_mixins(policy, obs_space, action_space, config):
+    KLCoeffMixin.__init__(policy, config)
+    ValueNetworkMixin.__init__(policy, obs_space, action_space, config)
+    EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"],
+                                  config["entropy_coeff_schedule"])
+    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
+
+
+class KLCoeffMixin(object):
+    def __init__(self, config):
+        # KL Coefficient
+        self.kl_coeff_val = config["kl_coeff"]
+        self.kl_target = config["kl_target"]
+        self.kl_coeff = tf.get_variable(
+            initializer=tf.constant_initializer(self.kl_coeff_val),
+            name="kl_coeff",
+            shape=(),
+            trainable=False,
+            dtype=tf.float32)
+
+    def update_kl(self, blah):
+        # intentionally a no-op: the KL coefficient is held fixed here
+        pass
+
+
+CustomPPOTFPolicy = build_tf_policy(
+    name="CustomPPOTFPolicy",
+    get_default_config=lambda: ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG,
+    loss_fn=ppo_surrogate_loss,
+    stats_fn=kl_and_loss_stats,
+    extra_action_fetches_fn=vf_preds_and_logits_fetches,
+    postprocess_fn=postprocess_ppo_gae,
+    gradients_fn=clip_gradients,
+    before_init=setup_config,
+    before_loss_init=setup_mixins,
+    mixins=[
+        LearningRateSchedule, EntropyCoeffSchedule,
+        ValueNetworkMixin, KLCoeffMixin
+    ])
+
+
+def validate_config(config):
+    if config["entropy_coeff"] < 0:
+        raise DeprecationWarning("entropy_coeff must be >= 0")
+    if isinstance(config["entropy_coeff"], int):
+        config["entropy_coeff"] = float(config["entropy_coeff"])
+    if config["batch_mode"] == "truncate_episodes" and not config["use_gae"]:
+        raise ValueError(
+            "Episode truncation is not supported without a value "
+            "function. Consider setting batch_mode=complete_episodes.")
+    if config["multiagent"]["policies"] and not config["simple_optimizer"]:
+        logger.info(
+            "In multi-agent mode, policies will be optimized sequentially "
+            "by the multi-GPU optimizer. Consider setting "
+            "simple_optimizer=True if this doesn't work for you.")
+    if config["simple_optimizer"]:
+        logger.warning(
+            "Using the simple minibatch optimizer. This will significantly "
+            "reduce performance, consider simple_optimizer=False.")
+    elif tf and tf.executing_eagerly():
+        config["simple_optimizer"] = True  # multi-gpu not supported
+
+
+from ray.rllib.agents.trainer_template import build_trainer
+from ray.rllib.agents.ppo.ppo import choose_policy_optimizer, DEFAULT_CONFIG, update_kl, \
+    warn_about_bad_reward_scales
+
+CustomPPOTrainer = build_trainer(
+    name="CustomPPOTrainer",
+    default_config=DEFAULT_CONFIG,
+    default_policy=CustomPPOTFPolicy,
+    make_policy_optimizer=choose_policy_optimizer,
+    validate_config=validate_config,
+    after_train_result=warn_about_bad_reward_scales)
\ No newline at end of file
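
A minimal numeric check of the clipped surrogate objective that both PPOLoss classes implement (values are illustrative):

    import numpy as np

    def clipped_surrogate(advantage, logp_ratio, clip_param=0.3):
        return np.minimum(
            advantage * logp_ratio,
            advantage * np.clip(logp_ratio, 1 - clip_param, 1 + clip_param))

    print(clipped_surrogate(1.0, 1.5))   # 1.3: gains from positive advantages are capped
    print(clipped_surrogate(-1.0, 0.5))  # -0.7: losses from negative advantages are floored
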
diff --git a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py
index 42c9b2a9b..280c94d37 100755
--- a/flow/controllers/car_following_models.py
+++ b/flow/controllers/car_following_models.py
@@ -647,6 +647,7 @@ def __init__(self,
 
     def get_accel(self, env):
         """See parent class."""
+        # without generating waves.
         lead_id = env.k.vehicle.get_leader(self.veh_id)
         if not lead_id:  # no car ahead
             if self.want_max_accel:
diff --git a/flow/controllers/velocity_controllers.py b/flow/controllers/velocity_controllers.py
index c3da6136d..62ce15beb 100644
--- a/flow/controllers/velocity_controllers.py
+++ b/flow/controllers/velocity_controllers.py
@@ -25,7 +25,10 @@ class FollowerStopper(BaseController):
     def __init__(self,
                  veh_id,
                  car_following_params,
-                 v_des=15):
+                 v_des=15,
+                 danger_edges=None,
+                 control_length=None,
+                 no_control_edges=None):
         """Instantiate FollowerStopper."""
         BaseController.__init__(
             self, veh_id, car_following_params, delay=0.0,
@@ -45,6 +48,10 @@ def __init__(self,
         self.d_2 = 1.0
         self.d_3 = 0.5
 
+        self.danger_edges = danger_edges if danger_edges else {}
+        self.control_length = control_length
+        # default to an empty list so the membership test in get_accel is safe
+        self.no_control_edges = no_control_edges if no_control_edges else []
+
     def find_intersection_dist(self, env):
         """Find distance to intersection.
 
@@ -72,41 +79,54 @@ def find_intersection_dist(self, env):
 
     def get_accel(self, env):
         """See parent class."""
-        lead_id = env.k.vehicle.get_leader(self.veh_id)
-        this_vel = env.k.vehicle.get_speed(self.veh_id)
-        lead_vel = env.k.vehicle.get_speed(lead_id)
-
-        if self.v_des is None:
+        if env.time_counter < env.env_params.warmup_steps * env.env_params.sims_per_step:
             return None
-
-        if lead_id is None:
-            v_cmd = self.v_des
         else:
-            dx = env.k.vehicle.get_headway(self.veh_id)
-            dv_minus = min(lead_vel - this_vel, 0)
+            lead_id = env.k.vehicle.get_leader(self.veh_id)
+            this_vel = env.k.vehicle.get_speed(self.veh_id)
+            lead_vel = env.k.vehicle.get_speed(lead_id)
 
-            dx_1 = self.dx_1_0 + 1 / (2 * self.d_1) * dv_minus**2
-            dx_2 = self.dx_2_0 + 1 / (2 * self.d_2) * dv_minus**2
-            dx_3 = self.dx_3_0 + 1 / (2 * self.d_3) * dv_minus**2
-            v = min(max(lead_vel, 0), self.v_des)
-            # compute the desired velocity
-            if dx <= dx_1:
-                v_cmd = 0
-            elif dx <= dx_2:
-                v_cmd = v * (dx - dx_1) / (dx_2 - dx_1)
-            elif dx <= dx_3:
-                v_cmd = v + (self.v_des - this_vel) * (dx - dx_2) \
-                    / (dx_3 - dx_2)
-            else:
-                v_cmd = self.v_des
-
-        edge = env.k.vehicle.get_edge(self.veh_id)
+            if self.v_des is None:
+                return None
 
-        if edge == "":
-            return None
-        else:
-            # compute the acceleration from the desired velocity
-            return np.clip((v_cmd - this_vel) / env.sim_step, -np.abs(self.max_deaccel), self.max_accel)
+            if lead_id is None:
+                v_cmd = self.v_des
+            else:
+                dx = env.k.vehicle.get_headway(self.veh_id)
+                dv_minus = min(lead_vel - this_vel, 0)
+
+                dx_1 = self.dx_1_0 + 1 / (2 * self.d_1) * dv_minus**2
+                dx_2 = self.dx_2_0 + 1 / (2 * self.d_2) * dv_minus**2
+                dx_3 = self.dx_3_0 + 1 / (2 * self.d_3) * dv_minus**2
+                v = min(max(lead_vel, 0), self.v_des)
+                # compute the desired velocity
+                if dx <= dx_1:
+                    v_cmd = 0
+                elif dx <= dx_2:
+                    v_cmd = v * (dx - dx_1) / (dx_2 - dx_1)
+                elif dx <= dx_3:
+                    v_cmd = v + (self.v_des - this_vel) * (dx - dx_2) \
+                        / (dx_3 - dx_2)
+                else:
+                    v_cmd = self.v_des
+
+            edge = env.k.vehicle.get_edge(self.veh_id)
+
+            if edge == "":
+                return None
+
+            if (self.find_intersection_dist(env) <= 10 and
+                    env.k.vehicle.get_edge(self.veh_id) in self.danger_edges) or \
+                    env.k.vehicle.get_edge(self.veh_id)[0] == ":" \
+                    or (self.control_length and (env.k.vehicle.get_x_by_id(self.veh_id) < self.control_length[0]
+                        or env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1])) \
+                    or edge in self.no_control_edges:
+                # TODO(@evinitsky) put back
+                # or env.k.vehicle.get_edge(self.veh_id) in self.no_control_edges:
+                return None
+            else:
+                # compute the acceleration from the desired velocity
+                return np.clip((v_cmd - this_vel) / env.sim_step,
+                               -np.abs(self.max_deaccel), self.max_accel)
 
 
 class NonLocalFollowerStopper(FollowerStopper):
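
The FollowerStopper command above is a three-region envelope in the headway dx; the controller also widens each region boundary quadratically in the closing speed (dv_minus < 0) before comparing. A hedged sketch with illustrative boundary values (the real dx_k_0 and d_k constants live in the class):

    def v_cmd(dx, this_vel, lead_vel, v_des,
              dx_1=4.5, dx_2=5.25, dx_3=6.0):
        # illustrative region boundaries only
        v = min(max(lead_vel, 0), v_des)
        if dx <= dx_1:
            return 0.0                              # too close: command a stop
        elif dx <= dx_2:
            return v * (dx - dx_1) / (dx_2 - dx_1)  # blend up to the lead speed
        elif dx <= dx_3:
            return v + (v_des - this_vel) * (dx - dx_2) / (dx_3 - dx_2)
        else:
            return v_des                            # enough headway: drive v_des
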
diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py
index 9ca83ab40..1c5ed271a 100644
--- a/flow/core/kernel/vehicle/base.py
+++ b/flow/core/kernel/vehicle/base.py
@@ -323,6 +323,7 @@ def get_fuel_consumption(selfself, veh_id, error=-1001):
             vehicle id, or list of vehicle ids
         error : any, optional
             value that is returned if the vehicle is not found
+
         Returns
         -------
         float
diff --git a/flow/core/rewards.py b/flow/core/rewards.py
index 1434636e6..5aada2d8e 100755
--- a/flow/core/rewards.py
+++ b/flow/core/rewards.py
@@ -333,7 +333,6 @@ def energy_consumption(env, gain=.001):
 
     return -gain * power
 
-
 def vehicle_energy_consumption(env, veh_id, gain=.001):
     """Calculate power consumption of a vehicle.
 
@@ -352,6 +351,7 @@ def vehicle_energy_consumption(env, veh_id, gain=.001):
     if veh_id not in env.k.vehicle.previous_speeds:
         return 0
 
+
     speed = env.k.vehicle.get_speed(veh_id)
 
     prev_speed = env.k.vehicle.get_previous_speed(veh_id)
@@ -389,7 +389,7 @@ def miles_per_megajoule(env, veh_ids=None, gain=.001):
         speed = env.k.vehicle.get_speed(veh_id)
         # convert to be positive since the function called is a penalty
         power = -vehicle_energy_consumption(env, veh_id, gain=1.0)
-        if power > 0 and speed >= 0.0:
+        if power > 0 and speed >= 0.1:
             counter += 1
             # meters / joule is (v * \delta t) / (power * \delta t)
             mpj += speed / power
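
The miles_per_megajoule guard above relies on the identity meters/joule = (v * dt) / (P * dt) = v / P; raising the speed threshold from 0.0 to 0.1 m/s drops creeping vehicles, whose v / P ratio is dominated by noise near standstill. Sketch of the changed guard with illustrative values:

    speed, power = 0.05, 2000.0        # m/s and W, illustrative only
    if power > 0 and speed >= 0.1:     # previously speed >= 0.0
        contribution = speed / power   # meters per joule for this vehicle
    else:
        contribution = None            # vehicle excluded from the average
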
diff --git a/flow/envs/base.py b/flow/envs/base.py
index cf1674355..fbc57f33b 100644
--- a/flow/envs/base.py
+++ b/flow/envs/base.py
@@ -148,6 +148,8 @@ def __init__(self,
         self.state = None
         self.obs_var_labels = []
 
+        self.num_training_iters = 0
+
         # track IDs that have ever been observed in the system
         self.observed_ids = set()
         self.observed_rl_ids = set()
diff --git a/flow/envs/multiagent/__init__.py b/flow/envs/multiagent/__init__.py
index 818d6662b..8c5552580 100644
--- a/flow/envs/multiagent/__init__.py
+++ b/flow/envs/multiagent/__init__.py
@@ -12,6 +12,7 @@ from flow.envs.multiagent.merge import MultiAgentMergePOEnv
 from flow.envs.multiagent.i210 import I210MultiEnv, MultiStraightRoad
 
+
 __all__ = [
     'MultiEnv',
     'AdversarialAccelEnv',
diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py
index 594fb2fdb..7104138de 100644
--- a/flow/envs/multiagent/base.py
+++ b/flow/envs/multiagent/base.py
@@ -322,3 +322,6 @@ def apply_rl_actions(self, rl_actions=None):
         # clip according to the action space requirements
         clipped_actions = self.clip_actions(rl_actions)
         self._apply_rl_actions(clipped_actions)
+
+    def set_iteration_num(self):
+        self.num_training_iters += 1
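
set_iteration_num pairs with the on_train_result callback added in examples/train.py: after every training iteration the driver walks each rollout worker's environment copies and bumps their counters, which the curricula in the I-210 env then read. A standalone sketch of the pattern (class and variable names are stand-ins):

    class EnvStub:
        """Stand-in for a Flow env copy living on a rollout worker."""
        num_training_iters = 0

        def set_iteration_num(self):
            self.num_training_iters += 1

    env_copies = [EnvStub() for _ in range(4)]
    # what trainer.workers.foreach_worker(... foreach_env ...) amounts to:
    for env in env_copies:
        env.set_iteration_num()
    assert all(env.num_training_iters == 1 for env in env_copies)
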
diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py
index a6e39cdec..c9b63b23a 100644
--- a/flow/envs/multiagent/i210.py
+++ b/flow/envs/multiagent/i210.py
@@ -1,8 +1,13 @@
 """Environment for training vehicles to reduce congestion in the I210."""
 
-from gym.spaces import Box
+from collections import OrderedDict
+from copy import deepcopy
+from time import time
+
+from gym.spaces import Box, Discrete, Dict
 import numpy as np
 
+from flow.core.rewards import miles_per_gallon, miles_per_megajoule
 from flow.envs.multiagent.base import MultiEnv
 
 # largest number of lanes on any given edge in the network
@@ -19,6 +24,7 @@
     "lead_obs": True,
     # whether the reward should come from local vehicles instead of global rewards
     "local_reward": True,
+    # desired velocity
     "target_velocity": 25
 }
@@ -66,10 +72,35 @@ def __init__(self, env_params, sim_params, network, simulator='traci'):
         self.reroute_on_exit = env_params.additional_params.get("reroute_on_exit")
         self.max_lanes = MAX_LANES
         self.num_enter_lanes = 5
-        self.entrance_edge = "119257914"
-        self.exit_edge = "119257908#3"
+        self.entrance_edge = "ghost0"
+        self.exit_edge = "119257908#2"
+        self.control_range = env_params.additional_params.get('control_range', None)
+        self.no_control_edges = env_params.additional_params.get('no_control_edges', [])
+        self.mpg_reward = env_params.additional_params["mpg_reward"]
+        self.mpj_reward = env_params.additional_params["mpj_reward"]
+        self.look_back_length = env_params.additional_params["look_back_length"]
+
+        # whether to add a slight reward for opening up a gap that will be
+        # annealed out N iterations in
+        self.headway_curriculum = env_params.additional_params["headway_curriculum"]
+        # how many training iterations to anneal the headway curriculum over
+        self.headway_curriculum_iters = env_params.additional_params["headway_curriculum_iters"]
+        self.headway_reward_gain = env_params.additional_params["headway_reward_gain"]
+        self.min_time_headway = env_params.additional_params["min_time_headway"]
+
+        # whether to add a slight reward for keeping up speed that will be
+        # annealed out N iterations in
+        self.speed_curriculum = env_params.additional_params["speed_curriculum"]
+        # how many training iterations to anneal the speed curriculum over
+        self.speed_curriculum_iters = env_params.additional_params["speed_curriculum_iters"]
+        self.speed_reward_gain = env_params.additional_params["speed_reward_gain"]
+        self.num_training_iters = 0
         self.leader = []
 
+        # penalize stops
+        self.penalize_stops = env_params.additional_params["penalize_stops"]
+
+        # penalize accel
+        self.penalize_accel = env_params.additional_params.get("penalize_accel", False)
+
     @property
     def observation_space(self):
         """See class definition."""
@@ -109,6 +140,8 @@ def action_space(self):
     def _apply_rl_actions(self, rl_actions):
         """See class definition."""
         # in the warmup steps, rl_actions is None
+        id_list = []
+        accel_list = []
         if rl_actions:
             for rl_id, actions in rl_actions.items():
                 accel = actions[0]
@@ -117,15 +150,28 @@ def _apply_rl_actions(self, rl_actions):
                 # lane_change_softmax /= np.sum(lane_change_softmax)
                 # lane_change_action = np.random.choice([-1, 0, 1],
                 #                                       p=lane_change_softmax)
+                id_list.append(rl_id)
+                accel_list.append(accel)
-                self.k.vehicle.apply_acceleration(rl_id, accel)
-                # self.k.vehicle.apply_lane_change(rl_id, lane_change_action)
+            self.k.vehicle.apply_acceleration(id_list, accel_list)
+            # self.k.vehicle.apply_lane_change(rl_id, lane_change_action)
+
+    def in_control_range(self, veh_id):
+        """Return True if veh_id is on an edge that is allowed to be controlled.
+
+        If a control range is defined it is used; otherwise the check is that
+        the vehicle's edge is not one of the no-control edges.
+        """
+        return (self.control_range and self.k.vehicle.get_x_by_id(veh_id) < self.control_range[1]
+                and self.k.vehicle.get_x_by_id(veh_id) > self.control_range[0]) or \
+            (len(self.no_control_edges) > 0 and self.k.vehicle.get_edge(veh_id) not in
+             self.no_control_edges)
 
     def get_state(self):
         """See class definition."""
+        valid_ids = [rl_id for rl_id in self.k.vehicle.get_rl_ids() if self.in_control_range(rl_id)]
         if self.lead_obs:
             veh_info = {}
-            for rl_id in self.k.vehicle.get_rl_ids():
+            for rl_id in valid_ids:
                 speed = self.k.vehicle.get_speed(rl_id)
                 lead_id = self.k.vehicle.get_leader(rl_id)
                 if lead_id in ["", None]:
@@ -140,7 +186,7 @@ def get_state(self):
         else:
             veh_info = {rl_id: np.concatenate((self.state_util(rl_id),
                                                self.veh_statistics(rl_id)))
-                        for rl_id in self.k.vehicle.get_rl_ids()}
+                        for rl_id in valid_ids}
         return veh_info
@@ -150,27 +196,107 @@ def compute_reward(self, rl_actions, **kwargs):
         if rl_actions is None:
             return {}
 
         rewards = {}
+        valid_ids = [rl_id for rl_id in self.k.vehicle.get_rl_ids() if self.in_control_range(rl_id)]
+
         if self.env_params.additional_params["local_reward"]:
             des_speed = self.env_params.additional_params["target_velocity"]
-            for rl_id in self.k.vehicle.get_rl_ids():
+            for rl_id in valid_ids:
                 rewards[rl_id] = 0
-                speeds = []
-                follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id))
-                if follow_speed >= 0:
-                    speeds.append(follow_speed)
-                if self.k.vehicle.get_speed(rl_id) >= 0:
-                    speeds.append(self.k.vehicle.get_speed(rl_id))
-                if len(speeds) > 0:
-                    # rescale so the critic can estimate it quickly
-                    rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed)) ** 2
-                                              for speed in speeds]) / (des_speed ** 2)
+                if self.mpg_reward:
+                    rewards[rl_id] = miles_per_gallon(self, rl_id, gain=1.0) / 100.0
+                    follow_id = rl_id
+                    for i in range(self.look_back_length):
+                        follow_id = self.k.vehicle.get_follower(follow_id)
+                        if follow_id not in ["", None]:
+                            rewards[rl_id] += miles_per_gallon(self, follow_id, gain=1.0) / 100.0
+                        else:
+                            break
+                elif self.mpj_reward:
+                    rewards[rl_id] = miles_per_megajoule(self, rl_id, gain=1.0) / 100.0
+                    follow_id = rl_id
+                    for i in range(self.look_back_length):
+                        follow_id = self.k.vehicle.get_follower(follow_id)
+                        if follow_id not in ["", None]:
+                            rewards[rl_id] += miles_per_megajoule(self, follow_id, gain=1.0) / 100.0
+                        else:
+                            break
+                else:
+                    speeds = []
+                    follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id))
+                    if follow_speed >= 0:
+                        speeds.append(follow_speed)
+                    if self.k.vehicle.get_speed(rl_id) >= 0:
+                        speeds.append(self.k.vehicle.get_speed(rl_id))
+                    if len(speeds) > 0:
+                        # rescale so the critic can estimate it quickly
+                        rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed)) ** 2
+                                                  for speed in speeds]) / (des_speed ** 2)
         else:
-            speeds = self.k.vehicle.get_speed(self.k.vehicle.get_ids())
-            des_speed = self.env_params.additional_params["target_velocity"]
-            # rescale so the critic can estimate it quickly
-            reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) ** 2
-                                            for speed in speeds]) / (des_speed ** 2))
-            rewards = {rl_id: reward for rl_id in self.k.vehicle.get_rl_ids()}
+            if self.mpg_reward:
+                reward = np.nan_to_num(miles_per_gallon(self, self.k.vehicle.get_ids(), gain=1.0)) / 100.0
+            else:
+                speeds = self.k.vehicle.get_speed(self.k.vehicle.get_ids())
+                des_speed = self.env_params.additional_params["target_velocity"]
+                # rescale so the critic can estimate it quickly
+                if self.reroute_on_exit:
+                    reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed))
+                                                    for speed in speeds]) / des_speed)
+                else:
+                    reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) ** 2
+                                                    for speed in speeds]) / (des_speed ** 2))
+            rewards = {rl_id: reward for rl_id in valid_ids}
reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 + for speed in speeds]) / (des_speed ** 2)) + rewards = {rl_id: reward for rl_id in valid_ids} + + # curriculum over time-gaps + if self.headway_curriculum and self.num_training_iters <= self.headway_curriculum_iters: + t_min = self.min_time_headway # smallest acceptable time headway + for veh_id, rew in rewards.items(): + lead_id = self.k.vehicle.get_leader(veh_id) + penalty = 0 + if lead_id not in ["", None] \ + and self.k.vehicle.get_speed(veh_id) > 0: + t_headway = max( + self.k.vehicle.get_headway(veh_id) / + self.k.vehicle.get_speed(veh_id), 0) + # print('time headway is {}, headway is {}'.format(t_headway, self.k.vehicle.get_headway(veh_id))) + scaling_factor = max(0, 1 - self.num_training_iters / self.headway_curriculum_iters) + penalty += scaling_factor * self.headway_reward_gain * min((t_headway - t_min) / t_min, 0) + # print('penalty is ', penalty) + + rewards[veh_id] += penalty + + if self.speed_curriculum and self.num_training_iters <= self.speed_curriculum_iters: des_speed = self.env_params.additional_params["target_velocity"] - # rescale so the critic can estimate it quickly - reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 - for speed in speeds]) / (des_speed ** 2)) - rewards = {rl_id: reward for rl_id in self.k.vehicle.get_rl_ids()} + + for veh_id, rew in rewards.items(): + speed = self.k.vehicle.get_speed(veh_id) + speed_reward = 0.0 + follow_id = veh_id + for i in range(self.look_back_length): + follow_id = self.k.vehicle.get_follower(follow_id) + if follow_id not in ["", None]: + if self.reroute_on_exit: + speed_reward += ((des_speed - np.abs(speed - des_speed))) / (des_speed) + else: + speed_reward += ((des_speed - np.abs(speed - des_speed)) ** 2) / (des_speed ** 2) + else: + break + scaling_factor = max(0, 1 - self.num_training_iters / self.speed_curriculum_iters) + + rewards[veh_id] += speed_reward * scaling_factor * self.speed_reward_gain + + for veh_id in rewards.keys(): + speed = self.k.vehicle.get_speed(veh_id) + if self.penalize_stops: + if speed < 1.0: + rewards[veh_id] -= .01 + if self.penalize_accel and veh_id in self.k.vehicle.previous_speeds: + prev_speed = self.k.vehicle.get_previous_speed(veh_id) + abs_accel = abs(speed - prev_speed) / self.sim_step + rewards[veh_id] -= abs_accel / 400.0 + + # print('time to get reward is ', time() - t) return rewards def additional_command(self): @@ -191,6 +317,7 @@ def additional_command(self): and not self.env_params.evaluate: veh_ids = self.k.vehicle.get_ids() edges = self.k.vehicle.get_edge(veh_ids) + valid_lanes = list(range(self.num_enter_lanes)) for veh_id, edge in zip(veh_ids, edges): if edge == "": continue @@ -200,28 +327,38 @@ def additional_command(self): if edge == self.exit_edge and \ (self.k.vehicle.get_position(veh_id) > self.k.network.edge_length(self.exit_edge) - 100) \ and self.k.vehicle.get_leader(veh_id) is None: + # if self.step_counter > 6000: + # import ipdb; ipdb.set_trace() type_id = self.k.vehicle.get_type(veh_id) # remove the vehicle self.k.vehicle.remove(veh_id) - lane = np.random.randint(low=0, high=self.num_enter_lanes) + index = np.random.randint(low=0, high=len(valid_lanes)) + lane = valid_lanes[index] + del valid_lanes[index] # reintroduce it at the start of the network # TODO(@evinitsky) select the lane and speed a bit more cleanly # Note, the position is 10 so you are not overlapping with the inflow car that is being removed. # this allows the vehicle to be immediately inserted. 
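Both curricula in the hunk above follow the same annealing pattern: a shaping term whose weight decays linearly to zero over the curriculum window. A minimal sketch of the time-headway variant, with illustrative names standing in for the env attributes (t_min, gain and total_iters are assumptions, not values from this patch):

    def annealed_headway_penalty(headway, speed, cur_iter,
                                 t_min=1.0, gain=1.0, total_iters=100):
        # non-positive penalty that fades out as training progresses
        if speed <= 0:
            return 0.0
        t_headway = max(headway / speed, 0)
        # linear decay of the shaping weight over training iterations
        scaling_factor = max(0, 1 - cur_iter / total_iters)
        return scaling_factor * gain * min((t_headway - t_min) / t_min, 0)

The min(..., 0) keeps this a pure penalty: it contributes nothing once the time headway reaches t_min.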
-                    self.k.vehicle.add(
-                        veh_id=veh_id,
-                        edge=self.entrance_edge,
-                        type_id=str(type_id),
-                        lane=str(lane),
-                        pos="10.0",
-                        speed="23.0")
+                    try:
+                        self.k.vehicle.add(
+                            veh_id=veh_id,
+                            edge=self.entrance_edge,
+                            type_id=str(type_id),
+                            lane=str(lane),
+                            pos="20.0",
+                            speed="23.0")
+                    except Exception as e:
+                        print(e)
+                    if len(valid_lanes) == 0:
+                        break
 
             departed_ids = self.k.vehicle.get_departed_ids()
-            if len(departed_ids) > 0:
+            if isinstance(departed_ids, tuple) and len(departed_ids) > 0:
                 for veh_id in departed_ids:
                     if veh_id not in self.observed_ids:
                         self.k.vehicle.remove(veh_id)
 
+
     def state_util(self, rl_id):
         """Return an array of headway, tailway, leader speed, follower speed.
 
diff --git a/flow/networks/i210_subnetwork_ghost_cell.py b/flow/networks/i210_subnetwork_ghost_cell.py
new file mode 100644
index 000000000..8a45b4d91
--- /dev/null
+++ b/flow/networks/i210_subnetwork_ghost_cell.py
@@ -0,0 +1,162 @@
+"""Contains the I-210 sub-network class with a downstream ghost cell."""
+
+from flow.networks.base import Network
+
+EDGES_DISTRIBUTION = [
+    # Main highway
+    "ghost0",
+    "119257914",
+    "119257908#0",
+    "119257908#1-AddedOnRampEdge",
+    "119257908#1",
+    "119257908#1-AddedOffRampEdge",
+    "119257908#2",
+    "119257908#3",
+
+    # On-ramp
+    "27414345",
+    "27414342#0",
+    "27414342#1-AddedOnRampEdge",
+
+    # Off-ramp
+    "173381935",
+]
+
+
+class I210SubNetworkGhostCell(Network):
+    """A network used to simulate the I-210 sub-network with a ghost cell.
+
+    Usage
+    -----
+    >>> from flow.core.params import NetParams
+    >>> from flow.core.params import VehicleParams
+    >>> from flow.core.params import InitialConfig
+    >>> from flow.networks import I210SubNetworkGhostCell
+    >>>
+    >>> network = I210SubNetworkGhostCell(
+    >>>     name='I-210_subnetwork',
+    >>>     vehicles=VehicleParams(),
+    >>>     net_params=NetParams()
+    >>> )
+    """
+
+    def specify_routes(self, net_params):
+        """See parent class.
+
+        Routes for vehicles moving through the I210.
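In specify_routes below, every origin edge maps to a list of (route, probability) pairs, and the probabilities for a given origin are meant to sum to one, e.g. (1 - 17/8378) + 17/8378 on the mainline. A quick sanity check for that invariant, written against the dict shape used below:

    def check_route_probs(rts, tol=1e-8):
        # every origin edge's route probabilities should sum to one
        for origin, routes in rts.items():
            total = sum(prob for _, prob in routes)
            assert abs(total - 1.0) < tol, (origin, total)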
+ """ + if net_params.additional_params["on_ramp"]: + rts = { + # Main highway + "ghost0": [ + (["ghost0", "119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1 - 17 / 8378), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + 17 / 8378) + ], + "119257914": [ + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1 - 17 / 8378), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + 17 / 8378) + ], + "119257908#0": [ + (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", + # "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1-AddedOnRampEdge": [ + (["119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1-AddedOnRampEdge", "119257908#1", + # "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1": [ + (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1-AddedOffRampEdge": [ + (["119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#2": [ + (["119257908#2", "119257908#3"], 1), + ], + "119257908#3": [ + (["119257908#3"], 1), + ], + + # On-ramp + "27414345": [ + (["27414345", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1 - 9 / 321), + (["27414345", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "173381935"], + 9 / 321), + ], + "27414342#0": [ + (["27414342#0", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1 - 20 / 421), + (["27414342#0", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "173381935"], + 20 / 421), + ], + "27414342#1-AddedOnRampEdge": [ + (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 0.5), + (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + 0.5), + ], + + # Off-ramp + "173381935": [ + (["173381935"], 1), + ], + } + + else: + rts = { + # Main highway + "ghost0": [ + (["ghost0", "119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1), + ], + } + + return rts diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index 9ac6938d4..004172765 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ 
-256,12 +256,31 @@ def _highway(data, params, all_time): time step. Set to zero if the vehicle is not present in the network at that time step. """ - length = params['net'].additional_params['length'] - num_edges = params['net'].additional_params['num_edges'] - edge_len = length / num_edges + junction_length = 0.1 + length = params['net'].additional_params["length"] + num_edges = params['net'].additional_params.get("num_edges", 1) edge_starts = {} - for i in range(num_edges): - edge_starts.update({"highway_{}".format(i): i * edge_len, ":edge_{}_0".format(i): i * edge_len}) + # Add the main edges. + edge_starts.update({ + "highway_{}".format(i): + i * (length / num_edges + junction_length) + for i in range(num_edges) + }) + + if params['net'].additional_params["use_ghost_edge"]: + edge_starts.update({"highway_end": length + num_edges * junction_length}) + + edge_starts.update({ + ":edge_{}".format(i + 1): + (i + 1) * length / num_edges + i * junction_length + for i in range(num_edges - 1) + }) + + if params['net'].additional_params["use_ghost_edge"]: + edge_starts.update({ + ":edge_{}".format(num_edges): + length + (num_edges - 1) * junction_length + }) # compute the absolute position for veh_id in data.keys(): diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index c1dd83193..5c52e196f 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -26,6 +26,7 @@ from ray.rllib.agents.registry import get_agent_class from ray.tune.registry import register_env +from flow.core.rewards import miles_per_gallon, miles_per_megajoule from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from flow.utils.rllib import get_flow_params @@ -90,6 +91,14 @@ def visualizer_rllib(args): sys.exit(1) if args.run: agent_cls = get_agent_class(args.run) + elif config['env_config']['run'] == "": + from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel + from ray.rllib.models import ModelCatalog + agent_cls = CCTrainer + ModelCatalog.register_custom_model("cc_model", CentralizedCriticModel) + elif config['env_config']['run'] == "": + from flow.algorithms.custom_ppo import CustomPPOTrainer + agent_cls = CustomPPOTrainer elif config_run: agent_cls = get_agent_class(config_run) else: @@ -160,6 +169,10 @@ def visualizer_rllib(args): else: env = gym.make(env_name) + # reroute on exit is a training hack, it should be turned off at test time. 
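The offset arithmetic in the _highway hunk above is easiest to verify with small numbers. A worked check, using purely illustrative values (length=300, num_edges=3, junction_length=0.1):

    length, num_edges, junction_length = 300.0, 3, 0.1
    edge_starts = {"highway_{}".format(i): i * (length / num_edges + junction_length)
                   for i in range(num_edges)}
    edge_starts.update({":edge_{}".format(i + 1): (i + 1) * length / num_edges + i * junction_length
                        for i in range(num_edges - 1)})
    # highway_0 -> 0.0, :edge_1 -> 100.0, highway_1 -> 100.1,
    # :edge_2 -> 200.1, highway_2 -> 200.2; with the ghost edge enabled,
    # :edge_3 sits at 300.2 and highway_end at 300.3

Each 0.1 m junction is slotted between consecutive edges, so absolute positions stay monotone along the highway.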
+ if hasattr(env, "reroute_on_exit"): + env.reroute_on_exit = False + if args.render_mode == 'sumo_gui': env.sim_params.render = True # set to True after initializing agent and env @@ -197,6 +210,8 @@ def visualizer_rllib(args): # Simulate and collect metrics final_outflows = [] final_inflows = [] + mpg = [] + mpj = [] mean_speed = [] std_speed = [] for i in range(args.num_rollouts): @@ -214,6 +229,9 @@ def visualizer_rllib(args): if speeds: vel.append(np.mean(speeds)) + mpg.append(miles_per_gallon(env.unwrapped, vehicles.get_ids(), gain=1.0)) + mpj.append(miles_per_megajoule(env.unwrapped, vehicles.get_ids(), gain=1.0)) + if multiagent: action = {} for agent_id in state.keys(): @@ -279,10 +297,11 @@ def visualizer_rllib(args): print(mean_speed) print('Average, std: {}, {}'.format(np.mean(mean_speed), np.std( mean_speed))) - print("\nSpeed, std (m/s):") - print(std_speed) - print('Average, std: {}, {}'.format(np.mean(std_speed), np.std( - std_speed))) + + print('Average, std miles per gallon: {}, {}'.format(np.mean(mpg), np.std(mpg))) + + print('Average, std miles per megajoule: {}, {}'.format(np.mean(mpj), np.std(mpj))) + # Compute arrival rate of vehicles in the last 500 sec of the run print("\nOutflows (veh/hr):") diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml index 5cf0eca96..18e25154d 100644 --- a/scripts/ray_autoscale.yaml +++ b/scripts/ray_autoscale.yaml @@ -1,4 +1,4 @@ -# cluster.yaml ========================================= +# cluster.yaml ========================================= # An unique identifier for the head node and workers of this cluster. cluster_name: test # @@ -39,8 +39,8 @@ auth: # For more documentation on available fields, see: # http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances head_node: - InstanceType: c4.4xlarge - ImageId: ami-0b489700e7f810707 # Flow AMI (Ubuntu) + InstanceType: c4.8xlarge + ImageId: ami-0c047f3ddd3939b30 # Flow AMI (Ubuntu) InstanceMarketOptions: MarketType: spot #Additional options can be found in the boto docs, e.g. @@ -54,10 +54,10 @@ head_node: # For more documentation on available fields, see: # http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances worker_nodes: - InstanceType: c4.4xlarge - ImageId: ami-0b489700e7f810707 # Flow AMI (Ubuntu) + InstanceType: c4.8xlarge + ImageId: ami-0c047f3ddd3939b30 # Flow AMI (Ubuntu) - #Run workers on spot by default. Comment this out to use on-demand. + #Run workers on spot by default. Comment this out to use on-demand. InstanceMarketOptions: MarketType: spot # Additional options can be found in the boto docs, e.g. @@ -67,7 +67,8 @@ worker_nodes: # Additional options in the boto docs. 
 setup_commands:
-    - cd flow && git fetch && git checkout origin/i210_dev
+    - cd flow && git fetch && git checkout origin/flow_maddpg
+    - flow/scripts/setup_sumo_ubuntu1604.sh
     - pip install ray==0.8.0
     - pip install tabulate
     - pip install boto3==1.10.45 # 1.4.8 adds InstanceMarketOptions
@@ -79,7 +80,6 @@ setup_commands:
     - pip install lz4
     - pip install dm-tree
     - pip install numpy==1.18.4
-    - ./flow/scripts/setup_sumo_ubuntu1604.sh
 
 head_setup_commands: []

From 4ebcc0629fe98008a4faa63159613da33e31f3ff Mon Sep 17 00:00:00 2001
From: Yasharzf
Date: Thu, 28 May 2020 12:38:16 -0700
Subject: [PATCH 210/438] separated speed limit check, modified failsafe order

---
 flow/controllers/base_controller.py | 112 ++++++++++++++++++++--------
 1 file changed, 82 insertions(+), 30 deletions(-)

diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py
index 95ecd1737..4a7818d11 100755
--- a/flow/controllers/base_controller.py
+++ b/flow/controllers/base_controller.py
@@ -33,8 +33,10 @@ class BaseController:
         specified to in this model are as desired.
     delay : int
         delay in applying the action (time)
-    fail_safe : str
-        Should be "instantaneous", "safe_velocity", "feasible_accel", or "all"
+    fail_safe : list of str or str
+        List of failsafes which can be "instantaneous", "safe_velocity",
+        "feasible_accel", or "obey_speed_limit". The failsafes will be
+        applied in the order they appear in the list.
     noise : double
         variance of the gaussian from which to sample a noisy acceleration
     """
@@ -55,7 +57,18 @@ def __init__(self,
         self.delay = delay
 
         # longitudinal failsafe used by the vehicle
-        self.fail_safe = fail_safe
+        if isinstance(fail_safe, str):
+            self.fail_safe = [fail_safe]
+        elif isinstance(fail_safe, list) or fail_safe is None:
+            self.fail_safe = fail_safe
+        else:
+            print(
+                "==========================================================\n"
+                "WARNING: fail_safe should be a string or a list of strings.\n"
+                "Setting fail_safe to None.\n"
+                "==========================================================\n"
+            )
+            self.fail_safe = None
 
         self.max_accel = car_following_params.controller_params['accel']
         # max deaccel should always be a positive
@@ -76,9 +89,7 @@ def get_action(self, env):
 
         This method also augments the controller with the desired level of
         stochastic noise, and utilizes the "instantaneous", "safe_velocity",
-        "feasible_accel", or "all" failsafes if requested. The "all" failsafe
-        performs all three failsafes with this order: 1)"safe_velocity",
-        2) "feasible_accel", 3) "instantaneous".
+        "feasible_accel", or "obey_speed_limit" failsafes if requested.
 
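Since fail_safe is now an ordered list, applying it amounts to folding the candidate acceleration through a chain of clipping functions, each one seeing the output of the previous. A minimal sketch, assuming for illustration that every check is normalized to the signature f(env, accel) -> accel (in the patch itself, get_feasible_action takes only the acceleration):

    def apply_failsafes(env, accel, checks):
        # later checks clip the output of earlier ones, so list order matters
        for check in checks or []:
            accel = check(env, accel)
        return accel

Running, say, 'feasible_accel' before 'instantaneous' can therefore yield a different final acceleration than the reverse order.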
         Parameters
         ----------
@@ -117,16 +128,17 @@ def get_action(self, env):
             # run fail safe if requested
             env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel)
             accel_no_noise_with_failsafe = accel
-            if self.fail_safe == 'instantaneous':
-                accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel)
-            elif self.fail_safe == 'safe_velocity':
-                accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel)
-            elif self.fail_safe == 'feasible_accel':
-                accel_no_noise_with_failsafe = self.get_feasible_action(accel)
-            elif self.fail_safe == 'all':
-                accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel)
-                accel_no_noise_with_failsafe = self.get_feasible_action(accel_no_noise_with_failsafe)
-                accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel_no_noise_with_failsafe)
+
+            if self.fail_safe is not None:
+                for check in self.fail_safe:
+                    if check == 'instantaneous':
+                        accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel_no_noise_with_failsafe)
+                    elif check == 'safe_velocity':
+                        accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel_no_noise_with_failsafe)
+                    elif check == 'feasible_accel':
+                        accel_no_noise_with_failsafe = self.get_feasible_action(accel_no_noise_with_failsafe)
+                    elif check == 'obey_speed_limit':
+                        accel_no_noise_with_failsafe = self.get_obey_speed_limit_action(env, accel_no_noise_with_failsafe)
 
             env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, accel_no_noise_with_failsafe)
 
@@ -136,16 +148,17 @@ def get_action(self, env):
         env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, accel)
 
         # run the fail-safes, if requested
-        if self.fail_safe == 'instantaneous':
-            accel = self.get_safe_action_instantaneous(env, accel)
-        elif self.fail_safe == 'safe_velocity':
-            accel = self.get_safe_velocity_action(env, accel)
-        elif self.fail_safe == 'feasible_accel':
-            accel = self.get_feasible_action(accel)
-        elif self.fail_safe == 'all':
-            accel = self.get_safe_velocity_action(env, accel)
-            accel = self.get_feasible_action(accel)
-            accel = self.get_safe_action_instantaneous(env, accel)
+        if self.fail_safe is not None:
+            for check in self.fail_safe:
+                if check == 'instantaneous':
+                    accel = self.get_safe_action_instantaneous(env, accel)
+                elif check == 'safe_velocity':
+                    accel = self.get_safe_velocity_action(env, accel)
+                elif check == 'feasible_accel':
+                    accel = self.get_feasible_action(accel)
+                elif check == 'obey_speed_limit':
+                    accel = self.get_obey_speed_limit_action(env, accel)
+
         env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel)
 
         return accel
@@ -275,16 +288,55 @@ def safe_velocity(self, env):
         this_edge = env.k.vehicle.get_edge(self.veh_id)
         edge_speed_limit = env.k.network.speed_limit(this_edge)
 
-        if v_safe > edge_speed_limit:
-            v_safe = edge_speed_limit
+
+        if this_vel > v_safe:
             print(
                 "=====================================\n"
-                "Speed of vehicle {} is greater than speed limit. Safe "
-                "velocity clipping applied.\n"
+                "Speed of vehicle {} is greater than safe speed. Safe velocity "
+                "clipping applied.\n"
                 "=====================================".format(self.veh_id))
         return v_safe
 
+    def get_obey_speed_limit_action(self, env, action):
+        """Perform the "obey_speed_limit" failsafe action.
+
+        Checks if the computed acceleration would put us above the edge speed
+        limit. If it would, output the acceleration that would put us exactly
+        at the speed limit.
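Concretely, with illustrative numbers (this_vel = 28 m/s, edge_speed_limit = 30 m/s, sim_step = 0.5 s, requested action = 6 m/s^2), the clipping in the implementation below works out as:

    v, limit, dt, a = 28.0, 30.0, 0.5, 6.0
    assert v + a * dt > limit      # 31.0 > 30.0, so the failsafe engages
    clipped = (limit - v) / dt     # 4.0 m/s^2 lands the vehicle exactly at the limit

so the failsafe returns 4 m/s^2 instead of the requested 6.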
+ + Parameters + ---------- + env : flow.envs.Env + current environment, which contains information of the state of the + network at the current time step + action : float + requested acceleration action + + Returns + ------- + float + the requested action clipped by the speed limit + """ + # check for speed limit + this_edge = env.k.vehicle.get_edge(self.veh_id) + edge_speed_limit = env.k.network.speed_limit(this_edge) + + this_vel = env.k.vehicle.get_speed(self.veh_id) + sim_step = env.sim_step + + if this_vel + action * sim_step > edge_speed_limit: + if edge_speed_limit > 0: + print( + "=====================================\n" + "Speed of vehicle {} is greater than speed limit. Obey " + "speed limit clipping applied.\n" + "=====================================".format(self.veh_id)) + return (edge_speed_limit - this_vel) / sim_step + else: + return -this_vel / sim_step + else: + return action + def get_feasible_action(self, action): """Perform the "feasible_accel" failsafe action. From bd7622a0d0290e68fb2e37df85dcbedd02e7ec3b Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 28 May 2020 13:56:33 -0700 Subject: [PATCH 211/438] fix get metadata --- flow/data_pipeline/leaderboard_utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index f4476b2eb..dedd5b3c6 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -145,8 +145,7 @@ def get_metadata(name, bucket="circles.data.pipeline"): """ s3 = boto3.client("s3") name_list = name.split('_') - source_id = "flow_{}".format(name_list[2]) response = s3.head_object(Bucket=bucket, Key="fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv".format(name_list[0], - source_id)) + name_list[1])) return response["Metadata"] From e9c0438038ddf53571363812262de48f919fd466 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 28 May 2020 14:03:35 -0700 Subject: [PATCH 212/438] fix get metadata --- flow/data_pipeline/leaderboard_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index dedd5b3c6..5cef40804 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -145,7 +145,8 @@ def get_metadata(name, bucket="circles.data.pipeline"): """ s3 = boto3.client("s3") name_list = name.split('_') + source_id = name_list[1].replace('.csv', "").replace('-', '_') response = s3.head_object(Bucket=bucket, Key="fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv".format(name_list[0], - name_list[1])) + source_id)) return response["Metadata"] From 39e4bc4436f894754f766c5878137ee2d0675ec6 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 29 May 2020 09:23:43 -0700 Subject: [PATCH 213/438] fix pathname --- flow/core/experiment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 82cdcd943..de61d691c 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -213,7 +213,7 @@ def rl_actions(*_): # Delete the .xml version of the emission file. 
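For the get_metadata fix above, the string operations imply object names of roughly the form '<date>_<source-id-with-dashes>.csv'; the exact scheme comes from key_to_name, so the name below is hypothetical and chosen only to match the parsing in the patch:

    name = "2020-05-28_flow-6a0e7158c8fb4db5a2ae4b1a156fc09e.csv"  # hypothetical name
    date_part, file_part = name.split('_')
    source_id = file_part.replace('.csv', '').replace('-', '_')
    # -> "flow_6a0e7158c8fb4db5a2ae4b1a156fc09e", which slots into the
    #    "fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv" key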
os.remove(emission_path) - trajectory_table_path = dir_path + source_id + ".csv" + trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: From d6b6c182893abef5ce19671b03f053105244f118 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 29 May 2020 09:25:31 -0700 Subject: [PATCH 214/438] fix pathname --- flow/visualize/i210_replay.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index af19111dc..b3e41b6c1 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -329,7 +329,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= emission_to_csv(emission_path, output_path=output_path) # generate the trajectory output file - trajectory_table_path = dir_path + source_id + ".csv" + trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) # upload to s3 if asked From 82b252eb14c460ed2411cdbfb450d280b2d01e6a Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 29 May 2020 19:33:26 -0700 Subject: [PATCH 215/438] added edge_id, lane_id, and distance --- flow/core/experiment.py | 6 +-- flow/core/kernel/vehicle/traci.py | 7 +++- flow/data_pipeline/data_pipeline.py | 3 ++ flow/data_pipeline/query.py | 62 +++++++++++------------------ 4 files changed, 36 insertions(+), 42 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index e9668d9db..6a4dafdca 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -152,8 +152,8 @@ def rl_actions(*_): # data pipeline extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) - metadate = defaultdict(lambda: "") - metadate['network'] = self.env.network.name.split('_')[0] + metadata = defaultdict(lambda: "") + metadata['network'] = self.env.network.name.split('_')[0] for i in range(num_runs): ret = 0 @@ -225,7 +225,7 @@ def rl_actions(*_): cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, source_id), - trajectory_table_path, metadate) + trajectory_table_path, metadata) # delete the S3-only version of the trajectory file # os.remove(upload_file_path) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 9485572b2..9ca112345 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -336,7 +336,8 @@ def _add_departed(self, veh_id, veh_type): tc.VAR_POSITION, tc.VAR_ANGLE, tc.VAR_SPEED_WITHOUT_TRACI, - tc.VAR_FUELCONSUMPTION + tc.VAR_FUELCONSUMPTION, + tc.VAR_DISTANCE ]) self.kernel_api.vehicle.subscribeLeader(veh_id, 2000) @@ -1191,6 +1192,10 @@ def get_2d_position(self, veh_id, error=-1001): """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) + def get_distance(self, veh_id, error=-1001): + """See parent class.""" + return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_DISTANCE, error) + def get_road_grade(self, veh_id): """See parent class.""" # TODO : Brent diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 5c9346c40..b1b9d1fef 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -108,6 +108,9 @@ def get_extra_info(veh_kernel, extra_info, 
veh_ids): veh_kernel.get_accel_no_noise_with_failsafe(vid)) extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) + extra_info["edge_id"].append(veh_kernel.get_edge(vid)) + extra_info["lane_id"].append(veh_kernel.get_lane(vid)) + extra_info["distance"].append(veh_kernel.get_distance(vid)) def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 04793cc73..f591aba26 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -160,57 +160,41 @@ class QueryStrings(Enum): FACT_VEHICLE_FUEL_EFFICIENCY_AGG = """ WITH sub_fact_vehicle_trace AS ( SELECT - id, - time_step, - x, - source_id - FROM fact_vehicle_trace - WHERE 1 = 1 - AND date = \'{date}\' - AND partition_name = \'{partition}\' - ), distance AS ( - SELECT - id, - source_id, - MAX(x)-MIN(x) AS distance_meters - FROM sub_fact_vehicle_trace - WHERE 1 = 1 - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 - GROUP BY 1, 2 - ), energy AS ( - SELECT - e.id, - e.source_id, + v.id, + v.source_id, e.energy_model_id, + MAX(x) - MIN(x) AS distance_meters, (MAX(e.time_step) - MIN(e.time_step)) / (COUNT(DISTINCT e.time_step) - 1) AS time_step_size_seconds, SUM(e.power) AS power_watts - FROM fact_energy_trace AS e - JOIN sub_fact_vehicle_trace AS v ON 1 = 1 + FROM fact_vehicle_trace v + JOIN fact_energy_trace AS e ON 1 = 1 AND e.id = v.id AND e.time_step = v.time_step AND e.source_id = v.source_id - WHERE 1 = 1 AND e.date = \'{date}\' AND e.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND v.x BETWEEN 500 AND 2300 AND e.time_step >= 600 + WHERE 1 = 1 + AND v.date = \'{date}\' + AND v.partition_name = \'{partition}' + AND v.x BETWEEN 500 AND 2300 GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT e.time_step) > 1 + HAVING 1 = 1 + AND MIN(x) - MIN(x) > 10 + AND COUNT(DISTINCT e.time_step) > 10 ) SELECT - d.id, - d.source_id, - e.energy_model_id, + id, + source_id, + energy_model_id, distance_meters, power_watts * time_step_size_seconds AS energy_joules, distance_meters / (power_watts * time_step_size_seconds) AS efficiency_meters_per_joules, - 74564 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon - FROM distance d - JOIN energy e ON 1=1 - AND d.id = e.id - AND d.source_id = e.source_id + 19972 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon + FROM sub_fact_vehicle_trace + WHERE 1 = 1 + AND ABS(power_watts * time_step_size_seconds) > 0 ; """ @@ -221,12 +205,13 @@ class QueryStrings(Enum): SUM(distance_meters) AS distance_meters, SUM(energy_joules) AS energy_joules, SUM(distance_meters) / SUM(energy_joules) AS efficiency_meters_per_joules, - 74564 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon + 19972 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND ABS(SUM(energy_joules)) != 0 GROUP BY 1, 2 ;""" @@ -235,7 +220,7 @@ class QueryStrings(Enum): t.source_id, e.energy_model_id, e.efficiency_meters_per_joules, - 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, + 19972 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, 
t.throughput_per_hour FROM fact_network_throughput_agg AS t JOIN fact_network_fuel_efficiency_agg AS e ON 1 = 1 @@ -513,11 +498,12 @@ class QueryStrings(Enum): LEADERBOARD_CHART_AGG = """ SELECT + date AS query_date, source_id, energy_model_id, efficiency_meters_per_joules, efficiency_miles_per_gallon, throughput_per_hour FROM leaderboard_chart - ORDER BY date, source_id ASC + ORDER BY query_date, source_id ASC ;""" From 65791df1e82deddf9c01456ce8ae1275315bb8ac Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 29 May 2020 19:59:18 -0700 Subject: [PATCH 216/438] added netwokr name translation --- flow/core/experiment.py | 3 ++- flow/data_pipeline/leaderboard_utils.py | 10 ++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 6a4dafdca..80126a306 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -2,6 +2,7 @@ from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, get_extra_info +from flow.data_pipeline.leaderboard_utils import network_name_translate from collections import defaultdict import datetime import logging @@ -153,7 +154,7 @@ def rl_actions(*_): extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) metadata = defaultdict(lambda: "") - metadata['network'] = self.env.network.name.split('_')[0] + metadata['network'] = network_name_translate(self.env.network.name.split('_')[0]) for i in range(num_runs): ret = 0 diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 5cef40804..86f3fed07 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -5,6 +5,16 @@ from io import StringIO +network_name_map = {"highway": "Single-Lane Straight Road", + "ring": "Single-Lane Ring Road"} + +def network_name_translate(network_name): + """Translate network name to a human friendly name for the leaderboard.""" + if network_name in network_name_map: + return network_name_map[network_name] + return network_name + + def key_to_name(key): """Return the standard formatted file name from object key.""" k_list = key.split("/") From f1ef8e2b703594f63a7950a871ce0e5caf3840dc Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 29 May 2020 23:29:54 -0700 Subject: [PATCH 217/438] fix some query bugs --- flow/data_pipeline/query.py | 180 ++++++++++++++++-------------------- 1 file changed, 78 insertions(+), 102 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index f591aba26..15914264f 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -163,7 +163,7 @@ class QueryStrings(Enum): v.id, v.source_id, e.energy_model_id, - MAX(x) - MIN(x) AS distance_meters, + MAX(distance) - MIN(distance) AS distance_meters, (MAX(e.time_step) - MIN(e.time_step)) / (COUNT(DISTINCT e.time_step) - 1) AS time_step_size_seconds, SUM(e.power) AS power_watts FROM fact_vehicle_trace v @@ -177,11 +177,11 @@ class QueryStrings(Enum): AND e.time_step >= 600 WHERE 1 = 1 AND v.date = \'{date}\' - AND v.partition_name = \'{partition}' + AND v.partition_name = \'{partition}\' AND v.x BETWEEN 500 AND 2300 GROUP BY 1, 2, 3 HAVING 1 = 1 - AND MIN(x) - MIN(x) > 10 + AND MIN(distance) - MIN(distance) > 10 AND COUNT(DISTINCT e.time_step) > 10 ) SELECT @@ -211,7 +211,7 @@ class QueryStrings(Enum): AND date = \'{date}\' AND partition_name = 
\'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND ABS(SUM(energy_joules)) != 0 + AND SUM(energy_joules) != 0 GROUP BY 1, 2 ;""" @@ -245,7 +245,6 @@ class QueryStrings(Enum): AND date = \'{date}\' AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 - AND time_step >= 600 GROUP BY 1, 2 ), inflows AS ( SELECT @@ -253,6 +252,8 @@ class QueryStrings(Enum): source_id, 60 * COUNT(DISTINCT id) AS inflow_rate FROM min_max_time_step + WHERE 1 = 1 + AND min_time_step >= 600 GROUP BY 1, 2 ), outflows AS ( SELECT @@ -260,6 +261,8 @@ class QueryStrings(Enum): source_id, 60 * COUNT(DISTINCT id) AS outflow_rate FROM min_max_time_step + WHERE 1 = 1 + AND max_time_step < 1000 GROUP BY 1, 2 ) SELECT @@ -280,73 +283,66 @@ class QueryStrings(Enum): vt.id, vt.source_id, vt.time_step, - vt.x, + vt.distance - FIRST_VALUE(vt.distance) + OVER (PARITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS distance_meters, energy_model_id, + et.speed, + et.acceleration, vt.time_step - LAG(vt.time_step, 1) - OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS sim_step, SUM(power) - OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS - cumulative_power + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC + ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 - AND vt.date = \'{date}\' - AND vt.partition_name = \'{partition}\' AND et.date = \'{date}\' AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step - AND vt.x BETWEEN 500 AND 2300 - AND vt.time_step >= 600 AND et.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' WHERE 1 = 1 + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND vt.x BETWEEN 500 AND 2300 + AND vt.time_step >= 600 ), cumulative_energy AS ( SELECT id, source_id, time_step, - x, + distance_meters, energy_model_id, + speed, + acceleration, cumulative_power * sim_step AS energy_joules FROM joined_trace - WHERE 1 = 1 - AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 ), binned_cumulative_energy AS ( SELECT source_id, - CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, - AVG(energy_joules) AS cumulative_energy_avg, - AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, - AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound - FROM cumulative_energy - GROUP BY 1, 2 - HAVING COUNT(DISTINCT time_step) > 1 - ), binned_speed_accel AS ( - SELECT - source_id, - CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, + CAST(distance_meters/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, AVG(target_accel_no_noise_with_failsafe) AS accel_avg, AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe) AS accel_upper_bound, - AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) AS accel_lower_bound - FROM fact_vehicle_trace - WHERE 1 = 1 - AND date = \'{date}\' - AND partition_name = \'{partition}\' - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 + 
AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) AS accel_lower_bound, + AVG(energy_joules) AS cumulative_energy_avg, + AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound + FROM cumulative_energy GROUP BY 1, 2 + HAVING 1 = 1 + AND COUNT(DISTINCT time_step) > 1 ), binned_energy_start_end AS ( SELECT DISTINCT source_id, id, - CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end + CAST(distance_meters/10 AS INTEGER) * 10 AS distance_meters_bin, + FIRST_VALUE(energy_joules) + OVER (PARTITION BY id, CAST(distance_meters/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) + OVER (PARTITION BY id, CAST(distance_meters/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT @@ -359,27 +355,24 @@ class QueryStrings(Enum): GROUP BY 1, 2 ) SELECT - COALESCE(bce.source_id, bsa.source_id, be.source_id) AS source_id, - COALESCE(bce.distance_meters_bin, bsa.distance_meters_bin, be.distance_meters_bin) AS distance_meters_bin, + COALESCE(bce.source_id, be.source_id) AS source_id, + COALESCE(bce.distance_meters_bin, be.distance_meters_bin) AS distance_meters_bin, bce.cumulative_energy_avg, bce.cumulative_energy_lower_bound, bce.cumulative_energy_upper_bound, - bsa.speed_avg, - bsa.speed_upper_bound, - bsa.speed_lower_bound, - bsa.accel_avg, - bsa.accel_upper_bound, - bsa.accel_lower_bound, + bce.speed_avg, + bce.speed_upper_bound, + bce.speed_lower_bound, + bce.accel_avg, + bce.accel_upper_bound, + bce.accel_lower_bound, be.instantaneous_energy_avg, be.instantaneous_energy_upper_bound, be.instantaneous_energy_lower_bound FROM binned_cumulative_energy bce - FULL OUTER JOIN binned_speed_accel bsa ON 1 = 1 - AND bce.source_id = bsa.source_id - AND bce.distance_meters_bin = bsa.distance_meters_bin FULL OUTER JOIN binned_energy be ON 1 = 1 - AND COALESCE(bce.source_id, bsa.source_id) = be.source_id - AND COALESCE(bce.distance_meters_bin, bce.distance_meters_bin) = be.distance_meters_bin + AND bce.source_id = be.source_id + AND bce.distance_meters_bin = be.distance_meters_bin ORDER BY distance_meters_bin ASC ;""" @@ -389,50 +382,38 @@ class QueryStrings(Enum): vt.id, vt.source_id, vt.time_step, - vt.x, energy_model_id, + et.speed, + et.acceleration, vt.time_step - LAG(vt.time_step, 1) - OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) - AS sim_step, + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS sim_step, SUM(power) - OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) - AS cumulative_power + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC + ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 - AND vt.date = \'{date}\' - AND vt.partition_name = \'{partition}\' AND et.date = \'{date}\' AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step - AND vt.x BETWEEN 500 AND 2300 - AND vt.time_step >= 600 AND et.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' WHERE 1 = 1 + AND 
vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND vt.x BETWEEN 500 AND 2300 + AND vt.time_step >= 600 ), cumulative_energy AS ( SELECT id, source_id, time_step, - x, energy_model_id, + speed, + acceleration, cumulative_power * sim_step AS energy_joules FROM joined_trace - WHERE 1 = 1 - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 ), binned_cumulative_energy AS ( - SELECT - source_id, - CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, - AVG(energy_joules) AS cumulative_energy_avg, - AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, - AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound - FROM cumulative_energy - GROUP BY 1, 2 - HAVING COUNT(DISTINCT time_step) > 1 - ), binned_speed_accel AS ( SELECT source_id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, @@ -440,26 +421,24 @@ class QueryStrings(Enum): AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, AVG(target_accel_no_noise_with_failsafe) AS accel_avg, - AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe) - AS accel_upper_bound, - AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) - AS accel_lower_bound - FROM fact_vehicle_trace - WHERE 1 = 1 - AND date = \'{date}\' - AND partition_name = \'{partition}\' - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 + AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe) AS accel_upper_bound, + AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) AS accel_lower_bound, + AVG(energy_joules) AS cumulative_energy_avg, + AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound + FROM cumulative_energy GROUP BY 1, 2 + HAVING 1 = 1 + AND COUNT(DISTINCT time_step) > 1 ), binned_energy_start_end AS ( SELECT DISTINCT source_id, id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) - AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) - AS energy_end + FIRST_VALUE(energy_joules) + OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) + OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 ORDER BY x ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT @@ -472,27 +451,24 @@ class QueryStrings(Enum): GROUP BY 1, 2 ) SELECT - COALESCE(bce.source_id, bsa.source_id, be.source_id) AS source_id, - COALESCE(bce.time_seconds_bin, bsa.time_seconds_bin, be.time_seconds_bin) AS time_seconds_bin, + COALESCE(bce.source_id, be.source_id) AS source_id, + COALESCE(bce.time_seconds_bin, be.time_seconds_bin) AS time_seconds_bin, bce.cumulative_energy_avg, bce.cumulative_energy_lower_bound, bce.cumulative_energy_upper_bound, - bsa.speed_avg, - bsa.speed_upper_bound, - bsa.speed_lower_bound, - bsa.accel_avg, - bsa.accel_upper_bound, - bsa.accel_lower_bound, + bce.speed_avg, + bce.speed_upper_bound, + bce.speed_lower_bound, + bce.accel_avg, + bce.accel_upper_bound, + bce.accel_lower_bound, be.instantaneous_energy_avg, be.instantaneous_energy_upper_bound, be.instantaneous_energy_lower_bound FROM binned_cumulative_energy bce - FULL OUTER JOIN binned_speed_accel bsa ON 1 = 1 - AND bce.source_id = bsa.source_id - AND 
bce.time_seconds_bin = bsa.time_seconds_bin FULL OUTER JOIN binned_energy be ON 1 = 1 - AND COALESCE(bce.source_id, bsa.source_id) = be.source_id - AND COALESCE(bce.time_seconds_bin, bce.time_seconds_bin) = be.time_seconds_bin + AND bce.source_id = be.source_id + AND bce.time_seconds_bin = be.time_seconds_bin ORDER BY time_seconds_bin ASC ;""" From 4b5cb4156dc3de8bd2a5bdf787645b3dadd6a0e8 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 29 May 2020 23:35:06 -0700 Subject: [PATCH 218/438] update values for warm-up time and horizon --- flow/data_pipeline/query.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 15914264f..ac2770af2 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -148,7 +148,7 @@ class QueryStrings(Enum): MAX(enter_time) - MIN(enter_time) AS total_time_seconds FROM min_time WHERE 1 = 1 - AND enter_time >= 600 + AND enter_time >= 720 GROUP BY 1 ) SELECT @@ -174,7 +174,7 @@ class QueryStrings(Enum): AND e.date = \'{date}\' AND e.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND e.time_step >= 600 + AND e.time_step >= 720 WHERE 1 = 1 AND v.date = \'{date}\' AND v.partition_name = \'{partition}\' @@ -253,7 +253,7 @@ class QueryStrings(Enum): 60 * COUNT(DISTINCT id) AS inflow_rate FROM min_max_time_step WHERE 1 = 1 - AND min_time_step >= 600 + AND min_time_step >= 720 GROUP BY 1, 2 ), outflows AS ( SELECT @@ -262,7 +262,7 @@ class QueryStrings(Enum): 60 * COUNT(DISTINCT id) AS outflow_rate FROM min_max_time_step WHERE 1 = 1 - AND max_time_step < 1000 + AND max_time_step < 1200 GROUP BY 1, 2 ) SELECT @@ -305,7 +305,7 @@ class QueryStrings(Enum): AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' AND vt.x BETWEEN 500 AND 2300 - AND vt.time_step >= 600 + AND vt.time_step >= 720 ), cumulative_energy AS ( SELECT id, @@ -402,7 +402,7 @@ class QueryStrings(Enum): AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' AND vt.x BETWEEN 500 AND 2300 - AND vt.time_step >= 600 + AND vt.time_step >= 720 ), cumulative_energy AS ( SELECT id, From 4c0358a6e626583f605ad146fc77b8b700f4b724 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 29 May 2020 23:40:57 -0700 Subject: [PATCH 219/438] leaderboard chart agg query fixes --- flow/data_pipeline/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index ac2770af2..5fa7937ec 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -474,12 +474,12 @@ class QueryStrings(Enum): LEADERBOARD_CHART_AGG = """ SELECT - date AS query_date, + date AS submission_date, source_id, energy_model_id, efficiency_meters_per_joules, efficiency_miles_per_gallon, throughput_per_hour - FROM leaderboard_chart + FROM leaderboard_chart ORDER BY query_date, source_id ASC ;""" From ce18a3677f23ebca0d724dc025f159c1bd55c2cf Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 29 May 2020 23:46:46 -0700 Subject: [PATCH 220/438] remove unnecessary references to "x" --- flow/data_pipeline/query.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 5fa7937ec..a664fd100 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -340,9 +340,11 @@ class QueryStrings(Enum): id, CAST(distance_meters/10 AS INTEGER) * 10 AS 
distance_meters_bin, FIRST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(distance_meters/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, + OVER (PARTITION BY id, CAST(distance_meters/10 AS INTEGER) * 10 + ORDER BY time_step ASC) AS energy_start, LAST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(distance_meters/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end + OVER (PARTITION BY id, CAST(distance_meters/10 AS INTEGER) * 10 + ORDER BY time_step ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT @@ -436,9 +438,11 @@ class QueryStrings(Enum): id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, FIRST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 ORDER BY x ASC) AS energy_start, + OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 + ORDER BY time_step ASC) AS energy_start, LAST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 ORDER BY x ASC) AS energy_end + OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 + ORDER BY time_step ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT From 6c7c3c0d5a54b0487ae3626825a7a1b5591d5698 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sat, 30 May 2020 02:54:23 -0700 Subject: [PATCH 221/438] fix some error in query --- flow/core/experiment.py | 2 +- flow/data_pipeline/leaderboard_utils.py | 4 +++- flow/data_pipeline/query.py | 6 +++++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 80126a306..64b46fe7d 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -154,7 +154,7 @@ def rl_actions(*_): extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) metadata = defaultdict(lambda: "") - metadata['network'] = network_name_translate(self.env.network.name.split('_')[0]) + metadata['network'] = network_name_translate(self.env.network.name.split('_20')[0]) for i in range(num_runs): ret = 0 diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 86f3fed07..0ab8dc6cd 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -6,7 +6,9 @@ network_name_map = {"highway": "Single-Lane Straight Road", - "ring": "Single-Lane Ring Road"} + "ring": "Single-Lane Ring Road", + "I-210_subnetwork": "I-210 without Ramps"} + def network_name_translate(network_name): """Translate network name to a human friendly name for the leaderboard.""" diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index f591aba26..7f309fb84 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -11,6 +11,8 @@ "fact_network_metrics_by_distance_agg": ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, + "POWER_DEMAND_MODEL": {}, + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL": {}, "fact_vehicle_fuel_efficiency_agg": {"fact_network_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"]}, "fact_network_fuel_efficiency_agg": {"leaderboard_chart": ["LEADERBOARD_CHART"]}, "leaderboard_chart": {"leaderboard_chart_agg": ["LEADERBOARD_CHART_AGG"]} @@ -211,8 +213,10 @@ class QueryStrings(Enum): AND date = \'{date}\' AND partition_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND ABS(SUM(energy_joules)) != 0 + AND ABS(energy_joules) != 0 GROUP BY 1, 2 + HAVING 1=1 + AND ABS(SUM(energy_joules)) != 0 ;""" 
LEADERBOARD_CHART = """ From f0aa7b4954283c47b988cb02790e8c98b867d102 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sun, 31 May 2020 03:27:13 -0700 Subject: [PATCH 222/438] added metadata as a table, update realized_accel at timestep 0, fixed some error with previous query, add different edge constraint for different network --- examples/simulate.py | 7 ++- flow/core/experiment.py | 25 +++++++---- flow/core/kernel/vehicle/traci.py | 2 + flow/data_pipeline/data_pipeline.py | 8 ++-- flow/data_pipeline/lambda_function.py | 11 +++-- flow/data_pipeline/query.py | 64 ++++++++++++++++----------- flow/visualize/i210_replay.py | 2 +- 7 files changed, 74 insertions(+), 45 deletions(-) diff --git a/examples/simulate.py b/examples/simulate.py index 0b183649b..26ed916c0 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -60,6 +60,11 @@ def parse_args(args): help='specify which query should be run by lambda' 'for detail, see upload_to_s3 in data_pipeline.py' ) + parser.add_argument( + '--is_baseline', + action='store_true', + help='specifies whether this is a baseline run' + ) return parser.parse_known_args(args)[0] @@ -98,4 +103,4 @@ def parse_args(args): # Run for the specified number of rollouts. exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, to_aws=flags.to_aws, - only_query=flags.only_query) + only_query=flags.only_query, is_baseline=flags.is_baseline) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index c2f6ed44a..0be1be176 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,7 +1,7 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, get_extra_info +from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info from flow.data_pipeline.leaderboard_utils import network_name_translate from collections import defaultdict import datetime @@ -90,7 +90,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only_query=""): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only_query="", is_baseline=False): """Run the given network for a set number of runs. Parameters @@ -111,6 +111,8 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only Specifies which queries should be automatically run when the simulation data gets uploaded to S3. If an empty str is passed in, then it implies no queries should be run on this. + is_baseline: bool + Specifies whether this is a baseline run. 
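The is_baseline flag documented above feeds the new metadata table, which this patch assembles as a dict of single-element lists so that each key becomes a column and each appended value a row. A sketch of the resulting one-row CSV, assuming write_dict_to_csv performs a pandas-style dump (the source id is hypothetical):

    from collections import defaultdict
    import pandas as pd

    metadata = defaultdict(lambda: [])
    metadata['source_id'].append('flow_0123abcd')  # hypothetical id
    metadata['network'].append('Single-Lane Ring Road')
    metadata['is_baseline'].append('False')
    pd.DataFrame(metadata).to_csv('flow_0123abcd_METADATA.csv', index=False)
    # -> source_id,network,is_baseline
    #    flow_0123abcd,Single-Lane Ring Road,False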
Returns ------- @@ -153,8 +155,10 @@ def rl_actions(*_): # data pipeline extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) - metadata = defaultdict(lambda: "") - metadata['network'] = network_name_translate(self.env.network.name.split('_20')[0]) + metadata = defaultdict(lambda: []) + metadata['source_id'].append(source_id) + metadata['network'].append(network_name_translate(self.env.network.name.split('_20')[0])) + metadata['is_baseline'].append(str(is_baseline)) for i in range(num_runs): ret = 0 @@ -220,13 +224,18 @@ def rl_actions(*_): os.remove(emission_path) trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) + write_dict_to_csv(trajectory_table_path, extra_info) + metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) + write_dict_to_csv(metadata_table_path, metadata) if to_aws: cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( - cur_date, source_id, source_id), - trajectory_table_path, metadata) + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( + cur_date, source_id), + trajectory_table_path, {'network': metadata['network']}) + upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' + '{1}_METADATA.csv'.format(cur_date, source_id), + metadata_table_path) # delete the S3-only version of the trajectory file # os.remove(upload_file_path) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 9ca112345..f59fa9ba0 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1186,6 +1186,8 @@ def get_accel_with_noise_with_failsafe(self, veh_id): def get_realized_accel(self, veh_id): """See parent class.""" + if self.get_distance(veh_id) == 0: + return 0 return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step def get_2d_position(self, veh_id, error=-1001): diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index b1b9d1fef..f98c1fb60 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -38,7 +38,7 @@ def generate_trajectory_table(data_path, extra_info, partition_name): return output_file_path -def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): +def write_dict_to_csv(data_path, extra_info, partition_name=None): """Generate desired output for the trajectory_table based only on flow output. Parameters @@ -65,7 +65,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): return -def upload_to_s3(bucket_name, bucket_key, file_path, metadata): +def upload_to_s3(bucket_name, bucket_key, file_path, metadata={}): """Upload a file to S3 bucket. Parameters @@ -247,7 +247,7 @@ def repair_partition(self, table, query_date, partition): self.update_partition(table, query_date, partition) def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", - query_date="today", partition="default"): + query_date="today", partition="default", **kwargs): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -277,7 +277,7 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu source_id = "flow_{}".format(partition.split('_')[1]) response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(date=query_date, partition=source_id), + QueryString=QueryStrings[query_name].value.format(date=query_date, partition=source_id, **kwargs), QueryExecutionContext={ 'Database': 'circles' }, diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 3a9f55ded..0985b0cdc 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -2,7 +2,7 @@ import boto3 from urllib.parse import unquote_plus from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data -from flow.data_pipeline.query import tags, tables +from flow.data_pipeline.query import tags, tables, network_using_edge, X_CONSTRAINT, EDGE_CONSTRAINT s3 = boto3.client('s3') queryEngine = AthenaQuery() @@ -40,8 +40,11 @@ def lambda_handler(event, context): # initialize the queries for bucket, key, table, query_date, partition in records: source_id = "flow_{}".format(partition.split('_')[1]) - # response = s3.head_object(Bucket=bucket, Key=key) - # required_query = response["Metadata"]["run-query"] + response = s3.head_object(Bucket=bucket, Key=key) + network_constraint = X_CONSTRAINT + if 'network' in response["Metadata"]: + if response["Metadata"]['network'] in network_using_edge: + network_constraint = EDGE_CONSTRAINT query_dict = tags[table] @@ -57,4 +60,4 @@ def lambda_handler(event, context): query_date, source_id, query_name) - queryEngine.run_query(query_name, result_location, query_date, partition) + queryEngine.run_query(query_name, result_location, query_date, partition, constraint=network_constraint) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 4bcc78b5f..31eea85fd 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -23,6 +23,12 @@ "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart", "leaderboard_chart_agg"] +network_using_edge = ["I-210 without Ramps"] + +X_CONSTRAINT = "x BETWEEN 500 AND 2300" + +EDGE_CONSTRAINT = "edge_id <> ANY (VALUES 'ghost0', '119257908#3')" + VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT id, @@ -141,7 +147,7 @@ class QueryStrings(Enum): WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' - AND x BETWEEN 500 AND 2300 + AND {constraint} GROUP BY 1, 2 ), agg AS ( SELECT @@ -180,10 +186,10 @@ class QueryStrings(Enum): WHERE 1 = 1 AND v.date = \'{date}\' AND v.partition_name = \'{partition}\' - AND v.x BETWEEN 500 AND 2300 + AND v.{constraint} GROUP BY 1, 2, 3 HAVING 1 = 1 - AND MIN(distance) - MIN(distance) > 10 + AND MAX(distance) - MIN(distance) > 10 AND COUNT(DISTINCT e.time_step) > 10 ) SELECT @@ -247,7 +253,7 @@ class QueryStrings(Enum): WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' - AND x BETWEEN 500 AND 2300 + AND {constraint} GROUP BY 1, 2 ), inflows AS ( SELECT @@ -287,7 +293,7 @@ class QueryStrings(Enum): vt.source_id, vt.time_step, vt.distance - FIRST_VALUE(vt.distance) - OVER (PARITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS distance_meters, + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS distance_meters, energy_model_id, et.speed, et.acceleration, @@ -307,7 +313,7 @@ class QueryStrings(Enum): WHERE 1 = 1 AND vt.date = \'{date}\' AND vt.partition_name = 
\'{partition}\' - AND vt.x BETWEEN 500 AND 2300 + AND vt.{constraint} AND vt.time_step >= 720 ), cumulative_energy AS ( SELECT @@ -327,9 +333,9 @@ class QueryStrings(Enum): AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, - AVG(target_accel_no_noise_with_failsafe) AS accel_avg, - AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe) AS accel_upper_bound, - AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) AS accel_lower_bound, + AVG(acceleration) AS accel_avg, + AVG(acceleration) + STDDEV(acceleration) AS accel_upper_bound, + AVG(acceleration) - STDDEV(acceleration) AS accel_lower_bound, AVG(energy_joules) AS cumulative_energy_avg, AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound @@ -371,9 +377,9 @@ class QueryStrings(Enum): bce.accel_avg, bce.accel_upper_bound, bce.accel_lower_bound, - be.instantaneous_energy_avg, - be.instantaneous_energy_upper_bound, - be.instantaneous_energy_lower_bound + COALESCE(be.instantaneous_energy_avg, 0) AS instantaneous_energy_avg, + COALESCE(be.instantaneous_energy_upper_bound, 0) AS instantaneous_energy_upper_bound, + COALESCE(be.instantaneous_energy_lower_bound, 0) AS instantaneous_energy_lower_bound FROM binned_cumulative_energy bce FULL OUTER JOIN binned_energy be ON 1 = 1 AND bce.source_id = be.source_id @@ -406,7 +412,7 @@ class QueryStrings(Enum): WHERE 1 = 1 AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' - AND vt.x BETWEEN 500 AND 2300 + AND vt.{constraint} AND vt.time_step >= 720 ), cumulative_energy AS ( SELECT @@ -425,9 +431,9 @@ class QueryStrings(Enum): AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, - AVG(target_accel_no_noise_with_failsafe) AS accel_avg, - AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe) AS accel_upper_bound, - AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) AS accel_lower_bound, + AVG(acceleration) AS accel_avg, + AVG(acceleration) + STDDEV(acceleration) AS accel_upper_bound, + AVG(acceleration) - STDDEV(acceleration) AS accel_lower_bound, AVG(energy_joules) AS cumulative_energy_avg, AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound @@ -469,9 +475,9 @@ class QueryStrings(Enum): bce.accel_avg, bce.accel_upper_bound, bce.accel_lower_bound, - be.instantaneous_energy_avg, - be.instantaneous_energy_upper_bound, - be.instantaneous_energy_lower_bound + COALESCE(be.instantaneous_energy_avg, 0) AS instantaneous_energy_avg, + COALESCE(be.instantaneous_energy_upper_bound, 0) AS instantaneous_energy_upper_bound, + COALESCE(be.instantaneous_energy_lower_bound, 0) AS instantaneous_energy_lower_bound FROM binned_cumulative_energy bce FULL OUTER JOIN binned_energy be ON 1 = 1 AND bce.source_id = be.source_id @@ -481,12 +487,16 @@ class QueryStrings(Enum): LEADERBOARD_CHART_AGG = """ SELECT - date AS submission_date, - source_id, - energy_model_id, - efficiency_meters_per_joules, - efficiency_miles_per_gallon, - throughput_per_hour - FROM leaderboard_chart - ORDER BY query_date, source_id ASC + l.date AS submission_date, + l.source_id, + m.network, + m.is_baseline, + l.energy_model_id, + l.efficiency_meters_per_joules, + 
l.efficiency_miles_per_gallon, + l.throughput_per_hour + FROM leaderboard_chart AS l, metadata_table AS m + WHERE 1 = 1 + AND l.source_id = m.source_id + ORDER BY l.date, source_id ASC ;""" diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index cf5565442..faadd87b4 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -337,7 +337,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), - upload_file_path, str(args.only_query)[2:-2]) + upload_file_path) # print the location of the emission csv file print("\nGenerated emission file at " + output_path) From b46d5f933e2560e7c83f18ffa369dcc3081c9ab4 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:21:00 -0700 Subject: [PATCH 223/438] minor re-formats --- flow/core/experiment.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 0be1be176..3589f7d36 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -230,14 +230,13 @@ def rl_actions(*_): if to_aws: cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( - cur_date, source_id), - trajectory_table_path, {'network': metadata['network']}) - upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' - '{1}_METADATA.csv'.format(cur_date, source_id), + upload_to_s3('circles.data.pipeline', + 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id), + trajectory_table_path, + {'network': metadata['network']}) + upload_to_s3('circles.data.pipeline', + 'metadata_table/date={0}/partition_name={1}_METADATA/', + '{1}_METADATA.csv'.format(cur_date, source_id), metadata_table_path) - # delete the S3-only version of the trajectory file - # os.remove(upload_file_path) - return info_dict From 15b646be3e8505db028712bcb905766ecf460c61 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:28:16 -0700 Subject: [PATCH 224/438] update docstring for write_dict_to_csv --- flow/data_pipeline/data_pipeline.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index f98c1fb60..4b2861a22 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -49,19 +49,9 @@ def write_dict_to_csv(data_path, extra_info, partition_name=None): extra information needed in the trajectory table, collected from flow partition_name: str the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file that should be used for - upload to s3 only, it does not the human readable column names and - will be deleted after uploading to s3. 
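The upload_to_s3 calls reformatted above pair each local CSV with a date- and partition-keyed bucket key such as fact_vehicle_trace/date=.../partition_name=.../....csv. The helper's body is outside this excerpt, so the boto3 sketch below is only an assumption of its shape; note, too, that a None default is the usual Python idiom where this series gives upload_to_s3 a mutable metadata={} default:

    import boto3

    def upload_to_s3(bucket_name, bucket_key, file_path, metadata=None):
        # Sketch only; the real implementation is not shown in this excerpt.
        # S3 object metadata must be a flat dict of string key/value pairs.
        extra_args = {'Metadata': metadata} if metadata else {}
        boto3.client('s3').upload_file(file_path, bucket_name, bucket_key,
                                       ExtraArgs=extra_args)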
A copy of this file with all - the column name will remain in the ./data folder """ extra_info = pd.DataFrame.from_dict(extra_info) - # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) - # upload_only_file_path = data_path[:-4] + "_upload" + ".csv" - # extra_info.to_csv(upload_only_file_path, index=False, header=False) return From 17802fdc3d7ce11edec7536852ac859af9c6f498 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:33:28 -0700 Subject: [PATCH 225/438] simplify network_name_translate --- flow/data_pipeline/leaderboard_utils.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 0ab8dc6cd..1b3cb64c3 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -12,9 +12,7 @@ def network_name_translate(network_name): """Translate network name to a human friendly name for the leaderboard.""" - if network_name in network_name_map: - return network_name_map[network_name] - return network_name + return network_name_map.get(network_name, network_name) def key_to_name(key): From a0b60c5c7743c725afa2c42bd183ffd44cee3cae Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:47:52 -0700 Subject: [PATCH 226/438] modify constraint specifications for run_query --- flow/data_pipeline/lambda_function.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 0985b0cdc..d296d99d1 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -2,7 +2,8 @@ import boto3 from urllib.parse import unquote_plus from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data -from flow.data_pipeline.query import tags, tables, network_using_edge, X_CONSTRAINT, EDGE_CONSTRAINT +from flow.data_pipeline.query import tags, tables, network_using_edge +from flow.data_pipeline.query import X_CONSTRAINT, EDGE_CONSTRAINT, WARMUP_STEPS, HORIZON_STEPS s3 = boto3.client('s3') queryEngine = AthenaQuery() @@ -38,13 +39,15 @@ def lambda_handler(event, context): records.append((bucket, key, table, query_date, partition)) # initialize the queries + start_constraint = WARMUP_STEPS + stop_constraint = WARMUP_STEPS + HORIZON_STEPS for bucket, key, table, query_date, partition in records: source_id = "flow_{}".format(partition.split('_')[1]) response = s3.head_object(Bucket=bucket, Key=key) - network_constraint = X_CONSTRAINT + loc_constraint = X_CONSTRAINT if 'network' in response["Metadata"]: if response["Metadata"]['network'] in network_using_edge: - network_constraint = EDGE_CONSTRAINT + loc_constraint = EDGE_CONSTRAINT query_dict = tags[table] @@ -60,4 +63,10 @@ def lambda_handler(event, context): query_date, source_id, query_name) - queryEngine.run_query(query_name, result_location, query_date, partition, constraint=network_constraint) + queryEngine.run_query(query_name, + result_location, + query_date, + partition, + loc_constraint=loc_constraint, + start_constraint=start_constraint, + stop_constraint=stop_constraint) From 79afdae957f06115f0efbb4c465803b878f27772 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:53:26 -0700 Subject: [PATCH 227/438] rename loc_filter, add time filters --- flow/data_pipeline/query.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git 
a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 31eea85fd..2d70daed7 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -25,9 +25,13 @@ network_using_edge = ["I-210 without Ramps"] -X_CONSTRAINT = "x BETWEEN 500 AND 2300" +X_FILTER = "x BETWEEN 500 AND 2300" -EDGE_CONSTRAINT = "edge_id <> ANY (VALUES 'ghost0', '119257908#3')" +EDGE_FILTER = "edge_id <> ANY (VALUES 'ghost0', '119257908#3')" + +WARMUP_STEPS = 600 * 3 * 0.4 + +HORIZON_STEPS = 1000 * 3 * 0.4 VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT @@ -147,7 +151,7 @@ class QueryStrings(Enum): WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' - AND {constraint} + AND {loc_filter} GROUP BY 1, 2 ), agg AS ( SELECT @@ -156,7 +160,7 @@ class QueryStrings(Enum): MAX(enter_time) - MIN(enter_time) AS total_time_seconds FROM min_time WHERE 1 = 1 - AND enter_time >= 720 + AND enter_time >= {start_filter} GROUP BY 1 ) SELECT @@ -182,11 +186,11 @@ class QueryStrings(Enum): AND e.date = \'{date}\' AND e.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND e.time_step >= 720 + AND e.time_step >= {start_filter} WHERE 1 = 1 AND v.date = \'{date}\' AND v.partition_name = \'{partition}\' - AND v.{constraint} + AND v.{loc_filter} GROUP BY 1, 2, 3 HAVING 1 = 1 AND MAX(distance) - MIN(distance) > 10 @@ -253,7 +257,7 @@ class QueryStrings(Enum): WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' - AND {constraint} + AND {loc_filter} GROUP BY 1, 2 ), inflows AS ( SELECT @@ -262,7 +266,7 @@ class QueryStrings(Enum): 60 * COUNT(DISTINCT id) AS inflow_rate FROM min_max_time_step WHERE 1 = 1 - AND min_time_step >= 720 + AND min_time_step >= {start_filter} GROUP BY 1, 2 ), outflows AS ( SELECT @@ -271,7 +275,7 @@ class QueryStrings(Enum): 60 * COUNT(DISTINCT id) AS outflow_rate FROM min_max_time_step WHERE 1 = 1 - AND max_time_step < 1200 + AND max_time_step < {stop_filter} GROUP BY 1, 2 ) SELECT @@ -313,8 +317,8 @@ class QueryStrings(Enum): WHERE 1 = 1 AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' - AND vt.{constraint} - AND vt.time_step >= 720 + AND vt.{loc_filter} + AND vt.time_step >= {start_filter} ), cumulative_energy AS ( SELECT id, @@ -412,8 +416,8 @@ class QueryStrings(Enum): WHERE 1 = 1 AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' - AND vt.{constraint} - AND vt.time_step >= 720 + AND vt.{loc_filter} + AND vt.time_step >= {start_filter} ), cumulative_energy AS ( SELECT id, From 09a26a3cbf393d59e0ba6b5a3d7678ce96991dff Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:54:18 -0700 Subject: [PATCH 228/438] rename constraints to filters --- flow/data_pipeline/lambda_function.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index d296d99d1..dfcd95f43 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -39,15 +39,15 @@ def lambda_handler(event, context): records.append((bucket, key, table, query_date, partition)) # initialize the queries - start_constraint = WARMUP_STEPS - stop_constraint = WARMUP_STEPS + HORIZON_STEPS + start_filter = WARMUP_STEPS + stop_filter = WARMUP_STEPS + HORIZON_STEPS for bucket, key, table, query_date, partition in records: source_id = "flow_{}".format(partition.split('_')[1]) response = s3.head_object(Bucket=bucket, Key=key) - loc_constraint = 
X_CONSTRAINT + loc_filter = X_FILTER if 'network' in response["Metadata"]: if response["Metadata"]['network'] in network_using_edge: - loc_constraint = EDGE_CONSTRAINT + loc_filter = EDGE_FILTER query_dict = tags[table] @@ -67,6 +67,6 @@ def lambda_handler(event, context): result_location, query_date, partition, - loc_constraint=loc_constraint, - start_constraint=start_constraint, - stop_constraint=stop_constraint) + loc_filter=loc_filter, + start_filter=start_filter, + stop_filter=stop_filter) From f7a4d6ed4d22065585b97a0a7333636e439d1604 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:54:53 -0700 Subject: [PATCH 229/438] rename constraints to filters --- flow/data_pipeline/lambda_function.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index dfcd95f43..00cf0fba5 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -3,7 +3,7 @@ from urllib.parse import unquote_plus from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data from flow.data_pipeline.query import tags, tables, network_using_edge -from flow.data_pipeline.query import X_CONSTRAINT, EDGE_CONSTRAINT, WARMUP_STEPS, HORIZON_STEPS +from flow.data_pipeline.query import X_FILTER, EDGE_FILTER, WARMUP_STEPS, HORIZON_STEPS s3 = boto3.client('s3') queryEngine = AthenaQuery() From 830516557fc9fda2e113a5025dd2d843bfd3f577 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:57:15 -0700 Subject: [PATCH 230/438] tweak queries for styling --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2d70daed7..845609494 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -206,7 +206,7 @@ class QueryStrings(Enum): 19972 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon FROM sub_fact_vehicle_trace WHERE 1 = 1 - AND ABS(power_watts * time_step_size_seconds) > 0 + AND power_watts * time_step_size_seconds != 0 ; """ @@ -225,7 +225,7 @@ class QueryStrings(Enum): AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' GROUP BY 1, 2 HAVING 1=1 - AND ABS(SUM(energy_joules)) != 0 + AND SUM(energy_joules) != 0 ;""" LEADERBOARD_CHART = """ @@ -237,13 +237,13 @@ class QueryStrings(Enum): t.throughput_per_hour FROM fact_network_throughput_agg AS t JOIN fact_network_fuel_efficiency_agg AS e ON 1 = 1 - AND t.date = \'{date}\' - AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' AND e.date = \'{date}\' AND e.partition_name = \'{partition}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' AND t.source_id = e.source_id AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' WHERE 1 = 1 + AND t.date = \'{date}\' + AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' ;""" FACT_NETWORK_INFLOWS_OUTFLOWS = """ From ac8c5457506715c9e8beb4ceafc6771a174c1910 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 22:00:32 -0700 Subject: [PATCH 231/438] remove outer joins to avoid edge cases --- flow/data_pipeline/query.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 845609494..a2ef711cc 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -370,8 +370,8 @@ class QueryStrings(Enum): GROUP BY 1, 2 ) SELECT - COALESCE(bce.source_id, be.source_id) AS 
source_id, - COALESCE(bce.distance_meters_bin, be.distance_meters_bin) AS distance_meters_bin, + bce.source_id AS source_id, + bce.distance_meters_bin AS distance_meters_bin, bce.cumulative_energy_avg, bce.cumulative_energy_lower_bound, bce.cumulative_energy_upper_bound, @@ -385,7 +385,7 @@ class QueryStrings(Enum): COALESCE(be.instantaneous_energy_upper_bound, 0) AS instantaneous_energy_upper_bound, COALESCE(be.instantaneous_energy_lower_bound, 0) AS instantaneous_energy_lower_bound FROM binned_cumulative_energy bce - FULL OUTER JOIN binned_energy be ON 1 = 1 + JOIN binned_energy be ON 1 = 1 AND bce.source_id = be.source_id AND bce.distance_meters_bin = be.distance_meters_bin ORDER BY distance_meters_bin ASC @@ -468,8 +468,8 @@ class QueryStrings(Enum): GROUP BY 1, 2 ) SELECT - COALESCE(bce.source_id, be.source_id) AS source_id, - COALESCE(bce.time_seconds_bin, be.time_seconds_bin) AS time_seconds_bin, + bce.source_id AS source_id, + bce.time_seconds_bin AS time_seconds_bin, bce.cumulative_energy_avg, bce.cumulative_energy_lower_bound, bce.cumulative_energy_upper_bound, @@ -483,7 +483,7 @@ class QueryStrings(Enum): COALESCE(be.instantaneous_energy_upper_bound, 0) AS instantaneous_energy_upper_bound, COALESCE(be.instantaneous_energy_lower_bound, 0) AS instantaneous_energy_lower_bound FROM binned_cumulative_energy bce - FULL OUTER JOIN binned_energy be ON 1 = 1 + JOIN binned_energy be ON 1 = 1 AND bce.source_id = be.source_id AND bce.time_seconds_bin = be.time_seconds_bin ORDER BY time_seconds_bin ASC From f9f75af78ca28cfba3360476ebec26f44c931e85 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 22:04:50 -0700 Subject: [PATCH 232/438] rename query_date to submission-date --- flow/data_pipeline/data_pipeline.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 4b2861a22..5669fb6a8 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -203,20 +203,20 @@ def wait_for_execution(self, execution_id): return False return True - def update_partition(self, table, query_date, partition): + def update_partition(self, table, submission_date, partition): """Load the given partition to the trajectory_table on Athena. 
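update_partition here registers a new date/partition pair by running a statement from QueryStrings['UPDATE_PARTITION'] through the same Athena client that run_query uses (the start_query_execution call is shown just below). The enum value itself is not part of this excerpt; a typical Athena partition-load statement of the shape it would need, offered purely as an assumption, is:

    # Hypothetical template; the real QueryStrings.UPDATE_PARTITION value is
    # not shown in this excerpt.
    UPDATE_PARTITION = (
        "ALTER TABLE {table} ADD IF NOT EXISTS "
        "PARTITION (date = '{date}', partition_name = '{partition}')"
    )

    print(UPDATE_PARTITION.format(table='fact_vehicle_trace',
                                  date='2020-05-31',
                                  partition='flow_abc123'))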
        Parameters
        ----------
        table : str
            the name of the table to update
-        query_date : str
+        submission_date : str
            the new partition date that needs to be loaded
        partition : str
            the new partition that needs to be loaded
        """
        response = self.client.start_query_execution(
            QueryString=QueryStrings['UPDATE_PARTITION'].value.format(table=table, date=submission_date,
                                                                      partition=partition),
            QueryExecutionContext={
                'Database': 'circles'
            },
        )
        if self.wait_for_execution(response['QueryExecutionId']):
            raise RuntimeError("update partition timed out")
        self.existing_partitions[table].append("date={}/partition_name={}".format(submission_date, partition))
        return

-    def repair_partition(self, table, query_date, partition):
+    def repair_partition(self, table, submission_date, partition):
        """Load the missing partitions."""
        if table not in self.existing_partitions.keys():
            self.existing_partitions[table] = self.get_existing_partitions(table)
-        if "date={}/partition_name={}".format(query_date, partition) not in \
+        if "date={}/partition_name={}".format(submission_date, partition) not in \
                self.existing_partitions[table]:
-            self.update_partition(table, query_date, partition)
+            self.update_partition(table, submission_date, partition)

    def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/",
-                  query_date="today", partition="default", **kwargs):
+                  submission_date="today", partition="default", **kwargs):
        """Start the execution of a query, does not wait for it to finish.

        Parameters
        ----------
        query_name : str
            name of the query in QueryStrings enum that will be run
        result_location: str, optional
            location on the S3 bucket where the result will be stored
-        query_date : str
+        submission_date : str
            name of the partition date to run this query on
        partition: str, optional
            name of the partition to run this query on
@@ -261,13 +261,13 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu
        if query_name not in QueryStrings.__members__:
            raise ValueError("query not existed: please add it to query.py")

-        if query_date == "today":
-            query_date = date.today().isoformat()
+        if submission_date == "today":
+            submission_date = date.today().isoformat()

        source_id = "flow_{}".format(partition.split('_')[1])

        response = self.client.start_query_execution(
-            QueryString=QueryStrings[query_name].value.format(date=query_date, partition=source_id, **kwargs),
+            QueryString=QueryStrings[query_name].value.format(date=submission_date, partition=source_id, **kwargs),
            QueryExecutionContext={
                'Database': 'circles'
            },

From 660891a540a8e1a0c567659ab05333b7ce86d349 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Sun, 31 May 2020 23:19:34 -0700
Subject: [PATCH 233/438] write simulation results to disk every 100 time
 steps

---
 flow/controllers/velocity_controllers.py |  3 +-
 flow/core/experiment.py                  | 37 ++++++++++++++----------
 flow/core/kernel/vehicle/traci.py        |  2 ++
 flow/data_pipeline/data_pipeline.py      |  4 +--
 flow/data_pipeline/query.py              |  2 +-
 flow/visualize/i210_replay.py            | 17 +++++++++--
 6 files changed, 43 insertions(+), 22 deletions(-)

diff --git a/flow/controllers/velocity_controllers.py b/flow/controllers/velocity_controllers.py
index 62ce15beb..e1f69de98 100644
--- a/flow/controllers/velocity_controllers.py
+++ b/flow/controllers/velocity_controllers.py
@@ -120,7 +120,8 @@ def get_accel(self, env):
                 env.k.vehicle.get_edge(self.veh_id)[0] == ":"\
                 or (self.control_length and (env.k.vehicle.get_x_by_id(self.veh_id) < self.control_length[0]
                 or env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1]))\
-                or edge in self.no_control_edges:
+                or (self.no_control_edges is not None and len(self.no_control_edges) > 0
+                    and edge in self.no_control_edges):
             # TODO(@evinitsky) put back
             # or env.k.vehicle.get_edge(self.veh_id) in self.no_control_edges:
             return None

diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index 0be1be176..e5fe0cb4a 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -4,10 +4,9 @@
 from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info
 from flow.data_pipeline.leaderboard_utils import network_name_translate
 from collections import defaultdict
-import datetime
+from datetime import datetime, timezone
 import logging
 import time
-from datetime import date
 import os
 import numpy as np
 import uuid
@@ -86,7 +85,7 @@ def __init__(self, flow_params, custom_callables=None):
         self.env = create_env()

         logging.info(" Starting experiment {} at {}".format(
-            self.env.network.name, str(datetime.datetime.utcnow())))
+            self.env.network.name, str(datetime.utcnow())))

         logging.info("Initializing environment.")

@@ -156,10 +155,19 @@ def rl_actions(*_):
         extra_info = defaultdict(lambda: [])
         source_id = 'flow_{}'.format(uuid.uuid4().hex)
         metadata = defaultdict(lambda: [])
+        # collect current time
+        cur_datetime = datetime.now(timezone.utc)
+        cur_date = cur_datetime.date().isoformat()
+        cur_time = cur_datetime.time().isoformat()
         metadata['source_id'].append(source_id)
+        metadata['submission_time'].append(cur_time)
         metadata['network'].append(network_name_translate(self.env.network.name.split('_20')[0]))
         metadata['is_baseline'].append(str(is_baseline))

+        dir_path = self.env.sim_params.emission_path
+        trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id))
+        metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id))
+
         for i in range(num_runs):
             ret = 0
             vel = []
@@ -181,6 +189,11 @@ def rl_actions(*_):
                 extra_info["source_id"].extend([source_id] * len(veh_ids))
                 extra_info["run_id"].extend(['run_{}'.format(i)] * len(veh_ids))

+                # write to disk every 100 steps
+                if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0:
+                    write_dict_to_csv(trajectory_table_path, extra_info, not j)
+                    extra_info.clear()
+
                 # Compute the results for the custom callables.
                 for (key, lambda_func) in self.custom_callables.items():
                     custom_vals[key].append(lambda_func(self.env))
@@ -212,32 +225,26 @@ def rl_actions(*_):
                 time.sleep(0.1)

             # collect the location of the emission file
-            dir_path = self.env.sim_params.emission_path
             emission_filename = \
                 "{0}-emission.xml".format(self.env.network.name)
             emission_path = os.path.join(dir_path, emission_filename)

             # convert the emission file into a csv
-            emission_to_csv(emission_path)
+            # FIXME: Brent: produces a seg fault with large CSVs
+            # emission_to_csv(emission_path)

             # Delete the .xml version of the emission file.
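With emission_to_csv disabled above (the FIXME notes it produces a seg fault on large files), the trajectory CSV is instead built incrementally inside the rollout loop: every 100 steps the buffered rows are appended to disk and the buffer is cleared. Since "not j" is truthy only at j == 0, the CSV header is written exactly once. A self-contained sketch of that buffer-and-flush pattern, with made-up column names and pandas as the only dependency:

    from collections import defaultdict
    import pandas as pd

    def flush(path, buffer, include_header):
        # Append the buffered rows to the CSV and drop them from memory,
        # mirroring write_dict_to_csv(trajectory_table_path, extra_info, not j).
        pd.DataFrame.from_dict(buffer).to_csv(
            path, mode='a+', index=False, header=include_header)
        buffer.clear()

    buffer = defaultdict(list)
    for j in range(250):                # stand-in for the per-step loop
        buffer['time_step'].append(j)   # made-up columns
        buffer['speed'].append(0.0)
        if j % 100 == 0:
            flush('trajectory.csv', buffer, include_header=(j == 0))
    flush('trajectory.csv', buffer, include_header=False)  # final partial chunk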
os.remove(emission_path) - trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) write_dict_to_csv(trajectory_table_path, extra_info) - metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) - write_dict_to_csv(metadata_table_path, metadata) + write_dict_to_csv(metadata_table_path, metadata, True) if to_aws: - cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( - cur_date, source_id), - trajectory_table_path, {'network': metadata['network']}) upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' '{1}_METADATA.csv'.format(cur_date, source_id), metadata_table_path) - - # delete the S3-only version of the trajectory file - # os.remove(upload_file_path) + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( + cur_date, source_id), + trajectory_table_path, {'network': metadata['network'][0]}) return info_dict diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index f59fa9ba0..2a6a4da12 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -87,6 +87,8 @@ def __init__(self, # old speeds used to compute accelerations self.previous_speeds = {} + # The time that previous speed is recorded, used to calculate realized_accel + self.previous_time = 0 def initialize(self, vehicles): """Initialize vehicle state information. diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index f98c1fb60..366dc9bd1 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -38,7 +38,7 @@ def generate_trajectory_table(data_path, extra_info, partition_name): return output_file_path -def write_dict_to_csv(data_path, extra_info, partition_name=None): +def write_dict_to_csv(data_path, extra_info, include_header=False): """Generate desired output for the trajectory_table based only on flow output. 
     Parameters
@@ -59,7 +59,7 @@ def write_dict_to_csv(data_path, extra_info, partition_name=None):
     """
     extra_info = pd.DataFrame.from_dict(extra_info)
     # extra_info["partition"] = partition_name
-    extra_info.to_csv(data_path, index=False)
+    extra_info.to_csv(data_path, mode='a+', index=False, header=include_header)
     # upload_only_file_path = data_path[:-4] + "_upload" + ".csv"
     # extra_info.to_csv(upload_only_file_path, index=False, header=False)
     return

diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index 31eea85fd..feb597143 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -21,7 +21,7 @@
 tables = ["fact_vehicle_trace", "fact_energy_trace", "fact_network_throughput_agg",
           "fact_network_inflows_outflows", "fact_vehicle_fuel_efficiency_agg",
           "fact_network_metrics_by_distance_agg", "fact_network_metrics_by_time_agg",
           "fact_network_fuel_efficiency_agg", "leaderboard_chart",
-          "leaderboard_chart_agg"]
+          "leaderboard_chart_agg", "metadata_table"]

 network_using_edge = ["I-210 without Ramps"]

diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py
index faadd87b4..a70fd72e3 100644
--- a/flow/visualize/i210_replay.py
+++ b/flow/visualize/i210_replay.py
@@ -1,6 +1,6 @@
 """Transfer and replay for i210 environment."""
 import argparse
-from datetime import datetime
+from datetime import datetime, timezone
 from collections import defaultdict
 from copy import deepcopy
 import numpy as np
@@ -32,7 +32,8 @@
 from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS
 from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables

-from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, get_extra_info
+from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info
+from flow.data_pipeline.leaderboard_utils import network_name_translate
 import uuid

 EXAMPLE_USAGE = """
@@ -208,8 +209,18 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
         key: [] for key in custom_callables.keys()
     })

+    # data pipeline
     extra_info = defaultdict(lambda: [])
     source_id = 'flow_{}'.format(uuid.uuid4().hex)
+    metadata = defaultdict(lambda: [])
+    # collect current time
+    cur_datetime = datetime.now(timezone.utc)
+    cur_date = cur_datetime.date().isoformat()
+    cur_time = cur_datetime.time().isoformat()
+    metadata['source_id'].append(source_id)
+    metadata['submission_time'].append(cur_time)
+    metadata['network'].append(network_name_translate(env.network.name.split('_20')[0]))
+    metadata['is_baseline'].append(str(is_baseline))

     i = 0
     while i < args.num_rollouts:
@@ -337,7 +341,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=

     # generate the trajectory output file
     trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id))
-    upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info)
+    upload_file_path = write_dict_to_csv(trajectory_table_path, extra_info)

     # upload to s3 if asked
     if args.use_s3:

From d617c2f452bc924290145d1b59c2abb17e4d9ed8 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Mon, 1 Jun 2020 10:19:19 -0700
Subject: [PATCH 234/438] reformat upload_to_s3, add missing comma

---
 flow/core/experiment.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index d849def74..64674dfda 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -240,11 +240,13 @@ def rl_actions(*_):
         write_dict_to_csv(metadata_table_path, metadata, True)

         if to_aws:
-            upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/'
-                         '{1}_METADATA.csv'.format(cur_date, source_id),
+            upload_to_s3('circles.data.pipeline',
+                         'metadata_table/date={0}/partition_name={1}_METADATA/',
+                         '{1}_METADATA.csv'.format(cur_date, source_id),
                          metadata_table_path)
-            upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(
-                cur_date, source_id),
-                trajectory_table_path, {'network': metadata['network'][0]})
+            upload_to_s3('circles.data.pipeline',
+                         'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id),
+                         trajectory_table_path,
+                         {'network': metadata['network'][0]})

     return info_dict

From a7eda70c3b69a3a183faa5228a45a88c4751085c Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Mon, 1 Jun 2020 10:38:08 -0700
Subject: [PATCH 235/438] fix i210 replay data collection

---
 flow/data_pipeline/data_pipeline.py |  4 +++-
 flow/data_pipeline/query.py         |  2 +-
 flow/visualize/i210_replay.py       | 21 +++++++++++++------
 3 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py
index 3b6f953d9..ea5307dad 100644
--- a/flow/data_pipeline/data_pipeline.py
+++ b/flow/data_pipeline/data_pipeline.py
@@ -48,7 +48,9 @@ def write_dict_to_csv(data_path, extra_info, include_header=False):
     extra_info: dict
         extra information needed in the trajectory table, collected from flow
     include_header: bool
-        whether or not to include the header in the output
+        whether or not to include the header in the output; this should be set
+        to True for the first write to an empty or newly created CSV, and set
+        to False for subsequent appends.
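One detail worth pausing on in PATCH 234 above: with the newly added comma, the two metadata_table string literals become separate positional arguments to upload_to_s3; without a comma, Python's implicit concatenation of adjacent string literals joins them into a single S3 key before .format() runs. PATCH 236 below removes the comma again for exactly this reason. A short illustration with placeholder values:

    key = ('metadata_table/date={0}/partition_name={1}_METADATA/'
           '{1}_METADATA.csv'.format('2020-06-01', 'flow_abc123'))
    print(key)
    # metadata_table/date=2020-06-01/partition_name=flow_abc123_METADATA/flow_abc123_METADATA.csv
    # With a comma after the first literal, upload_to_s3 would instead receive
    # the unformatted prefix and the formatted file name as two arguments.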
""" extra_info = pd.DataFrame.from_dict(extra_info) extra_info.to_csv(data_path, mode='a+', index=False, header=include_header) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 29828e685..89432e260 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -502,5 +502,5 @@ class QueryStrings(Enum): FROM leaderboard_chart AS l, metadata_table AS m WHERE 1 = 1 AND l.source_id = m.source_id - ORDER BY l.date, source_id ASC + ORDER BY l.date, m.submission_time, l.source_id ASC ;""" diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index a70fd72e3..b3b4fcb78 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -220,7 +220,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= metadata['source_id'].append(source_id) metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) - metadata['is_baseline'].append(str(is_baseline)) + metadata['is_baseline'].append(str(args.is_baseline)) i = 0 while i < args.num_rollouts: @@ -341,14 +341,18 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # generate the trajectory output file trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - upload_file_path = write_dict_to_csv(trajectory_table_path, extra_info) + write_dict_to_csv(trajectory_table_path, extra_info, True) + metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) + write_dict_to_csv(metadata_table_path, metadata, True) # upload to s3 if asked if args.use_s3: - cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( - cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), - upload_file_path) + upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' + '{1}_METADATA.csv'.format(cur_date, source_id), + metadata_table_path) + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( + cur_date, source_id), + trajectory_table_path, {'network': metadata['network'][0]}) # print the location of the emission csv file print("\nGenerated emission file at " + output_path) @@ -465,6 +469,11 @@ def create_parser(): help='specify which query should be run by lambda' 'for detail, see upload_to_s3 in data_pipeline.py' ) + parser.add_argument( + '--is_baseline', + action='store_true', + help='specifies whether this is a baseline run' + ) return parser From f5f000ee58122c7a994b00e4a9e049b88f84dac4 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 1 Jun 2020 10:41:38 -0700 Subject: [PATCH 236/438] remove extra comma --- flow/core/experiment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 64674dfda..8ac9c3699 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -241,7 +241,7 @@ def rl_actions(*_): if to_aws: upload_to_s3('circles.data.pipeline', - 'metadata_table/date={0}/partition_name={1}_METADATA/', + 'metadata_table/date={0}/partition_name={1}_METADATA/' '{1}_METADATA.csv'.format(cur_date, source_id), metadata_table_path) upload_to_s3('circles.data.pipeline', From 1e42556d58c418311e39e3896304f00291ae510e Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 1 Jun 2020 14:35:30 -0700 Subject: [PATCH 237/438] fix network name mapping for highway-single --- 
 flow/data_pipeline/leaderboard_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py
index 1b3cb64c3..f0c4178df 100644
--- a/flow/data_pipeline/leaderboard_utils.py
+++ b/flow/data_pipeline/leaderboard_utils.py
@@ -5,7 +5,7 @@
 from io import StringIO

-network_name_map = {"highway": "Single-Lane Straight Road",
+network_name_map = {"highway-single": "Single-Lane Straight Road",
                     "ring": "Single-Lane Ring Road",
                     "I-210_subnetwork": "I-210 without Ramps"}

From 1ae0081b693bd4502b601f7ab51fb5dda70987cb Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Mon, 1 Jun 2020 21:08:14 -0700
Subject: [PATCH 238/438] fix data collection issue in i210_replay

---
 flow/visualize/i210_replay.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py
index b3b4fcb78..b2e22d5b3 100644
--- a/flow/visualize/i210_replay.py
+++ b/flow/visualize/i210_replay.py
@@ -262,7 +262,8 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
                 # Collect information from flow for the trajectory output
                 get_extra_info(env.k.vehicle, extra_info, veh_ids)
-                extra_info["source_id"].extend(['{}_run_{}'.format(source_id, i)] * len(veh_ids))
+                extra_info["source_id"].extend([source_id] * len(veh_ids))
+                extra_info["run_id"].extend(['run_{}'.format(i)] * len(veh_ids))

From f7d9ec17c6e57f8bf308e6a3f1343959db5b101b Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Mon, 1 Jun 2020 21:09:26 -0700
Subject: [PATCH 239/438] fix the network name mapping for i210 single lane

---
 flow/data_pipeline/leaderboard_utils.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py
index f0c4178df..7553703f3 100644
--- a/flow/data_pipeline/leaderboard_utils.py
+++ b/flow/data_pipeline/leaderboard_utils.py
@@ -7,7 +7,8 @@
 network_name_map = {"highway-single": "Single-Lane Straight Road",
                     "ring": "Single-Lane Ring Road",
-                    "I-210_subnetwork": "I-210 without Ramps"}
+                    "I-210_subnetwork": "I-210 without Ramps",
+                    "I_210_subnetwork": "I-210 without Ramps"}

From a0c39045be0d74712fd1ab797882778f2006c528 Mon Sep 17 00:00:00 2001
From: Eugene Vinitsky
Date: Tue, 2 Jun 2020 14:33:34 -0400
Subject: [PATCH 240/438] Minor changes to support custom PPO

---
 .../rl/multiagent/multiagent_i210.py |  4 +++-
 flow/visualize/i210_replay.py        | 17 ++++++++++++++++-
 2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py
index f55917e49..8c619ee88 100644
--- a/examples/exp_configs/rl/multiagent/multiagent_i210.py
+++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py
@@ -318,5 +318,7 @@ def policy_mapping_fn(_):
         env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]),
     "avg_outflow": lambda env: np.nan_to_num(env.k.vehicle.get_outflow_rate(120)),
     "avg_energy": lambda env: -1*energy_consumption(env, 0.1),
-    "avg_per_step_energy": lambda env: -1*energy_consumption(env, 0.1) / env.k.vehicle.num_vehicles,
+    "avg_per_step_energy": lambda env: -1*energy_consumption(env, 0.1) / env.k.vehicle.num_vehicles
+    if env.k.vehicle.num_vehicles > 0
+    else 0,
 }

diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py
index
b3b4fcb78..58f8aa755 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -167,7 +167,18 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= rllib_flow_params = get_flow_params(rllib_config) agent_create_env, agent_env_name = make_create_env(params=rllib_flow_params, version=0) register_env(agent_env_name, agent_create_env) - agent_cls = get_agent_class(config_run) + # agent_cls = get_agent_class(config_run) + + if rllib_config['env_config']['run'] == "": + from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel + from ray.rllib.models import ModelCatalog + agent_cls = CCTrainer + ModelCatalog.register_custom_model("cc_model", CentralizedCriticModel) + elif rllib_config['env_config']['run'] == "": + from flow.algorithms.custom_ppo import CustomPPOTrainer + agent_cls = CustomPPOTrainer + elif config_run: + agent_cls = get_agent_class(config_run) # create the agent that will be used to compute the actions agent = agent_cls(env=agent_env_name, config=rllib_config) @@ -209,6 +220,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= key: [] for key in custom_callables.keys() }) + # reroute on exit is a training hack, it should be turned off at test time. + if hasattr(env, "reroute_on_exit"): + env.reroute_on_exit = False + # date pipeline extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) From 5e2f859bcf001254bd9deb476be4ca1535e2af4c Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Tue, 9 Jun 2020 14:19:58 -0700 Subject: [PATCH 241/438] Aimsun bugs (#907) * added aimsunparams * fixed aimsun default template * fixed bug in network close function * fixed bug in simulate.py * minor * minor * bug fix * minor tests Co-authored-by: AboudyKreidieh --- examples/simulate.py | 15 ++++++++++---- examples/train.py | 2 +- flow/core/kernel/network/aimsun.py | 4 +++- flow/utils/aimsun/Aimsun_Flow.ang | Bin 0 -> 159038 bytes tests/fast_tests/test_examples.py | 31 +++++++++++++++++++++++++++++ 5 files changed, 46 insertions(+), 6 deletions(-) create mode 100644 flow/utils/aimsun/Aimsun_Flow.ang diff --git a/examples/simulate.py b/examples/simulate.py index 848f030a4..d1dcc5a79 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -8,6 +8,8 @@ import json import os from flow.core.experiment import Experiment + +from flow.core.params import AimsunParams from flow.utils.rllib import FlowParamsEncoder @@ -20,7 +22,6 @@ def parse_args(args): the output parser object """ parser = argparse.ArgumentParser( - formatter_class=argparse.RawDescriptionHelpFormatter, description="Parse argument used when running a Flow simulation.", epilog="python simulate.py EXP_CONFIG --num_runs INT --no_render") @@ -65,17 +66,23 @@ def parse_args(args): else: callables = None - # Update some variables based on inputs. flow_params['sim'].render = not flags.no_render flow_params['simulator'] = 'aimsun' if flags.aimsun else 'traci' + # If Aimsun is being called, replace SumoParams with AimsunParams. + if flags.aimsun: + sim_params = AimsunParams() + sim_params.__dict__.update(flow_params['sim'].__dict__) + flow_params['sim'] = sim_params + # Specify an emission path if they are meant to be generated. 
if flags.gen_emission: flow_params['sim'].emission_path = "./data" # Create the flow_params object - json_filename = flow_params['exp_tag'] - with open(os.path.join(flow_params['sim'].emission_path, json_filename) + '.json', 'w') as outfile: + fp_ = flow_params['exp_tag'] + dir_ = flow_params['sim'].emission_path + with open(os.path.join(dir_, "{}.json".format(fp_)), 'w') as outfile: json.dump(flow_params, outfile, cls=FlowParamsEncoder, sort_keys=True, indent=4) diff --git a/examples/train.py b/examples/train.py index 652d0efa5..1b2f22476 100644 --- a/examples/train.py +++ b/examples/train.py @@ -306,7 +306,7 @@ def train_h_baselines(flow_params, args, multiagent): # Perform training. alg.learn( - total_timesteps=args.total_steps, + total_steps=args.total_steps, log_dir=dir_name, log_interval=args.log_interval, eval_interval=args.eval_interval, diff --git a/flow/core/kernel/network/aimsun.py b/flow/core/kernel/network/aimsun.py index 0378d45a9..89971bf58 100644 --- a/flow/core/kernel/network/aimsun.py +++ b/flow/core/kernel/network/aimsun.py @@ -262,7 +262,9 @@ def close(self): cur_dir = os.path.join(config.PROJECT_PATH, 'flow/core/kernel/network') os.remove(os.path.join(cur_dir, 'data_%s.json' % self.sim_params.port)) - os.remove('%s_%s' % (self.network.net_params.template, self.sim_params.port)) + if self.network.net_params.template is not None: + os.remove('%s_%s' % (self.network.net_params.template, + self.sim_params.port)) ########################################################################### # State acquisition methods # diff --git a/flow/utils/aimsun/Aimsun_Flow.ang b/flow/utils/aimsun/Aimsun_Flow.ang new file mode 100644 index 0000000000000000000000000000000000000000..17119f4e95771d1699dd7052ac7eddbc651a1fab GIT binary patch literal 159038 zcma&MWmH^Cv^7c~34|aC9wY<;1b1sRKyas#;KAM9g9mqacL?roK^rG@U(ultdHG zLmUYD2mlyjhk4-l8DJJO0t@V)0YMLe6Ttu$%K~fjZydap9)cHwdoVKs0wN9q6!r(} zoB+WY!3-Aah+uQz_+KD*u2JjZe2qFkP2uuhn zuoxp)8wVJ+f^}_=U;_JOg4s5(!8jtg{NpqF&*KEU4<8UbS_t-yRxsp*V2z*)8@w)T z_)G|fFs}ow9TP0d6?V0UwTDML!fY!9TLep(4afg?51$Z}|E~x}Sfn*+7&ls@d67+_NafLR>>OdUJydpThU{@fg}CKlpIlLdC*C94J|O+};8T40yX%Gc3S~7{MXw^}gyWtSw5ATxP~!sZ4{d1MWoZAt`;{5QD*Ll7pex&k#k+ z9f{Ojh7rX_9O=E%Lha0%XSz2MR!hdb%}3m8eyyp&snWVT0p|69`XSj(qS}h9LBy_ z2)}r#b*SBe!Z?zJg>l<3cZEd2w?7j0{K0+whEmrGPHgxDk-Lvw5eVtb-zb6|d#CE9P}C8}=~y%@e- zagcKC?Bs1fdd{S0MWtT+Wp|y7L4tA%v_V!Dm$}L}j>e|IA{r(G<3Vq`l$G&hsyFd`NDeh;?qR*QB5)FIQ zL!2a(3&y#QKU0deZK$NIWyFUfQm8+Ue>-P3c1aEalNjf@a$^tmCIbYqId)872GKtT z(f?)a_xCmz0|a5W{;~dv!{^vZle?7l>OW&P9tVkz-YtB6QfLf4p!2jI729jC zjoFDOc(P@b5)4pf!TPk_$1=DsN6d1+rj_qa;NbjIm1T-*`w$_PYnyH`9dSEX?uCP5 z(a+|ubQRXPwMsqZ^(`uBn7!q@Eh?jj=(U9hsw}lPS~!>>(X&&n89uZN*IiW>K0bW- zP<$B8$Fri=AMdDPmrc)6DZu=1rBqMi)p^K|sN0j_8#BQY50ST*j5=Re26#M%*SeBS zJTNgGnnn)!soH{hr&{jQy1YmRJA0PDK61{`NWKW)BZK-xY;;EOcYzKJ)36V*NJ%H$!$g)hf43eo zZ0>wUyeA0mAn)vj9Li#}kJ|xVyT5owh;*Z>X1LRO>AJuu_|WM~ zphj{oGlK8oKW)(HK4j_?*a_)Mq4w|7P*XeBF*sPT=^e3&V^4J1871WQliVPnQ}0S%1IFVRHuT%yqu=TfM^{M> zq=9FcNwzZVEBzAM?~rdzGh;UtSP06foo0x&okwqJ0(96n>rw~1=s18{Q@fl}-9;!) 
z$dF!Fwe*elfgN2VcFXlzC~fU$-j|E~={d|rsLRIKTYs_R*_cH-NDBrT1VB+|H}B@p z@a#WQkhP`t?5O>I;t}xWXr(RSYHRJvAze)i{)^rA-YRi~HgRhhBt460a2rGHyA2$bsvmTJG+eh+_=6+BJSNk%^OJ^R>Z~S zmYCOoD{kdoLoYr?mk3_m{2ra01qO3Ig>A{#mfMccObnFq@bn`*8lH3sY zmNb9Zhi4d+Ze?TrOrut6mV%r)H1q77wynha?KElo!{3m6s*|78N2-^iH|b$S3jqbt_LvAf2t7;qh0$FUk9>G;uK1oLwj# z`RHV(IgMn?Wg~CQ<32dmc7APGnD0W&Ko4_BxereMfjNRE((;@W=i=O6A7EVi2+JQ# z^7+^ID~Tr-<+Z4rq;;bvH%}c;EgCZq3aXW+T{e2|glLAP$#+nh(3f6Z`xNH65b!fn zwu|iA!KWfhTJq$byB2I}CN`9&cmqD-3!mJ&YtfpUsdGl~YVV`{c4Tk0XW5B+sGXWz z%j!=8X&I#Rp-@<`3zlCG{TgS;>UYitfnl27Fc=>tGJ} ztT4eG@L6g3p4psCvpXs-5RvsZ+Hc&`aQgNPO0!EW&bW2dEhB#N-JaG46aKt0nrf0+pvWaW-LFwBoeC(pcMdMDE4 zLeWKSXaSr!?Ijco3YUekMk14GQmHVf^|>WMge8jk5{uEh4R#`226nWi%HkTD%cG2l z^Eh-7HwVK7Wy8lC=DJq$kwWGha@`i~Y9nBBd+WOdQf9QR%3IbL09I1Sd$GG@@8J#_ z66qwOY54^~*dDhrxTkq2awN{Mk`Xj=k#d-s9=HHr%bEzBn-TD;uQ+ahP7A7zxRTxB zwm0(>++x0rq%0b^0b+=raNAqJ915^aJ_vIZ=5mQ!Sy;j%X11w@>}NU$u7MaLR@SVR zo)Pqesnxv`5JN~-&*4FGTX>FXks;Vj?&3q6k5>YeIIf)y=PEv2^J^*4L;1dnrtA@m zP2Hhws0Xu-iQ)$3O4mx&BS8Ng5jZK(p)>EZrOqP0Etc_ljVE)DBR#AW;MK<2}FR&Z0cXR*xP%g`>GH`NPMOt(gEVpszf^Ej&xh9K4I6>yOt;>u=$3K$=W%P+$-V994PoB>l`k<<4;qc`*EVGm+LNja>i0LD z1(oJyY|2U&q=2~wlAvY1Tv_{aD_)#-5mOhqCv=&tu9esVQkd)Fc~rRqZU8FHbEj!d z?>2T+fY0#A4=OQ4+%yHu7BfnFCypbzeOrHAWH4BdVO&nm5Sn#fS>rD4J(ar6wjHHS zRbMt~@t44H)|6cxEI0gv<;)^^G%}m>0q_`D@|(U@NBbAsZ7MA#_fSHnWP#&j>l6M-y_3lULd_>;JZfCImQ& z3rNE?;q4ni7jgq)Jfaz=ZA!WT%Rls4^xrB4XSV{TEPu@c3gv5bc?KsJ2QJ00Wp@a` z;N*@!9eczgB1F@e3klo+6{ z&%7dg6?lX430GFQ(drE(D$l$k%!4&eUXLpj3RMo!F}+Wb;z%g+R5(OGcadlN4epD`Oo1toJlo3Gqyds^9>2@_5D zuXDIfstlkBIt~TSXbf~P9I2umoa2nv4#(bCS`S4WTzd(P0*MWN2hkep9nXKf%jz*o zatbbGuZxPdz85*6mOZk^vEg<-}Jhsw>2~jC6)Vg;$6eiU}%PU$4hb_@0v7RE%Kzi-o-b zj%N+v__7v;0V?`Oal@X5Q-2lj0kc1jl7_YNC;zhC0aSF+b68Dm3?mcN;G=(&P|L+S zm|6y3tIgpBq7>cw*=MscS@*3X?X5JV6y1bbGSe|>bM!o?C4ej_Z!7+Xf%l?np2UIlj#dX#qGAfFag*nIu0?4BS6H6gtvpSj(IkKw%Zd zV8ov9A?)pPY-DMVUZNRO)FlF8FK}ht{VCF;H}aZJiLxXd5T4`8*!WX$)*jnC!JC*) zxWWd&Y2Wx$an>FO&nU!1D1%+60g5^b_f z56Hp7Loq?juK}xv*OuYe?F;aGx@SFvwY>HLYJJWY3qqAYR}vU?ypswP;bL=sJDoQVE!8Pyp83 zC86q}7&Nbk&3b@yl0iQ>XO~2{b)?$diq?H_{y3;ai#LK$r!t;vCw4riFrMz{m^!XG z^U_yF=>lI=8!RI&aq$*!<_K5n z`)I@I*ym;FYoCF?(h?xfK|~y(R(S*I+uGhe>Kvai=-`^}=BdTw7zXr#ZQ6!#c$s}j=hTXiD(bDT3%gj)GOj1Wh}Ca zyJ_nOphq(|jHC-<-r01qgcjx#G{R3$I8 zzT@~;1?em03sXd4spv5|QGet$)?%r3AyZ)c-o+hDF!Xu@H7j_6*7IX#zaT7PhpY}} znbF8PlRf?a?#`t8V_P0`+fN6LEi+nFs@ zY%oh}uguIbGf{tUip0l((YqA5zR};i@Q<%S&3cgU}L zlWUP+PEHn+U_cueb)m|rS|s4rsAvY_Hey8`O!bg@Q#-d0(>Ty-bK&9#*Jx^`X8-82 zgCb1z=e`n-^0J`x!1cb{>y|~OTe?zsiMeXKh^!5nuWJjsmZCjAO3ULE<`uUYeN2jzjNGU zc@rww!;N{m+ZOSNxN=WK-13PU5&H1@{Pw-Q!d?9|&;`{4k^6m~QZ|>isI#S1)G?L! 
zUe;ON?)fq_!|R$gF=LE8-T#2qqFM7yRVJLJl9TrhKTs%X)RY`a;8UP#No@77{APq}%V;ik#B z>gA$DnQ%8eGPynSX}w06N%1G<7ZoMpSZ+W5!w}=fT6_)p+4+K*0Sz9#^B!;cpu$4* zetjTHn2I6tW`Fb9WXrOI(3ocM-d~G2Q4=Y5O{zT+8ywDSGm6cVbSU?HkQZhU_@B#Q z&sEsfVuS~ykNvL|ARj#=MSlnidZ;CY0&t}IPp^oZPWiSFUjLpDo_LC(9bM|CsevKs{ z@HMa8JYxtA0_i|`0h|@dFex7N&ZtXE!DW;W;7JK5LDuzTx)rjBi9p(=*p_7oMr29{ zk&Cs7yAfn5wc`W3M3r}$ZjrUXZ)M%KNAlK~?1nn*yw+8>9-K*24fn;6ZwVAW`l=71 zt8y92SFMU?2^2)Qs}=oCsu2%CH9LpwmEt@+(s)`e<~*A}Hy28Bsr2Vc%E^JIMlxn$ zzc&@F5rUCAG}Ih%)X2j6EjBp_2P}0hQHIK7{j5JFv4#`fi)eaq`0F?XW2#4Lok4>} zixtH2<&HmrKe^Gn8^ap?ce1|MHvfx4LCMzuXmWjsgg2wWjBS5?)^Uiyhh5#Poo_?l z{*u6kJtc#S416jxP|TPs+qE3mJel4-mJ5v0JV!^eT(na6ax?OSM(oar@qv|&qp!Xz zM3Jl3hS_1}Nhyd><0m_HYFWjc3!DefCGIh!=rX^MVR2JvpL!k6XsM>4(X5CZPqxIwKur>3F+Gsr0!!McRE2VgT>FajNn$#&V_1lOajezOb(LB)z8QUcd@7 zm%}o*e8uE`CwtbczceQPsAK_C+b?+A*j2oXfB*(dL-t8O>MV@M{+k|#3*J*UONVK0 z`e$(MzzXG)379?gCXS3E%$i1vj#VBu&s3Y&gQ{UJM^F1Ca}&kpg`;-36vqWS@eb-4=5)+snQ`8bEME+{&QHHy6`C0jgi=|DJW1y-n zr1N+s`AaNwGG&1g1{hYqa))q+#r>RB`O0p>R%^~v9>yVLnatAVWb(!OaNnkbui6Ii z2J$M!Fu0{ILB-*S7C8J;hfmK>(?Efmwfn$8Fn7Yspu9ltpqgXwKDaxGrSjfDwr0I+ zv}?A2-HTp#=RSft(OeZ-r{(gaqV2N0YLLbjGCN14E04^<`#a|9OE5d8(O^tT{|wfx zpEHDwW#fM>s>NAciI3ywbS;32Sq!fCYZb2)g16UZTNu@c;%v(@&oyVKejatoCl;L) zh)>Vax~Sjwsp5aZm3NLN5KHk%swv>K^s%rX`WX(HuK>7YHD^hui!IrC#LXA@Gy=Cg z&@D`Ja4Hi#vb4{*a9|$Txz3qI!AH!o-al_^(rlF-800hp92DqbJ#&Moh&QLqb<>Kc z2q-ic4HL2j(CVZRXI$q{MmFmUh+eX~G}Rb5C7LpE*AJ=Js?UlSQ}KQhP72i0pEO0q z&z;VwyXV4TVY6gq!fih6J%qrCs>$sfLk96r<>*dQ1TYev*qn?yWjCRj*T@CB zpe7XWH!1Bgt7TurSHap5eL`Yx>zfYY37z*bz72KGBn%SqKKnF3 zsJfCjnEf7RqPJOG$?{YUiUOC3IHx!m@*uTKdz5s4ABq4vv^rdDhkQefdmJK5~U_-9lDfjhNVeU2`?QtHjyGTLlLd3=+ZLtI_n zSQ%CyO{`UDcAd!6xp+5-XPWaqJ@Cad(Tmsv3uyqGR~vDwlK41;`i%9U+24b8w27pb z8vd}s6Zws#7;@@LgvcdYAW~HDm$|s|vll4{ZX5xU@bPueY6On-<+~xw>F(){#o}7&t3{vL(h^~Trj8;&BWl5X{7@5V%?zfomg>3R8GUwu1!pg^6B5 z;1fpJ5UA@At}KDlB9Tpf$GXOKX8KH}oC|th@=^XO2DxfXI;#CVL=EC^W?&#O11zq) z=;w;5tUr_E!O0>2?`T=JQ)%XG0c*jSu8XYN&ckI}!;NxZ2YuD;BVmHp5w7G|^SAO#t-@ZFM&* zl8@$Zjdt10pW%aDE9!e2t4~v86ftqc{jwP`K3ID@j4uj_C);={@bIkAJ6k>m#E|Hi zeW_-m{Cqz}B)*W8hydi_>V_74Zkb=w*W+P;n3hV+%?EU@Ca0foy==imF%8Y|0xutj zp^vzo8plp(UPXA=5X{rFgkS}>@fy2y8U`yx{ibbRAD$YMnz^6#pl+Gt3* ze4TD}t9t`~D4yhEdE$L_D=h12FJ7CqI+U(6as6!DyI>R7Zl$w^4{RXsNb%zpG}(|&5eJW zFEH-9&x&mH&YIgVnrj|_2rjoIBa@*zD7SH_2G%WaxFr0Gmh^wTI)m_8)4cOsSM#Th zntm=Eb0a6}*LGr=6wq29y>9wLlvq7U>UUFv!1I3?kBTyOrb5^Gw}D8pv({?t{*89a za)4@k8jmE6>dK>Nm|kq2;QY9iORtZQCYL6ID2NS#6CXB;)pg*P`u00O0#1+?MeCX_DT*C-_Ie*XgADp;-$P>FxG}i7uF-n)jmq(m}UC4A5aBK2D_yGAq{W z*FG#;=TnXAAR1Z`aO2)ZQgeGvjVz?b2H39_?Vcnhe?Et%Q$DRR{0c1ABe*4~no&n_ zXdi6icO}yrD3V9{fvQhf=ahwV#6a1^4cC)GHHNPog9t`5kz7)JL1&a2hOM~sg<_s6Sm?Aw;AQ;1sumoodH-v#0% zBa(O7b#Z}&*~CM?;UgCPdf(G^@maldm3F6o^1SL!86+3PK7bwW5<$+bj8=GOQ-@e4 z&1?7`8aerG-QfyrJL!@AM^QoG3>yxsRpE2014JUR6kQ5yPT%NVLfAZl zMpA3e-OJw7#YN%p@3yM4j>pGJ^74oCCfgZ?sQUMUYYiZxtOhUNz72JcD#P7)y+-6$ z?eZyEY0bg$XZ=Mqot#_S-lOLdUPg!8?7l67mNv_?9|p(IH!{dCgb*6f?2XBI!J}1Yyj?Qz-=%E-RT@Sb(nk}6x$i-Cpn_c7z|l-HV@vu z?@<$u_xNKrsfv^C-H`(Nz1N**siD8OG<8isVmp(2X4ESNNq?Vvm{3bRhLy1BdcUc0 zJP)ri*=Tr{K!t8#I=?$!4y4wr)UP2NSj)*h7N;5&z#MB6&n#FqwaPfTDNy7Klr(&( z)!52TemwV5YRZW5XKwK(psCE}NzdtMb zTw773z+UALiI~$#3`LYEiY`CF%R|7k0I$|hks0|Gb736q_N&nMhPk+CG%F6iOlU5$ z(mJ3+VF9faEl?^D>CaXk*Nm*_wvM|}2Y^#^2gP_u>br2K=d*}jCcs%_dCxwBmEKH8s4l$7sP@@sFSuYp)r#s|%9yv`rmt~P#+3OI;WRw+Z$qB%C5j=Y zoI%#ncuB*3e>}dc=JRv6AN}L+AG>jS09&aFU@KMpy(>A5|6{`XC&&D3Ld}Oaj=F2a zv1c6iJ4Sjso`)0oNH&{wh-s?SRv~sQ>k?}Hd0-_nU@e`Dq%4)jd$Lk}I{GMiV-%MN3AhXOh5`!dJY$(s&*uJq%I%CB9L^7Ps>&ffeQ z!>t75;TuhCP87Pj1;_zmk)-Wf2Rm8tV&z1@STyzF-jhSM?=q&lO{5J72#&+?MCv3I 
zPkO}0VwDhNLdIF!aIv(j&0?K+r^_iT^+9k7xR}?z1nZX@B3QDO0e=TVTy6+mZXpb| zILe4=IZpN#u`Tw4+OK>N^llfjZc)(ImRsbLXOn(#={-^AZ9$li1qyE)kw>jR?KGeO zzKS)AI&|R|1T%H+3F&RF%Sx0ATld+5ZTAYO>Ew7BifcsqGR(QY^;KFTpfB3$EYT=T zzAs~iH(IvMYHMUtms=YZyv@?JlB5HZ+R&^BTQ7Y^xHWK+{+EdpQRzZa_Q!-cxU38; zL7F*&EeDkek=oE(=n*rv6=`TGo*IKj5ge6DU3DR^yFhzO5Ba#7_s{-TK0;*TUn(&o z^-Ssv94au7@njQNK}=QdNe7=SbXFeNs;2zeoQ1KWE*3Me3ZlANMJ24lG?>?a&)BCu4WqR)ikYMdyG8; z)GMS{h=Y2i!l+iZ7jw0uI*)o~fLa@=UTJhyJ3y_CsLrLeS-kl9=Lb}+U2DVGz4s2J zTBkJpPWPeRX)gV(qTHdTuX&Lt{NRNH>leOO5x)0|*RI@h+G{mD4L0bICpPF{%w5NV zH|VHH|N6VctjFI1)86-7LVX z|2E5NAF#!|_3p0i|9GfqcIbg^{}cYp@6t~PXha?;t46!7Z8=9FE|$Z~r&QkXyNCbP zePGR1TmN48?uzR_`gKGpt*8Cr zmL~*U69rrc^ZjExN*?R{p-MWKw>)ae%F|E(V|LkXm#xqE^BuKEe9Y+Bo#%;TcYpo2 zUF+>VcFzt_U)puqetLOM2q_2Od2!pv)#bAfy65HMQ(KJw4hI1lGn)vduAKdr7wPigwo_+7H&OYfwpo^C%53eodqrDAHiF!ttOl9GS8Yb^T2iN3|IKGPMwlLI7^BvRky!XnJ?xQ@_vXcZ}O!gNl z(k;EApouQNpKj{`UU5MDY;@|H$>_3@4v$|?lzp6p$sMasVoBDXi8~18k$W|h(NSeh z`*>_qq<K*F_vhE)&S_vb=>6`=>+Q6nq$1<7)3q^q<6e60+i7>| zHDMd9J@kNh{j%`W&?{F}o_poo$<<$6aOC&J_TqA} zy$G=0MgG|6*-T{i<6aT0&n)@1k;m=v+CM{c7o1Ui!+vkyc^Q+!fAZ_6)GR;w>Z5A6 z-dwijyDvZ8d8L!{#$kKD_WP?^=U#fxDR;d8e)Biq^=u20gowUdTKRHWgqPWH?b?RM z56UX-c8@~8$nCQh%yJ@6`3xt7Y6(sDf&K7NLfp9tpiDDDar)Zm|kygu9ENP ze-7LJ$rnpMn^e_y<>@O9SpVSHA7H3)z`Wr|#}!~*thWcFk-TLWSQg8YN~v1Ryh`o{ z8_L~2b`dMjPw!PX?e(?XDK(i3@U>42@yhJBQXtHaJLK3(7?1c+; z?C!W;Xc=y~m=h~4K~4VGaWv+&KirAlBlWl!KEHd;?{3D@#@@UlKBMcHa&?1%j*txubkxVnLK42&g)wxdP272P@l!o6qe3VO3e>V^3M$q^! z6>^SACz3jc#2H<69#6REny&EdQ_q{1`lFU(9((?Y%10R!ovJwNA@WFd<>j?O&4$hTf;vBKTcOPFAGkNRBJrY?W#~kHOCr_CoWib+m`ozWrwnkYvVy(E* zm+pOGw9FqbmE~E{-9BtWdFO$a+@*si^FX(uq zT^twmmtOpEp6P-_NM6q(Mabt6u1$+5|Uc5ouyYbki*O*s&sBaq;ku7>gKL>M#05471MMv3OX zjvK{)nh^`6lw9Y*j{8uPX5UeQO6)u@-3r~5sG-YoMx`JCPMtSW9)0T&hs7bJZGgQ0 z+}Wf`J8d?)XmemRStfP~#K(GZ%qwwr>G!XwY>QErXhl(qe5*0STOIu>ZKt>b-~QBT zR0Qpi4!X~wD6(LKoEpR!`Nr%uxEVP(n_F;5)<_mDBl591H!ho@QXPINYV+ zIJC~lTbw$TbqWgc&H}min&a$U&)mFDUzy#r)#NDDFX<59?5}&I2&6uAtk>w1*-mBXk&J;+F13NLx}mk_tJ-5dI|^d42|#1d0NK6>1ac8q#MY zl@RSKxXi;M>GZK(Ku@}OQs(rskWf6Dw5a+zi@|LX-AdHN1l?!&`%Hhh{hwoZx#-eHM@8BSAAQ;$&0t)?lwOf1u4gPpW*Q%aEs=1=r1I=9-}MbhlJU^%S6b)ZK=HSC{B|9ln{X zwV5YIR#zowy4uTg+RH`Xi_y22ueDTQX-Vbk?d7^~LIlxp3Nh)Q-q@A2pR3Wnt@p9hzW*%Cv9y%o+g1qYq_=i2 z?(&(GvLyEP{@L*9-W7WLm7#sp_o6a~EI(}R`uC_;jI%jTtHj*5(Q6*AVkYdF#>~%m z&HQ}Jd?yMaJWR)*$@oLin7b#r9-ADf06owu{^71ClBOWjfQ9%^w%|dE50NfM)@+mS z-O{Q5=&GH792x3Yw$QZEK1$}v-9zM=rFsH2DLn-AV+x~SiiE9frot=w{ja~@pFDY^ zU*W3%QlF0BqZrBaZs}4{S_blo3~lW&SBE@Ig|sQSc0h?jjTZG$&vo9`=_ury(?R3i zV42N9<6ZBXpYD-1=e|zq88`;k|k_sjpdRC|6+1%b7Hu4%Qb} z2^+LEj6B+?kYc8sy3~xUAXU~0B(BVH5(+-XjU3wP~Vq2 zZ!L9Nkx=Q_Y;x5H<3p}3+nCZg(hYtXMcwe+d*%SO24jYd(F%lDYw5{3I{4|`R5W@5 zKRO^0Ww}OAj@<<0gMFyqhV$165#VzBm6m~b?X{4FeJ)H->D^KSRdYyJMBOHJ%Nu&yhjWTzGTyeK({%$a9H~Q;nQe zdU>vIH=MG>uuRN2f)|i z8)nx&{;C}boz%I~MF8^XL{vr3{%^Wvum0BnoVwl_HNWuQZZ0qJV#;X6mio;SZrqz| zgPe=ewntoj1`DBYCR@FVOUhIAc&u{%{FLfd4!iCABZ!Zf<0UZ?9X^7`UgE$uG9(>G z+sNd@yy1Bm4wY=bwtz|`iY z#wxxqgmZ;HC;YN~EwaB&)>yTU&0AE4t}KKeJEin4y?T3hY;N7DiV~&vklqpS@({S< z)XYv`$}6g(b4g8+cha(TWnmLd+rd^Iy0q<{{W_GHKBHoKhK*#sQ_T0k!L;F~@Nc2? 
z8~q#p>x6zf8Otiu>pIWue;stLH}DmYP9z6jk9oGLJQYUbgps+Btu60KtnSw?b_pU< z&2ZY=|60}AFr5qczXo}VJUI9y8jxqi=Gk=rVKp^f-ZSfX_W{Og0h8C8>3rLJ79Q&i zlUSj8{b~uSN6v*ZrcT-QGjv&Je_)e!0zG=`^5?A`Y`P;==<<9dnISSK_1jkk?V}>^ zoBU-9kdKrc>)k;q9fo@sCG$)kFV8H4+6>xfNJ++T3?tQPvm^A5X0&pkcQ5E|F?cUx zTZ^NI8dhg@8sN8E9Nb+_u%<)m*#}$3Zf84J#_sTyu>qcFV78YUh%_FHEUqMYWlgjS zwcUhYwet)+Ev5N|bHr z94eX{9m+aHuPrU$W{Usa6aPCa(rS)wYrwr#UDC{qiqazQ7X5H6`vUxe`VmRX5na9h z{axtl?=r2w{<;Tye`A46A0t!8IM6=p_lwl+BE9aAHiLnJXETZ)bfUQD*Maqd@}<1PqT%-y_&ay?qVe8gs8dekl#hxf!bsxH}t`Kl3uYBRhmtI zIVxItA=*7lG_?hv%tyONkk(t~USUZ@U8{{&23w~PL}5X5Yn;X{shu{lYAR0$x<3VF z^$LBII-X36)uWnefQSCt5_)YWX+-UMP1%zFo+3yK>*X6ED-RzizcFX6v}c8uy2R~# zct3huP#C47I0rwEE~5e3WNER}D@+*D-QdMsaY= zxF`Az0GLk{A+?Xmo4k{mD=tb@o-cbMc)m21i9)rF7Q zW>&jl!+@$4)p_Dx4a^^)QyPB5E)RQ6oK(ETActb9xPrWNX!+IS(}N}5yL8MG^Gp6! zw#QWuKQQGFOP>P5pH6w`6K}NRJyr{{#JnEW(=6UOSBp1|_cYw^lYQ1xw~c-|+<5bf zn=d{1&9pwzHbl)^dhE*7MVlTff9%0mZ&-HtMbDk5@(`}E!oTG*aUmpRgdX+=r?UAD z)!*0OjtF7$h5Y}!!wYW;-Pt~K-2=b8@rvC1JHkBec#*dkLTK}L4hkW3?INHXg%i(R zim`(@aJjmjsK|U zlxz-eO0Jerq`*p613n<{3YxJp7Deb;5_9 z7A(XymAoHdEF*EfH-n5&PO|l#`3f-2ce3i4{S$3|*p@j6?Z^+T5%3EiTyF5Gi?bEL zi1=&>u2cIYZQ;Xw#LAviDl5KSebA-PkNx<$t9qH9o6Nhu`^*8_K%F{+v)SUIlEw5*G=AOc2+_}`PRVxo)cxJ^LXSCG* z?`6OI^bY11cTNSE@P=yWZ{UfYA83Mq_O0iH{@j1I-SOoKPgmT2&)S2N7cYAJE$lDm zp*&fJep!2rdPye|>@U4cE^1NwOTWx2HO|C+4&iE?5J--W!izfS?&ywUeB44yN4v@5 zcU55TRqJkt+jK*hbm^1DyVALKHER|+boUO|S=_@{hShZSWp0u}hcHSla%=L?hS))_ zqGZ7H<#C00o_4u5zA(#;1kwh0@89`EEQ!~%#i38>jsy`Wh&Xy8_HiQb4MAaFzx)DI zsCA1C3-83u?hMptP1kN1yXy>LeiM4|UBDq(%JDV>u{WJ>s@&*+p6zklRwh*44-JvpK2TTG1_ygdLG%OJ@;tOz1s5{?fDe#d4p@)SajE( zQ|~_Wh^kpXo`2rAFFNsjh@-Shu;3JkJ~RB#Gmoff{qfvKe$ajU@}z6aYZ*nQ;lH?{ z$V8)nm54ZHUZi47!sY>_2y(I+-R5Dsh3z+>oBO4f@=KYDak>;2n7t%6#efwV@VMoj zC1t9JuL5``Yh1OEYi}uwRTU^(l=_)o3!V%wC8ie z!=8Z8ll=LITK$)4&v%CxZ;;OqY483j+_FtR|2nMFpV7*^sC3ip6|KxiS{Y44%mCMW zqL!^k(_!&X(gERm*;>*_E&T}Xd7Nq?>By4*b)K`shdIyF!_L?wRVamgQdKz9d7h`G zH<>7nL6>UK$@>^V(lu zz5VQ~`17Rj<%>=}uAt<*{5dN;d9PbXo&4%-{#+1F9{*z2Ay@KgHKHWkx#;ARI#XBh zXAJ=(8noy0wdXC`^Bo+(k$svxv`%G?{EqhgJ@~W!VO8*_jE#PlE!NBwPO+FLrLtvc z=-BB9uSJ)7qphBj%QhCd5T&5}FOgM)@%)w?Z$4q5~{Cfd^%I7(` z3|wqC^QRV`GZ|g@f$@4PFV(^z?q@a{!+jTxL2WRQ3DPR8zlZ;=)GmC(q5&te)R(19 z2J>1cAY5#zF$imsG0+eJ8ZU;xZ9p#XKRD6NMjTA#PtE9tHy`U+hG{uGwH+~3#Gmrm z6#g_7@im2)nc|C~{$nhd68u~EZeaU63!gchUWz#Et%y0mXD-K98)7k^{{TfzMrWlt z7!w)F7IADBA)SGn#b%_>1U?C$K!b(+C!7CFL5vZ;nh~EZ{3j3bnTzxb_?&_>zK!EB zo0lu#Kk!BZpM=jM#MLsyD9|I!=ysgRgwJN=jZh{BsW+R~+RDGRAkK@>&1+!-pAJ4@ zmczCMW^(~ztQGN-jrc6&m;-#a@LXDda?sHxeFHvPh`7#A;8VkAE3V7HJzF`(3K64I z1byb?%G8W&dMF{3#)ICZx`U)VVA0nUFf;i_e7A8DD%Rq|W%`Ga+@x51+e})HxLJ3G(1F zJdgxFm?yNC%|ee!A@?Su{Im>tD$w&3fyHdz=P4M*BYQ~T^Q$7ogS1qDbPRj|W<9Lw z*+|X6584G!0BtdG+iZNJ9l66)

=UIRENP@w7hYiPdEauHXgy`&7hGv$G1did|4M zmdnNc4r$kOkcWO1sB$=dXNQE<;m08-YtN5r&pW~kx5?-KRYzWjJgp8?4*8Sz{Ji%3 zl2*^_+VfjlnZIf2?`!EF%3FGj9lDn)bLfGp%%SSWkoY7$Y+AY`$#Xw>Ca0YSCo#C} z1ZaiZnwhx8a4w16dUx0Me>_w)JM_S|{|W!)cj>3|wEe_!cT!396Hk_KER;OK|0f`m zobM`)(gi^BHcFB_vD2#z2|}*uKamtXMMY3?B==GZW*)xBX{5MjTNr^y4(IfV$t>uGbh|})0k6l%Ig^K z%AbGRBkx__xqR--Yev5OX2z4l-gM>HKY#V`vFp3aLJyXIa?p>4{q-4F{_Kz5o6z*& z1+`tLANp{a=8ChGRZ^t9ACZbKkq}gWJA$#eTgTT={b@IQF`pZyhl6 z&8r`~eUEoO9hvRI5vS^u>Pf9*HeEDz&V&b=ZanRc2R~>r>)7XP{(b!aoAbLDuNpq@ zr{~+&?!m zu-xiER~V}qUG%Q@Ep3*()u=mKSY+K5$cjz54{;CW0UMRr-fYC%Y|)D{o?40TK|4)|wo zzgOXv!4P754zFzy)&+H_Fs`+TLFnB~>^=0d(kUllor$g0I+Qij8S{|1pxB7>lW4sj zQvsejUJj@rQ?wL9qrN7roq9ug!AZ$Y1)7Ytzh5pxJPwO2lmgE2R4n*@DXN&T!Mr&X}-TeqTC6f+% z^u$)vyFc~@ z#WR<7;KbyNM!Lc36K^PJqI0TDIpoa&43k&${ZgY-mkddv!{gTz=?H5d2PQY}C6;9E z(vemVhPLd)U83$%Yajk&x}IsUj5)0C6IcgQDUB_q^fGd(4fgIPByLC986H^e&ddyF zF-bjKm>YNQZSP-m-uStn-0;@DNB;Vp_qjld*RkO$GF-===zATzp10GClqxXjxqhAI zd;Pjcyen}ZblrOV*7o0i@cW!_->uiBzw-P&Z#~6!+w0cJJT>KOg^h=9yYrqok6eA- z)PKG7llLXhhcPiPyO_bb-Hv`>{8kH2k>+@C0cT_G8x^BP zPe4jFTVPy>_sfHEhEeEo+G*p^_F>QEu;FECn-d0cD`NKS&6}TlE_UZOa(i52UbYyh zhaaaj{2uE&9^U@D$H#^@eI$l}H=Pl$^Ms7_x99A6!9KJ8^Mgyrb|!az0w(J(Q66sR zQc*jl+(CiUx^8Cpt_j%6jrd_=E0@^HCAM;jtz2R&m)Ob;@vR*B<0h}lA!a&TlU`Oy zsA@Jgd$7)QXg;gy_MKcMY<0D%#nZCcYr=H}MA8;V*jp_&K_L`ErJqTtZ#jw+?E)Te zzi5LeVW&G_v>k=*9T}2Mb(JE4S1%=fK1-hJ0JNi%Rt+p?AT z9bi8RePvQ_qr>4i!!tFFW`zN>j9{;CKr>u*Wf(B3?VgPGMmcg5W(s)Z-L%-^@fNm= z-c4_{w%Kh)oK>Pk0l1*;HF!Iq4Ji2!tuSyYdjygPMRHnWz@xU}`!LgcD!>C&vVxHd zNagpIgnl!}j>UA}nlc6UO^1hmW1(YP)<_bK?$@{(=V%d!KO5te!QT+_qexJw*p={p zh;MvDX#7Jpm~FbFbxcFbB){w{gyKikjrtu1U#62M~qOJql@JW}9P{Zp^%R zc#fXt;+E#TYtZJbiU9+)^t!mnQjs2OOl&*XO79nBErN4;VWXUZ!>04fbYqQacX1Xs z4~?~LFCSJkKwQdf#q_|f)Q~3K!OBs}>nD|-|IAI*c;YxlE>_#cIQLn4t=2(u6xEG1 zwg%_MYM~OO`A>D4Q1#$9Qg?diL%IeBtvLz?NqZ6sIo@jIdJWEQqgy}BdCLalPf=#7 zp+FqS8fAkw-M2@w_tEr-%I?}8NqF@UE)yI~mv?AYSckME{peF1GD!yuP+FcRC}0_y zED~KT6N*8SF9@~{(Lcq#^>$hjTIsF3#gYEn1U?B?+l;1YJJi>9T;0{0Z|0^M+~rj~DGNN^u9#Ea{YrD?p})Ly?8$d6Kj~bCo@XHsl}0mhpAT~X zWJX@qLABH^)*s1f%LNUVqP)_98j}|MU-@dLk7V;LIJ@5wJz`!xu6k$HKati3U>pFf%?w@cZhM(!qZZ7=Gz}W7gj{ z=YcC1{`Ift7ia#Skq;bj8w^_B!T>F=t|0^hTZc;tf)O{(t?ZWY>TK2Af@7MI96H!u ztl+sT5$S!*^>IAhtXmhW(iLgUX~;*b%h;oj4xoX9ScBL;h-ShfU;Zze$-z&|&Fsww z73jH7`azKYJG$wcZW>T8MD=ns+y-HTp#skhP=U8<#9GElRS+s00;=ugSNZP;$3A`F z{c|(smOk{Kt}mV#cIpyF$1OZhIHM{Rhds-Ya{%u&on7|d8P#_p#6JKo-HP1J-FAGu zCAytiRY&Y&935PJ+i6;o&>p)QB{X1B68lV7jD#R~6k#08Q(*zGLY@kU90C&nPI!}k z+3~mkclG_x%=_UbMTal===-N%!P7>GygM13kjj8>KY`W`}zO-zkle#mG|9PHR<9F8C`$bdkpZww?!V{ zDa^;Q0@aQ|;tW|EtYs9CE)TctC-8DvY=z!~VZ4XQ$2fvan-H@=6l=vTitIr<2C4O) z=4W7=6g*0&FC|pF>n!%z15hBz`WV}O?<@-D)IQ}G0B;$QY?yo95)6Sn@#tFz|dD4R^hmX1XR2T2)`Ng6P zkuS7SXw=^(u_VfR`;uj}XrGp{4(*d2U}O$?;AC$<9b2@aeDry?L(2yZ6Kuq8b5jXDZ~mI~c=w3kUBMZz*dn_1W{MH@y787mru& zd2c9r9-FVopt`Kxi?(Vb;A=KY458%v@19)K^pr9q;oF1U)1pI91lnr2Lir9%ci{o0Rxs~;ljQo-rWegz7y!PwV| z0>B|$Y{?glf1a>(A@ld3+hkpLB}@<#CWr|W#DocA!UQp4f;hxY5b1zf9W9$if6b1G z1DODt`$(#-OYEPTak+<@Tf`j&X|hO1*`RRfb`H!}34PNzrr#8UZA6Q)-t-Arp|0hY zJ()a$maR9VIMOZ*FxF|cpDd2JGem#L;6>FSl~wIn#|j5J)4)yL*aG@#;_2;dEmH@> zz05RmSmzj2Dol_F(S`33_B-%6z8!@YX;8EV$7?ksQ@oQ61A%5?N-~axj6*TmZy}^$ z$sRE}oj6t@9Q}3&gPBe}RgQ|PlXYsXO=Et>Tq}`Ylwkr)F#)uo zU{fAhrXeNCQ{oVg3e|n3UmYt7n8;aaKsf z0Y5V(IW{&^+m#|Xk^Z)t3z-_9?-Fxuno&~GcL&o=hXF5Z85y;##JM1Eb8{qc8Vzo<_tlK}YrbwXPf8kfLOjZ;P;v^0u{98_!f6vs9~oO6QenVjic9 zH{=FNLXgSlV3$zKxBHZ6M}>Wwz#0hh4QE#&N?UPZ%g z4lTgaIG9?#G8Yy0|TPK6OaiuGb{ zU{q_T*#-?Se$=Z&5)B2%cZ_VU51hXHOW`^dDtLmSRPU&@CU^{JCGSR@1lR3$EYhDA z$c(2C=htdRSGR*(i*CmY^b@>Rn)nB&>qObv7P&Xx`zF6VoH9Ij%X(EpK8n<*5Szh5 
zqCaMfW-sI4vTBR$hL$_LLLWn-;CiP-8sGD02z>S-&juK?SfNWUR&{1NXQ9#sM30L* zD!!S95h_Aho4u}HLE(z4`O|B?_8w4f2gc9L z%Nb(W%Q0qW|M2IBC+K0`AmT80$l)+{8uY^x^e}%AahNy6aF{y)I`qR+26BhN!(slA z!(r@n=!YlLAy;C=&+NEhtzd{@Ezh7lwbzP9G9>xQe}9xX?;kAPLU3>YJ9P`u<0LxW zLVsxGtE*dxgf;uV^}lkpm1e!^CLsN1R!uH{1X{fn%9=#TzkS8E^!Zr>0a=7J#T-wAR5 zC^Pn*WsojoS6WK+2mgLM_x|ZShLXl(Fzp-Sf!TpuK*Ubp@zrdisE#ckm z?|dtzAdSsn>TKd^p%IO~CjQnLexny}ZB2ajHG?a0_fKcDGqpDH)!Xc9Eatf8>CV;O z#8-c_>v1^{njEMWC%$@|U5(4!0nz0^wK?(C=j?i14umfIuhogKUT0ThFV97Z{nPFw zZn_UeJxuJ_`fu06#1C`P>S5wdnO|)^%s}y<(8KI(%nsZ-C838IntB+&mHn%%6Y=lU zVCqDo!(>7ylF*4HbRr3zNJ1x)(1|2;B41UV2>e1>+HaWmT=X5Yl4&Ruf$H5X^to-@ zZFqA?x~1GwMqw5}mY#)mssPd?FUgktyfZaxH7peSHu4)>?7VGjO^%2DP8xG-3;b*M zgU27D%m}!ZBheweI)jW&aQn69Eve)>Wuv5|^K@qqNnU4wrflKFfeyjbyq>0dICvgn zpzcV1ZS~&j?{%^uKWFy0q_hv-jP)SqdcS3;Y~{<>i)~vFbe$rW37SycWf}64X5?1A z2ya?cP{Jwu8L7@RgD<*m=!%)!A97G@rahfm zbELgm^sTrllJ=8d9`(n=R&ej_KCGf2 zwMIuH174u?FkfCBK{}TX^jSEmA3I`QLmt_agIhuy*uBvHQNZANx{H+pf_ood3aCI{ zPnsot#tt4WM7rtYrUftU>?}So*#79^$02qv-Vi-k*jfBws8WI-Ms`0QVWSB@AB}do zbsU3eJ`{(8Vw&iYHP&JK6itTktzug&z!*Ug^9Q73z#m1D0bKSu&r*u-5cpM?G9!2Z z##f1bu8fXDyB(MX?la;}gML>!Bi1ftSp9rm<)$(H#7p6zoArqOKAf*sh!UwHzg?XE z`g^^Wft4#+4~}UM&Lj$9r=r71UkXq^U~?5a3R_%@Z=AqXY!Gimm0qSI*~&eR8?J#t z)fiRG8HiLvajr?mhuudH;+V!-pFRGOW}pW$!maNfK!gsgV=Se5QtOya7fqcr;en9BZ zzxQb43Ay~e?IN#@K{VzXgqCV)z_?-*j=P&t440K+hKE=YD`ZnN7?K1W!}#wvv@nM2 z;m_$>!ueV}+v~yyIq5fOWquKEl3``pUEzh>Hn0C{J9AdmZ;{FS`>^)?eKqxNJ!?-CxjO{1y9y$TpzA- zp3f}#)B)ofk(k1tZwt?t9ZPyhEAzV6_}@z&m7Prbr$|iWP4A(~Bp)Ci_y{J46jGAs zmHhn)_&mv*o5u5pf3xHbnK1ka)vMv-NWvh6GsIr=c>Z>L~TSz_zI)_7Wt z3>Kz#7NdZY6sD4mSU_!#IG+F6R{$_#4gC3z>e-0XOWt;#&xJoffImMJDHp+?Kd8=+ zcuBoF>I?qo;Om))&pC`rAn61=98zEMTZv-$q%1$|#nTsG@$Y#f&zXAkRfmkd{49p? zxl>y9nYQ7RmDiP3Zn?5_$HtV`{>IbK#=%{^dbKmdow@#{`@?)cFvUidykdM$a-Isd z54-x3-lOOI>w%w+*-|@v@jE-WC%MqOCS^p`ii;-Axv(h*Zv`D5=9hc=j;l3vnqpfyuyA1^o8} z{w}9P9dpB=tN(CRyKDH*Ea`|GH&7UUvw48|;fS$p^n#XZOs>>`E5-P52tYNrQcOQyDs~n+6 zefIb<|1)OvlPl&vKBwk{AGBQZqls^y)kp(@c`y+9fde7+Of@L!!1G$3zs`;5Jt`bT zx8VxahihUhYqz4jH)>=Ps@-)KyK0CQY5QA{=C0d)CeSBV$9VK(jg~5Pq2BUJ|EJQF z_75b_Zj`8jy{>b#m?l1?tZ9~2wUD&gyd#$`W}V(%x~L~dz&8n5TxI}l1qhqBuX#>i?KQ=(4_-LrtdUf1Qp7|i zEqBsfxiCH%uCzR+_CDs^m94nvtUt5w?~H{SfIAe&g%8;JAY0fUxv{gh?$3YE`G04= z_uaI=&->!?y4vrry6OGji8XY8!MdyYI=L_+>!!7|0g>4)*3S;127vXmkvgWc^x?2} z{=`9NH)s0GaHzap?_kojkxan!gVMD|ON|CexZleXE29lX_IfmAy}%`u*G3+<$7}x# z&0TOt@eTXEeJ2WMFHj!m53mQS#tLBHS}J(hI^0S1I!H>xGs`yQD(NmUY^1O$)km6% zQ1@mkXrDSoTke+XQ1DSSq4rnFhD60bat8uwNh&!Uc8W9kx&vxb$p(?WQI;>gN)zOg z#4j9gF2Z2UH5fz~K(3AwWQ8KeDPuZ20+lKdSa=bJb0_6l4IZZfN=S;+mya2IjHgE| z6+$>x;&PwLf3omBt;g6Iz8y$8{McLacKrKmHRs)Z%~8*1cQzcyGe%P$gIx-2+$%AV zay^DsZopuuvv4EdZAy*!v6Y{u%JUY-UVrm%t~_l-Ma@}%-12&6XYMITY#gAJZ-%Zs zcjIsWRDSEjx75FJ%a5i^adJY%-@J9fJ+G8*zwrM~tE{|cW(nocxH&@!pS}3+D63*q zrdQznXm?noOs^5?GAt_-W~p_!q_&IWR#GLFW38l2pt3~bWvD`t{6eZdOcK}4rGXcb zkq3m(RMS0K98vpj*$e;a^Z;ALjn1?+)h(oYWs9_zU64P7FbWBs9 z5#bK!IbBQ7(w=8%&*j>4llHt=dp<6_J}jS`we(d=CBR|fpMNM_0Q+ZnZ~2(?web0} zf}|q#e$p&Wk5C7Hj^U%6v?<)^q&}*ePI^yuE$PE>zI;v|ty)SxMAe>rcvw9jp&&~> zS}Oys`s@Lc7eWERL-Nzw8~;(yDcL}|xmrSz>QKs5Eq%I{UaURO(4I@eEzWaTfu2$p z9_>8O*6OU)$}dnTNNLjEU8w3Gq3JtDOw^u#f&V$v3-m44Juv?*x@*s=cb|Dg)vO=S zKkwTYop}BeB7Is;&k4_U-Cle5trI`|)5ga>>lW$L_dB@ngU8lXm0mXg_*;(XyzNbq zKI6>$?%urD;T3Z?UcF-+o0XbN>3i{gbjtXx?Z5rt_c`IdTdzxh<@tNwdWzvn1$nqq zO~d%@6j#2IH6j}#mSs*ti1pruvRX)3MZqx95?jrWLW-fUPj8`hg0!_GaouP^3ir(?54qSyFMse&}=EM=u=o}PKL8$`1k^k$2$dFGj|TVrl^JT_b8G`m`C1*l>A z#+%+In%<^2op#lypMLR0%uR>UQkqC`0qzPrQBmG2&KT)jDoyMdy?NJerS>%`2Rhks||;8wC@kVI;6^InI<0{huUOoe0PGEN8paUK7u 
z#+_~{PpihatNHUnt+rC04?7qz6A0n%4{Dc?Ht=`rc?sAJH=yRDikB(pwJhK*!SgI^ zZvnzm%i(OqHmU?f1P{Sqimr>G_oY0g6l;T=dAQev{8}tsjwQ=^oz+;fikGOx5(G22 zJ%W}cjHQ?Yn5gF^n|NM@Xcw+^HXzIm=q5?Fun=LJ%}W#9(C0d=r2)&&oFQKaxZB=5OCbs8^G2gfVu`ddD84h+Tc+2HZx`dJ7sKBPu z8SS{#U_Z(cf3-+)W&E9LY;zI9S;}*P&g$`LkuW@3fHf0c)Ur9ewnbPM)RE!9Qi~Xb z-p$0`*P`}BmXol~#MWvZ%9`nnc}TGQMx38S>-Crl@YL~gKn0niB^c9rI5M@OH-u@k z?^W228(XzFvNZ^;EaOkRayHF!YBhO^P6IS%i*Oz(I$3~_*CXr=VwP2lF@_QnKBCB3 zV%bykw^GpJO#Y0WQ*b3w`>$svnmC!*lZkEHHYd)+wr$(Cv18k|v11!M#>w|por`n+ z=i*eYs(x#&zUYgtTGgwc_xVXRz?flaNEL-nLWEjnoqjio(%1^!TKLDq{G-Uf4!VzY zfF~*o`|q(2-a`Yh_aDcH?iD|{L5|tAqZ#@U>*VZY$M5De3a>h?AMIHr^5y_F1~&=t zXv^Dy?*YK|iN6y2@a|q*B2c6b*6CY0s zF6VdCng*oVBr`@stb3M|Q9PdpLN;butj6ISB97m$9L?BC@F}Lh>DLQ*N>dn_zOE7h zNvoTGZ1D>0x#6i}+$iMd%W`wYH;AiW&t+!bG){OB#*#~CdsZ@;N>@R7zXeOj8$CH@ zwHGoR_zEm$U+$2X!*@qS8@EcI`;>{6)T!;qE8CMCq)K#?8$F0O8R0nKWeAxik_3Jf zY>-O|bB&cx2e6Hm!#gAjW!oi_bna2VieP?G`jgoZQBBc{u2RIE(<<>;B$!#!}1yg#Jm=?sJiDB+EbR7ahT-SFyc8O^`mDvpcuC0n(*~ zdEF8mW(t#N@xBqlS5PG%p3E~0XKH(dfcc7tIjx0#C^dl5V14)F7IU%^lpA%u%Z<7_ zAivb&$o(0?!0RsO(YU7fL&$Ki-k3*Vu5mwm({v9QQ8xD`)oohHd^9F3Qw3071Mq@G z0ZBOX>rt{;@wn~qml^sy*KLWwixINhG3{oH6oaWaj$KnhZ7e2^&Sss14(WeCfN}Sj z%Dko82hxOwX``2OwpNIR1qrqw2dZ_M0F}01h8m?t^#zTLd#TJ3%)?R=58ldF(SiAT zDpJv`2rxu1t_vJSPHf-Y;eQ1%iRgw(wY;LeSqB~U@!cSQKK{8w;z+4TWjs+)UpyPf zqer(d80ahP@jiguCA!rdTQz8g1aLc9KI-+zdO4RCjgu59sm(c5u)Zc`mRM|$OmvFb z=Aiv7*q>z0#^~fFG(94Sa3)Mr`qp^rBA#r%J-Zst_hq}qFMFL5 zrWEJ)j>lHz>+U_rXWRXdiH)1#TfdhhH=LU*ccz~z_N}WcgpzCzbqWg@LU8wo;;tUM z9epM{rYtTT8A31$RKL)No;r==OH&I5$EUd#*uuZ9sEr&13vupaCsjx@9-}*p+@>vU z!Q3S;RVhrfanh7W+Pn{KLU3reQp=ZE!c;Sha@3yzKHOGUWQ(PNMNaBnn~F^^gx~ZV zVn2QPEA^IMOFVJc&U6fDs+ReJ(6F9j)m;o$AW1q*3IZW>fuN+dA2>hJ}B{uJ_RP!lKP~NtNIdqbK0vVzn>z808qaKz;ogtD< z-t*3CRw9#=et0lAB@>RQ^{nD=+{%8BrIZ~;7krFq|B}btu=ab$rq&lDWtUM$ZbItE z2a){=qXuLkF_!QHh(J(^Y;;mCR!g8X<6Dn)Tq1me(RHiBRkRMpA6eb&qU^3DemL{T59{jh&|3hh{F5Is0-Y}=W^fF;2Q;xqBjwUm#fH0MtG$Kx(5mt$HGiwCHrXL_r zL?pC@B^;8bky`TXta`8|uBF(hS&Ul;1Z;9qo9DFE3MFA#R0PYpQHg6It9g+Fe;@jw|9Mn>U^FaaQkvfb0N{ZxX^ql>e+v>C;ZSn?zp`BpGtnfTI-x zY*4TyjaDr&xSQ|YVaL~;SYS1vG?foARwy+1^^s$!ryQ*k5{DvJ)LBXQVyjJd^O2Yz zMB96Kh`D{LeRC$Q&@|sjp|n${?3IQ&1=nH-tE8W7Pkn&SXs@-^x2+0<7T?)lQ+5HE z9|^b#`=C`X{x(Xrg5;Vb({;Meen_@0 zKsKg6SP4_>a@Za^ue*-2@>y*dHZjqn+BgacdVN#qV5`95sJH3svU3x)(!MdE=~us1 z|9E{4#`suzcy_1MV=7Q~bcH4XbF-yY8qrQ6dsLdMWUe$dVc3=qfZnfhkjB6-}1iA1Y_; zt~(@%9qv56+J=LHIO|MVABb2s2`dMDH&Ep6`c5a#E5%#l-@VouB>`{fL$BMb(m70rC*Otm>}5_%!xMRXtbIcg3= zr`_8VW>dYMx8(`ER&Bb44P60=QVUNm<;wyv`G(|h9`-0E+@nO(9Ve}wI_X&*Zc2L# zcyvWh^EE>F2p(}sX3BcgqebzI2bX+c&#HV_;u-VRmW^;Z9s}sb^Aa%w|4y4%e-ay` z6P4q@p*tk$F&HV-rotcw2V>St$QX2ekTW1A_w7Vhqczl_Fmp=IZX{mA|xWvR|%2oqah(B~7zW*WusFMup(ye}Q^p z?g=+wUhiECzau`g>kg$`wUVkwDPRg}%7k*-zL$#TDE3qWqS6Iv z2?cusi=H3EIRSg~%n8O`6bFJVbixUWqr>s8* zFAupc{e13q_A6!D*KZOnjxVhT7+Gy(J|D~-$In|iAZ50kBBFS9lIRsW&z+GZ3x_G6 z$p1j}2?GzIvpd6do)=RGYF=YG!$DUqM^_efTz*pYL_;Cd9DtLP&(;%)4{nOL@`P-( z>}IQzY=Tc)QXkr%7{J@BTe_~sAik8qoV06n8;>78OGxzj0v^1&9>9E_t={jrt0nGS z_E}@NWkC;E&;JKKe`qO}jg*zoGoGXD*ew@wUcbk67)|DO>?+D4MR9)Vc($$!k>Gw5krwk`CgR&$YV;- z{sgaG(>pU_8ypf=_jp<2ySt6`E_i+{O!&OhFZe`4DEa%qZoymnnCvix%u6AMbfPgp zJxv^}m`Ev=wV(JcSwNvpWHk= zyaVjlk)|>#gR5G;m=+>$hD5xuv(g@n!#dIGJ|n{n|9{v##x8|A)n{@PmFpLXFD=Q>Z)R(h zhh_3&7%h?d-hh9w>jfGmKielF(+*yMj_*PB8!sF23;Ds@=_b{{jI!0Pc&Zibsu}Fi z{p|$pnoH$6(An?(51ct<>&3B4fk>t1^OFv;kED1OblDdoe+lznT7LIGv^+-6*VM<8 zw0PD{|IsC}69R;%KX(h<1&=S{A@qe&{ft4=?@d|JXEUr@<=i8R9}%k^(@HZ`Mh~$) zM`|MBIpH6v>BtpIwoCo#*-;B{Z@Jpn;4|nb&67iHYjIrCBC>H9zIT8wRJk*UY<2D8 zZ5UZzrtd1?e-c)wIb2tqO|dHBkCu71*Okmh`< 
z;kDr3HgL*w2y=0A?Ymt(lT>9dHy$-NF#62TZ7t97!LeB(O@~|-jp4fIe$n&fhcCos z{W!@3+uPEaR>4BMpCY2(*xBLze>uQ2w8M=Z$!fsu!PKYPPcC`#=K8GL5F{?M6`wG)f@xQM+V; z;NY~>dK|3Bk|w4U##re2%}S0)_*n~cVvVwYVzI8<8CPR;LL z1Nj#?#uMdF}oN1wZWhpn7(0an$9q3DxX%75?td)^dBKWqa<#v&E@* zn4!k!{EC0&$;r4LL?nAgv6!GSY}sadnhA}_yy?1zgXlf8TfRs4=A!O(lR50IqrmVH z?vj)3vbtxc$8-JTAP=P21pxih=08Re*y%|wQqtlKowBFE4LFE5PpC}ThcaH&33iIF z2}Lp%W-{q|KKu?fVBJMp0jq|&*mXPB{h5`??Q1ULG-oWN{CbeG-rT?ZUvUnO_F}az zU+&jRd;#YoIvRYbhJyAJd%@ZxM1N$fhC?vXZZRh_Qg!UpU(`SYN_3hs!8C65$2DYn_wCf7FTu!}>O0+&27pMLeSfu%j zf1iZ%Nl#Yes~O89oGY@Vtch(NcgF;ERKG%~U;9&%qe)gxx*-ia{TS1?X)H?9v862 z0LY^B$@o<<>qbW(q;i~ zLUN3sO`7NbCvdvdW@H#Eqr0ZG;U{mb$pEzYQbV6Kn)qhzH-1x=W4vl-t@6cyIg z(Ok)qoUX6jRvsqdGFznW*c>*xbUISJUO`cLKUcj@_DVW&#zgPcYeaKmTO9`f71uA~ zTzid%a=s#a)w$eA3_b%bK~N=wzJ5c0Ryy>Jd=TEnQ;>;;@N${6(`x27Jj`g}VK^5! zR1oD17|)P@L2lDTo4}HNlVnTw>RFg`rjak2SE9`2C7ZK%9&9I%d}BM<`BMxO_%g+^ zu@h4827c`8a`;Aq!c6hQPb*~UTrM4wYOD35oT^~_LdDxui4@{V71d6(GfC=YeWtfW zYUD7N(ezOcsB47?h4rXtvJv!9jI!(K%fF@l2g7_aD^MXGkT=qeO@k2z zl;_}$C=)amKh8p<)z*$jeFEQF3jNo|(NKLDTm3y`a7!b%gGkmEd-myX-+zDny3l%% zl;Kpl7fn4Y%6@UgubQ?Bg0e|fK5-nFW~;##sYibfzVuVjrOMB9IN|PB4p%H+Q&7lC z`=`2kw>C=>!h3`rH)lExO?Akb7sk{d+$yH_pg^Dv178$VvF6d@bt#5#*E;Q|U^%EV zjO%oj$`&Ey@DpuuJ0FSeecjdL-{VToW&sSJY4xgdhSjoajEO5V*t|9ue?7VnU#;2N z%JVBRBCw=QD3rp4LkC#c(li>NVMp6j`J0T>Vu16wNa{3f3x=?^1~;p_EXfVs9N+4! zo=vO-W%Fj5x{_(K5#bB^P|Y*$uZ^`wBI}>+UvF9}B+l(F04U@AOMY3rgo@!4PD8fE zv4K){U-o=E28ZeC7vvq#+4 zDPe-Zt-Uq#Irr$_I29mX-nO=8xs63|W}MfqqTnFg*V`&AJ$`_9wrur?{izfBzHG@} z2X<~&7j#zj&fKiQLHmkG6ypQ6#e*is=hDZ5*s#!~_0$<

(9CGdf=!(P97LQPW@H zs4W4gI3%LU4kS)lF0unruCOT0OP=Y>1qPQWe6Pl%&pBGI#~yH%Zt)|fy|oTAd?IgEP2pg& zTf8M!&W|c;vap{%4_y#7Q~8pv@xSHw7yZ?H#*Ej3E)6NXS}qdfwO=@WuH!I)H|=G0+&S+3W*#){fXh6Oeq4YQ<9apOcDW`t zP19+=d1K?Y$?<7FI{smh*SAUpV?wof;*h~6*Ia|L0NI!w`E)A{sJT;HT_6^63|d_J zrU(w0EFTX56_R9?@V(v0Wj&5~Tw+Ir#ca#-0S*WOHu@Apu@+eakxnq;4pX=~0YbrId8ox~Nff;X@cAdlC6JBC}8b z(s6WiODlr3xbTXz0(4-*h{5f9TQO9F--B-;!f!f%Akxy0+54AOeA$a5dyc33YwjP* z`%zzG4UkY>HiycYx9R4c1!pL88Kk)=HSz0yGpRTj|MjQiOBH8G)xTD$edZ`+HHWXn zPx&cZ5YMniw9($QR6HYyRoVBA50G7cQ1PoR<9U|X2;qr5IrhrO&_I6S8u^&VhspIOZv-w zUcFQvO0`tWRIR&cMv`dr`yZOw*QpsIs%&$*SG&bL_R8{_wJC4;Bt+Fdjh#mGHiY7L zksB?CRK2KHd<_K-{F3fi(LZ*z_mqHh*?zd)nT)V~bK>}%u#o`>wK>`DhOtYFNDG5=TlMGGyKPZ~0)On0 zjZ%HI|0$e`%r~DL?kjVIr6N0FfiDmz<#nD6T}O})QToIS`(T!-OXFE(AQ! znS~T!Z!NH^Q7@L`FQG;HpI4mt=Xsu#%7SWmZQxpMncffoS^=4rr(IWt848a!90%mP zm>bkV#i9*Ann=BU#!O#lq@JH8 z7L`bph7%TRlFO2f_C_VhMP!TMu;h3JPOK=6nq3vYOUfuwFb(=yj3$mz&I?G6@IU~@ z8S`zg+qKSqx~CN<1FASv7)3UdRJm?-y5y(WBR6=o?EAoRc^jUesPIwq9p6H2bnapZ zILbhBSMK}i4!j;Oc3y&^y4y^IL_a0DCMNY*$fM?p+9`vrdTU8agj)YSP>sSG8JRhi z7%}$ag=Fk@R}NT6ndcyswv3sAc*ti&=#O94i!|bHi?sOIDino2))(>d%R4{Ek%5=` z?jdq1`#(~HojgzrQ@)pqj1#9;@ls5jgZ44SS+Cytn@js;0tE?l{*+^MC~LIJIjEOs zUO?bc&HB;7vDWk5N8yKfZre7-d%-!Fl?mAe(W*eXbSEYj9r<;{um-!c?rNJkwCTiZ z8^_<&aqu6+BmV9*kadSa3N~v%k;f?p0TERF0>V#2cS|A1%&sK+vhH1=SsT-)fQ~8< z4)WD1@i_}_UP-oew@%gUu!j-g`@(VE=(Tl$?fSkIY%?Ye8MsRKYk=*qa2w18E&dcj zvP}JLM?;~u%~zL@E&uUfiK4s(>|ITeB$w;sqRL#@bMxZ@20X<;Vcqyme}7*x)D5qp zmFw*4yVo)?q3V-Y&*yeJdd`g>E(vOTkiIY;8>W#MyPiaPhKY7o9o^9G>xev;_iF|2 zwybm$^`!RVY!>r=b)^^rmR87y$WmHPwfQBb_(aQtcQGO#t`3r2xLV1xj2?Ie6{4`y z&;UbtiJt257&PPG_OS`c*-( zpM^4PZ8TQeEz~A2#u{>${(1u6A!%W~Ld7gM8fZU7-Y2)&{hM-R#0DF$X18E2oMbjQz$CGC1@u8Z!wNxk3wpNvxLr6Frj%?O`baHn(RVp zj9)__w>MzM;_m0fVqmtJ52E;~6lKR0o4c%I6(Mwvm~uv;>6+4NThMlO@v8sK#$QwJRb`}1h8t%T z=T_pqnM3nrfV4%k-ku&Rz15j~3w64cTvALhspW>lrUw*=Zq`*YRK>qR*18I2xZMJ7 zv#zD0nBMhCng{XFj{XV0Z>{v)wD78=b$Yog)wx3deZ=WV`r6vy@xHo$Qw@0d6~(YK z)GdPs3x}t~>;oH+<|^0az%xQr;DOFUP^)`RR8URreKhUUvVY(8Q0_ufJ~d|P&P@47 z%8CAuRGKn)4#(qoJ%Q)liB$oces=sTwG>J0bR=&Dx@ik%;|Ag8$j{xzmf z#~Mx??Ds_*@0gw(*_+?6)GHZj0{)? 
z`$1$Pfh+=xwjsB#B#3OrqYcSVcc_W9-udq|O_!r#ceVuT9-rAm6{SZztlEU>@%YJo_H zxZ=0Y4gLknBJ$>n;A(^Vs-1oO9jMf0QZi3Y;ouVU>ryQpk|bD4qrIi$GSJ-uFzb%{ z3r>GaekZ^S!PLc5h_E|r6bLoxm=p6COnZ)en}f>}-$%Y>1-wV0<8m$LN(UI4n5eq4 z{qh+5>;l|}ROxnEFGa7`BU(P^`MPS}VhyZol(EL3+F1N#4ZReSfsqqD{7|rKnR0Ex z45`2j84s!b@Aa$v8WwE>JJ=5`tf!m|&p%_v7Hvl5oUeh`zGK|Us`THkz1i~!RC+U@ zi}jQ#8ZdfTt+FoZE5%F$dIsNZEBt6lh=LabP0H8WOc8px;ruZ)tZ!blW94sAC4KW6 z?RJVxm1{)0AhzS0v{DamrkEk=1K(+V3LQfo@iugtxbO?eOe2OG-3p^-ZdSy>?IFsS zGH&p74Fq5QG~;D7Ss=U|{9#Fbp=lLJ_>`9kh%{XcftI;50m44w1a`%QO@BKn>Ew%g}d5OrMJnM_D=IJvWb8G3v^x*g>Byf~xpy6wqv9Y_1v77{(2FJmHhLQye@af;-256Mkgq2S&po!S$)@L%r;KssV{u|mMgdI*^39xHM_ zMMOZH+QN#vi!UK@04V%A)jBM`tb(SBE*LV=KbqMQKAVr=B^_#%5bB*44zM^d&Pjhb zl$OI0qA<6KEB<#a*RU^~#yL6UyKcXC$8aVMK;o8bzDyPkWjyx0lArZ%1*{_!Y{Jcm zikf!!@eWH9dSmNI_1V}Ztn~(V;w)w->-uw*)V!EBqm=;KbnT9uanB8H=*o3TaXZvX zsf+uEFBrs74qnd-YlcBVZv0CE@2CDIPbBIGkKj7S?}+g@CuoYn_(ekZuQ2pTr5Ww zp+6`V)`5V>-j`?~Cc1OUCpEvsFLs0*w@G zf5R4Xx|s^5{e*{!qziC9?QpIW;-54}=L%dz1bW`#32=dPGV0u&`r`GClSY3Nu4$k{ zGSvM>uou`Z{LaGOw_z0;bu8q)tNS8$l>?9Hc>s?{Hok1d)A9`H*1Tz&$ofIOw4Fv> zOFft6R`pvNa0m85?@tfzWYkEB4?+N-skcMaJoNne(WNS=vT`_G|CuMBR88jjL} zqPOVe>=WS`USOQo;|=svY33!D z!gTa6e(R_Ho#vyr%V5m9u7lOM3Ry?>fnwT-Eh}%pzNd-E-!zW7t3}9(u!*_K%9XMQ zOCU24lwA@17e9Zq`uVZ;$ou+sN7LQ;7!r}Zp{42&|2bgB zM<$~zmGgO6KkQ{}X>?YMbM?CVY4tv?vge(Kk)8G~c;+7rFT@7iwBL)t$a*3C9L-sq zI{uh?fH=~9jsARId1dXot-uM)dXoEiwP*dIdspMRP!H9-Fy*oH)?{J4sd<}bqVtUT zPmWhKRuI~GkVWpTc;|)%a z=QyJn79Nk+2^6`{$8{YSChi+vqv6bt!+K43Yo5=zZi24v?)~aRXU|{R`?iM%z|P|O z>Q{@s6k4k)554Wfsl90T7t8OWYPVRJ+LIxs+#|m013FDj*8WVcS;?GZ1LlWjA}AI4 zVJfo&<);5sm}@E1)n&L)sOC#4CzD$vZK)l9Y%b9M>`=yxoK2wKVzpx9p7x-~M8u2D>LK@=bfdb8-}+W9|j= zaYCKy&aWiNLcxOeB;F}^T=oK@%I`*wA}|kxXvC}?Q}zH{`~#J;n^TZQE#mxP&x(t{ zDStM8JVFZjdqHIpRN{_u0+0e;;6z~SEg0Ct-wIX(7(&+6$7Pt3Pb5%wNIRtA@wI}v zD2n-RW+Uoi2@@eyg?pzQ;}t4SxIGlaTRQ7?I2`2QyFMmQg;n&g{?(6m$V@*SXg4Bp z*MmqnOCFB8w6V(zBiB)YLW{Q=;|kbzQkcOXOtf{|sDDR%Jcb=qE#CplJW zks`QCNgE`a^TDz*z*#+Z5-ru$S-k3d;oFyJ5X7P~#3?{hDCm;exmEQPr(@KxdirxF z!cxWjYOE8R9i0juCVzJ3UPJ#r>9FOEFE}=RC-s8+%@)JfeDuK9X~rTL?zAbcY>;H& zO@89A5ZdYI;-3EBQzt`RjK3RvV0jz+ZGWrH<0x$%&?0?&`xXxz9sdRANI!ujRr1761G85t+v3g^KnE2KdCY2?eNMhC`NbJ=8b40r%qc;8E8MrfL6t} z<^I4^CC1}yUgUC@rJj4v1Kh&8`dpV0A~R%l?!HR{i#U9EyQC{!iJ#tPuMjyQFk~#H zu|%W+(XaDo&*UMuYMTE-@NLug+{3fA3YDfY?CPBFC$-6V)ux0fW0y2L*lrmQ`t!O% z0r9yvEXx}{%Yeo^3nfM+G`pC~kFRtt6;O#xA}N>AS3vZaGHK~3Sq z)3{zk!e#6{iIe=|+*j&-`=aC=6qh>;85>#tA?0Tj>x_LArJu-EI5s#N@Q`PnYADee z{xz73705-oeA#cQVM?rQ+Q$GH5-AwyfxF5m`fs(0K5WR)1KiA-gO?KG)cvK$R-yfA z%~naP@;gc2pj}cQ53@jvZq;NjL!m>++xpKXUg{BP-Kcra{34!#f=liXYQ_ih7$g?xX!PZgc-15cvG_=ood+;{pe&32hj@G=6^$&YsuQ?}lP^dCAkgu-zh@#Bfi$W^VEe`?B45yG zXH(xc1-Gzp^~zrQmgi1cWFb;}HDxR{lJ#ri;q_egUh_5QgQZDM#~|`{p{xB8Ogfy_ z_wlqbvdAMdHm3NZ{>^`Lr{FF<&0v@q&^~a-o*oN;6Gn>#w_{{A0L*4dgpd*SrQibE z>XGeN0}km*%E@{{?!FejeK`{rYO=?6mkiIiABcM z950tH6J{BpLmEEUmv=4#?-0cvQiE)Aw;Y4$yRpp2jpHm`*U!v_WnQ|EGlQ+pv$A!C zXUd!}e2P(pBT0>Tb#decOseu|tMo|G2#3Hlz^0`@NaVC}d{QW$3E9tX=rHIK)wopo z*5>(!pl#7S@$bcPnOxO3oA^<97I^1JhA)cFgx^E_e*jRln_HIkD^0>R}2|DeD82aGu3iG+8F=${A2apJ=!{Ne`9g$9#ElV^0Z8)^s$oIP$>HVSXWf}0FgR(8-SWk znI&nBj9iM1Uzg#|NS5o)GMLe&td|RvZ$B30UX}?Yo;xX??@#$=OcOnmJ3mKGkKWIQ z)mH(9(bs_P;V*lSU@=iy>_UeDj5i74O=icGk%{6C@ZFEIvEAODPgAdRUs*wlM@WM$gSFz6C zXX>E)dGz1nExsGJvgL`FNwv=_w4Cee9Jxu(qqw6KUfU9vcfs<9OuFuqw7iC9_{1+E zMvM%6*A5v#&g(SSH9hS_U}3J5x1fRD-QV08ewbe?gPHPhyNg4VaTFK|q< z6V}q6+Uqec0ibG(Lf~sXi0F~5>sXskQsw+*U|cTsaT*BPRX*Y1({fz%=rCh(a5`nd z(6!Uc^=x*TM53<`(C!Y<-s-95JN?dXq5SB))Z)#4#&Z7RbemIwKS$@To%OC=n`O2H 
zK<#Z1JJ{xlezvmBTcw~6V_iFsE=P&9Utu1j7qdHy<%FSDxoq5^N_N|Fwq?>T*m5(+xe_)}O$kc{t%(z<*>ssJh0$JL;R?#^8 zx(Ls%d$_3$v!F3R_O&#X=dQ;}2N?!rZfi{=0vzI@- zSiBy3+Q}RB!~HA2dX5!EazHhVPQB?mXWF12A0*i%(-#os$=wl8#L(i3>5;t)5KLZq zArc&~c0A=l%!9B3OJz{9ZLp(>n*2DQpo(Mst< z=$^mpAYh)}s^#G~!k+p+n(KE5tbwq%^FqK>@WHwV%_zK{?AFlEUtrH)Y8QeK=Rs_s zxMj1LGq9xjuKw!o;cB0O(`Os!cZY{5z{{o$2<7@)Cn2vnNmKY z&9(z`*6S=s8ZQP4iQrK1g&)(49pWkvJ)BC-;)o%IOL|Yo)ollFbG*qyC_3uza0+|8iH?TEWz!?%jtE64i~*nTM+`ayUU;f5-rz0-6!f=d9~>i0P`e z%*ksJ3ZMIVIL7-jZF;0m(1&}Dn(I?;v<{06*m=`+rZ!pK{`TT@rDoiwJUKKEcpZav z$ToF5%whs~+^t`xZ<+JhY_&*feXJNA`Md?6S^%G(;xq~Bvu)a(XDc+P+CJXet=T~> zV!Y3OYL`vfHeFZCOmyD&+iR@^okt55S7lN;20Y!{U1HfEC4L`w%Uw20C~_*^@1Z4? zN19!aK!d3^^HPeN*25#Yw_zghXREX47d~ztm-QpyD(`);lK0Ck+sDyHvnD(H3yE() zbi~Ki5zpBb=jTbgkH^-TH5aSHjJr3GOV{afgk%`tb=s-h@oDt=+E-}~D~8XKVx>Og zaLV#o^VZRtv-!}w{CV%teZhKmo$AhxyBLkRD@E;KvzPYxss)hV3+~=%E<2bmJFs~< zfd#y!d{C{ccpa6$M6-?EgTs$V9hg1$FXp_fF7{0~`fLxaJA0oE8DWefv8R-ohzX42 z2^u@bB9!lyzt#5WF7OaWErLvpDKB)@YF;B&ScavU+JQsW&GoD9W~b$-+Ag&&kV-^f zZ=Lt^^p)bpM?asrK}dXp;xfsxdm627T{Kp?W%8%iKD_ZL%bIJ+KI{bUf!Stb{FX(1 z+T3be(cFCM>QLI(jyGW$fW@CQrW)gMc(0^E@TRx!pMju?@P2-*lmTw6)gs?jqy2Hm zPSraRJXTI}u~>&Q?iezbT>nb7`Y!6+8Lkaj8ljRZa$F^~x>0J%UR<35Zk4xdG({zK z$}jUdRMT=f@V0+oc4_3>G+Kh_J9bWWQ(P?1w!ZjED~OKKTu&0X=wIBesh-NF9M**~U_>`_CChI2c(S}ACn8g^S$Q_|zwVs6KjAb$uFtkl3{94lBwf{w$9mZbpwE4S|XJ6q+ zlBh1IQ{?-{Fa*Twl3T#2Tx3hB?4LQfmRQG55gsBr`j;qPHyi;N4&8HQ1>+*a*}S(~ z)h9xqrxyY~b@b=AGzObmYqiOn_qSd5=O6*ui+Su`CF59TeKLA}L|&X-dOF_A{v*lI zT%M=%2@vObC%%dFneR+`o+$GhO4-)An}(^6NPd5z##QYg1dt9W^bYhUz|L3CQWaee zvRcWDoY0Wo8Vj7Y`7lm8zA?*sBbEtYcKEKTV|qV0C15$dLR-P6@6gomGdy6`k2>+k zoTHL84a!BF#69+Lavy?pzj_J#cg+2do>b*{WJlO<2%6zn3aRt9qs`16%mnNU`UX~y zRrJvDhEJ}~iS%>5zu~2lnfr5pz+nh;bcroU7AiSQ?_30tdg*nuQbZ&G zT;Cud9vVN2FHe~W-(!v;d4BoEX(y)vSp4 z4}@we{(^ClD(TD=-NsRf=Wl<91+}lTlguJq1~Z&~vzL#w_1&8`6A1DB^K_o5CL~Q1 zCHM1(N7Q975674(CsMl9{LEgzrBto{TNZiGi-A+%(W#0|F{|~n{jcs|X;fej4 zkQ@DapM?IP0#~}tERCrO226O|0_7f7WGiYwwywb-=ES_UI2O&cr&#=dHpbG(}Cq?!7bJlqnVoGxB&hp9-uIzN=5SpnKzVadW0`;>p?N8=_nNQnp=>yVUsWbMTc=fKMwXJE* ze(<*ZURaJg8?>9wCc~f>IX-v@w%k_~2f9UI38ZeTfPLvrFd6FE0xj$X1}8+ee!-!= zd5v+Oojx(Hl3%?o7S%FdjMbL@bavQsVCt=q6Ve`uVzzsGxN^nL+P~rkmMLZRP7G z$Q`^$)vUC9IUCqO7uBMJ-GOmTE0$qxHZhvJ7m?!H`|~ zJt}vt5_38{g7A|lK%1E#piLqUd-PvJDOe`9Bvo~Lh&y=rnYG;}%~uK5g|OwsrsjEb zH)bkR1*gi4Tp8H7KR1|eHF)7+yXYd6Ull!*qPp&$55e7-OB27ph=l7xp6uM2^9Hs@Q4iC62fD)PpJtutA59qlPx=d#uYi#%ajFbo z`V4=3l|MA}ch8f`5BJWY=*JSP<6zOii5(9EV|P2bDJkhc+#9cEn?0 z@J8Lh>67Z&C`=R9wjm(ccSL$QT7G`M9jq3SWZ-w5Wm?PwG8 zqAUIGP)`8<&eN0pW^>+l-`u$en9LvqV{m;m81BUx?td~R(FEgD{O^%gohSA*XPkcP z@)vd|w!1a&73@b==tCX$$fyq{xF9XF?VXCH!AyuP|MAT4%cWU32LOLcC^Ez4qdXMZ zDjX$Q7=wd|bU@gz;6CZlXKx8qQ89g$GqpeQ<1N_(G_=yB;BB+Hgn||9zk`!(JJrI@ zHNWMc{x%A=>iw0?ZxDg?#sSU#)%JRHm;kRsac&Ve$r|Sr17OuzdRjQFvJR(*H=g`3 zCo@u8H3Y{62S7_Styx;CsTl}&Nz4j)Jx?Z`dibx0ME_oGUAD)C5_sYy;%VU@W1gz8iH2aCe|N6Mn@*a!&JqKBHLzLC&muPh+=r=zA3kKp|CujH8vV*5?SLrO;Wm+9wD zMpCI;ml~by6>|rlX{Kc5ZLpKkQu8m#esZR|6pt+t`Dl>($7d)A|1g{L1hmY@=4kWk zqSJj0jGoqV17X-*A24CQMRf4$_dI(G#M!vU?9XHfkq-{wry}(Ks4_F+E)JqsuqTPr zAq%qvSLw|i20pkwHa}UK1TEkGuyZCSR2)sgNQGr;yd(GoAO}??bO}RwnVyOzHTvbJkFbqdx%=4j&~W0-C8HmQ*K_Dbj;l;ZU?8smNS^nRoUd~m+9cZha{bKbx~slB!`&GIygEB-I(NDe zsJ^{Qm<{eksbUgrh0m2(L;gB<)zixn$?M$b{=W!22j0xWhFec<+nBmjn^R0}w@+<8 zwQbwBZM&V?wrv|{zV|!GNltQ-J3IRi+}X*s*Sc1KI-{gpEpOfKC6V{dSWh$i*7vE6 zmg8T$2o8lBshp1z25-wFo@?#~t31B@qsK{1Y`~fc0H^nO^@>su=yY#}OT^XWdQ?D@&IKq$pMtyAy4BJpH zRs2%}4HDW&Rn*tbG?a*->&$D>1#hVfntu-vm!2VrazN*3BQiz7b$D5@%z-t<^m;`C znXhb$oic} zu&iG(J8=iXm6ig4w4~Z_iz0DfxJhQ&URFl~D+|zW_P{wDehI+JK#P?Y@>M`r-)+Y9 
zNs`TnfA$`#_h(w05QUrT*9cK(_t!cdpO5a`?aa@fiW5{@aq^r#l0~=J+0Z{6gy|(p zDl$+V1ho$cw4($~?s(WF#-xa~vLDNPAgZ@NDqMY4czbCG^)QfXi$6hI{s-p%|CI*oG+ojsno9h`N-5@ zJKj2GLJYN5(!k{PyWx!T6nfM=L~<07(X_B&s5vA?Q^R_i5_%GeJK z4tfJ>TX59);$AAyKYj!Z)mfk8g^dfWkd1q2UyqjJL>qO@78qYxNG@y^jr@Y}!>tMq=x>(W_%0O+#? z=&;_KSY>jPDf*BEj(!Z|tBkqx3gIahVDP>%A8RnO$8$;k%XUj&*@a1?v~PrH9@q7v z;znzcq5)OV1GtvCKiike4IvQaYLVVtrGg8Q372m2`v~D#@Jz);WN;CgNBVIz{%*A&K<~c}E5dp`piuQM}_HpnE=&f!U z?$XHtsQAQ*Hb|5#IttI1EGku`E>{kxN&3%g+7S&#qwOUd`^J(Ys571qY*QsJ#+9Aoy}BjR<+Q1({j1w?gOQh%TtpS?XEnYo_)1lqwA z&mN@}0XQd2Fa}9#y|B_UXDu4MF5PySe+^v9(fZ|*Ri8#^9~V?0&`}?d&7p4Gp_gEt zt&FAQ@C>M*3DbOx6I&64vfdHXsN(V3 zYS-AUV4~y`LPAD*9s%?`w=4b!V){Ua@W9#)`)D|gyHTtCo4+LMmCdEm*D)({p z&0_DW^lO6%f;0=e{utC?77I;~)-vfle=AG~XQV+Fyz-Z+4UkX=C}t?|t_Tf__-OKaKcsz!5+ zK(kOhng>cEDo2-=J1<$oA*!6~WwM9vt%@r!L&p-Q%jA8?Z-@Z80!@#JnvV0xPLz*M zbTK-k1r3UMjZ#AAdPv0Ugj3q;m*cpEv-W89{$yG0+G`$Ex~(&pd&{3F$1gevB&T8abwN}Sa637SD!r0-X zQRL$EgW?2akeh(r6OrTg%XD@BFA%i@GUtU8W0rf^7-1l)W$JnMgSzkOb>W`ed3GUNc zB%{V^jKg#JVtqo7!F}=H9Lt3E5`L($_qPdUrtHjFX1eg)BrJuIHGS5O7gl7Lc)e~; zV!qF6plZ9cX_j93^H|jwDZIX|q#`1vw`&zB;|N~e7mtK~L+*waz-{YJ5-`1Fv8VaO zHD>-{hL4fi@Q9%p53kQUxaHdec5S7f9o7E0@&)O@qqB5pjYl?jX56U0&eWU0UFEw< z{q0nx5cDW0P3!tfykM;+IcG)y2gm%&-aWA{p8z%BE8YK%4Qa@@FS3>yBhs|VA& z<+Y;pYL|A^uB9KAxS3tZE@43{OOnY6^t#ug+t0|M?4xi+{nMF&o`eD%nfLT!F2XJp zQ^y>Ud%iy@c_?$y&>Qvss+ljHgBJyblOI&`TGwXVzz!!l2r-gldzf)`Os&AGIY(T> zX%_q@ME&&c^x!Pd#fj=yb`tRbM165RcaQi}dC-K;EqL=xFX;#AM2A`4nyd;{c3}S6 z@w6yf`4_YV6Tpz0s~zcpR(K4;w55h?oq@`I&bt;dzoR**A2V;j{yx+Pmj?m%cbj2> zd^^&zy79b6pe65v*#vcHhDy?YXVR1yp5Ig4wFysL(&f~>Mdopa-u)|ZqE^S;t_!4X; z-dz$d+@xy_2z4H(>H z$Gw6f}SrcTWb&n;)w$v{J6hK0xJ%!=`;%<1ai!8Nc? zaVSsGtM^$QKJ_KtuglBXuPxo@$-uRpP0g}wiJW{5!u`Hf{x9c&a6zZ&a8yw=iJ_b| z;}rd>sZ#Ipo=uMTFy-SA7oV3SrEJOQa!e%GnXdNdo@ri}DHOb+t&fxI7|tuc{qOMS z-BnpLIGKOyRWwoPg40yw@j+AtqR=?pYU;`uD3E&_&x*cfbW=@H>8*SaOfNE}tx#+F?yl zFpaI2qc9U&5j5oicKwGmd@^Un@B5@#*u|x43x`GW!47-Z46*hE&cB^33Mj{SF|DqN zX9$5;4`1v?e~FLA1a$TKk!jMF`wS|&Iz7xcs&;%Fd5SVH%U@Gu)RcF7#lYcy^=(sb zkQfaXYxzS&bH5dhT62RVJ|w5rk9*%`%wM-Yc%2h@+=p=xJeO%Aj+n2PXO9LMG?#m! 
zi|YL_q8CTPHUgs&uk@P#Ib;(y^9YhHF>}WE(B!)xS;*hkbPnogFJP7u7ih_K9CsD% z9_5p3HOwl0L~twnzP9ef4!cf8lJa70g3i=o{qF0jT(~$pG5D?!I$|Fb_Jf1#_O$j& zTj&_128CO4$6ioj4^i9dH=<^hf0RM1-X)ts*>gp5pQI03j8)6b@E~)|B&>>*HngV` zzbAB-XUkHDj#Zy3c55;dK6!tHBovNLCyG z@TKG*X0fSr!~`;5)*t`#FViZ$_T^9uK_T9e^+!9n_HjF2&q`1BiuJ}eK9uu5Wun!E z_2*8R)GMw9(gNX$jI24(iw{EDFcz&>lN_Z?ya4n=i3yi)A;JAu)WQz zmnY==L=w|eS{DDI*!53Su045}TioHzvMnsh$`@*az{k2LdrQ=zc~p0D%h*R$&-YRr z_ix=CwOA(uCZ3Yz>N501ywQj8f4V&^nQ8u;t(H!c$%>p-Rbo?*4xw9Wni%+EGXiBi z%sqz^Dq!WKvNXgpo=1GOOm&G$bV6~!Lk9gBF=fJem-IFYxnjffT^x%n^|gmn`RSUR z&d1bbO^;KuFX!vO9>Hi0X7$Dof>>G{BnNq1xd7j4@Z5Vo3T!^F3OM##M@$*%A8F7PGN2|gF=a~xcW$SlYsID4T|iRFTz#T{ zA?i1=3Ore9N9cw?t)CL9I~_bp$!fn}6KRoGzwH(vEy9iM*$%ybJI~L+q(yCBdmi=X zxAFD7OX(d1zH;pe>y+WNOzC(Vts2TqZ}Hy`&VPS4FK_;0M2dcfSKTT{#w{d>(Q;z| z|B!$UPuVmnN}w?o&(4Z|cJY^>Xl_PZk~|C5qBRw8kTa~P*mnIJv%KkXm$U>}O}x>4 zzC|{zDm*iCCjXc9(Y~y6Q>`f1{4_RgvdkWB0H|!9*(~-AS3lM9{^LPaUkn_LE(aP_ z<<^f0uG`hloYNL6P8B(85JW8ti5~wb(IQFsW5|7_s!9s#l?Bp~T9xO0AWwC_`3f?v zhBQ=$fjc*mP}$UbCSE+HOaquS>$TU^N2%IHq$ZK;Lz`}ub=zLI62IO04OR*{1UOOR zH1UhxoFUz)kyh(*@|tHyi>wHvbfe*I)rQem)UtL(9F zY+|A1Xx&#>a1&tX!B0n2Vyk{zjuN}qBQD*H8~|mQOLHeW!FR#q<3&Ev2GfjsfM12N zjPpYJE5L_D=2QbEnMli;RCm2rv?d@dImWaK=LPU*4(2j&^a1ATX5@D1$BK-UD)2ZY z*trMvsjYS5%tLmWtK!Q|X~S^!ZG*B@qixO6D!jaBg@?0X7Pa^(lZo+xEEG7kti{#i z{ZS~S(iD!Or_%Pj-H*-{1XP)zMU<5;lNW3}US?^6j|5u^c#u%duag zWaBaeekr;WT&5LCb^J&}WW(LvllCgKylH0JnX6PXh0V0nQZ1z>fUv@o?L%C7ox12- z!oK{NVO*=+@5-QV6Lsr|SdbcMC+>?l>Zy{eF*bf5XO4z$>x;kZ*wgYp+WVg9=gyD` zLrL-y>bSi^a$yD0X??k{X|{l?eGF<#w}AqnOW2+BE~}JfpsL` zDEt{mz@zUl=@4D6OwgTI3G4@?@-2}&ipK9~RgE$$S^hLB?{Y3l+Lw5d{)-xvU7&8c zYf`dVGn$cuPgI?T6G(Q#&u$pLn@oT0X|l2sGmn&lr}d!4I{lIm%}r~d=n2FOuYr^2 zHEv@jzoRlM3kQQuOtm2Trxh@_sR{a&Jt1n zzIdjtg??P{&h9#Hd#v3$Be?>+S__-f%}tdFmPv$=N%W?x5DN7Gf|<`Np#Q$Y_Y6O| zofzFElvv3?N7^4i*F2jz&)i0Sdjl>F zP1IRY_`CJ&5Y7ym zFvI4?FM{R_y+{6-3+~l7!&IRm=}76GCw(~X@2?;0d~V)vyM0D4f6NE;bno-}4SC+B zHS%+B75qiX)x8q(?CkGpr!5WxvkMDl-WuOFa=Rl4HJ$pJk-@o*F2441JjJrqeV#xf zFW0dj^4RcuiG<^Yw`sF5ykB$qe)?GEedu@Lf2wx*a?AN)3HAzKiD2??UfeNJoeGua zs~Vt4S_wDB*=(jQ-$+jdz(TKr3txf|IrlfW))qJpCDX_-_5So6MdRg5Nc>^0HZ%ZY zPQ*ctcFp3<)k-_m%e?|BSmJ`1eli&-Gf*q~i2bg!5fupuxI*DQ z$+SL)C0|vm`y{k6OyE`!)>h1!6L%Es6EEwBynAZ-&;h2=&f^4HmCaU7Ywd)86O zS?!=N`W#A!Y1b(=mp`RsQ~(X}lD{;EO4O%H(+a-yHO5O3rYb@SGQ@w>w{a4?{@X}I zy~FIg3BR_2z-VCO+-GhB`4pU*Y>N*#d;)7#!&N$-So1dE6^Jq~U=UC7JpQ#l>3Zl4 zuu<*1nDdZXt4Oy=`y@L#(Qf@xOX(>s-I-+%6GmoQOi zY|?L)qzd>30STM^ z3CKI!VZ`^Q60N`}_g*LG;iW^Cv$_Q0v6z;(aV|y83pC5Q^sRJ2C(qSqJHtmS$M4%M zhZ=w>){mw`A1ChbwZDh+wO2}4f-~=Rl7ij1%s840r!8Q1A3aby7cuBpA7QB#N z6jf3ijg{O3XmV^HK==8T2YPrBLe0$w-Y)PFZCJBInjS zuW7QDJ8O|s)lw!2Mpw#V?-#$%SxQ$oR4C;p^KD(MWg3?(?!*XHH}qGRF%5YVe;Fnh zvQVFX6_qh~ZW-zRmKx*ELZQO#K#4Dd& zvI;?*+GC)HB{BYTr^I5sPJ9$O5q)LQ8KND<@yLk$z4ipt-l%)MT(RO-n1GFGJ% zFN1O|n1~SER>1zeDM3j$M)16AFZ-K|@-ZmMlU^13JP7Sd&}N&9Mtr|bt}uzBQOao9 zUsb|FuMWNX{wuok9!&1zsYchF?`}S9PQzn;d*LUecde%?DH4)b)+g=^ri}t6sp9#T z#b0CfqfqTg6t)8uT-hVBKPdpBp&yck$e+~zvRYr!Q`_uK-XCBLnJ*dPc-?_Fop%*m z1;Ep|t|)R##gYdYP?kb*lL{OIh%$-*LkgLR7@oJKHnxb0b(vVQtQ39i=mEkcsv4{iIVSH2K+UK5w-5t`J><_suujWRrgX0X=QTzsGN z8?B_SYNiD|ScC1lP?<~u{qAsjAX!e@WH>bSCOSMH^9!wc$4c(dkp(KwODS03D$HZ0 z9M~&$i!C&T(SPL)OeF9K&28(f$IgVxzq<#zdOSGKMCmiyau@<_0`hJh3M5$Y`TEq% z|A}5q<)IHf*DO_y_nXN%eEIY~e8V7z?k$}D-JsS>Q{RyM2La+ zNWW-tIP}Dy{Zs#@Pn2IVhkGi>h;((LZ%$~0_XoC-40=_VLodUT$ELDW;iSr3@lpBh zgYssrs-y{>Of0#G+C6WTjIBSGwEW5KY zd2*aNc+SG$gz?vkm$V)XmiZnXFVDIvD&f}k8?$(tFq2i|eg?&8>X))=(k4jdtO6Jm zDHKe+|Cv_LfZx5jY{7P9F4OxI%=9HZb3dhk0ZWLj`n}5k+KM2EgaU#!<2+kRVlc0>*T3xT)~phP=I1`CBWq^c)0{j_K8Ez#BOA+N!0z{1fSC=h6T!PL1r~=D0JN 
z5DOk68wWOgf&v?OaRD-tC#jkM_1pyt47rJ;MpFWu2JbT|i}FovFAJR&4>o4v&iv5| zw^c2>=H8ICNvS#_WmG4-7pm9Tl&_9F!qWDgp!w$AsmGi zFfyW5{>68Y?Z@blgvEP=M^x^QU(>kBFguvS5NOU$y?O+R={tB>j!&FtJnU;EmfFgz z@cQNq=w^*n@2R)mQVw?T%Hy{O&b%TaZH@Wac4k%M#}XSbN*xdJ?0U+NPy^aF{%sv{ zyVpWH-gi-wJAdW`9Lns9f!Xx?VawJgWZNYs$qUeqG?AvgMjAPz_E`Rfx?&g$v{cQ`+>J^~E*7czWlUF7=L#ARI{WJ=P& z@_-qfPQOlHJWpC+vcd$hYG4($ zDxN!Z^+||4^BmZB3x`1BF7y!wN&+(_(`*)DFH5D0Fu}J(TP;L!;7)bVYji6`y4>08C7uh_aZPM47{*lvkB z{(W8`nMT@TBc>*|RJlEN`%T>T0i;}W`X~A%}ED3BEP)~b0 z+rhV~EmdT-z9i5O(}d|OIEOT<>>m7O1>7ARz>?63&ex(5A)C-ZqY2>{=}G(nuCdjs zaJAdFkQ3B8rrZWP3o`QtZY*C0`66u?%m*4|xA@>c-LNniMIO7!L9PB9mCIP*A4RC> z7HbYybN5Ncp?`Ub#h~R6h`I zhnB^gd_tDGdb-hwQaiUktYim$!)SZ2^v~Mx%_YXi&Mb%#WE>+d^<$I%qPxqxMs6sp5_4jS)zKF^%l^N8=bfccA3R308sWMA;L9DHYv+j4oa91Wcw`?iVzxm@lz z7f2nYpBmDp1B_VdQL$CameGq1X>|;#LIuA)O1g?`!y0a&fX71jq}e6%?ecEfKaCxs zf>QI~AcC(pw9vu@E25FJ$p}cE?ur<@DcK6+_ zmob~+^FsLt_xd%nRlo{>T@oQeYickA7SE0=`E(Xo%tsDUTCEf@jVpVFqFOc{fe7lj zo3~Ayv*(x24N@(T`IoID*y}YdHCMA_6;W8od3TShUgWWM9T!V2BF(GJZ~c92HSx5ODx59+g+Q?^@*;|H zEV4oaRv{MjJEV^F7G3g&yzNT%$tH;b4w-Vvi4s0D)sNl^k>atk!&A#1v7Y#c6of5- z8go8z@T345MKl}zsE?@SlY~jU`jEhmqS%JP>F}(31#I`)yRX9+%_E$-JGha*SGd^t zdy>X`coXEO?%k!OGSMc>Ot3^vu;4d^ivixfv}jGUJ_*zY2r|`)FFg?lSvRg@YS}bZ$LH;-;)4*OOak7Xsi=;iw_}-FncMbAznq?%d;YlAZja`^ zr4@Bwtj4;eYYFC3jYUAe8v&cf`OGfE2HS#3Ri13tRN$gKRsoR|sb%RjK$sO1)0B=V zDu7<+C3fteuwn#Ww&k)Om>tu>9alq6MXA9W{iAp5$YO#S$vTjNA|5ArDakhEeN3 zn`QSc%QmkcMQt5%d_Rxy1@UF$rn(0AFO{nMCewy1@8i!mq;n3=K()*c*jF!VJUPn1 zMjrRR_wAWN2}yZOb6tF#)l^UUTml)=pH(zazbO<^UyR(9r|!5|7A|tDmoyegBtaDVsbcny}O- z;#|?`qis-deK)&; zl~yBp%_nwjT!dBExRfTWgc(8(aMq&(4Pyc+uIzMuV=o*>ePu%o$Hpe5Q?4 z3iC2;Dm6oYc7^8lu9_xmcr3fO=HY^X2%so>BPNh_ChxzA?bDf<52Kcruk_ z-6L8$rE)!2)M!A%<2@-Ph6soxcbi&sI{j1yg$wlA#&N@uoA#NffZ}26UTGtLqGl%j zlq`i6Ljq6?H8Adga6@dT?HBnkT$P*IE&l^t7E}TGVm4zV4WaRuK5mNo{Y5&3{%`|O z9Su`&!gmFpa-rho!(-;vgkwURU}>@8)ov0RNOd3fXy4{*Wj3s)^`3NF~}(Nqr)uafmTJ>>AlJf%0}@2eDzZejQY zh#ooHLptP)<}KD2SV@ULXW{(eVBH%jc`r*dfWUvm#F|F9 zyB=c63O`43XL18s1#hOcjU9JJ{-Kpg#h{_L_uWyi*Dhk0(nhJ89w#C`ooqyV4LSwk z@d|dqpQs|%(xRs~8#P?sTs&KzLX*hql4*A&H%_1I>x+5V+K;0ZFih?C6Ehsm9RG@W zNHe_Schr+(MJV0eCjEv!qKn5UX7Y?nh8U~uCT@DP5EG|L+v@15SibayZ5UlAezCwJ zvYDCn{h&OZvmeu|L+n?Q8^o=JI>vgra2B)i?00R4xj!bc8>EDkv0A0bga3tAq)E<# z+6-)?ZN?MTrwtelYu9IP?9CbX9TovgCRvR0!SXq79!}mKO*=_q||aB;@3QX zVQR|VrpTN(@mC#-ojyX8I4(!C=(^X$h+HRy`^&7>#_zJ`Ax^(RaG(S5Qwh}f?~Sc(>^AF(|x#NQb~D0eTrSZOTmprxcC|9_f8*r&$ahSy&jrXhVJPi%KC}ehRqv7 z;|0!)eO4+eSN&L>b3F)F{Tk*Mp{~DJX9OERsKIY*QM=)GBW6;07y#okUyazA25c^M z=cC0zrObMQSISkKiARd1l=LKScLwT*Bh^U9x+-eCg6GBrCxA6-HGt9WF})I_|a< z)inV-xu-z;4Ex!M60|A<$KJ4tBX<@R%?VM&jjXIlz3 zMIya=lebR1LS+j=jFqen2ZaLUY0`i)6SO0@JvM0Oz($c%9(Egt7pWF(GgvUHgPKr$ zc}dEJR=d;kW8_!T*(@@G`}7bvx2&Zhg{-lcg=(7Z8VS1_tZ>$8nvn_XsF^P#1@^E0 zO}TjKBD-|nJYsXzoP6^Vld#po-^XpvyaU8%VAuz`5CL28Hu^dKL6ed&K{(vKGJh>K zE(OCyCCSH58)OY!HMlM3KsYx_6me)b#;ZfQ{kiHkf1;?EzTnRp){4M`u%)){G>i?! 
zzOvm+k2ugUuT8>;?8K-hNa6%mX4j4E~I`SC!N;9167g&Hkf4;cYt}ubi zqRE(1rum9IhJ;~nSB`eBi}$g#+QZFfyDkVfZC|3C)OQg+<%{e&>s^BpkwFKvNtt~T zUUO`YyAV*^8*ZM5Bhw4g>FdR_MfyXh9LmRYh7v>$9+6NS4BGS$Cq8G#P;n?bDiKxr z_tmkRQclVoP@CLP%ED=aj}>ZR&Q|a(t`;?(nEytyEYkz3u>%3TqBA*!KIgLO17g z3?|KbCdnll1>^TzwIgwqo)|*9a)ROTBLq9K$&Q0`94Pk;tfj#VmSgW#V?Sc&%Ofor)Ka0AS*&5I zs{wC?aM#@eB-ur`;Yci5_tbIEyX>w^OK%Hrtj7(= z5=7oY#hCCN9+}4B1IU_@a*32EJv|7xgKBGJTTAGOPCBVzC=05$=znY{huLyFTCt1C zmJGq1t88@XJ71(0B&vdR-);UpkUcbIp@|XRFD?!f1tOnKECYz?tDu^+(2^P(PM7-2 z*gC8kE-bIZ&J2}i2YUox?uuBgmt%Q~f-Zlz@i%hlCb*K*_deM`~<$;aRjkTzx4s9H(ltl>5H$9;m( zPf+a%)*2?967J)HMBJg+$&WBa>kYLKdCuY{DfFEAL7mb;%iJFr_e=B(qco=FRV@!t zcUiud9rgFKovX&nMNqt=t+BLCmz&{Ih%;KtI~rAvS)jg93GHYj5Aiqc#M5CW25@S^ ziCtG~Z9A-AlzK(lE8E;7B`~(EkC0CJJ*w75Sqsg4oAPsvNfJb~x)&NDgSg!<(vkR^ zy|)LC1anxlbqh2Og2KPS>W1P+@jWrX?`d7k=F1r@9TDQaM|ih&A!Nk_yj$@<>KA3rcW=XNF=YjbP?MWJf{#Wtmd8tA zSJ{OQCooP2<$cztglSC=ST~+q0*6}%@;*QH3I$5PA`%tDdyI8^?EWQ^E)YRqt) z0P^ybak#nm)4w>8hp2RJ7{L)hjDt~MJ3W6U!~9kN$t!LYXg|v4bN>z79+ixc*gwg? zRlYtIe9TYm1L^2(XMRt5wWt6%3lAHdC$cLpMQ?Y#Ewty=7^xwRR22@wN{$VlzUIX% zdE;U5qMrv}YID`lpOBANP5l;pu*|R0XR=LGKX&8p*Z}k$ACgUSt%+}=!>aFZij5Z?u{Vs~KzNHYp`L`Oq}D)Al;hes)x(!~}O2E>t4E3H1-yfro&_VphnZOgNN42H(u zV2}I$To>d>@)P#2nHJnoJ5~IEI=Lmi{6M)+1Z69D;x5eFBxhXx@f>T#QOdBJLe8Bx z3nORbYxjd9e(1;az7xQ|y0Q*of=jV95u)#py=bZgF+-6H-m< z1HrQAj)RO)==DBJO(b$ksrVI;aH_f1>;S=clOKZ*`F&Wfn1coe-N`DeJ4FXIzgS4i1G*PNce`*w)XymS;#qRZn8)UBQ!o^E z;g|bKnvoVqreOqQ08^+=Uc*}DU8w%Q24tg>Y=Id4q5S`G+4cO#Wp}7}WUFz?g!X^A z?7$!Sw`-KXi#gcK24O6TLtioF-A6eBp3DBQfN$hbF^#g9Qk1(@z)o)MZjLiHQ*Iwl zBUeO_-bl(dL{S$=5VtM6`PjU^{amlE@vvNf-rBzFYCF%d;9vHzrR5Kpu0y6!Bya@G zGrVv?O5PMGbgD@L@p9lkDp1*@RDYuZykw`9szVZH5WNl@lYQ>MGC`^jGB0nxRB{4l zo$BRQ2?*D|-olfjwzuk<9_Qq|&37V^az0*d;EbBLSoE5c!YQ2Cw{-Lx;@8N|t%8f( zpg993c}=O)uMF{+b`=~VhAG!bIDPPby&!DyFz5Bl4!`p(VjA?gs7(5%?0Xejos+;x zgd6jN6Op7(l(f}JUpKsEh&EX+^FJ=!4j=2%skFRF6aXxkuKz2T=cBLUG0pd%&Sv0= zT8GnlV7bbo_T$6$ha5~FcOOZCO7)V2F*|w9qq${{m_z?QrBX{e-5$QuNE&9LH~iEb zM4yLyZnd4RD{}S@`B?fC?>QFV?|%ZVqqFVK-Mkv-k7=SVShX_K$N%ywx-&vkTX1gRj)rRME5xR*ba#C-<34^bl$+x^6&G#K=6a9Pv0}nflDxAhU-zO zaJO54q9oYQ?9P^2`LG23wHd;Q6t?Xg?7Q@K4b+!wUx5HEOyiQYMj3JIK;6Q_NHw=n zKLLxuk%txGpWnm_Qpam=0v&ie>ohPyhANlw(Rrea`TjVIVbE=VZ@jiif1T<0{j&Z2 zF&(o_?Q1t?+RLeXh>!>*>X3;A8s!*d8y&KNcffJK60nB;i1Q-pBq8_7n9MH1wAM(e z;O**rzKgFl^7xUIWxC-`0&)c>}Ad|RIfZVTP{T-k|P%SklRu^bQsuE z4k%b4Rr>Rb46e;)qRFFiS?v8vPZ~Z?8IpwxKq;T@JKv^^a| z_1td|d2g(GMYvouKT6XWL@R*&hWty&rZe0-2k+9UOIZ(VT0HmV1kuqGs$FBSJbB@c z*Rhp7*QAq<`&`qp!V^#ihdB4&+MD$cJRfO-e>)_UMb@sV|6KuKb(Ufu_o2U=(Xjvj zH2~N?W@qjTNhuyFXD=GncVe z4fOUi_ma&_2Y}y9ZY5YFdwO8Mzk=IEdHm*jIJxHUvGUq$K=S$d*ifU|DJY&54n?r9 zG_bY3R{uNO@wLUq2IJX#S1Bwa$D(baqeYP~0o|*HX+>+vP4tCF9uLRj1(uX}e?Rn!lm!9{9b{w!cs76RYTv<*=x8 z?CMa1cFgYO0N$4UEOFV`X zEhs+l*P#WIY?QfvopxVmCGNrw_PD6JX^<{+p7Pl1mD@)5tFT}#7Ts^HNT2yUSK}8CC@N7``@j)9mUMz6MBhi?I5eH~hS*!8AI9ANNN^NSq%}Qc>=kYsJ z-8xb9C&U^X$ujSjg^+TIw;j%$|630!3Md)TVpKsIZp~Yu1w~(a-?%@U(z!~G;$&f@ z2_oSyJ)5)6Gu^6mwFk0kGCle4Zfq;eBP+(~qF^N6mq>sX{E-dzmOR1$;?n$V|e z+{}`661#KRRJ76_7Sq47M2#p+2(3ezP+egx?R$w))1MEkOA~nf&l_^dU!KF#Ag^PG zEXe8^yk82#52oCO?XfYuqvoNM@M)%Ge(*;QQxOkkv?h!U|M%G@F6Iw-tsQO> z)BhQSGToM?OQZ?k=9J#_0@MA=!fsiM)2{0TRq&j)k-z@~>m9;`ALb~k7>trYCawPD zWMjcm*EFaw@}eMrx+S~eUM6+5vJIG(HU4wk23gk<;_vI!rKKcHM&g^yFm>@q7WW%V zQ+Wf+tyZ-cjqqfvK*lP+`Zmgb;%>K`ytT}ltINRqp+V4?s9)Rp#2TygTCk&XE*-1! 
zk%Mu^zS|DW6L@7jt-6h4q47Pv8_KSBcUKMTRzYTFl3((S<}aYYpgBeR5`Qk~t970k zWn}&f@3}IndT>~^=hdMU5Ur&9#g<}0G{ajBKN)@8!&fV|8eU*dF-1M>GTZfqZ}18&0wT^irSk}Nv?$U$%g>4i7hCod@$ z4NOQa2D>G+;uV7b(|M}j*aYn&uu-hSLfsjg5UyoI-$LKimKfmBLi+RY6#DrNm;x0s zg&17qsFmcpYI!rNYAm`5;!rVP6T!>mHr;O=D${{xX;;N@Ak(Sw65nT zEF7F(B2{|NYWXFa4oF?Utu|}JkJj^3h$;5$|3?7BAw1q* zJ2%s0^IY4!m2;1LX4Awtg$<3#!*i`SJ~Y&>o|BDYd(0RtI^qtx$7t%Zdt=K)`9#5? zHX1F-BcEs~!Cu?>2X%Y-N93d$bR-(L+?8*{ne4qF>f*52)Sl1UVNZWY) zlo<{6B1COcx+#ZuCqi{LHKP>Bb4=#1p^$S6X*!WNRhxCYX*wuVM^Uu9#-MIsq747Y@tb;|-O#-L#7{lZ zcy;%8k3LSI>7MrtO!vaw>Ju!(UOq7hjx{nC%hp`3jcv+(mfqXxnWe-&5} zPT>-NYO+u1)!si3Tvzvw-Cz8E>u>Ho@}lS8e%6vl1^T~StysZ{V2_W7kn#{_TtRsv zC|THjF%`ZrSEcB>i@h_Ly4b%~zA%^iH%wocJ9g7`uD&pS)0QtxJzdB2g=zDzZC{vE zXMcV7`CmD2xaY_rrW=c=L@6XpSUj!C(`X6p_Qy{{A10-mwh0$ z^pm6i@TPCiUi@nTrnB-Hn9j=R$IC@RSPABSi4s9iq=D@AcNYz0iw3eq1KFa1 zY|%irXdpZ73}j7pBW;Iq8$9N38_7&X%HKWAPZ2}1YtKd6(V)^3<>!q0&3zlW=?}qV znsd>Sh^)U?sbVorATGmIez`VyYgFwGG!zXYOsTq8V46&b?A2OqJ zrEJh*W=sF|$VZY@?de%E@35#$?E7oite)C#=*nel$gn4G9?0VUXXs$u71= ziQ|IDjJ$3#GKquvVGHXNLyOmG*MJ8Cbi6*pf+xQ5NV$Vz%Ka!@9D;TqpKE|>ZHO8J z91(X&XhOWpyU#tJGfs+=;jK5RIhL(Gf{_jYhFI;yHFANUus%bLGp{d*w+b*GaARH9 z6g`Ia5j$_u9S1F{ws3wR!&eNewN!Hv%@^FylWhLmvqT3KMqKgVc!(e!jE0o76r2z4 z7P(2s46nq1_jaS#gEcU}&1gxw{t-{ALrvt67_lPA1>4AVX1F2MGX$a3hEE(WG84g&Ei#Wz;neNsZF=R2Y;O^}c%)q{)S*N0YndN=GK2)AZx6m51}D zM9%J-q>ehcPe52mvO^u!POrQ+294@5hJg>RI4!JJN;e^o&D?V&5xczwfRA!T?dKc4 zu7U%>F(t*h-{^zGQ8eqrR9KUbUF5@~5CNeOc-8g26fvsf3r`f66ixP}h(%0nDLLoF z@z~+l_0q&7Ic*e0+SLKh9`8g z7Fz->b_QB}sgDXDgFV&M)KXQy|{5Qv3=J|0XMnsoLDB+K zlPaf2zo)C@IQ_75Xr-U&f{*Ed1MRAhAuzB{Vfx3(k;^8>&M=5^SzjF6Nsb9qPdgwm zRDxZ%bi-fe{Z%uqs(b8AIHGLUOv^!PIjmL}1X^3{E=5}7usV571c&=wZEY2_;3h}D zG1adGSUD#A>@||UMxZ_nM}WU5Ag`oqnH#-JpznYQQ*e~EtwtkYBl>L~sLowN%kBD< zChR?KZ{vDrR~Fc`b%gsl1u~3+L$AH==b8)Kr>(BD*612|iD;-sW51cZMV&jC5yv{t z0he`k&>zO8uk2Jq0hi1GO9SkkIuF;$09>3Rf_jHe@W^u2jJqF14L(M z&%PeH0)m!9UiWgxcxYadt@mMPU{PbE=Rb~1bQ=Wu8t?!8(7S#?elT1I~4WAxAwwUd-3$!t#!Z zK{fs8EH}N*p`$S_Q5CsL{-bTmozs2HZ7I1p^7)QD_gvlN^_>S5JRE2sQyl#XjJ|uH zhdV=i?>N2bA-6qmVJPNs8ROu-{BRjdxAtN3MlVc@G5t9V*JmC+ww*y5E40mnYxlXx zp%WsVLXfTxJ!U7m-|GLwbP%wdMd$*_2sd~D~kADGS<&)gkxzDsQU5wTjVo^l;~ttx z0AxhCUbW{lVRr9vF+STMJfTQ0+Q(BrO$r`C$i_!RdI0|&GvR?7E~E)epwKF0j9!cv zRL%k!BYzncEReDC&n)-$=!p5N}u!%Dd-x@40u*oy^SLeNSiIWDQLA zId`9P_St8jefB)e4udU-5qwg zaywkP+DAKFxgD*%IAOWi(wSkjXXu#6r@YN(RV z!aI}D;SNLk8&UROrjlxk4;vX#Xh~j2aZq$4b}dA=rZA#0hyquaVHJfa(dAgtB4Je7 z2$%}jNDqyq-;9Rn!zjYk3A0P==;Cv|TC0{GWGZn>XTZlDh=@Z@)Mt?^PCPc^R@72KX;;BD7#SVm8a`Z#`S?hWHM(g>IAbrEu^^W! 
zpe$^SmZ2DmhVA!3-=@(88@ysUEudn__as=(B1dGNul#4TzmS9VFUF+dwN#|8cI{OK z_FNGs3Buuh?RoQ=)9QpfhkDM}_c;*n8%OuA;eM@^_Sb<{H~f`got}aE^MAi8%=@B2 z_#jPzX@U%R8Qjj$ek+;@Z(0~_v9lF1PIt>+r#xE*FsJs&TZhBNhP?`B#KapR zYb4|AMeH>oqkdQ;&>GDHr|H9{5gSL9IzJ*#Bs=d|wWdF%7+F46+QWbP^K>S~8z}s$pNs9U}1$WYqPyX4{yJA%Q3P*e&U*Y~oN#AXFgup#r zZ{AkfGd3q>)j#H~9n`IKs11)OxEvjIWMjvk+1qyg-Z_5P-m~3octpWX_7)o+A@Jn3 z)SY{pRumq8?SY?d>i0bB7@F9!d8v5p;_cL#M{aR9wkFH)jXu~54K6HP2Zthdf zInOSRNxokF_Oid*@Q8w^bah5&oci0yRnM%ccq;c-vp%%p5e0uZ|N7>_%xkI1PanT> z_ui_fM%nO)f)7hL@Y-EFs?$2J{^rB_SG@o8s11)O`0xj3fBR(2{W}Vam$Vj7-j;GS)rLnDJpH~t zUzVL*l8NG)bSpINn!QPeS+sjv-t{G>;BMP3Gv&E6JXV{2E zTPmlf56`^uo(+#Ec-Hz4c7M|lQ?Po+*Pb)g>#n|P!y^iweIfggm%5asZCc#WaoFC0 z(sCOfQSiKf#6{n_d~LzzrNi!VKfZLtIvXBQ@caQ~y%zmv^2mNWOE%0uzU%xn8y->c zg5E!R|4QG_GSB_E>yH*LX?ZBihDQ{9R6(Wm_<_grwk=CswBXP!;||#Hh=LbBlkjL} zpQU*%%X;0h@6GjhSKIK2f{)((b=M#4Z_e(wbz`r28{IVz*zkygkJ-HM>`S#(dD&~a zebzSc;7x5dJfh%ZKY68dpQieB$F7XaU4DAgssFa&5d|+AfAwTpPD|d|wdXhg`>NZ0 z%{Dxu;KeT#z5VfH^##|K>>v8#a}yS%*zkygk2|@z|KTkYQwPjD`j^%flaAkJ!y^h_ z(z5Tf@$X&Be|+)SDU)M&6t&p!h=Mc~(J>qxkPHbP?Z(n_;TJqJU+vMth`?Rp@?iV)vrM30YFVvE+2OoK$Yun+h@td~x zSaHkTUtUy8PLEA4e|L*ixN(Q8xZ_b*$){?`nHJy0BfFdOK3lNmoqe%8qJFBDobB+= zgDcubW$bvT>fGDU^?IbET5|5R`^g1AIhs1L^{d>LA8a0SSS>j};ZRI#-oU)Hcdpm} z^TV6wO;$@T^lLiU>(zb*7dLd@-gUy$_kMs&&qqPMvc1Z)?7K>JO!d#*h*` zBOOqU4(Q)#^hNpfq*1}1z;U2v;^;Og3JxjlPGe$`*qexJP`ps`lde>GGx|na$^0Q{ zt6SyRE|2}@<(>+xJ1yIWxu%Fq=@TbSSLQeYB|Lmwf(j)CS~_nFf!@jhPH>VNLaR`GA!HU43QqmiprF<^nf(I ziJz4B2+~>lbL-mr-u2w(>%Wtzl#A~0>-=65*H(}Gc*&le zd(OOc_<}^GTy#{og&U7_JeCu+bWw5gfn#wEl2OV@hM&vI=s($af6GT#UMm>0{Kc7B zf66UdMn`{{jy_eHl_zjKNVF0iG%ZdYPE1D9IXFVx`@9?nO~tXxgWdt72V}0hIPLX} zQSO&gI<5Krp6)$Wx(pn1_o;@JmkM5O^{no+=;HYlwPeuN;z@l@RODaS`uLPx?;cxS z?MD~%Z%2+UKIN&}iqFsOEjgX>)~?s$`Cy@%+zk3@`3Y1`@>tzsr<#sYLz{kkXlPXtsML&krHS-= zSkTLwr-DZHZ-LrUmEADztVzn-8SHpUm2bQl+v_DjT6pRJ5$RCnyhAi zJ+Y|oEPgzbBK0)4j~U)rCdBCQ%D6Lkod;Ds=2_19o#>3?27i1MWaP92mQnv%w(^z*N^(qqdTb`z)dQ z_bA7;x67K0avhcLOv@pAP!W~sB7Mez@w2qPY9zJ#menT>0rGWU}uK2HbX zKylk*4Gw0fC%BM2{BC3k`PU>i50uc{m`++>S4$!ts~H@lx10J-a2Ym}i;T5Ge~yHV z2 zYrFGK3r*2K2>QNJsP079HES~2A7)mP&4AA{>;YI(x*Eupa<1b zn#Qvj^HGHXCmE>-KRfF28^X%= zQOy*Fcq_m`ST(z5>7;@-61~2m?d-gBjyU7y;2LDKPe|;Mb zJ2HW|usNLQc5^T!k^YUI(EjGj=5pU>7|-xSHWXuUmK& zo^mRjp-RoYZJkIfslXQNij|a3KtXaIIqH zZ{$u)!v79de#vua4nlpV4%0ETr;sjGh1XD(Y6#Ot<)gZk@g*O!3)bn(w?0a7Hfs$k zS7~rFe50}CpR4qtp3ZLj^@$Ynuj~K1l21@euNfluO>oa;D!nA8IwWiJm2cCibPC)C z+ue%sYT|1X>d)0ti%`E3OK3DB6DW7(D}T-6+9UN6FfS+QRuh7xV|vY|`eO;@bz+bt z&Kl+(nZdPPr@9_U%>-RtX3osV^14yQnvJEdCPz^Fm3YMEw2d0dSx(7IR|mVo!A3$x z!9Qh=#jEgdvSlnzWi?yxNfOX_yZ~6-AR}^9>0k|u2ZSyj!HAq8&$A3LA|x|q%UDEI z33WUkh3Bl|@Tz-&mC?w`X#D#UI!zKSk4FZX9U1TO5HbM*3)5Asa;^0lLhI0Jd5&BH z9N~-yLLzuUxIYPmwr-#q@*h#CbQui7|1t>wBP`bW#RpH)TZ2}N; zY<)x?5RD}XK<7tTAofFuRVWL0_;6$##IEWBIXOURax#cq@nW6bI_FwHf4Bf>;sw%f zA!Gv5;bVadO+-pNJ>IjOkDsvY*wnlV4hY{4#Hq*wq(fg>Debnj1)GhVq?R?+Ju$ z%?%=3+#r%$1~~)BAZGv>`RI$$cP5{~D0=X}N zuvn*w3xwZXAa`0^ApR;B$W3V%$W1pF_Rd)ML0I=o`pbjRZy`e5p>l)VEOUYB84x?F z404|f0tbX8O$P9IAx?pK&j3c-7v0LhOcvh4z)uJ-(uwui;omAa+#$O9pxCQWjbn!odO|cOk4mGX8~_ zBZ$60c+g~!TVgWEsZj>H#Q}5I48mI^gIGLd5Q_)G0wEb-&U-L-d||Z1&@6`uK}L8< z2pD6=42M=LMIr&W^}ThQz8}M4-H4wuTc3fRa+%r=13e|4S1l4xFeBHZmNJ^6=#G5a zA%-`B)<+SEg0Ec@1a_xdUyDpvivyvvWCg*sR<4VRR1o6GO1Vg%qKIg@Qn}yL+Pgv; N&1qb*{{;X5|NjZQ5%d56 literal 0 HcmV?d00001 diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index 336c17bf8..0b385f28a 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -26,6 +26,7 @@ flow_params as multiagent_traffic_light_grid from 
From e06b1c11c145b2351c4adaff1aecd25d31aaaeea Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Wed, 10 Jun 2020 09:44:45 -0700
Subject: [PATCH 242/438] Converting base classes to abstract base classes (#935)

* convert base classes into abstract base classes and add abstractmethod decorators
* fix typo
* revert changes on this file
* NotImplementedError no longer needed
* allow gen_custom_start_pos() to be not abstract method
* implement dummy get_accel() method for rl controller
* add docstring
* add docstring
* add docstring
* revert changes to base Network class
* add dummy implementations of abstract methods
* fix abstract base class tests
* import new TestEnvs
* fix import statement
* change TestEnv instantiation assertion checks
* fix typos
* change base vehicle class to abc
* resolve conflict and raise NotImplementedError in traci and aimsun class methods
* fix styling
* fix styling
* fix merge duplication and excess whitespace
* newline at end of file
* added ignore to abstract methods
* moved testing envs to the tests folder

Co-authored-by: AboudyKreidieh
---
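Note (below the fold, not applied by git am): the conversion in this commit
leans entirely on the standard library abc module. A minimal sketch of the
pattern, with illustrative stand-in names rather than Flow's own classes:

    from abc import ABCMeta, abstractmethod

    class BaseExample(metaclass=ABCMeta):
        """A class with an unoverridden abstractmethod cannot be instantiated."""

        @abstractmethod
        def get_accel(self, env):
            """Return an acceleration; the base body is just pass."""
            pass

    class ConcreteExample(BaseExample):
        def get_accel(self, env):
            # any override, however trivial, satisfies the contract
            return 0.0

    try:
        BaseExample()      # TypeError: can't instantiate abstract class
    except TypeError as err:
        print(err)

    ConcreteExample()      # fine: every abstractmethod is overridden

This is why RLController gains a dummy get_accel() in the diff below, and why
simulator kernels override queries they cannot support to raise
NotImplementedError: the override keeps the subclass concrete while flagging
the missing capability at call time.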
BaseController(metaclass=ABCMeta): """Base class for flow-controlled acceleration behavior. Instantiates a controller and forces the user to pass a @@ -63,9 +64,10 @@ def __init__(self, self.car_following_params = car_following_params + @abstractmethod def get_accel(self, env): """Return the acceleration of the controller.""" - raise NotImplementedError + pass def get_action(self, env): """Convert the get_accel() acceleration into an action. diff --git a/flow/controllers/base_lane_changing_controller.py b/flow/controllers/base_lane_changing_controller.py index af009f992..eb2b566f5 100755 --- a/flow/controllers/base_lane_changing_controller.py +++ b/flow/controllers/base_lane_changing_controller.py @@ -1,7 +1,9 @@ """Contains the base lane change controller class.""" +from abc import ABCMeta, abstractmethod -class BaseLaneChangeController: + +class BaseLaneChangeController(metaclass=ABCMeta): """Base class for lane-changing controllers. Instantiates a controller and forces the user to pass a @@ -36,6 +38,7 @@ def __init__(self, veh_id, lane_change_params=None): self.veh_id = veh_id self.lane_change_params = lane_change_params + @abstractmethod def get_lane_change_action(self, env): """Specify the lane change action to be performed. @@ -55,7 +58,7 @@ def get_lane_change_action(self, env): float or int requested lane change action """ - raise NotImplementedError + pass def get_action(self, env): """Return the action of the lane change controller. diff --git a/flow/controllers/base_routing_controller.py b/flow/controllers/base_routing_controller.py index b001dc62e..17048ce7d 100755 --- a/flow/controllers/base_routing_controller.py +++ b/flow/controllers/base_routing_controller.py @@ -1,7 +1,9 @@ """Contains the base routing controller class.""" +from abc import ABCMeta, abstractmethod -class BaseRouter: + +class BaseRouter(metaclass=ABCMeta): """Base class for routing controllers. These controllers are used to dynamically change the routes of vehicles @@ -30,6 +32,7 @@ def __init__(self, veh_id, router_params): self.veh_id = veh_id self.router_params = router_params + @abstractmethod def choose_route(self, env): """Return the routing method implemented by the controller. @@ -45,4 +48,4 @@ def choose_route(self, env): is returned, the vehicle performs no routing action in the current time step. """ - raise NotImplementedError + pass diff --git a/flow/controllers/rlcontroller.py b/flow/controllers/rlcontroller.py index 61f53f11a..973de8fc9 100755 --- a/flow/controllers/rlcontroller.py +++ b/flow/controllers/rlcontroller.py @@ -37,3 +37,7 @@ def __init__(self, veh_id, car_following_params): self, veh_id, car_following_params) + + def get_accel(self, env): + """Pass, as this is never called; required to override abstractmethod.""" + pass diff --git a/flow/core/kernel/vehicle/aimsun.py b/flow/core/kernel/vehicle/aimsun.py index 3320d1515..ce0d026e5 100644 --- a/flow/core/kernel/vehicle/aimsun.py +++ b/flow/core/kernel/vehicle/aimsun.py @@ -403,7 +403,7 @@ def add(self, veh_id, type_id, edge, pos, lane, speed): def reset(self): """See parent class.""" - pass + raise NotImplementedError def remove(self, aimsun_id): """See parent class.""" @@ -517,7 +517,7 @@ def choose_routes(self, veh_id, route_choices): edge the vehicle is currently on. 
If a value of None is provided,
            the vehicle does not update its route
        """
-        pass  # FIXME
+        raise NotImplementedError  # FIXME
        # for i, veh_id in enumerate(veh_ids):
        #     if route_choices[i] is not None:
        #         aimsun_id = self._id_flow2aimsun[veh_id]
        #         self.kernel_api.AKIVehTrackedModifyNextSections(
        #             aimsun_id, size_next_sections, route_choices[i])
 
+    def set_max_speed(self, veh_id, max_speed):
+        """See parent class."""
+        raise NotImplementedError
+
     ###########################################################################
     # Methods to visually distinguish vehicles by {RL, observed, unobserved} #
     ###########################################################################
@@ -560,10 +564,30 @@ def get_observed_ids(self):
         """Return the list of observed vehicles."""
         return self.__observed_ids
 
+    def get_color(self, veh_id):
+        """See parent class."""
+        raise NotImplementedError
+
+    def set_color(self, veh_id, color):
+        """See parent class."""
+        raise NotImplementedError
+
     ###########################################################################
     # State acquisition methods                                              #
     ###########################################################################
 
+    def get_orientation(self, veh_id):
+        """See parent class."""
+        raise NotImplementedError
+
+    def get_timestep(self, veh_id):
+        """See parent class."""
+        raise NotImplementedError
+
+    def get_timedelta(self, veh_id):
+        """See parent class."""
+        raise NotImplementedError
+
     def get_ids(self):
         """See parent class."""
         return self.__ids
@@ -611,6 +635,22 @@ def get_num_arrived(self):
         else:
             return 0
 
+    def get_arrived_ids(self):
+        """See parent class."""
+        raise NotImplementedError
+
+    def get_departed_ids(self):
+        """See parent class."""
+        raise NotImplementedError
+
+    def get_num_not_departed(self):
+        """See parent class."""
+        raise NotImplementedError
+
+    def get_fuel_consumption(self, veh_id, error=-1001):
+        """See parent class."""
+        raise NotImplementedError
+
     def get_type(self, veh_id):
         """See parent class."""
         if isinstance(veh_id, (list, np.ndarray)):
@@ -627,6 +667,10 @@ def get_speed(self, veh_id, error=-1001):
             return [self.get_speed(veh, error) for veh in veh_id]
         return self.__vehicles[veh_id]['tracking_info'].CurrentSpeed / 3.6
 
+    def get_default_speed(self, veh_id, error=-1001):
+        """See parent class."""
+        raise NotImplementedError
+
     def get_position(self, veh_id, error=-1001):
         """See parent class."""
         if isinstance(veh_id, (list, np.ndarray)):
@@ -806,3 +850,7 @@ def get_lane_followers_speed(self, veh_id, error=None):
     def get_lane_leaders_speed(self, veh_id, error=None):
         """See parent class."""
         raise NotImplementedError
+
+    def get_max_speed(self, veh_id, error):
+        """See parent class."""
+        raise NotImplementedError
diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py
index 706504027..d97ade984 100644
--- a/flow/core/kernel/vehicle/base.py
+++ b/flow/core/kernel/vehicle/base.py
@@ -1,7 +1,9 @@
 """Script containing the base vehicle kernel class."""
+from abc import ABCMeta, abstractmethod
 
 
-class KernelVehicle(object):
+
+class KernelVehicle(object, metaclass=ABCMeta):
     """Flow vehicle kernel.
 
     This kernel sub-class is used to interact with the simulator with regards
@@ -66,6 +68,7 @@ def pass_api(self, kernel_api):
     ###########################################################################
     # Methods for interacting with the simulator                             #
     ###########################################################################
 
+    @abstractmethod
     def update(self, reset):
         """Update the vehicle kernel with data from the current time step.
@@ -78,8 +81,9 @@ def update(self, reset):
             specifies whether the simulator was reset in the last
             simulation step
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def add(self, veh_id, type_id, edge, pos, lane, speed):
         """Add a vehicle to the network.
 
@@ -98,12 +102,14 @@ def add(self, veh_id, type_id, edge, pos, lane, speed):
         speed : float
             starting speed of the added vehicle
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def reset(self):
         """Reset any additional state that needs to be reset."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def remove(self, veh_id):
         """Remove a vehicle.
 
@@ -119,8 +125,9 @@ def remove(self, veh_id):
         veh_id : str
             unique identifier of the vehicle to be removed
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def apply_acceleration(self, veh_id, acc):
         """Apply the acceleration requested by a vehicle in the simulator.
 
@@ -131,8 +138,9 @@ def apply_acceleration(self, veh_id, acc):
         acc : float or array_like
             requested accelerations from the vehicles
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def apply_lane_change(self, veh_id, direction):
         """Apply an instantaneous lane-change to a set of vehicles.
 
@@ -155,8 +163,9 @@ def apply_lane_change(self, veh_id, direction):
         ValueError
             If any of the direction values are not -1, 0, or 1.
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def choose_routes(self, veh_id, route_choices):
         """Update the route choice of vehicles in the network.
 
@@ -169,8 +178,9 @@ def choose_routes(self, veh_id, route_choices):
             edge the vehicle is currently on. If a value of None is provided,
             the vehicle does not update its route
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def set_max_speed(self, veh_id, max_speed):
         """Update the maximum allowable speed of a vehicle in the network.
 
@@ -181,123 +191,146 @@ def set_max_speed(self, veh_id, max_speed):
         max_speed : float
             desired max speed by the vehicle
         """
-        raise NotImplementedError
+        pass
 
     ###########################################################################
     # Methods to visually distinguish vehicles by {RL, observed, unobserved} #
     ###########################################################################
 
+    @abstractmethod
     def update_vehicle_colors(self):
         """Modify the color of vehicles if rendering is active."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def set_observed(self, veh_id):
         """Add a vehicle to the list of observed vehicles."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def remove_observed(self, veh_id):
         """Remove a vehicle from the list of observed vehicles."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_observed_ids(self):
         """Return the list of observed vehicles."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_color(self, veh_id):
         """Return an RGB tuple of the color of the specified vehicle."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def set_color(self, veh_id, color):
         """Set the color of the specified vehicle with the RGB tuple."""
-        raise NotImplementedError
+        pass
 
     ###########################################################################
     # State acquisition methods                                              #
     ###########################################################################
 
+    @abstractmethod
     def get_orientation(self, veh_id):
         """Return the orientation of the vehicle of veh_id."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_timestep(self, veh_id):
         """Return the time step of the vehicle of veh_id."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_timedelta(self, veh_id):
         """Return the simulation time delta of the vehicle of veh_id."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_type(self, veh_id):
         """Return the type of the vehicle of veh_id."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_ids(self):
         """Return the names of all vehicles currently in the network."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_human_ids(self):
         """Return the names of all non-rl vehicles currently in the network."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_controlled_ids(self):
         """Return the names of all flow acceleration-controlled vehicles.
 
         This only includes vehicles that are currently in the network.
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_controlled_lc_ids(self):
         """Return the names of all flow lane change-controlled vehicles.
 
         This only includes vehicles that are currently in the network.
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_rl_ids(self):
         """Return the names of all rl-controlled vehicles in the network."""
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_ids_by_edge(self, edges):
         """Return the names of all vehicles in the specified edge.
 
         If no vehicles are currently in the edge, then returns an empty list.
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_inflow_rate(self, time_span):
         """Return the inflow rate (in veh/hr) of vehicles from the network.
 
         This value is computed over the specified **time_span** seconds.
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_outflow_rate(self, time_span):
         """Return the outflow rate (in veh/hr) of vehicles from the network.
 
         This value is computed over the specified **time_span** seconds.
""" - raise NotImplementedError + pass + @abstractmethod def get_num_arrived(self): """Return the number of vehicles that arrived in the last time step.""" - raise NotImplementedError + pass + @abstractmethod def get_arrived_ids(self): """Return the ids of vehicles that arrived in the last time step.""" - raise NotImplementedError + pass + @abstractmethod def get_departed_ids(self): """Return the ids of vehicles that departed in the last time step.""" - raise NotImplementedError + pass + @abstractmethod def get_num_not_departed(self): """Return the number of vehicles not departed in the last time step. This includes vehicles that were loaded but not departed. """ - raise NotImplementedError + pass - def get_fuel_consumption(selfself, veh_id, error=-1001): + @abstractmethod + def get_fuel_consumption(self, veh_id, error=-1001): """Return the mpg / s of the specified vehicle. Parameters @@ -306,11 +339,14 @@ def get_fuel_consumption(selfself, veh_id, error=-1001): vehicle id, or list of vehicle ids error : any, optional value that is returned if the vehicle is not found + Returns ------- float """ + pass + @abstractmethod def get_speed(self, veh_id, error=-1001): """Return the speed of the specified vehicle. @@ -325,8 +361,9 @@ def get_speed(self, veh_id, error=-1001): ------- float """ - raise NotImplementedError + pass + @abstractmethod def get_default_speed(self, veh_id, error=-1001): """Return the expected speed if no control were applied. @@ -341,8 +378,9 @@ def get_default_speed(self, veh_id, error=-1001): ------- float """ - raise NotImplementedError + pass + @abstractmethod def get_position(self, veh_id, error=-1001): """Return the position of the vehicle relative to its current edge. @@ -357,8 +395,9 @@ def get_position(self, veh_id, error=-1001): ------- float """ - raise NotImplementedError + pass + @abstractmethod def get_edge(self, veh_id, error=""): """Return the edge the specified vehicle is currently on. @@ -373,8 +412,9 @@ def get_edge(self, veh_id, error=""): ------- str """ - raise NotImplementedError + pass + @abstractmethod def get_lane(self, veh_id, error=-1001): """Return the lane index of the specified vehicle. @@ -389,8 +429,9 @@ def get_lane(self, veh_id, error=-1001): ------- int """ - raise NotImplementedError + pass + @abstractmethod def get_route(self, veh_id, error=list()): """Return the route of the specified vehicle. @@ -405,8 +446,9 @@ def get_route(self, veh_id, error=list()): ------- list of str """ - raise NotImplementedError + pass + @abstractmethod def get_length(self, veh_id, error=-1001): """Return the length of the specified vehicle. @@ -421,8 +463,9 @@ def get_length(self, veh_id, error=-1001): ------- float """ - raise NotImplementedError + pass + @abstractmethod def get_leader(self, veh_id, error=""): """Return the leader of the specified vehicle. @@ -437,8 +480,9 @@ def get_leader(self, veh_id, error=""): ------- str """ - raise NotImplementedError + pass + @abstractmethod def get_follower(self, veh_id, error=""): """Return the follower of the specified vehicle. @@ -453,8 +497,9 @@ def get_follower(self, veh_id, error=""): ------- str """ - raise NotImplementedError + pass + @abstractmethod def get_headway(self, veh_id, error=-1001): """Return the headway of the specified vehicle(s). @@ -469,8 +514,9 @@ def get_headway(self, veh_id, error=-1001): ------- float """ - raise NotImplementedError + pass + @abstractmethod def get_last_lc(self, veh_id, error=-1001): """Return the last time step a vehicle changed lanes. 
@@ -489,8 +535,9 @@ def get_last_lc(self, veh_id, error=-1001):
         -------
         int
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_acc_controller(self, veh_id, error=None):
         """Return the acceleration controller of the specified vehicle.
 
@@ -505,8 +552,9 @@ def get_acc_controller(self, veh_id, error=None):
         -------
         object
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_lane_changing_controller(self, veh_id, error=None):
         """Return the lane changing controller of the specified vehicle.
 
@@ -521,8 +569,9 @@ def get_lane_changing_controller(self, veh_id, error=None):
         -------
         object
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_routing_controller(self, veh_id, error=None):
         """Return the routing controller of the specified vehicle.
 
@@ -537,8 +586,9 @@ def get_routing_controller(self, veh_id, error=None):
         -------
         object
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_lane_headways(self, veh_id, error=list()):
         """Return the lane headways of the specified vehicles.
 
@@ -556,8 +606,9 @@ def get_lane_headways(self, veh_id, error=list()):
         -------
         list of float
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_lane_leaders_speed(self, veh_id, error=list()):
         """Return the speed of the leaders of the specified vehicles.
 
@@ -577,8 +628,9 @@ def get_lane_leaders_speed(self, veh_id, error=list()):
         -------
         list of float
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_lane_followers_speed(self, veh_id, error=list()):
         """Return the speed of the followers of the specified vehicles.
 
@@ -598,8 +650,9 @@ def get_lane_followers_speed(self, veh_id, error=list()):
         -------
         list of float
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_lane_leaders(self, veh_id, error=list()):
         """Return the leaders for the specified vehicle in all lanes.
 
@@ -614,8 +667,9 @@ def get_lane_leaders(self, veh_id, error=list()):
         -------
         list of str
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_lane_tailways(self, veh_id, error=list()):
         """Return the lane tailways of the specified vehicle.
 
@@ -633,8 +687,9 @@ def get_lane_tailways(self, veh_id, error=list()):
         -------
         list of float
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_lane_followers(self, veh_id, error=list()):
         """Return the followers for the specified vehicle in all lanes.
 
@@ -649,8 +704,9 @@ def get_lane_followers(self, veh_id, error=list()):
         -------
         list of str
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_x_by_id(self, veh_id):
         """Provide a 1-D representation of the position of a vehicle.
 
@@ -667,8 +723,9 @@ def get_x_by_id(self, veh_id):
         -------
         float
         """
-        raise NotImplementedError
+        pass
 
+    @abstractmethod
     def get_max_speed(self, veh_id, error):
         """Return the max speed of the specified vehicle.
 
@@ -683,4 +740,4 @@ def get_max_speed(self, veh_id, error):
         -------
         float
         """
-        raise NotImplementedError
+        pass
diff --git a/flow/envs/base.py b/flow/envs/base.py
index 1abb8a3c9..c4462e8c8 100644
--- a/flow/envs/base.py
+++ b/flow/envs/base.py
@@ -1,5 +1,6 @@
 """Base environment class. This is the parent of all other environments."""
+from abc import ABCMeta, abstractmethod
 
 from copy import deepcopy
 import os
 import atexit
@@ -26,7 +27,7 @@
 from flow.utils.exceptions import FatalFlowError
 
 
-class Env(gym.Env):
+class Env(gym.Env, metaclass=ABCMeta):
     """Base environment class.
 
Provides the interface for interacting with various aspects of a traffic @@ -614,9 +615,11 @@ def apply_rl_actions(self, rl_actions=None): rl_clipped = self.clip_actions(rl_actions) self._apply_rl_actions(rl_clipped) + @abstractmethod def _apply_rl_actions(self, rl_actions): - raise NotImplementedError + pass + @abstractmethod def get_state(self): """Return the state of the simulation as perceived by the RL agent. @@ -628,9 +631,10 @@ def get_state(self): information on the state of the vehicles, which is provided to the agent """ - raise NotImplementedError + pass @property + @abstractmethod def action_space(self): """Identify the dimensions and bounds of the action space. @@ -641,9 +645,10 @@ def action_space(self): gym Box or Tuple type a bounded box depicting the shape and bounds of the action space """ - raise NotImplementedError + pass @property + @abstractmethod def observation_space(self): """Identify the dimensions and bounds of the observation space. @@ -655,7 +660,7 @@ def observation_space(self): a bounded box depicting the shape and bounds of the observation space """ - raise NotImplementedError + pass def compute_reward(self, rl_actions, **kwargs): """Reward function for the RL agent(s). diff --git a/flow/envs/bay_bridge.py b/flow/envs/bay_bridge.py index dcc5e991d..0c5570367 100644 --- a/flow/envs/bay_bridge.py +++ b/flow/envs/bay_bridge.py @@ -234,6 +234,22 @@ def compute_reward(self, rl_actions, **kwargs): # The below methods need to be updated by child classes. # ########################################################################### + @property + def action_space(self): + """See parent class. + + To be implemented by child classes. + """ + pass + + @property + def observation_space(self): + """See parent class. + + To be implemented by child classes. + """ + pass + def _apply_rl_actions(self, rl_actions): """See parent class. diff --git a/flow/envs/bottleneck.py b/flow/envs/bottleneck.py index c647e1d6e..a5f1508f9 100644 --- a/flow/envs/bottleneck.py +++ b/flow/envs/bottleneck.py @@ -472,6 +472,13 @@ def observation_space(self): shape=(1, ), dtype=np.float32) + def _apply_rl_actions(self, rl_actions): + """See parent class. + + To be implemented by child classes. 
+ """ + pass + def compute_reward(self, rl_actions, **kwargs): """Outflow rate over last ten seconds normalized to max of 1.""" reward = self.k.vehicle.get_outflow_rate(10 * self.sim_step) / \ diff --git a/tests/fast_tests/test_environment_base_class.py b/tests/fast_tests/test_environment_base_class.py index b5c6cbc17..ee815393c 100644 --- a/tests/fast_tests/test_environment_base_class.py +++ b/tests/fast_tests/test_environment_base_class.py @@ -13,8 +13,9 @@ from tests.setup_scripts import ring_road_exp_setup, highway_exp_setup import os -import numpy as np import gym.spaces as spaces +from gym.spaces.box import Box +import numpy as np os.environ["TEST_FLAG"] = "True" @@ -25,6 +26,41 @@ YELLOW = (255, 255, 0) +class TestFailRLActionsEnv(Env): + """Test environment designed to fail _apply_rl_actions not-implemented test.""" + + @property + def action_space(self): + """See parent class.""" + return Box(low=0, high=0, shape=(0,), dtype=np.float32) # pragma: no cover + + @property + def observation_space(self): + """See parent class.""" + return Box(low=0, high=0, shape=(0,), dtype=np.float32) # pragma: no cover + + def get_state(self, **kwargs): + """See class definition.""" + return np.array([]) # pragma: no cover + + +class TestFailGetStateEnv(Env): + """Test environment designed to fail get_state not-implemented test.""" + + @property + def action_space(self): + """See parent class.""" + return Box(low=0, high=0, shape=(0,), dtype=np.float32) # pragma: no cover + + @property + def observation_space(self): + """See parent class.""" + return Box(low=0, high=0, shape=(0,), dtype=np.float32) # pragma: no cover + + def _apply_rl_actions(self, rl_actions): + return # pragma: no cover + + class TestShuffle(unittest.TestCase): """ Tests that, at resets, the ordering of vehicles changes while the starting @@ -311,28 +347,34 @@ class TestAbstractMethods(unittest.TestCase): """ def setUp(self): - env, network, _ = ring_road_exp_setup() - sim_params = SumoParams() # FIXME: make ambiguous - env_params = EnvParams() - self.env = Env(sim_params=sim_params, - env_params=env_params, - network=network) + self.env, self.network, _ = ring_road_exp_setup() + self.sim_params = SumoParams() # FIXME: make ambiguous + self.env_params = EnvParams() - def tearDown(self): - self.env.terminate() - self.env = None + def test_abstract_base_class(self): + """Checks that instantiating abstract base class raises an error.""" + with self.assertRaises(TypeError): + Env(sim_params=self.sim_params, + env_params=self.env_params, + network=self.network) def test_get_state(self): - """Checks that get_state raises an error.""" - self.assertRaises(NotImplementedError, self.env.get_state) - - def test_compute_reward(self): - """Checks that compute_reward returns 0.""" - self.assertEqual(self.env.compute_reward([]), 0) + """Checks that instantiating without get_state implemented + raises an error. + """ + with self.assertRaises(TypeError): + TestFailGetStateEnv(sim_params=self.sim_params, + env_params=self.env_params, + network=self.network) def test__apply_rl_actions(self): - self.assertRaises(NotImplementedError, self.env._apply_rl_actions, - rl_actions=None) + """Checks that instantiating without _apply_rl_actions + implemented raises an error. 
+ """ + with self.assertRaises(TypeError): + TestFailRLActionsEnv(sim_params=self.sim_params, + env_params=self.env_params, + network=self.network) class TestVehicleColoring(unittest.TestCase): From c27bfe526fdea244757393265224b10acfde1cc0 Mon Sep 17 00:00:00 2001 From: AboudyKreidieh Date: Wed, 10 Jun 2020 12:14:24 -0700 Subject: [PATCH 243/438] pep8 (mostly --- .../exp_configs/non_rl/i210_subnetwork.py | 2 + .../rl/multiagent/multiagent_i210.py | 2 - examples/train.py | 23 ++-- flow/algorithms/centralized_PPO.py | 108 ++++++++++-------- flow/algorithms/custom_ppo.py | 17 +-- flow/controllers/base_controller.py | 21 ++-- flow/controllers/velocity_controllers.py | 8 +- flow/core/experiment.py | 3 +- flow/data_pipeline/leaderboard_utils.py | 2 +- flow/data_pipeline/query.py | 94 ++++++++++----- flow/envs/multiagent/i210.py | 22 ++-- flow/visualize/time_space_diagram.py | 13 +-- flow/visualize/visualizer_rllib.py | 3 +- 13 files changed, 186 insertions(+), 132 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 65131a6bd..90384b207 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -241,9 +241,11 @@ edge_id = "119257908#1-AddedOnRampEdge" + def valid_ids(env, veh_ids): return [veh_id for veh_id in veh_ids if env.k.vehicle.get_edge(veh_id) not in ["ghost0", "119257908#3"]] + custom_callables = { "avg_merge_speed": lambda env: np.nan_to_num(np.mean( env.k.vehicle.get_speed(valid_ids(env, env.k.vehicle.get_ids())))), diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index f55917e49..028e5bc7c 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -19,9 +19,7 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams -from flow.core.params import SumoCarFollowingParams from flow.core.rewards import energy_consumption -from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS from flow.utils.registry import make_create_env diff --git a/examples/train.py b/examples/train.py index e5b918ff6..1235e6241 100644 --- a/examples/train.py +++ b/examples/train.py @@ -164,6 +164,7 @@ def setup_exps_rllib(flow_params, """ from ray import tune from ray.tune.registry import register_env + from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper try: from ray.rllib.agents.agent import get_agent_class except ImportError: @@ -171,8 +172,6 @@ def setup_exps_rllib(flow_params, horizon = flow_params['env'].horizon - horizon = flow_params['env'].horizon - alg_run = flags.algorithm.upper() if alg_run == "PPO": @@ -244,19 +243,22 @@ def on_episode_start(info): episode.user_data["avg_mpg"] = [] episode.user_data["avg_mpj"] = [] - def on_episode_step(info): episode = info["episode"] env = info["env"].get_unwrapped()[0] if isinstance(env, _GroupAgentsWrapper): env = env.env if hasattr(env, 'no_control_edges'): - veh_ids = [veh_id for veh_id in env.k.vehicle.get_ids() if (env.k.vehicle.get_speed(veh_id) >= 0 - and env.k.vehicle.get_edge(veh_id) - not in env.no_control_edges)] - rl_ids = [veh_id for veh_id in env.k.vehicle.get_rl_ids() if (env.k.vehicle.get_speed(veh_id) >= 0 - and env.k.vehicle.get_edge(veh_id) - not in env.no_control_edges)] + veh_ids = [ + 
veh_id for veh_id in env.k.vehicle.get_ids() + if env.k.vehicle.get_speed(veh_id) >= 0 + and env.k.vehicle.get_edge(veh_id) not in env.no_control_edges + ] + rl_ids = [ + veh_id for veh_id in env.k.vehicle.get_rl_ids() + if env.k.vehicle.get_speed(veh_id) >= 0 + and env.k.vehicle.get_edge(veh_id) not in env.no_control_edges + ] else: veh_ids = [veh_id for veh_id in env.k.vehicle.get_ids() if env.k.vehicle.get_speed(veh_id) >= 0] rl_ids = [veh_id for veh_id in env.k.vehicle.get_rl_ids() if env.k.vehicle.get_speed(veh_id) >= 0] @@ -270,7 +272,6 @@ def on_episode_step(info): episode.user_data["avg_mpg"].append(miles_per_gallon(env, veh_ids, gain=1.0)) episode.user_data["avg_mpj"].append(miles_per_megajoule(env, veh_ids, gain=1.0)) - def on_episode_end(info): episode = info["episode"] avg_speed = np.mean(episode.user_data["avg_speed"]) @@ -316,7 +317,7 @@ def on_train_result(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" import ray - from ray.tune import run_experiments + from ray import tune flow_params = submodule.flow_params flow_params['sim'].render = flags.render diff --git a/flow/algorithms/centralized_PPO.py b/flow/algorithms/centralized_PPO.py index 8f3b9f261..5f71f865a 100644 --- a/flow/algorithms/centralized_PPO.py +++ b/flow/algorithms/centralized_PPO.py @@ -1,14 +1,12 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function + """An example of customizing PPO to leverage a centralized critic.""" import argparse import numpy as np -from gym.spaces import Dict - -from ray import tune from ray.rllib.agents.ppo.ppo import PPOTrainer from flow.algorithms.custom_ppo import CustomPPOTFPolicy from ray.rllib.evaluation.postprocessing import compute_advantages, \ @@ -19,13 +17,11 @@ from ray.rllib.models.modelv2 import ModelV2 from ray.rllib.models.tf.tf_modelv2 import TFModelV2 from ray.rllib.models.tf.recurrent_tf_modelv2 import RecurrentTFModelV2 -from ray.rllib.models.model import restore_original_dimensions from ray.rllib.utils.annotations import override from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork from ray.rllib.utils.explained_variance import explained_variance from ray.rllib.utils import try_import_tf - tf = try_import_tf() # Frozen logits of the policy that computed the action @@ -37,10 +33,10 @@ parser = argparse.ArgumentParser() parser.add_argument("--stop", type=int, default=100000) -#TODOy class CentralizedCriticModel(TFModelV2): """Multi-agent model that implements a centralized VF.""" + # TODO(@evinitsky) make this work with more than boxes def __init__(self, obs_space, action_space, num_outputs, model_config, @@ -56,9 +52,12 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, self.max_num_agents = model_config['custom_options']['max_num_agents'] self.obs_space_shape = obs_space.shape[0] self.obs_space = obs_space - other_obs = tf.keras.layers.Input(shape=(obs_space.shape[0] * self.max_num_agents, ), name="central_obs") + other_obs = tf.keras.layers.Input( + shape=(obs_space.shape[0] * self.max_num_agents,), + name="central_obs") central_vf_dense = tf.keras.layers.Dense( - model_config['custom_options']['central_vf_size'], activation=tf.nn.tanh, name="c_vf_dense")(other_obs) + model_config['custom_options']['central_vf_size'], + activation=tf.nn.tanh, name="c_vf_dense")(other_obs) central_vf_out = tf.keras.layers.Dense( 1, activation=None, name="c_vf_out")(central_vf_dense) self.central_vf = tf.keras.Model( @@ -89,15 +88,15 @@ def 
__init__(self, name, hiddens_size=64, cell_size=64): - super(CentralizedCriticModelRNN, self).__init__(obs_space, action_space, num_outputs, - model_config, name) + super(CentralizedCriticModelRNN, self).__init__( + obs_space, action_space, num_outputs, model_config, name) self.cell_size = cell_size # Define input layers input_layer = tf.keras.layers.Input( shape=(None, obs_space.shape[0]), name="inputs") - state_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h") - state_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c") + state_in_h = tf.keras.layers.Input(shape=(cell_size,), name="h") + state_in_c = tf.keras.layers.Input(shape=(cell_size,), name="c") seq_in = tf.keras.layers.Input(shape=(), name="seq_in") # Preprocess observation with a hidden layer and send to LSTM cell @@ -105,9 +104,9 @@ def __init__(self, hiddens_size, activation=tf.nn.relu, name="dense1")(input_layer) lstm_out, state_h, state_c = tf.keras.layers.LSTM( cell_size, return_sequences=True, return_state=True, name="lstm")( - inputs=dense1, - mask=tf.sequence_mask(seq_in), - initial_state=[state_in_h, state_in_c]) + inputs=dense1, + mask=tf.sequence_mask(seq_in), + initial_state=[state_in_h, state_in_c]) # Postprocess LSTM output with another hidden layer and compute values logits = tf.keras.layers.Dense( @@ -124,16 +123,20 @@ def __init__(self, self.register_variables(self.model.variables) self.model.summary() - #TODO(@evinitsky) add layer sharing to the VF - # Create the centralized VF - # Central VF maps (obs, opp_ops, opp_act) -> vf_pred + # TODO(@evinitsky) add layer sharing to the VF + # Create the centralized VF + # Central VF maps (obs, opp_ops, opp_act) -> vf_pred self.max_num_agents = model_config.get("max_num_agents", 120) self.obs_space_shape = obs_space.shape[0] - other_obs = tf.keras.layers.Input(shape=(obs_space.shape[0] * self.max_num_agents,), name="all_agent_obs") + other_obs = tf.keras.layers.Input( + shape=(obs_space.shape[0] * self.max_num_agents,), + name="all_agent_obs") central_vf_dense = tf.keras.layers.Dense( - model_config.get("central_vf_size", 64), activation=tf.nn.tanh, name="c_vf_dense")(other_obs) + model_config.get("central_vf_size", 64), activation=tf.nn.tanh, + name="c_vf_dense")(other_obs) central_vf_dense2 = tf.keras.layers.Dense( - model_config.get("central_vf_size", 64), activation=tf.nn.tanh, name="c_vf_dense")(central_vf_dense) + model_config.get("central_vf_size", 64), activation=tf.nn.tanh, + name="c_vf_dense")(central_vf_dense) central_vf_out = tf.keras.layers.Dense( 1, activation=None, name="c_vf_out")(central_vf_dense2) self.central_vf = tf.keras.Model( @@ -142,8 +145,8 @@ def __init__(self, @override(RecurrentTFModelV2) def forward_rnn(self, inputs, state, seq_lens): - model_out, self._value_out, h, c = self.model([inputs, seq_lens] + - state) + model_out, self._value_out, h, c = self.model( + [inputs, seq_lens] + state) return model_out, [h, c] @override(ModelV2) @@ -197,11 +200,13 @@ def centralized_critic_postprocessing(policy, for agent_id in other_agent_batches.keys()} agent_time = sample_batch['t'] # # find agents whose time overlaps with the current agent - rel_agents = {agent_id: other_agent_time for agent_id, other_agent_time in other_agent_times.items()} + rel_agents = {agent_id: other_agent_time for agent_id, other_agent_time + in other_agent_times.items()} # if len(rel_agents) > 0: - other_obs = {agent_id: - other_agent_batches[agent_id][1]["obs"].copy() - for agent_id in other_agent_batches.keys()} + other_obs = { + agent_id: 
other_agent_batches[agent_id][1]["obs"].copy() + for agent_id in other_agent_batches.keys() + } # padded_agent_obs = {agent_id: # overlap_and_pad_agent( # time_span, @@ -209,17 +214,20 @@ def centralized_critic_postprocessing(policy, # other_obs[agent_id]) # for agent_id, # rel_agent_time in rel_agents.items()} - padded_agent_obs = {agent_id: - fill_missing( + padded_agent_obs = { + agent_id: fill_missing( agent_time, other_agent_times[agent_id], - other_obs[agent_id]) - for agent_id, - rel_agent_time in rel_agents.items()} + other_obs[agent_id] + ) + for agent_id, rel_agent_time in rel_agents.items() + } # okay, now we need to stack and sort - central_obs_list = [padded_obs for padded_obs in padded_agent_obs.values()] + central_obs_list = [padded_obs for padded_obs in + padded_agent_obs.values()] try: - central_obs_batch = np.hstack((sample_batch["obs"], np.hstack(central_obs_list))) + central_obs_batch = np.hstack( + (sample_batch["obs"], np.hstack(central_obs_list))) except: # TODO(@ev) this is a bug and needs to be fixed central_obs_batch = sample_batch["obs"] @@ -238,17 +246,19 @@ def centralized_critic_postprocessing(policy, sample_batch[CENTRAL_OBS] = central_obs_batch # overwrite default VF prediction with the central VF - sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_vf(sample_batch[CENTRAL_OBS]) + sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_vf( + sample_batch[CENTRAL_OBS]) else: # policy hasn't initialized yet, use zeros - #TODO(evinitsky) put in the right shape + # TODO(evinitsky) put in the right shape obs_shape = sample_batch[SampleBatch.CUR_OBS].shape[1] obs_shape = (1, obs_shape * (policy.model.max_num_agents)) sample_batch[CENTRAL_OBS] = np.zeros(obs_shape) # TODO(evinitsky) put in the right shape. Will break if actions aren't 1 sample_batch[SampleBatch.VF_PREDS] = np.zeros(1, dtype=np.float32) - completed = sample_batch["dones"][-1] + # TODO (ak): this was not being used, so commented + # completed = sample_batch["dones"][-1] # if not completed and policy.loss_initialized(): # last_r = 0.0 @@ -267,7 +277,6 @@ def centralized_critic_postprocessing(policy, return batch - def time_overlap(time_span, agent_time): """Check if agent_time overlaps with time_span""" if agent_time[0] <= time_span[1] and agent_time[1] >= time_span[0]: @@ -302,12 +311,14 @@ def overlap_and_pad_agent(time_span, agent_time, obs): print(time_span) print(agent_time) if time_span[0] == 7 or agent_time[0] == 7: - import ipdb; ipdb.set_trace() + import ipdb + ipdb.set_trace() # FIXME(ev) some of these conditions can be combined # no padding needed if agent_time[0] == time_span[0] and agent_time[1] == time_span[1]: if obs.shape[0] < 200: - import ipdb; ipdb.set_trace() + import ipdb + ipdb.set_trace() return obs # agent enters before time_span starts and exits before time_span end if agent_time[0] < time_span[0] and agent_time[1] < time_span[1]: @@ -317,7 +328,8 @@ def overlap_and_pad_agent(time_span, agent_time, obs): padding = np.zeros((missing_time, obs.shape[1])) obs_concat = np.concatenate((overlap_obs, padding)) if obs_concat.shape[0] < 200: - import ipdb; ipdb.set_trace() + import ipdb + ipdb.set_trace() return obs_concat # agent enters after time_span starts and exits after time_span ends elif agent_time[0] > time_span[0] and agent_time[1] > time_span[1]: @@ -327,7 +339,8 @@ def overlap_and_pad_agent(time_span, agent_time, obs): padding = np.zeros((missing_time, obs.shape[1])) obs_concat = np.concatenate((padding, overlap_obs)) if obs_concat.shape[0] < 200: - import 
ipdb; ipdb.set_trace() + import ipdb + ipdb.set_trace() return obs_concat # agent time is entirely contained in time_span elif agent_time[0] >= time_span[0] and agent_time[1] <= time_span[1]: @@ -341,7 +354,8 @@ def overlap_and_pad_agent(time_span, agent_time, obs): padding = np.zeros((missing_right, obs.shape[1])) obs_concat = np.concatenate((obs_concat, padding)) if obs_concat.shape[0] < 200: - import ipdb; ipdb.set_trace() + import ipdb + ipdb.set_trace() return obs_concat # agent time totally contains time_span elif agent_time[0] <= time_span[0] and agent_time[1] >= time_span[1]: @@ -353,7 +367,8 @@ def overlap_and_pad_agent(time_span, agent_time, obs): if non_overlap_right > 0: overlap_obs = overlap_obs[:-non_overlap_right] if overlap_obs.shape[0] < 200: - import ipdb; ipdb.set_trace() + import ipdb + ipdb.set_trace() return overlap_obs @@ -492,6 +507,7 @@ def __init__(self, config): shape=(), trainable=False, dtype=tf.float32) + def update_kl(self, blah): pass @@ -516,6 +532,7 @@ def central_vf_stats(policy, train_batch, grads): policy.central_value_function), } + def kl_and_loss_stats(policy, train_batch): return { "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), @@ -532,6 +549,7 @@ def kl_and_loss_stats(policy, train_batch): "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64), } + CCPPO = CustomPPOTFPolicy.with_updates( name="CCPPO", postprocess_fn=centralized_critic_postprocessing, @@ -544,4 +562,4 @@ def kl_and_loss_stats(policy, train_batch): CentralizedValueMixin, KLCoeffMixin ]) -CCTrainer = PPOTrainer.with_updates(name="CCPPOTrainer", default_policy=CCPPO) \ No newline at end of file +CCTrainer = PPOTrainer.with_updates(name="CCPPOTrainer", default_policy=CCPPO) diff --git a/flow/algorithms/custom_ppo.py b/flow/algorithms/custom_ppo.py index a98af6c2d..5effd0ce7 100644 --- a/flow/algorithms/custom_ppo.py +++ b/flow/algorithms/custom_ppo.py @@ -5,9 +5,7 @@ import logging -import numpy as np import ray -from ray.rllib.agents.ppo.ppo import PPOTrainer from ray.rllib.evaluation.postprocessing import compute_advantages, \ Postprocessing from ray.rllib.policy.sample_batch import SampleBatch @@ -17,6 +15,10 @@ from ray.rllib.utils.explained_variance import explained_variance from ray.rllib.utils.tf_ops import make_tf_callable from ray.rllib.utils import try_import_tf +from ray.rllib.agents.trainer_template import build_trainer +from ray.rllib.agents.ppo.ppo import choose_policy_optimizer, DEFAULT_CONFIG +from ray.rllib.agents.ppo.ppo import warn_about_bad_reward_scales + tf = try_import_tf() @@ -78,7 +80,6 @@ def __init__(self, model_config (dict): (Optional) model config for use in specifying action distributions. 
""" - def reduce_mean_valid(t): return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) @@ -109,7 +110,7 @@ def reduce_mean_valid(t): vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy) else: self.mean_vf_loss = tf.constant(0.0) - loss = reduce_mean_valid(-surrogate_loss -entropy_coeff * curr_entropy) + loss = reduce_mean_valid(-surrogate_loss - entropy_coeff * curr_entropy) self.loss = loss @@ -266,6 +267,7 @@ def __init__(self, config): shape=(), trainable=False, dtype=tf.float32) + def update_kl(self, blah): pass @@ -285,6 +287,7 @@ def update_kl(self, blah): ValueNetworkMixin, KLCoeffMixin ]) + def validate_config(config): if config["entropy_coeff"] < 0: raise DeprecationWarning("entropy_coeff must be >= 0") @@ -306,13 +309,11 @@ def validate_config(config): elif tf and tf.executing_eagerly(): config["simple_optimizer"] = True # multi-gpu not supported -from ray.rllib.agents.trainer_template import build_trainer -from ray.rllib.agents.ppo.ppo import choose_policy_optimizer, DEFAULT_CONFIG, update_kl, \ - warn_about_bad_reward_scales + CustomPPOTrainer = build_trainer( name="CustomPPOTrainer", default_config=DEFAULT_CONFIG, default_policy=CustomPPOTFPolicy, make_policy_optimizer=choose_policy_optimizer, validate_config=validate_config, - after_train_result=warn_about_bad_reward_scales) \ No newline at end of file + after_train_result=warn_about_bad_reward_scales) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 4a4ceb546..9806413e0 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -134,15 +134,20 @@ def get_action(self, env): if self.fail_safe is not None: for check in self.fail_safe: if check == 'instantaneous': - accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel_no_noise_with_failsafe) + accel_no_noise_with_failsafe = self.get_safe_action_instantaneous( + env, accel_no_noise_with_failsafe) elif check == 'safe_velocity': - accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel_no_noise_with_failsafe) + accel_no_noise_with_failsafe = self.get_safe_velocity_action( + env, accel_no_noise_with_failsafe) elif check == 'feasible_accel': - accel_no_noise_with_failsafe = self.get_feasible_action(accel_no_noise_with_failsafe) + accel_no_noise_with_failsafe = self.get_feasible_action( + accel_no_noise_with_failsafe) elif check == 'obey_speed_limit': - accel_no_noise_with_failsafe = self.get_obey_speed_limit_action(env, accel_no_noise_with_failsafe) + accel_no_noise_with_failsafe = self.get_obey_speed_limit_action( + env, accel_no_noise_with_failsafe) - env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, accel_no_noise_with_failsafe) + env.k.vehicle.update_accel_no_noise_with_failsafe( + self.veh_id, accel_no_noise_with_failsafe) # add noise to the accelerations, if requested if self.accel_noise > 0: @@ -286,9 +291,9 @@ def safe_velocity(self, env): v_safe = 2 * h / env.sim_step + dv - this_vel * (2 * self.delay) - # check for speed limit - this_edge = env.k.vehicle.get_edge(self.veh_id) - edge_speed_limit = env.k.network.speed_limit(this_edge) + # check for speed limit FIXME: this is not called + # this_edge = env.k.vehicle.get_edge(self.veh_id) + # edge_speed_limit = env.k.network.speed_limit(this_edge) if this_vel > v_safe: print( diff --git a/flow/controllers/velocity_controllers.py b/flow/controllers/velocity_controllers.py index e1f69de98..86868c5f7 100644 --- a/flow/controllers/velocity_controllers.py +++ b/flow/controllers/velocity_controllers.py 
@@ -115,15 +115,15 @@ def get_accel(self, env): if edge == "": return None - if (self.find_intersection_dist(env) <= 10 and \ + if (self.find_intersection_dist(env) <= 10 and env.k.vehicle.get_edge(self.veh_id) in self.danger_edges) or \ - env.k.vehicle.get_edge(self.veh_id)[0] == ":"\ + env.k.vehicle.get_edge(self.veh_id)[0] == ":" \ or (self.control_length and (env.k.vehicle.get_x_by_id(self.veh_id) < self.control_length[0] - or env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1]))\ + or env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1])) \ or (self.no_control_edges is not None and len(self.no_control_edges) > 0 and edge in self.no_control_edges): # TODO(@evinitsky) put back - # or env.k.vehicle.get_edge(self.veh_id) in self.no_control_edges: + # or env.k.vehicle.get_edge(self.veh_id) in self.no_control_edges: return None else: # compute the acceleration from the desired velocity diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 8ac9c3699..df8992c20 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,5 +1,4 @@ """Contains an experiment class for running simulations.""" -from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info from flow.data_pipeline.leaderboard_utils import network_name_translate @@ -238,7 +237,7 @@ def rl_actions(*_): write_dict_to_csv(trajectory_table_path, extra_info) write_dict_to_csv(metadata_table_path, metadata, True) - + if to_aws: upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 7553703f3..62e6f6e53 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -57,7 +57,7 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin """ try: os.makedirs("result/{}".format(table_name)) - except FileExistsError as e: + except FileExistsError: pass s3 = boto3.client("s3") response = s3.list_objects_v2(Bucket=bucket) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 89432e260..184c7217a 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,26 +2,64 @@ from enum import Enum # tags for different queries -tags = {"fact_vehicle_trace": {"fact_energy_trace": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], - "fact_network_throughput_agg": ["FACT_NETWORK_THROUGHPUT_AGG"], - "fact_network_inflows_outflows": ["FACT_NETWORK_INFLOWS_OUTFLOWS"]}, - "fact_energy_trace": {}, - "POWER_DEMAND_MODEL_DENOISED_ACCEL": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], - "fact_network_metrics_by_distance_agg": - ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], - "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, - "POWER_DEMAND_MODEL": {}, - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL": {}, - "fact_vehicle_fuel_efficiency_agg": {"fact_network_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"]}, - "fact_network_fuel_efficiency_agg": {"leaderboard_chart": ["LEADERBOARD_CHART"]}, - "leaderboard_chart": {"leaderboard_chart_agg": ["LEADERBOARD_CHART_AGG"]} - } +tags = { + "fact_vehicle_trace": { + "fact_energy_trace": [ + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" + ], + 
"fact_network_throughput_agg": [ + "FACT_NETWORK_THROUGHPUT_AGG" + ], + "fact_network_inflows_outflows": [ + "FACT_NETWORK_INFLOWS_OUTFLOWS" + ] + }, + "fact_energy_trace": {}, + "POWER_DEMAND_MODEL_DENOISED_ACCEL": { + "fact_vehicle_fuel_efficiency_agg": [ + "FACT_VEHICLE_FUEL_EFFICIENCY_AGG" + ], + "fact_network_metrics_by_distance_agg": [ + "FACT_NETWORK_METRICS_BY_DISTANCE_AGG" + ], + "fact_network_metrics_by_time_agg": [ + "FACT_NETWORK_METRICS_BY_TIME_AGG" + ] + }, + "POWER_DEMAND_MODEL": {}, + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL": {}, + "fact_vehicle_fuel_efficiency_agg": { + "fact_network_fuel_efficiency_agg": [ + "FACT_NETWORK_FUEL_EFFICIENCY_AGG" + ] + }, + "fact_network_fuel_efficiency_agg": { + "leaderboard_chart": [ + "LEADERBOARD_CHART" + ] + }, + "leaderboard_chart": { + "leaderboard_chart_agg": [ + "LEADERBOARD_CHART_AGG" + ] + } +} -tables = ["fact_vehicle_trace", "fact_energy_trace", "fact_network_throughput_agg", "fact_network_inflows_outflows", - "fact_vehicle_fuel_efficiency_agg", "fact_network_metrics_by_distance_agg", - "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart", - "leaderboard_chart_agg", "metadata_table"] +tables = [ + "fact_vehicle_trace", + "fact_energy_trace", + "fact_network_throughput_agg", + "fact_network_inflows_outflows", + "fact_vehicle_fuel_efficiency_agg", + "fact_network_metrics_by_distance_agg", + "fact_network_metrics_by_time_agg", + "fact_network_fuel_efficiency_agg", + "leaderboard_chart", + "leaderboard_chart_agg", + "metadata_table" +] network_using_edge = ["I-210 without Ramps"] @@ -171,7 +209,7 @@ class QueryStrings(Enum): FACT_VEHICLE_FUEL_EFFICIENCY_AGG = """ WITH sub_fact_vehicle_trace AS ( - SELECT + SELECT v.id, v.source_id, e.energy_model_id, @@ -248,7 +286,7 @@ class QueryStrings(Enum): FACT_NETWORK_INFLOWS_OUTFLOWS = """ WITH min_max_time_step AS ( - SELECT + SELECT id, source_id, MIN(time_step) AS min_time_step, @@ -283,10 +321,10 @@ class QueryStrings(Enum): COALESCE(i.source_id, o.source_id) AS source_id, COALESCE(i.inflow_rate, 0) AS inflow_rate, COALESCE(o.outflow_rate, 0) AS outflow_rate - FROM inflows i + FROM inflows i FULL OUTER JOIN outflows o ON 1 = 1 - AND i.time_step = o.time_step - AND i.source_id = o.source_id + AND i.time_step = o.time_step + AND i.source_id = o.source_id ORDER BY time_step ;""" @@ -306,7 +344,7 @@ class QueryStrings(Enum): SUM(power) OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power - FROM fact_vehicle_trace vt + FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 AND et.date = \'{date}\' AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' @@ -384,7 +422,7 @@ class QueryStrings(Enum): COALESCE(be.instantaneous_energy_avg, 0) AS instantaneous_energy_avg, COALESCE(be.instantaneous_energy_upper_bound, 0) AS instantaneous_energy_upper_bound, COALESCE(be.instantaneous_energy_lower_bound, 0) AS instantaneous_energy_lower_bound - FROM binned_cumulative_energy bce + FROM binned_cumulative_energy bce JOIN binned_energy be ON 1 = 1 AND bce.source_id = be.source_id AND bce.distance_meters_bin = be.distance_meters_bin @@ -405,7 +443,7 @@ class QueryStrings(Enum): SUM(power) OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power - FROM fact_vehicle_trace vt + FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 AND et.date = \'{date}\' AND et.partition_name = 
\'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' @@ -482,7 +520,7 @@ class QueryStrings(Enum): COALESCE(be.instantaneous_energy_avg, 0) AS instantaneous_energy_avg, COALESCE(be.instantaneous_energy_upper_bound, 0) AS instantaneous_energy_upper_bound, COALESCE(be.instantaneous_energy_lower_bound, 0) AS instantaneous_energy_lower_bound - FROM binned_cumulative_energy bce + FROM binned_cumulative_energy bce JOIN binned_energy be ON 1 = 1 AND bce.source_id = be.source_id AND bce.time_seconds_bin = be.time_seconds_bin diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index c9b63b23a..ad301a3f5 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -1,10 +1,5 @@ """Environment for training vehicles to reduce congestion in the I210.""" - -from collections import OrderedDict -from copy import deepcopy -from time import time - -from gym.spaces import Box, Discrete, Dict +from gym.spaces import Box import numpy as np from flow.core.rewards import miles_per_gallon, miles_per_megajoule @@ -20,9 +15,11 @@ "max_accel": 1, # maximum deceleration for autonomous vehicles, in m/s^2 "max_decel": 1, - # whether we use an obs space that contains adjacent lane info or just the lead obs + # whether we use an obs space that contains adjacent lane info or just the + # lead obs "lead_obs": True, - # whether the reward should come from local vehicles instead of global rewards + # whether the reward should come from local vehicles instead of global + # rewards "local_reward": True, # desired velocity "target_velocity": 25 @@ -161,10 +158,10 @@ def in_control_range(self, veh_id): If control range is defined it uses control range, otherwise it searches over a set of edges """ - return (self.control_range and self.k.vehicle.get_x_by_id(veh_id) < self.control_range[1] \ - and self.k.vehicle.get_x_by_id(veh_id) > self.control_range[0]) or \ - (len(self.no_control_edges) > 0 and self.k.vehicle.get_edge(veh_id) not in - self.no_control_edges) + return (self.control_range and self.control_range[1] > + self.k.vehicle.get_x_by_id(veh_id) > self.control_range[0]) or \ + (len(self.no_control_edges) > 0 and self.k.vehicle.get_edge(veh_id) not in + self.no_control_edges) def get_state(self): """See class definition.""" @@ -358,7 +355,6 @@ def additional_command(self): if veh_id not in self.observed_ids: self.k.vehicle.remove(veh_id) - def state_util(self, rl_id): """Return an array of headway, tailway, leader speed, follower speed. diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index 004172765..8daffde86 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -262,8 +262,7 @@ def _highway(data, params, all_time): edge_starts = {} # Add the main edges. 
edge_starts.update({ - "highway_{}".format(i): - i * (length / num_edges + junction_length) + "highway_{}".format(i): i * (length / num_edges + junction_length) for i in range(num_edges) }) @@ -271,15 +270,13 @@ def _highway(data, params, all_time): edge_starts.update({"highway_end": length + num_edges * junction_length}) edge_starts.update({ - ":edge_{}".format(i + 1): - (i + 1) * length / num_edges + i * junction_length + ":edge_{}".format(i + 1): (i + 1) * length / num_edges + i * junction_length for i in range(num_edges - 1) }) if params['net'].additional_params["use_ghost_edge"]: edge_starts.update({ - ":edge_{}".format(num_edges): - length + (num_edges - 1) * junction_length + ":edge_{}".format(num_edges): length + (num_edges - 1) * junction_length }) # compute the absolute position @@ -299,8 +296,8 @@ def _highway(data, params, all_time): data[veh_id]['time'], data[veh_id]['edge'], data[veh_id]['lane']): - # avoid vehicles not on the relevant edges. Also only check the second to - # last lane + # avoid vehicles not on the relevant edges. Also only check the + # second to last lane if edge not in edge_starts.keys() or ti not in all_time: continue else: diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 5c52e196f..0ab658f75 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -169,7 +169,7 @@ def visualizer_rllib(args): else: env = gym.make(env_name) - # reroute on exit is a training hack, it should be turned off at test time. + # reroute on exit is a training hack, it should be turned off at test time. if hasattr(env, "reroute_on_exit"): env.reroute_on_exit = False @@ -302,7 +302,6 @@ def visualizer_rllib(args): print('Average, std miles per megajoule: {}, {}'.format(np.mean(mpj), np.std(mpj))) - # Compute arrival rate of vehicles in the last 500 sec of the run print("\nOutflows (veh/hr):") print(final_outflows) From 4e8769b85c0a7fbd9418ee1d033e0f152f3ab62b Mon Sep 17 00:00:00 2001 From: AboudyKreidieh Date: Wed, 10 Jun 2020 12:41:40 -0700 Subject: [PATCH 244/438] pydocstyle (mostly --- .../exp_configs/non_rl/i210_subnetwork.py | 6 +- examples/train.py | 2 +- flow/algorithms/centralized_PPO.py | 95 +++++++++++-------- flow/algorithms/custom_ppo.py | 79 ++++++++------- flow/data_pipeline/data_pipeline.py | 20 ++-- flow/data_pipeline/leaderboard_utils.py | 6 +- flow/envs/multiagent/base.py | 1 + 7 files changed, 120 insertions(+), 89 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 90384b207..3fac52be2 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -243,7 +243,11 @@ def valid_ids(env, veh_ids): - return [veh_id for veh_id in veh_ids if env.k.vehicle.get_edge(veh_id) not in ["ghost0", "119257908#3"]] + """Return the names of vehicles within the controllable edges.""" + return [ + veh_id for veh_id in veh_ids + if env.k.vehicle.get_edge(veh_id) not in ["ghost0", "119257908#3"] + ] custom_callables = { diff --git a/examples/train.py b/examples/train.py index 1235e6241..112b7fa0d 100644 --- a/examples/train.py +++ b/examples/train.py @@ -283,7 +283,7 @@ def on_episode_end(info): episode.custom_metrics["avg_mpj_per_veh"] = np.mean(episode.user_data["avg_mpj"]) def on_train_result(info): - """Store the mean score of the episode, and increment or decrement how many adversaries are on""" + """Store the mean score of the episode, and adjust the number of adversaries.""" 
         trainer = info["trainer"]
         trainer.workers.foreach_worker(
             lambda ev: ev.foreach_env(
diff --git a/flow/algorithms/centralized_PPO.py b/flow/algorithms/centralized_PPO.py
index 5f71f865a..57fdd7e33 100644
--- a/flow/algorithms/centralized_PPO.py
+++ b/flow/algorithms/centralized_PPO.py
@@ -278,7 +278,7 @@ def centralized_critic_postprocessing(policy,


 def time_overlap(time_span, agent_time):
-    """Check if agent_time overlaps with time_span"""
+    """Check if agent_time overlaps with time_span."""
     if agent_time[0] <= time_span[1] and agent_time[1] >= time_span[0]:
         return True
     else:
@@ -298,14 +298,18 @@ def fill_missing(agent_time, other_agent_time, obs):


 def overlap_and_pad_agent(time_span, agent_time, obs):
-    """take the part of obs that overlaps, pad to length time_span
-    Arguments:
-        time_span (tuple): tuple of the first and last time that the agent
-            of interest is in the system
-        agent_time (tuple): tuple of the first and last time that the
-            agent whose obs we are padding is in the system
-        obs (np.ndarray): observations of the agent whose time is
-            agent_time
+    """Take the part of obs that overlaps, pad to length time_span.
+
+    Parameters
+    ----------
+    time_span : tuple
+        tuple of the first and last time that the agent of interest is in the
+        system
+    agent_time : tuple
+        tuple of the first and last time that the agent whose obs we are
+        padding is in the system
+    obs : array_like
+        observations of the agent whose time is agent_time
     """
     assert time_overlap(time_span, agent_time)
     print(time_span)
@@ -424,38 +428,49 @@ def __init__(self,
                  vf_loss_coeff=1.0,
                  use_gae=True,
                  model_config=None):
-        """Constructs the loss for Proximal Policy Objective.
-
-        Arguments:
-            action_space: Environment observation space specification.
-            dist_class: action distribution class for logits.
-            value_targets (Placeholder): Placeholder for target values; used
-                for GAE.
-            actions (Placeholder): Placeholder for actions taken
-                from previous model evaluation.
-            advantages (Placeholder): Placeholder for calculated advantages
-                from previous model evaluation.
-            prev_logits (Placeholder): Placeholder for logits output from
-                previous model evaluation.
-            prev_actions_logp (Placeholder): Placeholder for prob output from
-                previous model evaluation.
-            vf_preds (Placeholder): Placeholder for value function output
-                from previous model evaluation.
-            curr_action_dist (ActionDistribution): ActionDistribution
-                of the current model.
-            value_fn (Tensor): Current value function output Tensor.
-            cur_kl_coeff (Variable): Variable holding the current PPO KL
-                coefficient.
-            valid_mask (Tensor): A bool mask of valid input elements (#2992).
-            entropy_coeff (float): Coefficient of the entropy regularizer.
-            clip_param (float): Clip parameter
-            vf_clip_param (float): Clip parameter for the value function
-            vf_loss_coeff (float): Coefficient of the value function loss
-            use_gae (bool): If true, use the Generalized Advantage Estimator.
-            model_config (dict): (Optional) model config for use in specifying
-                action distributions.
+        """Construct the loss for Proximal Policy Objective.
+
+        Parameters
+        ----------
+        action_space : TODO
+            Environment observation space specification.
+        dist_class : TODO
+            action distribution class for logits.
+        value_targets : tf.placeholder
+            Placeholder for target values; used for GAE.
+        actions : tf.placeholder
+            Placeholder for actions taken from previous model evaluation.
+        advantages : tf.placeholder
+            Placeholder for calculated advantages from previous model
+            evaluation.
+        prev_logits : tf.placeholder
+            Placeholder for logits output from previous model evaluation.
+        prev_actions_logp : tf.placeholder
+            Placeholder for prob output from previous model evaluation.
+        vf_preds : tf.placeholder
+            Placeholder for value function output from previous model
+            evaluation.
+        curr_action_dist : ActionDistribution
+            ActionDistribution of the current model.
+        value_fn : tf.Tensor
+            Current value function output Tensor.
+        cur_kl_coeff : tf.Variable
+            Variable holding the current PPO KL coefficient.
+        valid_mask : tf.Tensor
+            A bool mask of valid input elements (#2992).
+        entropy_coeff : float
+            Coefficient of the entropy regularizer.
+        clip_param : float
+            Clip parameter
+        vf_clip_param : float
+            Clip parameter for the value function
+        vf_loss_coeff : float
+            Coefficient of the value function loss
+        use_gae : bool
+            If true, use the Generalized Advantage Estimator.
+        model_config : dict, optional
+            model config for use in specifying action distributions.
         """
-
         def reduce_mean_valid(t):
             return tf.reduce_mean(tf.boolean_mask(t, valid_mask))
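
The time_overlap predicate documented above is plain closed-interval intersection: two spans of timesteps overlap iff each starts no later than the other ends. A quick sketch with toy spans:

    def time_overlap(time_span, agent_time):
        # closed intervals (first_step, last_step) overlap iff each one
        # starts before (or when) the other ends
        return agent_time[0] <= time_span[1] and agent_time[1] >= time_span[0]

    assert time_overlap((0, 10), (5, 15))        # partial overlap
    assert time_overlap((0, 10), (3, 7))         # containment
    assert not time_overlap((0, 10), (11, 20))   # disjoint
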
diff --git a/flow/algorithms/custom_ppo.py b/flow/algorithms/custom_ppo.py
index 5effd0ce7..65291f1d4 100644
--- a/flow/algorithms/custom_ppo.py
+++ b/flow/algorithms/custom_ppo.py
@@ -1,4 +1,4 @@
-"""PPO but we add in the outflow after the reward to the final reward"""
+"""PPO but we add in the outflow after the reward to the final reward."""
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -49,36 +49,48 @@ def __init__(self,
                  vf_loss_coeff=1.0,
                  use_gae=True,
                  model_config=None):
-        """Constructs the loss for Proximal Policy Objective.
-
-        Arguments:
-            action_space: Environment observation space specification.
-            dist_class: action distribution class for logits.
-            value_targets (Placeholder): Placeholder for target values; used
-                for GAE.
-            actions (Placeholder): Placeholder for actions taken
-                from previous model evaluation.
-            advantages (Placeholder): Placeholder for calculated advantages
-                from previous model evaluation.
-            prev_logits (Placeholder): Placeholder for logits output from
-                previous model evaluation.
-            prev_actions_logp (Placeholder): Placeholder for prob output from
-                previous model evaluation.
-            vf_preds (Placeholder): Placeholder for value function output
-                from previous model evaluation.
-            curr_action_dist (ActionDistribution): ActionDistribution
-                of the current model.
-            value_fn (Tensor): Current value function output Tensor.
-            cur_kl_coeff (Variable): Variable holding the current PPO KL
-                coefficient.
-            valid_mask (Tensor): A bool mask of valid input elements (#2992).
-            entropy_coeff (float): Coefficient of the entropy regularizer.
-            clip_param (float): Clip parameter
-            vf_clip_param (float): Clip parameter for the value function
-            vf_loss_coeff (float): Coefficient of the value function loss
-            use_gae (bool): If true, use the Generalized Advantage Estimator.
-            model_config (dict): (Optional) model config for use in specifying
-                action distributions.
+        """Construct the loss for Proximal Policy Objective.
+
+        Parameters
+        ----------
+        action_space : TODO
+            Environment observation space specification.
+        dist_class : TODO
+            action distribution class for logits.
+        value_targets : tf.placeholder
+            Placeholder for target values; used for GAE.
+        actions : tf.placeholder
+            Placeholder for actions taken from previous model evaluation.
+        advantages : tf.placeholder
+            Placeholder for calculated advantages from previous model
+            evaluation.
+        prev_logits : tf.placeholder
+            Placeholder for logits output from previous model evaluation.
+        prev_actions_logp : tf.placeholder
+            Placeholder for prob output from previous model evaluation.
+        vf_preds : tf.placeholder
+            Placeholder for value function output from previous model
+            evaluation.
+        curr_action_dist : ActionDistribution
+            ActionDistribution of the current model.
+        value_fn : tf.Tensor
+            Current value function output Tensor.
+        cur_kl_coeff : tf.Variable
+            Variable holding the current PPO KL coefficient.
+        valid_mask : tf.Tensor
+            A bool mask of valid input elements (#2992).
+        entropy_coeff : float
+            Coefficient of the entropy regularizer.
+        clip_param : float
+            Clip parameter
+        vf_clip_param : float
+            Clip parameter for the value function
+        vf_loss_coeff : float
+            Coefficient of the value function loss
+        use_gae : bool
+            If true, use the Generalized Advantage Estimator.
+        model_config : dict, optional
+            model config for use in specifying action distributions.
         """
         def reduce_mean_valid(t):
             return tf.reduce_mean(tf.boolean_mask(t, valid_mask))

@@ -170,7 +182,7 @@ def kl_and_loss_stats(policy, train_batch):


 def vf_preds_and_logits_fetches(policy):
-    """Adds value function and logits outputs to experience train_batches."""
+    """Add value function and logits outputs to experience train_batches."""
     return {
         SampleBatch.VF_PREDS: policy.model.value_function(),
         BEHAVIOUR_LOGITS: policy.model.last_output(),
@@ -181,8 +193,7 @@ def postprocess_ppo_gae(policy,
                         sample_batch,
                         other_agent_batches=None,
                         episode=None):
-    """Adds the policy logits, VF preds, and advantages to the trajectory."""
-
+    """Add the policy logits, VF preds, and advantages to the trajectory."""
     completed = sample_batch["dones"][-1]
     if completed:
         last_r = 0.0
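
Both loss constructors fold their per-timestep terms with the reduce_mean_valid helper visible in the context lines above: a mean taken only over timesteps flagged valid, so the zero-padded tails produced by overlap_and_pad_agent do not dilute the loss. A NumPy sketch of the same masking idea; tf.boolean_mask behaves analogously:

    import numpy as np

    def reduce_mean_valid(t, valid_mask):
        # mirrors tf.reduce_mean(tf.boolean_mask(t, valid_mask)):
        # average only the entries whose mask is True
        return np.asarray(t)[np.asarray(valid_mask)].mean()

    losses = [0.5, 0.7, 9.9, 9.9]           # last two entries are padding
    mask = [True, True, False, False]
    print(reduce_mean_valid(losses, mask))  # 0.6
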
diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py
index ea5307dad..50c2c8422 100644
--- a/flow/data_pipeline/data_pipeline.py
+++ b/flow/data_pipeline/data_pipeline.py
@@ -13,13 +13,14 @@ def generate_trajectory_table(data_path, extra_info, partition_name):
     ----------
     data_path : str
         path to the standard SUMO emission
-    extra_info: dict
+    extra_info : dict
         extra information needed in the trajectory table, collected from flow
-    partition_name: str
+    partition_name : str
         the name of the partition to put this output to
+
     Returns
     -------
-    output_file_path: str
+    output_file_path : str
         the local path of the outputted csv file
     """
     raw_output = pd.read_csv(data_path, index_col=["time", "id"])
@@ -39,7 +40,7 @@ def generate_trajectory_table(data_path, extra_info, partition_name):


 def write_dict_to_csv(data_path, extra_info, include_header=False):
-    """Write extra to the CSV file at data_path, create one if not exist
+    """Write extra to the CSV file at data_path, create one if not exist.

     Parameters
     ----------
@@ -105,7 +106,7 @@ def get_extra_info(veh_kernel, extra_info, veh_ids):


 def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"):
-    """Delete the obsolete data on S3"""
+    """Delete the obsolete data on S3."""
     response = s3.list_objects_v2(Bucket=bucket)
     keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table) == 0 and e["Key"][-4:] == ".csv"]
     keys.remove(latest_key)
@@ -114,18 +115,17 @@


 class AthenaQuery:
-    """
-    Class used to run query.
+    """Class used to run queries.

     Act as a query engine, maintains an open session with AWS Athena.

     Attributes
     ----------
-    MAX_WAIT: int
+    MAX_WAIT : int
         maximum number of seconds to wait before declares time-out
-    client: boto3.client
+    client : boto3.client
         the athena client that is used to run the query
-    existing_partitions: list
+    existing_partitions : list
         a list of partitions that is already recorded in Athena's
         datalog, this is obtained through query at the initialization of
         this class instance.
diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py
index 62e6f6e53..afc2fd8bc 100644
--- a/flow/data_pipeline/leaderboard_utils.py
+++ b/flow/data_pipeline/leaderboard_utils.py
@@ -1,4 +1,4 @@
-"""APIs for the leader board front end"""
+"""APIs for the leader board front end."""
 import os
 import boto3
 import pandas as pd
@@ -35,7 +35,7 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin

     Parameters
     ----------
-    table_name: str
+    table_name : str
         The name of table to retrieve from S3, the current available tables
         are:
             fact_vehicle_trace
             fact_energy_trace
@@ -52,7 +52,7 @@
         avoid burdening the web server with more calculation. The date and
         source_id in its name is always going to reflect the latest
         leaderboard_chart entry.
-    bucket: str
+    bucket : str
         the S3 bucket that holds these tables
     """
     try:
diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py
index 7104138de..881461d63 100644
--- a/flow/envs/multiagent/base.py
+++ b/flow/envs/multiagent/base.py
@@ -324,4 +324,5 @@ def apply_rl_actions(self, rl_actions=None):
         self._apply_rl_actions(clipped_actions)

     def set_iteration_num(self):
+        """Increment the number of training iterations."""
         self.num_training_iters += 1
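
For context on the write_dict_to_csv contract documented in the data-pipeline patch above (append per-step records to the CSV at data_path, emitting the header only once), a minimal pandas sketch; this is an illustration of the documented behavior under those assumptions, not Flow's actual implementation:

    import pandas as pd

    def write_dict_to_csv(data_path, extra_info, include_header=False):
        # append the collected records; only the first call writes a header
        pd.DataFrame(extra_info).to_csv(
            data_path, mode="a", header=include_header, index=False)

    write_dict_to_csv("trajectory.csv", {"time": [0.0], "id": ["veh_0"]}, True)
    write_dict_to_csv("trajectory.csv", {"time": [0.5], "id": ["veh_0"]})
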
From 67dc1216e2f4822e9017f0f33cde45a060e810f4 Mon Sep 17 00:00:00 2001
From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com>
Date: Thu, 11 Jun 2020 10:29:40 -0700
Subject: [PATCH 245/438] Update lane change mode (#948)

* added new lane change modes

* replaced 'no_lat_collide' with 'no_lc_safe' which is the new default lane
  change mode

* bug fixes and PR reviews

Co-authored-by: AboudyKreidieh
---
 examples/exp_configs/non_rl/bay_bridge.py |  2 +-
 .../exp_configs/non_rl/bay_bridge_toll.py |  2 +-
 examples/exp_configs/non_rl/minicity.py   |  2 +-
 flow/core/params.py                       | 97 +++++++++++++++++--
 tests/fast_tests/test_vehicles.py         |  6 +-
 5 files changed, 93 insertions(+), 16 deletions(-)

diff --git a/examples/exp_configs/non_rl/bay_bridge.py b/examples/exp_configs/non_rl/bay_bridge.py
index d7d78360f..f3e0c465f 100644
--- a/examples/exp_configs/non_rl/bay_bridge.py
+++ b/examples/exp_configs/non_rl/bay_bridge.py
@@ -48,7 +48,7 @@
             lc_pushy=0.8,
             lc_speed_gain=4.0,
             model="LC2013",
-            lane_change_mode="no_lat_collide",
+            lane_change_mode="no_lc_safe",
             # lcKeepRight=0.8
         ),
         num_vehicles=1400)
diff --git a/examples/exp_configs/non_rl/bay_bridge_toll.py b/examples/exp_configs/non_rl/bay_bridge_toll.py
index 1b8268aeb..0941823cb 100644
--- a/examples/exp_configs/non_rl/bay_bridge_toll.py
+++ b/examples/exp_configs/non_rl/bay_bridge_toll.py
@@ -46,7 +46,7 @@
             model="LC2013",
             lcCooperative=0.2,
             lcSpeedGain=15,
-            lane_change_mode="no_lat_collide",
+            lane_change_mode="no_lc_safe",
         ),
         num_vehicles=50)
diff --git a/examples/exp_configs/non_rl/minicity.py b/examples/exp_configs/non_rl/minicity.py
index 23b232480..35d5edbce 100644
--- a/examples/exp_configs/non_rl/minicity.py
+++ b/examples/exp_configs/non_rl/minicity.py
@@ -18,7 +18,7 @@
         speed_mode=1,
     ),
     lane_change_params=SumoLaneChangeParams(
-        lane_change_mode="no_lat_collide",
+        lane_change_mode="no_lc_safe",
    ),
    initial_speed=0,
    num_vehicles=90)
diff --git a/flow/core/params.py b/flow/core/params.py
index 5a7467580..79ad8d689 100755
--- a/flow/core/params.py
+++ b/flow/core/params.py
@@ -17,7 +17,27 @@
     "all_checks": 31
 }

-LC_MODES = {"aggressive": 0, "no_lat_collide": 512, "strategic": 1621}
+LC_MODES = {
+    "no_lc_safe": 512,
+    "no_lc_aggressive": 0,
+    "sumo_default": 1621,
+    "no_strategic_aggressive": 1108,
+    "no_strategic_safe": 1620,
+    "only_strategic_aggressive": 1,
+    "only_strategic_safe": 513,
+    "no_cooperative_aggressive": 1105,
+    "no_cooperative_safe": 1617,
+    "only_cooperative_aggressive": 4,
+    "only_cooperative_safe": 516,
+    "no_speed_gain_aggressive": 1093,
+    "no_speed_gain_safe": 1605,
+    "only_speed_gain_aggressive": 16,
+    "only_speed_gain_safe": 528,
+    "no_right_drive_aggressive": 1045,
+    "no_right_drive_safe": 1557,
+    "only_right_drive_aggressive": 64,
+    "only_right_drive_safe": 576
+}

 # Traffic light defaults
 PROGRAM_ID = 1
@@ -897,14 +917,71 @@ class SumoLaneChangeParams:
     ----------
     lane_change_mode : str or int, optional
         may be one of the following:
+
+        * "no_lc_safe" (default): Disable all SUMO lane changing but still
+          handle safety checks (collision avoidance and safety-gap enforcement)
+          in the simulation. Binary is [001000000000]
+        * "no_lc_aggressive": SUMO lane changes are not executed, collision
+          avoidance and safety-gap enforcement are off.
+          Binary is [000000000000]
+
+        * "sumo_default": Execute all changes requested by a custom controller
+          unless in conflict with TraCI. Binary is [011001010101].
+
+        * "no_strategic_aggressive": Execute all changes except strategic
+          (routing) lane changes unless in conflict with TraCI. Collision
+          avoidance and safety-gap enforcement are off. Binary is [010001010100]
+        * "no_strategic_safe": Execute all changes except strategic
+          (routing) lane changes unless in conflict with TraCI. Collision
+          avoidance and safety-gap enforcement are on. Binary is [011001010100]
+        * "only_strategic_aggressive": Execute only strategic (routing) lane
+          changes unless in conflict with TraCI. Collision avoidance and
+          safety-gap enforcement are off. Binary is [000000000001]
+        * "only_strategic_safe": Execute only strategic (routing) lane
+          changes unless in conflict with TraCI. Collision avoidance and
+          safety-gap enforcement are on. Binary is [001000000001]
+
+        * "no_cooperative_aggressive": Execute all changes except cooperative
+          (change in order to allow others to change) lane changes unless in
+          conflict with TraCI. Collision avoidance and safety-gap enforcement
+          are off. Binary is [010001010001]
+        * "no_cooperative_safe": Execute all changes except cooperative
+          lane changes unless in conflict with TraCI. Collision avoidance and
+          safety-gap enforcement are on. Binary is [011001010001]
+        * "only_cooperative_aggressive": Execute only cooperative lane changes
+          unless in conflict with TraCI. Collision avoidance and safety-gap
+          enforcement are off. Binary is [000000000100]
+        * "only_cooperative_safe": Execute only cooperative lane changes
+          unless in conflict with TraCI. Collision avoidance and safety-gap
+          enforcement are on. Binary is [001000000100]
+
+        * "no_speed_gain_aggressive": Execute all changes except speed gain (the
+          other lane allows for faster driving) lane changes unless in conflict
+          with TraCI. Collision avoidance and safety-gap enforcement are off.
+          Binary is [010001000101]
+        * "no_speed_gain_safe": Execute all changes except speed gain
+          lane changes unless in conflict with TraCI. Collision avoidance and
+          safety-gap enforcement are on. Binary is [011001000101]
+        * "only_speed_gain_aggressive": Execute only speed gain lane changes
+          unless in conflict with TraCI. Collision avoidance and safety-gap
+          enforcement are off. Binary is [000000010000]
+        * "only_speed_gain_safe": Execute only speed gain lane changes
+          unless in conflict with TraCI. Collision avoidance and safety-gap
+          enforcement are on. Binary is [001000010000]
+
+        * "no_right_drive_aggressive": Execute all changes except right drive
+          (obligation to drive on the right) lane changes unless in conflict
+          with TraCI. Collision avoidance and safety-gap enforcement are off.
+          Binary is [010000010101]
+        * "no_right_drive_safe": Execute all changes except right drive
+          lane changes unless in conflict with TraCI. Collision avoidance and
+          safety-gap enforcement are on. Binary is [011000010101]
+        * "only_right_drive_aggressive": Execute only right drive lane changes
+          unless in conflict with TraCI. Collision avoidance and safety-gap
+          enforcement are off. Binary is [000001000000]
+        * "only_right_drive_safe": Execute only right drive lane changes
+          unless in conflict with TraCI. Collision avoidance and safety-gap
+          enforcement are on. Binary is [001001000000]

-        * "no_lat_collide" (default): Human cars will not make lane
-          changes, RL cars can lane change into any space, no matter how
-          likely it is to crash
-        * "strategic": Human cars make lane changes in accordance with SUMO
-          to provide speed boosts
-        * "aggressive": RL cars are not limited by sumo with regard to
-          their lane-change actions, and can crash longitudinally
+
         * int values may be used to define custom lane change modes for the
           given vehicles, specified at:
           http://sumo.dlr.de/wiki/TraCI/Change_Vehicle_State#lane_change_mode_.280xb6.29
@@ -943,7 +1020,7 @@ class SumoLaneChangeParams:
     """

     def __init__(self,
-                 lane_change_mode="no_lat_collide",
+                 lane_change_mode="no_lc_safe",
                  model="LC2013",
                  lc_strategic=1.0,
                  lc_cooperative=1.0,
@@ -1051,7 +1128,7 @@
         elif not (isinstance(lane_change_mode, int)
                   or isinstance(lane_change_mode, float)):
             logging.error("Setting lane change mode to default.")
-            lane_change_mode = LC_MODES["no_lat_collide"]
+            lane_change_mode = LC_MODES["no_lc_safe"]

         self.lane_change_mode = lane_change_mode
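
The bracketed binaries in the docstring above are 12-bit SUMO lane-change bitfields; the integers added to LC_MODES can be checked against them directly:

    LC_MODES = {"no_lc_safe": 512, "sumo_default": 1621,
                "only_strategic_safe": 513, "only_right_drive_safe": 576}

    for name, mode in sorted(LC_MODES.items()):
        print("{:24s} [{:012b}]".format(name, mode))
    # no_lc_safe               [001000000000]
    # only_right_drive_safe    [001001000000]
    # only_strategic_safe      [001000000001]
    # sumo_default             [011001010101]
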
diff --git a/tests/fast_tests/test_vehicles.py b/tests/fast_tests/test_vehicles.py
index b791bba64..1ae2d1cf0 100644
--- a/tests/fast_tests/test_vehicles.py
+++ b/tests/fast_tests/test_vehicles.py
@@ -33,7 +33,7 @@ def test_speed_lane_change_modes(self):
                 speed_mode='obey_safe_speed',
             ),
             lane_change_params=SumoLaneChangeParams(
-                lane_change_mode="no_lat_collide",
+                lane_change_mode="no_lc_safe",
             )
         )
@@ -56,7 +56,7 @@
         self.assertEqual(vehicles.type_parameters["typeB"][
             "car_following_params"].speed_mode, 0)
         self.assertEqual(vehicles.type_parameters["typeB"][
-            "lane_change_params"].lane_change_mode, 1621)
+            "lane_change_params"].lane_change_mode, 512)

         vehicles.add(
             "typeC",
@@ -89,7 +89,7 @@ def test_controlled_id_params(self):
                 speed_mode="obey_safe_speed",
             ),
             lane_change_params=SumoLaneChangeParams(
-                lane_change_mode="no_lat_collide",
+                lane_change_mode="no_lc_safe",
             ))
         default_mingap = SumoCarFollowingParams().controller_params["minGap"]
         self.assertEqual(vehicles.types[0]["type_params"]["minGap"],

From 7ce615e2f2990f7a8e67404feec065256d9e6876 Mon Sep 17 00:00:00 2001
From: akashvelu
Date: Thu, 11 Jun 2020 16:45:00 -0700
Subject: [PATCH 246/438] Visualizer tests fixes

---
 flow/visualize/visualizer_rllib.py              |   2 +-
 .../multi_agent/checkpoint_1/checkpoint-1       | Bin 10209 -> 20358 bytes
 .../checkpoint_1/checkpoint-1.tune_metadata     | Bin 180 -> 210 bytes
 tests/data/rllib_data/multi_agent/params.json   |  40 +++++++++++-------
 tests/data/rllib_data/multi_agent/params.pkl    | Bin 17562 -> 21381 bytes
 .../single_agent/checkpoint_1/checkpoint-1      | Bin 582 -> 26194 bytes
 .../checkpoint_1/checkpoint-1.tune_metadata     | Bin 180 -> 210 bytes
 .../data/rllib_data/single_agent/params.json    |  26 ++++++++----
 tests/data/rllib_data/single_agent/params.pkl   | Bin 6414 -> 6687 bytes
 9 files changed, 43 insertions(+), 25 deletions(-)

diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py
index 8c38a91c1..67b9768c3 100644
--- a/flow/visualize/visualizer_rllib.py
+++ b/flow/visualize/visualizer_rllib.py
@@ -166,7 +166,7 @@ def visualizer_rllib(args):
     if multiagent:
         rets = {}
         # map the agent id to its policy
-        policy_map_fn = config['multiagent']['policy_mapping_fn'].func
+        policy_map_fn = config['multiagent']['policy_mapping_fn']
         for key in config['multiagent']['policies'].keys():
             rets[key] = []
     else:
diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1
index 0693ed4b62a9cabcdbecb267201ea862144f212c..d346e9dc58b39a5b511ced70927eac1d0d32579b 100644
Binary files a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 and b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 differ
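
The one-line visualizer change in this patch tracks the regenerated test checkpoints: the old params.pkl fixtures stored policy_mapping_fn as a tune.function wrapper (hence the .func attribute access), while the new fixtures store the callable directly. A defensive sketch that tolerates both layouts; the getattr fallback is an assumption for illustration, not the code Flow ships:

    raw_fn = config['multiagent']['policy_mapping_fn']
    # unwrap a tune.function-style wrapper if present, else use the callable
    policy_map_fn = getattr(raw_fn, 'func', raw_fn)
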
diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata
index 7eef2ef15bba26f49eb7e79079714b5c7015bddd..febe7b205e46a15ce78f3248344fddfc47a3eb3e 100644
Binary files a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata and b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata differ
diff --git a/tests/data/rllib_data/multi_agent/params.json b/tests/data/rllib_data/multi_agent/params.json
--- a/tests/data/rllib_data/multi_agent/params.json
+++ b/tests/data/rllib_data/multi_agent/params.json
         ",
-        "Box(3,)",
+        "Box(6,)",
         "Box(1,)",
         {}
       ]
     },
-    "policies_to_train": [
-      "av"
-    ],
-    "policy_mapping_fn": "tune.function(.policy_mapping_fn at 0x7fda132e6c80>)"
+    "policies_to_train": null,
+    "policy_mapping_fn": ""
   },
+  "no_done_at_end": false,
+  "no_eager_on_workers": false,
   "num_cpus_for_driver": 1,
   "num_cpus_per_worker": 1,
   "num_envs_per_worker": 1,
   "num_gpus": 0,
   "num_gpus_per_worker": 0,
-  "num_sgd_iter": 30,
+  "num_sgd_iter": 10,
   "num_workers": 2,
+  "object_store_memory": 0,
+  "object_store_memory_per_worker": 0,
   "observation_filter": "NoFilter",
   "optimizer": {},
   "output": null,
@@ -110,7 +118,7 @@
   "sgd_minibatch_size": 128,
   "shuffle_buffer_size": 0,
   "shuffle_sequences": true,
-  "simple_optimizer": true,
+  "simple_optimizer": false,
   "soft_horizon": false,
   "synchronize_filters": true,
   "tf_session_args": {
@@ -126,7 +134,7 @@
     "log_device_placement": false
   },
   "timesteps_per_iteration": 0,
-  "train_batch_size": 60000,
+  "train_batch_size": 30000,
   "use_gae": true,
   "vf_clip_param": 10.0,
   "vf_loss_coeff": 1.0,
diff --git a/tests/data/rllib_data/multi_agent/params.pkl b/tests/data/rllib_data/multi_agent/params.pkl
index cd832aa1c3eb1713e608fef452dbe168746e4cfa..192cf7558830fe2e280e383cf7777e9ee669a7f0 100644
Binary files a/tests/data/rllib_data/multi_agent/params.pkl and b/tests/data/rllib_data/multi_agent/params.pkl differ
zKP5jWKLLC?bNH~f1zQ0I6OYBm0^b2Vc_6AKTQNbTz*)r$jSrrYj*Ww#k}tyz5``vc zbZ~-#%*fk$(!jD8$AU;iq|tzZ1i|)89-sUnZv@)`kYBgaUN+760VYVn)6W4e2h^i}{bDw1x8YOw5&$J_x?K`6Nj4C{Ot}Fu_K^vWkyq z@X6nRrxGUy1OfmOH~|2qh#_(4!mVwZ@l*8yRXQv-@;9nFsPkkom;?@%HYp%s z-wX@}OQ$n{!+@QU1>^>NL>_~QT{Ec}xGM-A`Kvt#?jY}`>H?0yB6(`S?HC+FJOv*M zM9t#h;ux@dv-nY9VlEV~t1CvG18AWVUA4*DAziJ>+I3w$E|DjL!NkxQAm$6_r_KcA zf-yW1AP9k|SsZ~2|0(E=JVhWEFej7AhyfmtflX&hH65Hpq2V|PE=DtC)Eu61X0c5GAI|04wr!hsIx&kicfVD z(2)7$(#gd_Mf#ee0%NGrpN;HyxLfaQEjV_o$s8AfJQ%yj@bAvGY#!!jA8ux!#rcj(@Lo&QRGc*#70ACTb zl8!)cmcbW$Br@>4Wz=Vp7%(+vX_<45{EdC^15dc>NEehZoPQgW{p|V8;Z~^qR$<{B()M6wTfdGIz$%Z^p zftlqA(EnzQuABl-O^5X&JN4`z(5czru|V(gcp4=hGetM4^69dun39F$H|pkJ!m#)K zA4MP!Z{Y+CHa?z~jA6tJ=ce~)MG*z<;E^H|04`-sMkcz2W0ply!?yhDzm3-l>{u7irNTlR$#9$ zsh7c&xLZcP%@fGqx*TH&2A*GRg0hpZMHYz|d-2vOCp|~nUp>OUC4Vg$e2$}tF zc%U&;-RS8K2wS8nut;?_f{mmgC`b|#cExNY3K~5yk~ed)A4n+umSI9dy+T7U zXuk+P^#NGfFQ{M&ofz~PA8Fv10Hh>R8YP6uQza&Wq!U9UVwrRdi6~hA-wjou(do(@ zoF;~Dffzu+^AyCP1`~N`zM0(LV$olHl-dc8l@lxjq^iQOJQ+ZFvTG=8GDdh_VDWj9 zlZhy(%EE*cd>oNYW)OhiOj-j%5XZ#O;J{{X!F`zva#cnmk;$YGfW-*n)t@ZUO+F|D zh*mSe`7ee-$I`&Yhh@gVw9_ILoOy^(?)>nsR!H7JE+%gx@zPf~yZh|-cz z4TQnTPmk^(3Q{R>@1Kz|2vX27;O6tBLE;V4cm{z;1KaLYE=2Vc44H})u%f6t1(JjN zgkpm`gZ`RU2~q)`CY*2y56A>cB85f4QD_u)GKNV^0ONo?aQ|u$V2c-;%H-*JnypZ! z;Uy^Gh~Ri9+_sdN7!zm!2ih{cog%Riv96>y3EQt+tPw|EcgLpG1Ct_ja z;%T5ChQ)#Ris`PXftuQ;sowx=;a+G0Y*54P53BiaU6T3*Z~+;zDOXtIPfIy?l314T zP{yZz0%X&y4c@kGhs-36Z-3L^{y1saHvx&l;$Y>{YqzG%=EGMc{#aGL+muYi*rj=`Z5hy)Cd z2m|{MEEl05hUILoI8yNWlK~b7;?jR&5HDEM^zl;kcnlNdu#7l9Pgy8cVbcmDOu+fz zFfd60haqs#;er;F~KOoSxY3A znEd~&Ffbl9r6$h*qb4q_ETNvf8>Iv<^)gqlE_@MX$+Wp{a4{|WAWPWf`*>mCdh^{01w7+gArpGBboO|c%w0_kwXKk508hb=CXp`cMFQ3Iv&)w^3RnQcY zN@=g~&1oR!pSE1nG3+7Yzo^w@=CzLd180xjj%(;Nc+Y*ImaG^NcYL|%(&FQEu2ez9 zrI&cC{PDXjD2hiM`ynQmwpdSMpVf?_r447lh?h!dX#EhWO<1>YLsmjx0J;7rQ+~G2 zu!`OMVfo@?>yJ_`&+i=ab=N7sRI_1?RUTxpS98Vkgxu<#?-(g*v^R-E9wAi>bd`_t zc}xBNOYilTb4w-H=83+ve7v}NSwQ(<)11}apCf2m3&i?oJ#5dZ3}@H|wUt%SwL@_O zwjR;_tL65(NcWfKCmxl@J^e7d)k7=%ldh$_RJ~^B;>g(Jda9Z~)*tjV&o1N65KU-e zH`^Y29GqxgswfjVix^LPI9O8<8D&5qmzWf-OFxyjP~$MSrEl??jL?*D?EZI%kLmj9 zhr*ABF_Fz;7xu>goHuIgbvG$^RU4IcOTJm3q*QQ{Us++^p^c4n_ZwNqZfHA{cXEfK z*-I-@_$xR6p5qqCk|pcq?PbUh@Ly^qwqwF3bQV8;>gKG$S#aIlbl^;Q(R&hG52Mmv zIZl(*H6G6!Gj-tMfOpzwxBI0%({mgsuG?=KQWtqt+`)G=4|am-^nP~8pVjK z95XJdthq?;*w%%ndw63zZg$>1b0}x=NUUk86=9&^qkq%E^AS7Z(wKw0%jdA~pQI9c zhrf*8-cw%bxp&6Nt*`t~ndlM3>uDG@>;B}n#_F>Yha)t@xDGp6XV&(8I9AdAv%Wg< zy(HbJJ4N5t!}v`Bd;TKwh^}kt$9tNJq?R3YInB3oMiH^iEa@jvtMcBfWk2|tE;fWb z>@xG?pwFD2d4oebeVI{KnwLKfC8HQ*Zm7B@x?-)zHD#;NoPdkB?k?6@PLR9Qo_fvr zv4ttK?*>-&DXom#RQO{S!cs|tF(ZGG)4&eM$U0ora`5Enn-50s9c~qWgZhH{CT2D! 
z+rDA+?%LMi^)1yyGi`NxC?Q9?GDEr;Vf|f!DE(Pgo*?x$=4XMe?nFXlB4_E)6R*`K z`e7*u8of{az{w)jLTguNB*V4Pag~In$@z-@nT4MVn_vFe&#eo1ayxwa)^%rO4Ktr@ zNFIB?`Nht`l0Zv2zm)a`?WlG4g3%{qcbv#w>3h>FeBs;cv{Z+y_0a+46Db61ufeeu z3w`Gfr+&_$UcT$LZ~1je3(v;h{`IeCWm;?L%(>bkH}XOI&C>espAgmP^EBDV+m0O6 zm){U5(pB70W*2RopT!-q4NJOHr@FF#6J77Fxv#XP=+=1|PJ2!%D1?)e>!fx4qPiBD zWNk*w-BwZ8SnjuA@4RZ34pyqS$m9b3KwjrTHBLW!(_sV0(X|RkkEh%>epgk|c<}RB zY;m6YP^vZYltHkgss*k2X&L3^(e!8aBtwm==cwcKBhMx(tIQINxO)xo$9fXG4=s&< zcIO+X-FSVY;cCShae?HVW3R(7+btJcDKF4$@LIS-J!@2^W-Xs_qQ9PL^5Ae>Us8h8 zqCoi=9zLa;f{dC~`&IPjipZ{l5k<$#p1dCq-R6iz&Sgnv*L@l;@rqE4FurNc-xC(E zqBPrn&FvnCk*h4OB|(Kd5t1%$bBBB9b(wj~^Wm&RG{&q!R%cUd1-ia-*Q1uS$Mdm- zo_pIJE0+h%Ihc>dB53&&YL)MUTDV;ok~bbX@Nj!z zxut>|ap7%IMnB?I%>MV0UTYhNK6KtYbboY9+`THZhn397erBURJ{4#B`1YeV419z<&XWEufi`#egn|(^mTF%>DP|WX||J{Km{{2GRzzxpLZPXUo z=sN*tb1gfg4-l|zAI4KY7jp~I6O`k<^_BE{KK3u9!nDg}j6_J$whuRmx~rTrPmz&a z;jG=1*j0Y*gUAR?_E~oA*2XIXJ&}%-+8arVklxCI@i_NbhxpH;?c%34Jwo6X54X5j zjr?qWhj*YtEXDz|X85sU}mtbJLt)r}B?Y1X~{omr!Uai6?t*dmCAm=`b!m}hR*7xM z>*aofuM1@pyFzhoWmg^)Zu0EM8Xqe@DbxFIVEDEEt0GL1(SoSsN0m>R7h9VuoWH7_ zcZ$45cC&^|+$X{QRt7V>b;Ig=4b(x0vOs>VbrqLfp=HFQsKdVQN%2fr~t@-kK*?2qRoV6dD zvpRwB{eps@?KmdCcoTh|xpLNX#`kp<$r@haA-alcfpc}`=j0@R| zUwSjUyNBG?_Uyz7xv+%7j}Ezq-}Jl~xfUVS{M6vMYlDmS(iiT|f%wI%F>j5^chAav zxX%CBTNi7Or5Qf>3;iFA;}^}aJ<`-vtR-4;m=ajtvC3I#@z@e{>rx%g%Ti1j%ez8Q zY&KXkL{GhPpTyp^)V|2m`#WLlId}D&`fac74Rw<^U1aeL^W|H{DvJ4sToMbm7y0et zK6BM``BwWa$h*2i98y@Y)z|5%W(cM`qR6{s?(z8U<*D|=-rzw`lOn+`Nr(H)Da8qWo7uQhqb9q9Sc6JI1{;FQ;^z< zfKx6XB!wKF6d4>pVz3;R;E!3Zn6TH<)uE{2O>P@s_@ zt6U1V{(Zgdj$5Mc_AL<;CR|0O;3 z03d`Hl?HyfR4yq0CHrN+&r zH^T98QJS&hFESWkmmWllQ*o4ReA4#*XzEH_E8SoO1n zdp)jp>#D3ubrC-?RL^di&F-xdr077E{VB3;`=4T-YHK#2{oe7KOFcW59kFh&THorO zA-kx4=g$(YgQiy)n5wECP0Eken%>I|QQcCttu1?Wy?nR+SX@?mRP2#^N8-5cs$H|t zp)a0X2}xR}P5)R>-M`GN*4Mb@5FyvD+4&_`y<=aR$Fq{R3NxTo$g!d5I>*2C zb&WqYKjV<1_Q>kAMp#THtMpN>cDl^F!n>Dlw`JDMYCT|?nkv)&e4gE%w(<2E>JPu< z8B`NS=JviwSY;D_RHk3F*KhN#Z#jiQ#s*0~p%)$2ZtpYRC_38N8@QLs{I=VDe`dg{ zivy9)3%QyXY62ew(8ywSkBG&y<1Wr;Cxo#rvIo7vrg{McrdbUEl6LDjACpSgo>`k)6Lf zIcAyOk`a7`9%<~H?H7m|U@x-5%~hss^L_A-;-l_cT$FhRk-1=NqoR>v&Xu=&0?kFw z;^!axaN6EPtlnSI0 zynUgJ06nf-=ejq8L07Xrw_Wl#x>wV1vFP@tfqVPiCysIoR*QruE$};C6`+tOYPuo1 zqNMn(-8qAH6hiAuj*a~7v$NaV4tBSUSjH~a{n3DKIOCJiQA7>J>>oW;OIU4yy@v3f zqx~{lvv1LsoTd=juU}O~Bh-~S_Y(uMvt_e2;{1AKg1J%OZ#O)9H72TtlCUj0GyB>7 zwPL4Ec$D_7tl3g*#nqx}6WLiF5}fr9(yq*lW_Xv-*V=v1KBD|0o`%P zdXt2^o{JPDR;&CmDr=d1Sy{AOWuT;MVt-a7Dfraj+)VEH^DnGX`{;z-cMf}u@L!yb zb6m9ar0CmE_18zYy$#msDk*+Ydi1m(%693xV@D4x^dA^PDSTQ=Iey~ozJ2=ooTu5C z&8>qv*8=68JC2i$5a&H*2cN}qH*e`jYhT)R^V4F_BSQoYccm4bquu?VX0`V46Nr>^ z?P=C3L7P3?aUZxRHODU*6!KT^ktFjlp4ZQ$K~8i_|=8Iffq(J>5mBUb%Bq-Rn7ptcKq3J&d6vpAzqyOmJgT zAx!y4#mb|fnW=k-FP>-xqGT_3CLWg$r<*-?a?dAN$uo`EOJv5<8o{g9wOXIq=7DYp z=laHxcbvPm>p}EO+#Av0)gn04vO1+kV{!L${_~ELYohy~$#FAnzlKXhq4)QRy}Va( zReNO18d}E%&kSGnYDu4k^ZmYP%zl6Twy~o0%$2b=yI8Ya<3d$>9Omp&D9j58PFZmB zL>IZ>)n`N1u+<@XCOgXA943gos|WKfBB{zxI27)d<^Nvb}iBzR#+%Sq5cVj+}yxO?j$>Y1$ zf}ffu?fNE{y)Q&Yr00z2&U1@zmGrPlo_o>j(*D@={=v;oD$bTQXIiTa@z{mlU1NK; z+w~r3u$`agk$)R)yw>jp_rl879+NQpt#zXEuUud2$%&#q`_EO(pD7x_DmbzJ#dw9@ z;I4^ijHG!@wdO_#nH)L8sGHBZR*r?vTCyaQYcXDn)H)1)8YpnS+@3A{Bxm_NsaLB` z;`T`fOAWtybkp-}u>E4>rM6BxUOjFgDzbO{bEK2h+OmzK+eEYu{1nJp>_>F@cdrO(_@MG8XvvyO7x{VX+Tzg|***e&Eq0TRXiIc^xAC;xyBuYo!HdNKvJy%)0nY`f$qq<|!*Ea|E zeW+>8C@_mivic#1H}K&`x!VI{@le?vArMSL6^YVI5M+ahf?ZI+f^uRjo@XJRb5F3v1#pCcKh0PoO{)|Ehg82mb(*W>- zjHra3zRvHXVM0(NxSdZ16lD`e8LRSHY*g=8p~#aGByP7ut}ObhLC6kcWfwr zJB@}93c=C{ol@-k~;&5GypHGSzlTuAb~{b8J2bn>Y$K%AW|d_ 
zJjg!}Zd$YZA06(aKw()K8R`EBRWnovw3Xd}Oa=j2CGM>gOXW%9z$^&L*-P_6STSK_ zN&uchWjdf3+;H7LI0UL7z*c#}2{|oMK^g^12*UVaX*d8wB}C$xjtAeK*riZ9EcXEk z9#{Bey&gls0};LyEDevL2H;6}8aW7qBVnmjCI~qYw@(NGvO*9QavEAhFdk}T@H#o| zcC~RLlL)v7CJ?R)90vP0?%;-RfFnROD5ogNxmp1&6b*i~$AX6yT?M!>7+*3DgJGhn ziA-lEsI8>nOmj|P9$^A$t4+||2_TdDe`$>WZMZ6M0(4i_vj}DIWBdpt62>ni2nYKP z3s0FS%1cDFMbdjy# z7d2^m9Z(OnGlc!|G$@Gtu#lHyAcB4be(mq)-{$e-ffH(dYyw&~=2#;KxCfLMV7lD3%n0r!u{ms%SJ2 z%ivR_;%OnlKUhNGp|;hgKtecdX}Ad-+q8ib4HQBO1G_b3Wpsckh9CtSp+zB)2tHa^ ze|!*)%8ZBPfu4YZno?}=RYy~fLkB1ZNs4Of-Z0PzaXv zS91u>83rsI98AVQnxK&VVJC<63qeFDX+zJ10!SEakT0tdL#1IMOdu0a6oiNZ1wqs}?0e>-$7Zd%P_<+g}*`fXn zO>kCIal=pgOb7~t$De}rC9oXnhdTN7Wau;m5eY#-MxkQa{sO?_N8Aq4g8aJyP+_pM z3<&|LWwkow|AONu;eW;P7Yw2~MDH*EVwpx43LHW(<|m1Nm<5*MAJD*h-8E7$D1LYv z@x@?+gGmt>axn83WDfrg+1|g9@x#2SEWZrJLUTI{BmCUq`cI7Dtg)!=KP>cLko|5P zB24ZF2(Vqxgyq-_eL4C++{?vs9qp$>`9YQ;vK@yU3wA11plj7E{tz{we08pZ>;YFD z%3xVh$Ti4cL;Sa>SRMPj3FOefg#b+HhnUbUV;k&7ps;}q2dryZ@VLG>9q)_rCEy_D zt0DO#Fi=onNJ4?Z2L}cm3ai@xH7HgE2!RO0J_Q9yEfKCb)U2wR8w!ba_;6?m#b8+r z2*wW&G5cF+Y+Id^e}=|CeCltShdKNTX~&S7+5fdkVFteOu<+0T+m;0c2ljn)xwR2pSnlb|o+wJmgS3g+lO!db519 z5XR915{+c(e@)7(A%7Ek1T9$b<@Y%m&K5ZTT-Tut05}T70P#Q~esyh&0-A~9u<-J5 zp$Qj;P=N+Il7NS3;0aL_ZnA?57pMSQ(7*t34HB^v0koruX>eEKIksu2AMgg%WWn{y`?O4Wi88f^svs7u4W@h-X?N zeOP%DJTE|$i5+ksl%E0g&;bdr(W(w?#BNwvG@6x$&{?Ge6dgXvmV5}r(Ul)RC=c^t zumc2tfJuA`gLnoNFc0E$=w_AHDwCH`eRVJ$8qE5!2Cs!u$RJY-s;);%kvr>FMjd=! zUyOQWSkW&LaWvm7;q4~zt)i zF+)A`jK{GzO=Ol$FFwIHW}rE$7<)diL~VQPcmi z+E~dzPR6^JSB}imLyz#8ZhI^xvZYYSA|q2q>RWpS81ztnd$82T9wnKb7mEaAj%lo{ zL*6<$*VQs9T)On~WpmV6{eb^IM6Rxl;BcwKHl|ElP@~;lH-$F zp7^w#YjcyWlKv(u$I!dHQP{Or=P624{XHt2=4Uot+^IA-q*y%nNvD2DVu@)ju{&RQ z>16K2z5!o(AjNnQd>oOR#iN?i+Zt%j>CNlSBhBcV573& z7mgKkxI6pW*(CLf&%w`{DIE#hko&`=IMIpETU^&4I*EN?-+!@Jz&)=iUvPxNSC?zO zAyVeBUQb)`VC8t=G4EqonLjbQyCyWhG%7p0=rr1YLWJZjL_3Sd3&t1+2NWH!J;p5@~T}59^ zt|LD7jPyF}Tl5i(FXb@_7d%q%$Zi=NNgwx=c<`eB8j39_`j23bQ0@zSrq8C z+ZMCNHF5j}%yZr6r09wYF__xIFVhIZ)X)x{r|hJBE)Pw+uQ!~J$)9~`Myt?%r9(fBX?n0e)~Wx6(a}YEV*vlvlR`}yMY>T1 zE?Ng?BVWExQ#fI(#h7bA`bS!he|F$Fi?JHS9Vj5}QXJzAeIjr*0C(6d&xZ&O+Pqq6 z1;@f&Yjb4cW0fq^y1`pjjb3B^>7>^=A+{CSkJDNet=@M&zb<2a>al0UCUcxRzUxax zPG+SK7<(h}UDxGy`n;vw!Ct(!CnJh1X0ld|{ej`~qEs3Od}*yXR+b<2X! 
zrV+kaIA2jmmLvN-H|jq3OmU6%Qm{Lx z=^AM!80y82IdEdF46}6Wi22(CW8dnLqgyyCT*kxVZudqE@Y78O?~>lAJL_1ncjU1L z3x5`8oKf4dhwgo5A<)q*tD)hsKyJ&&&Y4+Jrp69Q&T~HWZ~H>hzD#!LPUxA9FY79Z zR~|hu{zmGCx36T{mvH}eTN{PDQomCk9#J#eR_#G)V&bOPub^+rzNmPT&n9+bIB(ab zkrh#QFMN_uh{l_}^uBz{_#=#Yg%_6%b5QQgg>_~42KoiZ2{tXO)P4)1nlpCS+q9lx z8$EgO5^hHFT5eTWsw~&T;doDT%j_6SQy}pMSDw|V)W+&SNm1V7ZQ1H`omV7dE|iJA zX;6^!velnoIJ_@J$ibJ+XR-Oe0e$ro*pyV-8CojgyIdP3K2bFTl|y?L1^I_%7> zKg4BnQ=kvOt)Wu$exeX}o|TLGMr49fc@|BUJMPBHi-L%$TQ_bVsm)%wYJQg8%E4)> z$!9i~c(U1A{3Di>GEW@o$@6xK$Z+Z2UZR z&B?TfnK)+DzCK)@{<5s@;EC^5Z^a(4yATl1cNV-Kce=marJ6RFX(y!CVz;80&NcdV zQr9Tq)2l)f$qd0i z)#r;))He2g3aDK<##KczcFX6l-dspqEcP;9?^&HI=#_H$*a7p6C(}tsF~Dxp>-3 zM7Cr5p`f`7D~PSh&*?eL^gcF;SBb>PsYmZ$1RcNlsS7Mk)wcX_8L@Nf-5zo2(G9%K zA%_+ozwsI6SB&jFNgW8?ZPD>+EJGt|UCra~(Unt5y>a(5NFS0eIwsw`QyH>oE7)u3 zQLQ{}YZJZCcY%O9+C1%${c3tdbv{A2&ra}-=0~rHHOSVKlnZX<^tU(G6>miH72U{A z|9Z}!j?sAW6erN)Xm`}Uyu#(c^=|dq0y|25R6Hic!=iPc*hI4p*UFBh z4`0ehJg$#AvoonEcOkyGt5uyt6g<@-th8tvH`Ee4QYi7>ljIE zZVuIae`nFMF6O$L-8UwkNV}D7h1RY;b1xZ=_ zi`RW~b)Pn$U*R^?OZL8*^m70aN>I3+6dU&BEyL{fHp#E)%<%gX8gTjvrFjkr z+ttkl_~p?j#D~}d<(O*6;6EjakDyy(7hLor5p*cIhH}DwxcZ;7RJhA(s>}|h$#pOj z;vfH)Ar`D5AO1On{%b}Kvsq1skx15n&;K0oulBs60=g2}paRZ`i5zh4KcZq~ z!*n7a)G2vKJr7G?M;QhjIuv-ePqTj>Js>SdyM6MxfOt+Z-J ztU`*T#EzR6nu&A>MCimQQp>*rMC{y z)I2K2WO}VVZ=9}UKV_3e(oB>Wh?=0Enc(_(ry=JX$iJf3I<&*Y-FDgDUE9ze?ZLfp z8HJ8!+T#UR-j^z%Z;X^=uNR5CaNV8bYO8Dgqnc|58ZU+z;@#D!?dJ~_YKh1ed3z?M z;wlPTwpWdBG+T#Ja(FLp@8_2NHsW>R5zoc2R8EQfK0;Yq=`_tm_R=xla)*!%dTKX+ zv~yXaU|pq>+tV{)V(D5B@9b$x!qsJWE{YEaf=kK{{8^^|rfYV2 z9cwSBgg>H~|fc_y4~xO06z zUqxS`OqqZG(5c6EDfb_n&$nfCzq@^`4}0yeqa>k}Bvw5$8YSnM*T~2bJ4@e=c_pcKJhvRrWrRfb4 zyo8(V9~1~v3K3on?tBG_JtgH@Bj>rz7_od4r%kWx9R`KxU3G-a;@fy_b&PViHX%Kd z+Y81^a80T?)$wZM&vRU#Vm-E7RLv{Y?D}pgt5GAWgN}Rqg8xc;qW9E27X=Ud4MpF1 z{BY)qz!8C)iUJc(HG^q|Rc2+Ko!jX<9GTIKv^VX`{YgSfOZ!J{_A8V4s;fsr;sgT> zJ#6NUXQ*k?rEL!Fh{(Z)?;|iW)lm1k9MvH9HHUX6p(0vjZoPJqvmEouBjawUt3|G-gx;{ZexYLZ(ml>?ZD$zuP^)Zb=5cyJq{i3qknK;Y2i@mBc)Fr8D4}@t56isJwUrij>}S_h2FH=i{6jm#i=|z5 z%Nv|7CyV+a1wQ?8?XpL4hLU7(^h0qun=MNL($N!vDYs|08c^lHnz_t&x7p~1+D+az zr#8x$SQD+iUoBgNj`W=TzR{I5xkQ`^N&&mib=0$U{ZGOwi?48JaTiVQhsRI$w)BUF5z*-xGteP@-ATr_qHZT~|f zuHNw0#ISd?z+G=sni0=7;T79Ebn+p!N6H2KenN#xN|92Go^HGC1=YSSt*12y)+DlB zeK=r+6lgb3sh30Psow)iCT`E?OK3wG7=_L8?$?$N$$Pi%mMJKbGmk^jP7zf3g?E(R zD{vf6B0PR_-Y#BqJ>A>K6%j+Vx{#rjz^x+iV14fnpQOv`ZJyK5gMp|#PI~9J8#QO| zjrL{AFS92%Cw)4h9eO?@HE2VBTeD>Gh%Xt}y{=5?5D_<*{9kO>^rO92 zuH`RVtKogocwkAdcuBWRyz85Id7H-Gjf=f+Kg@lYn^#{^k>#5`ii;CCmbBokA>p`x zwg544t8Z~Z-5~4KNI|-})@;dfy1g&jcdos8=)!#;8#9eyfn@J)f8Ud*tN9-ZByy@m zS>R@lt4re#qNa}Ra%^ba_o;Ct`WTm8h3Up}M<0G$33IpBi=HCiy$4+0FDqYE9JiddbdWb$+Ty0ly-v5?jy;9$Iog}) zW64{N`r@CU+{ov8t*)%3A31w$u(e~^)4*_a{3}~7K#?viZRvYZW~67TuertQRG7dz zuI0l)6F2l)!nWsp(?i9Y?pjM$pwo>DT`^bcBZoSU*2mVon7gjc5t(tQ<<;U>gG^6B zq3(m%D^AA0HhbFDEo<3p3>n;}^at87@Dmy|?Gl zvqaoeojd$*N)~D~y1U-Z&+K|BIwCg>ylC0BZ%;ycoXs%;-m&R{6Q}Hb3x`5F=L7bE zizWcJB=zdn&%73T4sQT@n9mlk+@d;xPDBRYoKf#+*UTsFwRYu^O01Eu(dERCS#t8* z_MCQT4nCRs&Hg&_8P|Btf$a_txaA^7bvvwt&0F@{C(tg+d6HE3zB515QTe^w(d7=VD!YM`cLkqb*ewYPW~8zGhN!SCN46Y)P&NpGlA zIi~{(_Vl>~h>d8yvl&R8G{793!z3iVEYv;R@bYvSW2B&93w3|wKKT$2!#b0wh0nD^ zR?@*aQxCZ^ZFZ|m7rKXbkTlMI){M3hO)!YS$)6Ftx3m9=ZX9Yrb>ezXo$yv3Qs)`5 zMB#JD*j>Hmdr$6@FK+I}PE2od&r^wZKGm+YRnXBm%f zvdw%FU+GWOQm!&N#q&<1!+71=aP?YpsEf_HG~M_uQj(Iz?BmaC-4;hD4_>c&qBL`+ zZ8}WhRL?WSkY#>r&x(*{HtKUKuX@$a_PWr|9BFw0VUEV~oT`iI=?YH!;(Hr8uF&dK)b3xCfc>g;eH(5VBjFw7gQ(zc-O+8qHIv6Y$=ER+0ef5Q8xbYM2 zLgCEzR~6f*+AY(v!gDpuS8%#v?vBzPgQ}wwqm269A9wOwg=06N)jzL!{8A`Uuc)WE zWPC2N$c3_JDT_BW@ZNC6({5{~ZiJyeUAR&y@u0+xyRc)SYm-qhVyR 
zJN>}GG|yw_ph3rgFS3&>_n$qGOqlFrYa3=V`_)>8q4y__n2h$j%iO7(&R-~REWx!Q z($8d7s_TAz7Q(;uB)WGO$A3tNk3j8IhEOn>M2F5sK<5mCgW=OK(2fy90Ny;Og#_WX zsMx?@XbYRc{mZ!oRtn8vhg*JZ1N_*>LOQruviq`j+<=e-rWyl@fwlt}3afvI!~5{? z=8F%3M2LiW(EvOKI@3b;*9wkc5)Gjw)Zv093h~=6-0vIhKX=zzJ8ukb3?w&nW(31z z?Gdm@{P?#XN}Cxn|Z8u`((; zJS18u=_C^>A(8lB$7RLGLzgUz4T+8sO0E-1`KgExED8x-xMEps!1AbNOM>Ht(!tRS z<1M0>En5;~u_AWKvKWj1x6)L((qzaWLNd^g((}gNCgabypNxJE{8KenS|NGQMvGL186ohJi zQvcLwB^>A`;U<|bRG%RnG}2AlPf~o86KZ&Pc)0!*zy5u5lZX`#4$5?r5Z{F*GX%WE zk}0C+t3vQjO9gJrxjT|d*_gV17zQ@ zq0ijtu+(=J*;hi#!e`_B-!il(@;q<8rw-C&%~9CbL#rLLDERYZ3aCkhzV14hCv>99 ztS`_xc_+?3`VKa2DOf0j^rV#59(3~@g3UYL)Y@r>_^RHk}>bBBAyE@3rK1o$SX3*qFT~hHc=FA^D z;g{il;P*NVhZ*if{bf%eeeOJy6fkYfFF-f8|L8Jdk-MvK{5Ae_A(ljA5NctJO$;8 z_Dmz|BPYFMG=yI(r=N)#v~&JVe%r_*F0Xk7&MMZYEiQ|2UX3w-Y^^>`@6|;kzLY=w zWHyGclEnwH%jk60Y1%wo6~8AappEHtuK95i4w~E~`sE^pw623?4?E1vPmO^Vqi&FK zNP^Oc#V~5S7JjbjVyfz=;oDOM^e+2rU#IwzMU3A<$+lae%kvXkQ#Ov>;|iF2&3x9g z|1da?(MOv@IruYaHgciA;CWLpm60@MRL-ZOu?5g0H50#j_2XY?AFy}vj3I%g4^yo$ zL$lz6Flurn9eFi@0;d~c>}8y8C6K_fwjsP>==1tVFls#<#v4B3!DOW_J~A2u71j?~t78xUsdNS$jJIUB zm%SGSwokJ29QTVkSx=)`-z{)~gCW^p_NBSHbMbeuE2m+W%CtTuW1`Fs3XL5^Pe=KY z_TgzT^xfGxNG>huT9$Fj#LheP6W^ zaw-n8XO|Da3yUGRTfv*e>TSe`Kgr7Q}`vGgwgNozNrk8ZHPtGtSFZ=5M_ zbQ#JvyHc%XI(a+l;FIky*-xV@@M(Y-I9>e(? zC%3Xki6;ajQnWECv7ELh*`e9yQ~cy%n`z3Je5`jJ#gEQWV^J!P1%EsG*xtkaG0n^d zN6TulH*KJ23D%SNE?(R7Ln_(`XZDdMHG9F2Hk z3R|rVaG6p)>{@z@^L<)^N*fZHUEfSdsWyjW=KHCB;Y>*VvV&I1Y2dtoQ~pZK`(Do z$dO|789x7k2dH-^(67F!f*?bq=7dX1B=pz};S(Rg$i#b-&YB+{Hsbb(i z5H^1M0h_A)Y0}r7=&|37_y=FXJ);FGTlpq~D?)g6>@^Ds+)S#YhEmIr5dK^J5AbKl zNWXtEysch<617G+!&jA^@=So`9xqvDZ#ubr8%=rErc7;LF7I`CI}R7EqRS(4!O~_k zW&BP+*AIrcDU4%PSIuDD%|@mum_Tm>-PpaT#kg{S_`hv(4OI<#0j@{A*yy`O_#<~b zP7ONF>hnclcqxj$gnYrrg^Or}cwRr(D50k8PC-U+EoQxrpwE5wd`;XNoILHS;ESva z=QdNBv=41(gCk^-WVTXk`ylMLk;nA3gJ8^;;+gh`czv%IT}Vj6g4fn?)vFBEr+wx{ zW932StS)XJoy9(V=px?>sSw%T4oU%Oq`7wvxAbobQ{53m{K6#qF29UZ^s0e0oiy6_ zTN<`ml%uBKM6xJLqe*FLP$Fw#w3R9e1{H$}WDDb{n((HIPOr5I3ppH2Y(=lH6xx&_=U* zShj7K=)&_^I5O-U>-`W#McwsG&1yW|5!K?FZEx|L%MSYEyo1a%H85S>0M-^?XU$Q0 zRP?@|!*;{1wD@2kMaC_po6)8CcBcdl(JH{WTb@*=GKA}LiiDwiwn5K@&Fq&$4-2Yt zX9K=`#5JwA*t`MV%sA2n_v)-<`y^xtQkT=>Yuky7{t02Dm1y*>i)`sKEoNEK%XN)7 z$bM?o;)06t$Q?OD=D*FE`BE7QDA|i)Foh4jb{Wp(6wpJ(R4|z1!k%BqWU}6#?3n)^ zIOJYR?US4^=GSKGoLWXhRJ`DVg&BJoX~t%Z*~Q%VX~3u}tJt69G}vrcjlautzzJe0 zFzz5r3VYAe=k0)ScbQWE)GQ*rlYv_~YbGR+9 zKo&2(@ODidlg#grC1wrO)O?VBN9nP&$*FL_`~({>k%T`+s?&Xr!1K5=U23#JCHRc$ z-x}Gq-Z^|s?M$rMaupLxqqs9gL(pz^1Yb36H@l@&fP*B)LG|=J_U=h8Tfe*-_j*p` zi>&X%^!bn2p>G>N?bZ!g*QU!}-0BC@;>TmUrzV%lUxLaZt-OLy26^Vb2Q0mduf_@4 zo1X#f>yay9srCikmA2rt{%crtO(kYoyn_B?{ZKUN2U{%{$lm`3w79m2j?^B38TU7! 
zy+jf8+Pq-vKCI!NHm)PDfMA@{p2S`XjbK^7Q>1*LTd(Egut;uCVB`-Eiuz7g<#SYq8pTZ|4t!5)zyBWM241E!;012a^e47TT8X2RX=6L+H*qT&) z1F?UhHtmSc#+uuD{M_9WFk!wFxya<@u#K1gBT;uza!=}xshsVwQj3N+Bt zVZ7&Fc;+RZ6C3*TvjqZrkhO>|1(Y$j!R?}buT9YN!yT@0_f8t>yPN876|tT-{fPE# zL}$Z$EJxgvJ#LjojpaNm$X3TS%^UGp<0-f^Clb}hE~0Znebk%j19f*^Le~WaD4p~J z%7;DxnTu2Dhx~ayrKS+~R?j50X^Jc)tUndrpN%iq72>Rt0-EkUmO8MSPAa~{w!(4j z^H}jSf!bWZRj!fsBn_?mh$)Y&LX9u|*n>@B^j1cl1tqqUmEHih`ENRRv}!!t ztgr!3O87Dzt*LM-MUL$6s$==j37FYlNAv$|q%FTeq&fc@^eURc`mkqsVXYQ-w{ROJ zzb&WV`4F^LUEEahVI!%( z+Q;{n<)e+Eo8af_bedXyf!6n|fsT*snCH4W{&bWkE0BJV3bV68P2Q7LrKsZHgjUFu zA4U)L53$KVKEeTYJ=}Z44sVAXfVVx7U>MOKgqpF!Awhma{ulKcv}9q7P%BI!>|Y@| zG)$~QwPy%*{!yU@sY2bbL79^hTbREnKbMJ4%qqDp z>wESF+;+i<-T|VBv}(bK_ICSbudb%wH&=^h&08z-I@8{?Z;((h!&F!F%D>ru(?w;0 z_Ktpn2d;{O)*K&E-|c$47Qg!IeB5 z{Qs5^w{Y2tkpEYP8~mpf!+%N{HbXf4e+0FWxR`$>vEIBtxYR3!X2*Jx?58eT_C<}H zCF8I+QVw1IJb{ol4YKK5LXJ}pvmHN%(SElmc2z-_vpHK&hrU-}<+A>G;uw!E?_=?q zx+%rXQ75j7pHXC(AyRl?8;fhb~g>Cli5SC+)169!uq&fofXij zDT!sfFR~DYrIh+85FI=YKwN_t=J_um`Q<~|45dY+*SLXGh!0?qM$%l{>^| zl|XmpLY6w+0;9?-xG}{%tUqf^v&Bo(Ni|t|IrbcU*UTeF`N04-NBM&*7qjW16S1Wp z=w8i9QnL@o_9P258N3oiSVQVAN zXRsIgO-p5Y!mXtIF$IG!dt=_Ek9hovgJA6 zJ2sR0y%^YhH9dq1P9KB~fn}_x!~&i6X}?!7zRmazCmq9a^p)2%eB~VOn0f)aSq?(;D~)(O>j3Au<|gwnk!Jc5Mi?sT zjplzv2pXqwR@NiXTd<8Dl$YYn^X`~+z?zzO$dkyRfz8Xz=kKvi@afbdTFlP{37Hji zApR3uxhs$UG#p{^tERy`M}5}tvXRAo38o|0n_1{5b7pd}O%Pei@$dTikjtzKF#UTX zZJ#!TyvE-~rJr-~+wip*_9p-)l&{A}->1OGxg~hv2w=F|J`9k^0h`q4)Gu@ybN@RD z>lR7kJH=d7oTG%N=FOnXy2s&zCKM?|UigQZ?n;M{6;WaoZwY55XJ+Bju3b>9tw95R*ke({7|N=ufTq@13Mei`pQQcN`74aIT}i=!RYlY> zYYOW1=9AtFcj%ZsiuVXxjm8JF1hLBFSZ=uozv=B#ocvG?qYBp0;r`aN|E(>?4m?Ot zwC)mj{yd62q#bgW$)S{U6083*O8`5PP?ev86KN&xat&pheKrbqmRGP1z1w-`0S#Di zK8>7Pj*!QgwYWaF9`!3tu`^Q!V`mP=qrHxlciRrlRmWrCw-(WhAT=_-n~X||XIOHE zBt~5wMYkvMT&tQE>L%sT(5c#N(Ju{TVJ)2TOfNF(s0Ca9e01_i=Jm>5C^*gyjfeTu zD(MQ5WbFg^Vib;F^OD&Q8F9NYy9HNRX5iIdf7l|$>n!H=NkN)tJ_a9M%W3bKPp(_b z$RqhF3?4X&a^Fs25s9Z*n0y6IDZPZLzT&x}$H=3o2ESDlFtaI3XwuuC>`>cj*1B~) z@(WM0;V*`umQw-6f2?Whu6jfU-@Dm^-WY5jbCA07^x*Wasoabs65wRMgj~!T1tFfV zpl4AHo*8F{aUcA7w?P?H9{a(5L5Kmxb(W#&vJ>o~@--%Bs^~D!-H#F%p5lIVmQwJ} zE!>Mq`kYVnOqkJflFgXW!9D%vh}99NS=-=hdfdIAg>RDPp18_Uzem$BaMdd&{B(&8 zlW4+*t$M7MU1bX$v)IT(LOj-)&4l5qm=WlNH{yf1l-F`7Kj001sJw_8i=-XyXX)aX zGrzzp!yAIO4;MH~)pCu7KhZ!%7aCpLxV}Y-wAiF*f^feAJkV~S;)%(q9M;KAU6jNI zchvFnsY7Abg$9Z!Dx}iUa}a&YVAe$oQk*J@{P>|*y;T5iKAZ8WoC>i^2jRxO$84nO z3N~Mc!_VcX*!l;#lpVDR^ws`CLO7#ID>8YdmzJ1W^$f;+n1fJgk0wV0&@9~>m5Z-Z zzM>Al9#nDb^gYZZr9j-u1LB$EWH{OhEpOJsxDueKtGht_mFV;7B&OJY0IB=7;yKVC zjWq|*!Z8VA30?}b$4(*W9qD9Hxpg zA?i>T$MD9nyIj~WHI%Xp!k(!cp!cXR{@SyS30HjJRtFts`&0(P9K)$tHf8|KJ>r9s z6L#|!`*ZR5?}Ip`Sp>VU{Nj?L{h9aeHe3$_=}z(jrd@NIqHNpX<$_XL+Lp$a`WjPv zs2uj3=zxqyCGdG{OrJG!s9UFs%vGG=+$T%!$Z7@H^f8k?JpP1POc{phdQ!Oj!XsMN zF9Kz{WhpJ(88Z$#@D2Vd*!ZMc40knHgtH?qsaK~&(`@#0oC&A(ek5Ht-b7xNO(K=Y zd*JdxIpX+a{Ccht44qzrs?sy2K3;}y*wr$_b8Ref?N`|HsF#bc8iS|oe#4+;`pumq ze8`t&kn4eySU#vK zgdWMPYUNx07LeAGG;sX%90e*8&4Jd|WR!6p&5~*%;oc2MDjWsDPn&S;(X+VzT#+F8 z&3p9IUB~5apGb0xjiB&lD{Bkw0bA2J@{2T~OS3k>cJ;+{R?dvo*$hA<>8&t1?ItLz zoq>F>5peBmHVP|4@k?qxUwQX5{+cO|m&$UPZpJQHTGz~$d9<;ZRq^O9af9hBZv$D` zsW5z18P+OvUv$YaO(YOg3J@C$^)g_rPI3u4%L&XRIl1+tb99wY_oP>@0G! 
zsU%NF8@e=62Jd#7;)a8jbnSa8-igy835OB*`C;g`?| zYH2FuE!rQFm)0$&ajpWD>yN;?SK*NRZ9jSGD$smcDavn|#2wx zflbF@_>IxbHfJ3+E)L>Sl|7;UHlXa=?|fp&R9JrQAz!0?m<89L_U9f+vE;p!Jgqy0r!bbz1O>zkl z+!?+GSDUtwcH}*pzwI`4Jlo1fXqsa~d@>z9#8I#ID^@deJ1W<1XHu`VsMg8SZotwz zm?Txp`uw8tq?9gtnIOCu%w(e@FLSPYWnhuUS)AfOoW^)sz;xGT82x)IsfI2Gd)*Ew zy8as`Z=Ay)KGMcd`>>5(UmQuFO^?91A+69|lLneSGtgB14vwFl2Qw!mA(vXiHRzX- z!P9y+e782sc@c*ngGOVFgahtAu$@U47Q=ylN4W^MNf>M|Ue`8-klr~H>iwF*Z+kQ! zvuEd0#~ed)+GCDepSIwgvmw;cctEs;AsmMGignGX_Q?TQGn15c4 z;yY8B|EglJ9-)lynL-y~bMbzpdi@z1SLDsyL?K{yTn0U|% zJ);{)LvuHjr+bkw+7@QlT;vbj%7me63N#{5{NLRb&6i);!sc{1p=xqA4Iiw^Hf4BI zMRPo#@IV3In+MbWn_0~NwFAZOOTp{*z06=>8Y=gha8g!fxa26|DaUP~H#wR0pLPrD zKUuSyzvHp;(R7k*naexd+OdZ7{m?dG1H_lBu!f3!jIRpjPGro3$T>e*f|olP$0%Y` z`Y^mCd;_;CQc-08m;acqjt?7;vf1}{(^LIAn&sy}DvSK+c2LfGM`)p=Hlk!Kc7u{J1}kth&$%%FQi+t`27kd1|a1hQk1j z$&3YNal%ot!qGwhi!(j$KhCtpe~K9MPZ5?ggk%3L;vbKB+`p1oRYKzE2YDuZ=Z)T8 z-B9s!zChdTHgqi61Wmu)>5hdV*=Ww;T9!rQgAJ4L+mQsEIBzc-|F)8C-8Ta6Oz>hI zZ_M!3wOTqdW;#WVe@?XD6}J}c#?d~%*mNmvx@$TD@25Oq7bhNpRSl1L(X$y}4?Rp0|Ip@i!l_rXaDj9H9xN`HRr|9$>e13d~=t z^$+P{)$ik&blE`q@kt-|6>q0$Tb9#;G*{ZWqlD2$S-?I09lY=}n`>!KVGVPkOFkOz z-`B-D&87Gxzlhb1)@EA*mt#Wp2#Wc%fDaeh)9ytvl)pfleYOx_m}3nbv)#`1sh%L? zNjj8TFb8wEJ#2@%8p?e71}QD2{LS1yY}ng6Xqz{j394OSnpO#I@o0g2`JqrXFp|1w zW?*5~GM2o%kXIYL1oPG+S(umdI^xIrf{imNi)Gh%``cjCn~K|93c&T)6=oVNMJ0YI_@Y7sE9?ze?R4a0H2m%7cc0*6Li$li)Cc%0ahSX8H;@L( z>|(2W_R_qfdtB1YAl7+k3tHdZ&fPgSi5axarU6ZRL1OMGbe?7hM`zCj&+K71c-3Wr zac2=)ee}TR3PrRs(~uIqw~*?y8f-D%#gf#zp>gMPc(q~``@Lcxoh_-Ls-tquYLYZo zD%GLnU=!3oTLdn8{g_+p7OwpG7V791N*jmHq3V+on7LvPlX-1R%~~g!Q}iIrUiA)4 zf5yRgA1S(`rc7-whq9^PLGa`T?vEJi%Hr5P;@pK4;k6gAys({h|(l(cs4`bY}cY@SYJ#U*5sLK7R%Zehi?-%PBCn*qQWe=g`l^QLx); zF~6EW!QV>rp!18QX^TY#+q6jzXF8b(IPK|V{uju#MFTx=3SdiZ5$J7or7IG5>E4== zxNB@G4*a7`vm&F#GT#XO_Bo?dkS#8_bQ_{fZg76$R=3=>6~3K5Nj<*~LebGOMA^Zp z8C%NT!ALZFcL-yvI$)x768NV#z+&+;vsYKr-ce};zVFe7%Im49X{JU*Vu(vEcpHb_e%H>3@&CR7c{6% zV>bx9N6_q-H(+%{CU~M+S+xNg+@OXBzPN9R!km?JE%(jJI=FFH|{|CluR@j`hXWM zoQw);R+Q7y15N$hvBq>7Cymh>%C@wXQ2n$T zmfU)oS$d7epKgnplgA`{lc+|AC-gD9SuQNy>^HpoQ_0VE51_XT%*E-1BFr0H1B;7f zF+E40q*at)%{UdN;~2_&tyoTfI{VN_zKF$cKFWOTw!xyb7z}Yf&9-bcAPJ36{CW!~ zW)*Y-o(0KKv-3$fvT_oo|9r`t*9T(TmN}pkexJM9UsQeJ=k_dWT|SjM9YNFKCj^pvHo^XN1E~Fa5?B6u z17{A6r@Ni6jr+)zN@$Uc>uFHh?}#RM9k6Ob1G#0Xa^4RONq+QYXz|(y zzfEKKsd@*1ciO~WR<`k3FQRFAi33f|;4tCUJ2oJ10NeB_2aWy0SaiTd%)5R=^y$?m zluZA|G@b0xL#2UpkBY>tWn(GjtUA?;v53UcM-X7*M>@whV3V&Bt|+<>eJAHod$1i9 zEOaE{`YdWxvBB8MF5F_{2JC@fV5HNZ{@7>Ikl_*Z@IVBL2d2xYO^N5qIJ3F?Nv_A3 zC5NeEfnEi+9vXr>C#7?Sx6iOZK?v&%&Sg5}!XC=k@;`@8W?g4v*m7xW%65IqMS47i zM9BhFoWC6heVzdGE}EdD*#Jy#Zs*m`X7JT@E77Z}oqL_RhpCUSW|`j8Fs$hloZFd& zD^LXGF&pVoi3=%yx1$+@1JE@_%ybAJ`42X6*(e$qrcjz>IGD z9^l4xTjSffe5w49V37O{6{hl8WaNp5=X zMf3+OUbZgu?4D#F!W3P@S{qey#Q%#S_^c+EF=_(5L zvBz7cVx;h~fof~iiq?0Usz*sgGndHq$#TQw8$DP7{$W|`q0lOY&3>yK!U`7AV# zThBghoaLFXzA2OPp*k#Hun*u{A?x6FKw*hBO<4Jy6G>bW)oj;bHzIXd!<*Y|u7?)p zZjQwAKlAbY&r~?)`;8?$y8uSx+qkR=4e&`z4<<}&}_BJepG7bgLiFZx1DT6zb}puf!}LyBgSD;5OLO?9xZtYHEmUJsc{6_zf?q}`PyvA`0IRP zOga<< zUS5Kq0&nBfF_Fyij2n01q$OU`sKG&{`t+e(0bG^*$ob|6KIv`?O8A|?Y>5U~D(qzw zm7Mrl%Udw+t5WmPB5gK#>s6L6sfeGhq%yzzo|GEC1x~Fkf{wor@Y3u6bl2%Z!Ea4? 
zvw9fSfBC|EmQTYn`E^XGL!3nN(t-^){rU1;_t~}CrLd(;1^4TFkn@FDv9+`d#<)4N zg+&=m6tIwrH1@(w!!&qaW|KIBh5RfK=k-)@ zH&$a|ekaSnHV&UDl+h<~GUd^v7m&PV5xmr%3h`b$u&1+|*E_3#pVGduqyCntq#Dhp zD1_p{(=#c+w?D2)^uW+_x7dU^#<+UV6BJnQ#L`KT#91!{J2x-EPVXDg{UVpf@7_em z+wL>H<=eooK6zRa%3rl8f>A~rm;fEh6@oUp(G9$XZ#yAvhpOH2tC^-t%zH8t3U zW8rXDW;sS}8^%hO4uIocGB|wH8PN^r`E)Yn6CCjD$DJ*SrpK$Np;5+mJn(xd*f0Lg z!ZN%-X-^nAeLcxuIb=}dCrMl>w&l781=5L+^RV-!19rC?fSH^T4U_l83HF6}nHuP# zuN2$=DubWqqrx_3T@vdCY4+8;9ZJpav#<8)oN#=s(CU9n35*-_FATB%4@a>1ha=d| z5Kj1S7-IJ?3|Uv5ExPEE($qZLN<>-}f)6wA*=q*-3XZ9Gi$13AYdV{kE*dvAyeVMK zOwmQHO3|TJyPEDEI(H&ww7zKa(NvMoh$;3RLRG=<4u3&wvbN~#(wlK%_e?xD_$>RXp++XP4+U3!O{lKYh%z$n!Fl&w(X!~BROu>YQE!F3 z$v$oTu?1W%l)JsgzHyv+fN12 z0v!*!ChyBOs5P>6*$*IR_6dHn_IFm&#F>>$7Ocg(2F|X9HQeF9y7HClJqd|1&o|s$d4=Z#Jw&h zV4>2Fe5_ZJPiqksSDN6OyZYeptsHhJJK{Nw9B~WmiUT$T)0nBVY3lbJlAALe`;||o z0SV?bzG5y@pM#;-zHhcP>z*OTPr8X#3s8^!-UDJy89?K71ZR zt24(DZX8JC9|j`VP>CbYo?)xs&ca$@9JzOiU3Bj=xMs{y8aqM)&n&3Nnc|Q=>z1SY zpu$fL;_Tgr+Y%yI>Z*kQ8`I;?cri9ajF zu9=ASR&D}MDJ`ZOTEnhPQzr9>S6r0aTedkVm(~nVUBpq9q^0 znXjE}+x3-Twx|}C6gZP0-~(JqG^VM4CvtUbTZx^D!=wp11lty{GY57s$v1_V;JO9^ zj$MJ1eWzLF%_XE$TZ|>%96i_k14I8FgRGSq6j(aQ;r{kJm~h_+BLoF3{+caWrd?x# zDc3}q?=^9gr#I$LIS&w;0~`I-&@OE;-Zu#cnWG0-p4ukn8g7ImOAP70=Rv&CAP!(w z4u-uIF?c|&zr(IoI~ig*%{M2JRNq`w(N&?|`5t)ZP&BS9y+~=ppEC)~Vv(~@jot=D z(V+CJVB#Ewm*+hI(W;Rcv-AUVY*VDCvM)seW#5?Z?(<+=FM}sQ+J3_z9^$p^C`|q> zOTTZ8F70EeSvL%)Pxr^y-O=pT^EceY&^qPC?NBDQoOJ2RvlqK(30M9q<0MlS` zn{?tmj-E4*`U3v2++oY9cby9+uaqF8^b?SoFCd-sIp~mOL3+s=wBcz0&X(38#kH>N z@0J1dv@Q|e&mRa?Yu>P>5*y&}luP_vtfh{jO_URF3!99!$?;o193YBhHd-5~wQoDi z8{`9}&B0Xr?l$!8dd06Dn!~oZE8(Nwrw}HP!?~SCM5||Dp^*S*t}H|A(U)o1u}|IpNt6cB?9nw_Hhb$!{5dfe8+-%s^PxA?5-}G~E%hY#w_TiJ-$d$;v7yqj z+i_#Ub(Xb5n)!t6(X*K54jHX*!MF zq>bZv8B*T8gHQEDS`xYj`}cX`X~P?A{!0bgplwedhOg$%H$3HL z)jMI(vx^X6kxt(SDq(c=SYl>k%x1lV-ROTCCRQI~{enKT?gP!Nr1T*lI!VNz)-IrZ z?uc2UA;b@>#jvDU`Z`g2Q)EuE4|XvY^EJp={i*bS7-eM#JV%C>B-0$#-A5m5_iPcjfVur7~wKFMBh;-hcO!3h)Q(TZ<9Y*5R}Cab>&XfoUZ3%k_ecXAbX=F>3Lo2`YV($|6A zd&nLft`Iy^-c7+@Ch;1MxolYWS#~5vg?Hk-xVEM9ILYJFsL{HW{al=aakC#lz_|X+ zYW4$h^^u#Dnb04XdsNW!%+q9iZZz(gY)Ze?%s|@c9#{v}!uqG_oW14(7~|;!mbv5U zoYHJICFeJA_N8wV1~)G`?u;h;?r|lvMpLzDDtV~5P_oqt^!PXvTHaZ) zKYA&w;HV}>ZSZDu6c^Eutv>XAZ49hccQbQ8`Ha=5NYcV~b?P^)na>mivA;~nEuS~u?6y#{&BS72e;$?P^BVa;M&;KU_gsJ6=@t8v?bw$EzR&8J`$8l1Kh}bO zBSZKR;xBuPQx+Dc{s-5KJr={SW~$(_qWjwGklkFP8TI$nz^nhH*YH#*(k8b z->h()K?%NihR`4*K=bbh5R+0_U2Z5vq&i|~;9$ymI0|fX=8__3O&dC=@*?*ge42s* z&0JfK{k@~{&b?m9vbrM@PQJ~psn_J^-tnR6_EM~94(FdNtYT>*Dag%Krs_gBJheN4 zEzk9#_K$!OHCy3A${%iw4wCap1<;8IU`vwTK*e<)E~rlh|2O?9HCRaB?#{rQ8g<;Z zn2T(_-Z@w~;39R)6+##L4O^Zj(Zgk_xVa|^w^}vQ%JkLn{6zrH8)87it5;H{^%Hm) zq5~Up>zHnz2RAeF2An&fj_tNXXxySjR3WvPGkNm}4@U;mmDYo-Y=RawEWM4k>uueH&Bc#vU=Eon-fUpmJC<)XCe+y z;`uUh8t6#1*h(DWNmZF|*n}AkXpmqn_*y+0r{1@N{;zFdzj%Y<*N-+7WEx8QHyr1u zs>9{uwg_IeeW9JbC!lt%1!w-~x+u^wlXl&lO+Fvu1%?a#Kw_0Ib%=DR zut%Rl>&o#*mLXOD*^f)x*WkKqN|aV+ikmd$(0uq7*f=YRt8>VMfv#H6asE3=JX?T+ zKObR%FFZxP10%UgjU5=QzMH$U=Q$s+{31N){kII=j2w{V?j zAIo;IH$pX5K1i48+FRqb(pwbS@(@j(+h~qzqu7GhX0`k!rrhR${MnO~*L;}mk58qr z8!9N@sYY@}Jf8SggX;UjC`n~Bg@-dijLQ>9uF`1=9~z3+TI{Lm-6Iy%?S`TE2k{3l zj^f@-lw`Rx^l5v|S(vN39_nMCLu=+I_Qt}S9WmbkS9_w_1*?r{QY?kp=H2k3bmg;de82B*bD;kO4nnX17rRwkE3EtTQy@f>@cyzvOAOPP~|aUwlNH|$N@0LAmy z;11`F5Vy$)N5K~Ikhsf?ThqZbxsjDAL{dM?Ec~rMh$5#m_Huncylj1$bv|%mhs0w{ zSfT=>Zf#_9weFGJOMe=({s}E`J_a~j9&5#Yk{r}xM28CwF|GyW)gx*8?L+)DQ8`^0 z{DBWL)Tij-E@-1PhOV!f%Q7lQ!TI~`th~q#*V(NTb={keNjBfe3ezxfZU%Z8Y+}NR 
zu|lW+J0MLX{oz7!C0;;@zPMHn6kUo!ytn6O9P%H^LR_|;dDN(PTT^x;jHQ#>`1*iAOE0@eeeDWYA5s9iyx`tKoERzX_{8ltG=Z#QVbjjA8N4L)lI&=2!pGqtAjh6yZ4)_hgR3NssO_ z1)a&L%&u~$;~uk5&2>z9>KDjSte_dityEM|hEx9LvupSAnb{5xrnxv5+3R6=ZPRJ) zRDKq^IV5rOFNk-)@zq?$b02y*D;>L3a&VUQ5kcF#J){ynlBUher1MI5*qdBkZm{D4 zSgfu97nF~nTiw3@r>`@Q%CYVKzGya;iqcFaLuE**>pXVIl!Qt&2n~ixaZ!c{jhY7y z5*3vUk)cps=iZ4(rc4o~LMYTNM7Ip@<$0d_x8Akh_xJpLt#z!mkMq3NxsUJue7^^u zR&Iogxhys<;S?+{A4R|Bg;0uHC%5aqHaO4E1E&Mc*mkcDzv&KPb4};4Z7W4w)A*~b zq>Bv<$>+S`)o&vG5;(r6b?yjfNk3rk@bQwW}QzJ@}r|z zm3=y!j3?>z9U(5XZA7URWlXBBm-X%n1^JnBaAjyFxJIodAE^-W@n|31O-{ynkDb74 z?HY0qPU7cv6+q=WJyv{q5ucLNiJKpCv~_nayDnu3VPDq>cHUe78gnElp?E38x-SC< zg)y{g*a;ZfYk_Bu)`Lpo2R2+FB*ktE{_+nI)K7c~mhnT#%r2h}ZSq9V9hq?Uh#c!4 zxsNxwmdQr=WpD>%%xG&sCI2#SAp5J|Ew&**iTb83Vc+_ek)D(x{Vw*U=|djC7nx8B z5}Lu}=K&PAHx&bdgINC^t7%Sh1%6V!#@WfFvXd%z1dodEvrS@{zT{pKJ6F4bf?R(x zjS*+a`Sxs(?XeIm@RfX``BxSqw;nFMEQjO?u5@6*4%&3B3{$>^2^ipbB?lpF23Me9e{ps#?kCj~}lg5P~z=!qI0e7DhyA=u{ z)WVdG&zb;_*RNw~0$D1#VFmSNj?}JP$d-MI$1cTiIKH9@%O@F9g0DHWsj1R(Z+kB0 zYag14r#DH?MQH5XBzXMj6UyAG=bqI>bBzHvA?@NyPXE+Jn(yES3*0Urj{DCQ@_?!`Hu?@sMQESauHQpQ&^F2GrP3l7#*CaCO&RQV@Iy@+A2b(K`%ec!Mg!|O=gF)h zT;T4=Nl-g$DCO%PU>BaGP(gor2&#Sw)#C1q6l&1uqImFTxP=VXo@GW2E8y?91NqdA zMYw2mCi{qs`LT&(XlAf6J8gc0cZ#b*kH(L<$X^`<*ArRz!Exk}aTAOsD@3x%auBnm z6Mwi~uP~i5mptZsK;rHNTvSIGEL+*11+*LR#qTq?r6c#V(=Wc@j@4&SdUY0@Q&Aur zwe2)!!f7lYH;y_#E7B~xotT^W38X(XaZa0Vvf&-dRAG4r*giG7^w5N`$DQA1bCmym zaW93sEr*Gc8>w4%FYB{71c&{cVD+Kv7<0}7?0Vkds4x?-2;T>J!4g#A=f$L^J?3Ly zJ;L4J)__SvEOhI>5q&B;h6^3T(L{R&r7Bp#t93V+>G~3okoSkkGC9^^J%+RLzsUOQ zzrnH1`*F0h3wxoe!Jg!8!NSU3F>iR9oAK~G)3PaL-?cWtgTNN+SSQqZ@5)5dX&(rfHlVV(Z(zse@@zi zQfGjQj`{G)J76%f9Sq z=|N*jt6c`P6%0w>W{<^1v)RA{`{-fAI5hby4a>8qz}N}(P$cuZqJCN=f6i(mnaY*Y zzBiWy8s2tTwKkX9GIYp1PZQ*Z?}Wsk>Tv#X44*F?8rZ2DlCXk2eX+f+}7*9(A0kj_7R5{tjRZ^wr^jR0m38 zK2vKp1M4&^QGUxfJd~6NBHAL_bQ($+sVQgb@9X~KmrvD!?AJVcGzgphf;oq zw0zZ7wl%gA58c>6z4oaXo^z1NU6@bcvl=C>nmK0!Q<8l%2)yp7)ep<^m=~P?P?bIpq|Cec+2;5G$i+_u5h8>99nohq2koDI?!GJ4W5io zfxF-cp-ICrN!gog-FFi{XBEPZmLhB(ypL6?Mxv*`IlH{|JFEJ2i79C9;|iueLQAvL z_&`Ge(+62_>%`eICZF=~`LrfBA-j~~8U}#;S23=1vyiWmFvSykx?;NF3<}+A*y&5H zxa@9_xPNgF`?@u0b#x0WRbC_RS?u9+nhXurk^;T#@u2hd8Es}3tWkRpJ-MAsbq}^u zk-ZI;RnHOi>clWmAH#CC>a%C^GSKKdj#)1Zh1olzS$me)81=fHwTOn(_kziEafLQ( zT@ym-M-<_4MjZd+*Cx8Utc%$X+Qw>&t!U=$@zA-^2d&D4*xT|REAGgFN78pnUtvn~ z*L#a&T?WJ2*i~q>Q-ZB3(xMxhduY}7V!Et$3{IGeO#qVxq&)f-zv-zPrG8@E_V}YX z{h~MRkbcSjF1*Aq@JJym{W|7u0u;0I9_RW!gT2#fL~YqZ2)enPg0}@&ZvxUXg6<(@Pn!>ULv7M3 z>e7Ekn%d>e*;Wdcy|;l2g^Os?Bo93Bss`&bLW!PPQU0%GRJYZJcJKqp)<2862jAz^ zX*}gAuO`P)uLzwN&>V*omQo{=@lfSZ+<9a*`Rf9yJrBdjDjjUL(lDs^ie>lWvgl`@ zJsWUvFog`52=$ihY2)cMS}{@qY>yW((;iKX)i-7p6a6u1v?Jd8=#8I}b*LgH3nJ4x z(Yy2>o)?^@`|}WY?X;oD>H#o$<$T(dTaTJbN7$IL>TpHV04_Y*%3VGaThVAXg1Sy> z!{|FzsFcyme)pQwRW3(xsW^_i{AnNtt3`pz3|%T3cm~_vg_7*zr7-z@CyUR204+T- zpta!|>!@u(Q%6Oze%dHDK3(B=uKR&&Yx>h7^R+PIdpmnxh@vp@*rg?WJuhWA6esG> zBw6*-tUas(9Og=a&BZOaJuHC=FTP<}=b^TKM)XN)Efj-Rx);jPDvtmcEF=#>#A8>K=GBLDjPN*8!TWD+3?P2T^6G zAN~qF%FBOOA}yB$d{ZKpr%ufQh1`KOc7i-@`z3(f^qw&sRH}*X&#LER)DN)hBX`i!yk~rE!XAwDzESb~Kr#2%)3Gp>SEWx9 zKgl(HAnvKq+edLTuXayl?&fH)sx^d8=SxEyUuJVT4Q(tX z;OWSER?Lj3$|R9~s>HBEiJ^?7O6gg&Hhr+1K(~Abu;EJtRCQWx{#JIT^R1uwN|j{z zFlZQ9Tnxl>zc*8ASb@NGVm^EG@h#SVP=(%~>rnr-H#R!F5cKVrg-v&qS##oXvT@1e z0){XYUoGWQ{W@9R<)a^#-qm$4RGw78UvZWg1|f+;lfiV@4VT?q-> zss(PltwHuxKl~)lve;dJjeU{d1YM4+VR33GlTV!l>kEf6vlZ{*>&Gs1&3r-U6trOK z2Y>V{7=SO_tl>pyI~!w}ND-@Iz*Au!HtL**`7$jyAuomhknF`mS2gg^9nO+u&*M?m zMI3Awfkvn^6D|rAF8<#rktzSCMC||35gh*L2#yZICI1maoc<+-IGbmeJq}-5vBCUk z#o;I)QPvb4QO(0GqOoba1)<4lqE`Xlg4IP#w0CxbAX;XvU}J7f#j^dTqWBM!MBSfV 
zMTNnN6>0BJh@vuYiW*a`M6)N277dLp7AY_4Ez7f9gCMMbat`0)C|B*zN|09V!>(ql6HFL?y z_7*-l{g&SD(c&!T{9;-?*I1<5-zeKw$I67`>6c77cAu?433IUx$Ylxh71It&Z{LF# zGeeo*=1ejw-9RU=#4z>p11!kWA7`s*QhDYj%8|a#a^K}q#9t*LF=acrFyYx}38EwC2kcF{yO^pR@t&E>8iPem1HOJA4H;3h$-zsLekbptp$*D(qX`$xLQcVm zt@mKP`bXSyxP{&|HL>set0;GOu1J$zfc_=tq3cRKy~`O#d8V5|d$k2kkrih;WWMLz zbFx8Z&2k7j8AlJuZ%-bAi4xF_5j|4o`w4;lAAk3`uXWC_48X$`!9_E-|j7jlnFjevxQ%jjnr;a~oe><0wgV8ux z00&wAGFAS@##{_t*dNoc9i*sVv2?g!C0q5*3nYiWWq*yhj2Etc!TBSlaZ>IVxSG~p z;pn2veUortRFws;12RCA{sK-u5R%nPLt1hE2EQw78ICpZ1AqI2l-u$b3!88Q>eUTM zv^bZd!so;9kzwq0X)5$|`vJ2pVd)KBVv%t>dv1G>t_2&=nS!gJs5%ZBp1i_A*Nyl? z)!F<2jYl|m-aY1eK?70-O<`etG%Fhw4U5Jr!s>uJ+%fqYDRo)$BlXMZYx{KZq+cJx z#o*Y4?2nin5(Kf$VyeR69rGU*PRm^{p>6qgDD({AB699CYrzQ%munGFBbCPITuh{M ziC$PXcr>Y5B=W~2cJSleZ(y6J433;@%_bel!GRf__{^k5^k%0GKJp$Rp0K#{k8_e} zL3tM6Q@WGw-nNIWezhO1oYP6Bbfv&{&;(}fssdX%Z*ZxZOrm51kmIlNnt}bGE=(Hi z%jJ?|#1I2g)2TVruD zjwQj%LJSLVV@lJuL*Mn6Y;s6G3{BF7a=J}b<_qbPjWqa+t|H~jf>-o97_6^PH>a%U zzgoMIVSzD4s})k_nhY9oEgC0eJYeer*5lpBr$BY?ck-5e!A~E4in&Lc@o9&%@ZQiE zba(7uc_N+IQE{xA=J37nJjRFgEe_+IzGhs%mtbqW34GXe7Y1=2Fe+>v6X}>FWT~`d?+5JCk|E-duKJ$QgDsEtP~LWGQC;3Gz#nqu7O`P?`V4s-t4bWBOCR zBUX}5O^$>{-7&PP=qQxLAEZZWLNNE4fNAv~aa{RVHY28PsxBAYoe4##VE!oNL%=hCy zE{+3Et%qr!-$-|+{>591yxB+Z_Z6;2CvldVAMH%6pyN}N*sPrEV0!5Tye?b8veu;I zXxV3ik@f&bY%k&XtHWq|QZu{I63UiH9Ky17&R{=l55)D(VFQ-!#UuOr!3;6oBe6|^ z246YP6@A&o%Rjb*U^^x9IULH=jpxwQw+=8~uoPyEa)gCDi|LG3ojAiM99GGRSZml} zG~AvD{oiOpKk+-^C$p8l8fsIlx(VC$+>2hfnp5pSVl&4Eh+|HVatCX*p=IM2-p)jk zerr~u?hAEN^!m*ti$8OEpEp3#;QbIYErFsMRmdgnFmv35xaI3ZOYV*x7v(jmGU;}< zSfbRUyQgg-XwP)$)JmsAQqO2i`!w{k+{wFMeoivi4^!uoLG;qGQcNDiVx;O<)?{eN zTxvR5YeF8xyy-{!#!|R`uovd(i{o_Pet^4!_v6UP$9XrwbxyU-n5Aok({Z5#cv#-& zB;)U(^oOV5y4xP+Zo5npud6UCr5S$uR52Oxl=+TDCK>PU#>DVrFh6z&)vp?b<_DVD z?#F9s!w+#prD`FV_V#g-C4;CYZwJ**Q>90;BPttQTDbkvp3GrEKP&%<=LAP59On`Z zZ-GfyHXD*w$pUljVc)J9P&;A^KWy7xmiW2?X2&~Ipq30IT`a=D-SfdRwGzFi8^CIh zFo+#F6;9jPLcpgU>}-nz`GAqENqkhdzZFd17e&*HYqs=FX*HW8e8XQIQosl89>q@O zL_&OVG=|OeCDG6@koEVZ)1za#OYN&*(0X?^=HNW`xo8qK4V%gB513Db+gDRtlPufN zIRhpvp9a&!(xYl?D|VX8P<7q~KJ?gNI$Wv@>$BZ3CEU(~q{k zE1{K#8wq!yva4zGB_dV&_x$#KU7r#s~#JMJXg9x+iIBRYaFGb+IvjU)W1#Zr~IjT0#9 zl@0bRGvV({*@G@7q3~+6xaIGTg&&rZAXT;$DkORKxt}GjcpplNzuVc8)A?XAy@yYC zI)pL4a?m}iko&sC7qo@CFlN;w40v)8x}zkl%uWe_+wMih)=TK{EHOEgvIbt($#PyY z`q(l>2J#inp|Us~9)x$ZHJ!sjJtjp=^NpgM`3`XN`#bpj(Te+a-v;hvJYqIYrfi6H z6SFP8N||{A?oF!+tvJz1i%03wtBu3xNM|J$gaKSNr^WC1F+|wZW_ru}so{`khp}`hC90msOQ;2T- zO+OtD!I!23Z1UNqU_B`T*Dg$9Wl{x@|LGa)w5(y~(oBmVgI!%U0X7V{ERLRah2%03n;mw6mFi{V_hr$59_5f*_!9Z0%kWov z9d`Cb;L4svw%ya8MT!l4HcES$Qu}0Fa_ktVSe3`J#N(d513n1)G_>LS*f?0c{xCSL z9SiMwg;Y1m4*IG5X0Zi4GrStdWPcL0g*7r`gX>(9ZY|Ti{ z#DcY*A-W}n(jy9RzP2toi*qfOM=plA9~Dfse~75?s1Iv3iKjmXhbs@-EA$jZR8rX)9yAmOF?}|G3H0HBSf@&Nzd6CK+IoVmQ`~u4S@!heCJQF3>$?1Xr62Xv&L0 zqD*F6`1h$XhR-0du<%Z2UAFUM;$%W(3laWG-hLR8nCg1Me){0Ai$ zvRUK}J~uq5|Bz?Q#J3(ll?-PijkB5aDG~GN-iL)B2f=F+faf=260(nRNlRwAq2x;H zNSw|xzi_Plvl7kxbsx{}98bQ_N7GwDD(~ymA1tjS*qA7N7-qsj$u$*xFTsFp3(;M@ z5g(_^F{#ZO*m{2x{djN5rLC-E<)-Dpy)~fN%jIZz>m;p{+)r&zr(l8CS<8;m{prn* zE;i9}Bbm3Hr`qB;61ncBh^Xnv*(&k(`%L)@cL&pr{3iMxvWOb;L@dNC4%$tHkdXEU zBP#E4gZ6BKFO$RsU$8lOh^0e^=?W}3atW+d@dCMz6QRLrF8sAClolM(rm^#mV!nnI z*6A!}n}u#*HYJi?Pd*Lpx{|oG?$6eu+ILjMH zIjn|&SIOWO7Y8l-rZBgLBvf)TV-7E6sQ;-rW`BMQ7uM_rbH()8;t6Naz<4u?0#*6c z?EN(Hj2ZBE?ARn(TiB)J0)F{#S#8Z?EI1U8b+_X%y*VGcThw9m!l_L8`at}7VmZ5% zRSMffPGf1(F$!!nCXWupCjTV*(CSHtd1F%6(u4D#&+*kyoJ9H&OW=gqOn$}Kmh;g6 z%|Da3qLL*OScgXyE>|~$JH2<<_akz&CS?OGdOnq=8vI~^ClV=oWPf`^Q|AvD6f14Skh5ZA_|dr>a+wkgmS+0E=Y{Gf6+`tfmmlUD>GZR|$Ssm7txRB62P`Kj1QzHKnMppi# 
zCan6SCU`gqJ^v$&c>PNl*`YlRIll%HsqBT3mi7HzIEKd!eV-b@q z5~$2!g6rF|1XF)Bh{BYc1+i?H;GohrQOl7c0T-Ann0D|%`6a)4k&5avUf^6{d8nmL z@at!-DE!O-fo)uss4>M%~6E*)|k7M4zr`msS?E)_fK8 zth^@JX7J7O%122-z{dc~sVkpYDsQnC1sAUtN#{!Q7d#~R3tuuU)%40l4`dZZ(OCoe z0TxD_Ma_PJmA0{H9ctHS1nN-WrP27eE|#gu|Bvd0-uP}6{LEMCK1gKwjHY$+-^ zRN@}r)}tvUCS2F6k+tv0@mJmk68zv z$Mby`*@mLaDBK<>4EA)^{BO1VYj1Y^Q_0Rhm4rA5cm1hEB2u{fPaTqHXT>t%og%`K?p%1cQsDoU*6D&+KLXf5P&W=tyNZZG5s zD&*DhX7sjeE#!kLOev`>NaZTzk6>b8U`R756!2#7W^6AM3@Q}T@Mex+1d7@e3j6u_ zdHn|hFyYNmQYezt>HOpP69N0J%f&Xd@$!CtR!f2@PW{!%5+{&&`n_S&Ix zb`N4J_U>Ie(RP3MHrw?w?E4K>TK1mVT(SSVy2AcfCm?QbqxaA5wEOmb z?+sSj9?M-}yEd<7uU?_EO^@XAy_N-!ta+L`_PVAWv@UPWv8|3Uwe~+$wDeVoAlNdWB38oIrklK}lwAW>xBB?KDogSH20aQX_bQ Z!X-tCnR%&2@x>*HC8>q1wS{b@dH^{&vL65d diff --git a/tests/data/rllib_data/single_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/single_agent/checkpoint_1/checkpoint-1.tune_metadata index e83b72aeaf2291e2f177e78504c94fde7e5a3392..55b72be28978f4b959b001b57aad8683ce0f562a 100644 GIT binary patch delta 105 zcmdnOc!_a>yn>;LiIGWil3_}kiMdH~YHCudrJ;dks!3v6iiufrs?kJyZ7GA&2rdQ& zhLX(O)cBJ8lEj=sR(FTQ4ck^VOl;9ph~Ngw6(v^2m!%dJXXfV>azwBKMGW*T^b86) GOZ5Pp1Rp;D delta 75 zcmcb_xP@_oyh2K{ak8bMiII7tiLptlxw)lqD0&z+vtKr&yE|cKHf@4J z!HdOE8YW&u@vH|At``s9yoeX?>QBf)@CP`XL|Ut}u*|;iy*E4G_x4v_U6|WVJuA$P z4c4RH*-kc$0<=xp17N(jsmrH@W%;$RV2vlZ*N7B2);TShLqb$_1g;}a+R(dEin`yb z>Efn(WJT+nc^q*;1LR;qXoy_Gg*>NMtnca0Nlt45)hQ!w8us=&lOaVe4FNd~@u*Bqb2F}-k@2MNokn245Q zt(4>)W6=;SlDkG(FJF|gu_pV*?A4l2w`~ZU+-4;7YzM1j?e*ln5jL%%u?0H~Bpgo} zSzNJva6AYFsu3?@fPi>)5noRET8=p`i8xBEl7E*U(qGm>5J{&K@!+u0H@A<$kSLOYfGGRxmZ*Q-cjsk-_Tn3NY2YhnNb+pQ!E0kLLCGXQ8jj#X|jFCV+sl rRuVRoR__BANX1FD^bnsKZczi+$R%OV{fGTGCH^t}P}59#cjoFnXm$xn~ZNi9pwnbO1J>F4G@Sy*J# z#7POv%-+lszrUUQi_unK2X_xoaei7!d`5mzW>tRPX&nzxnl9-g4lUW6_ zu_Plk9%S`qS(Y3|ZixLr*BO~jp22!rqBuD^yA1I>alb>G@4CPOr%_h%RmYR{7 zoReA%lq@MKoqUo_d$T0_EXK{3IVLbNo9LNN-pD0Bxt1$r@&+!6$*;LqF&mk3O`aei zz4?}a2os~xrMVD;wi(OSDG7N zo?n!mT2ws6o2iEb#7{3M1#%cBYl&x0l-Cm#C@D(J%!^M-EJ@CYFV3t=o#LDDYNC(Q VW;@ZhjGNz!S2Hq7PR^Fx0st4Wrak}w From 1965e6bcc928b4895c926bea8ea5c0df1d1b9752 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 11 Jun 2020 16:55:25 -0700 Subject: [PATCH 247/438] remove .func --- flow/visualize/visualizer_rllib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 67b9768c3..c1dd83193 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -177,7 +177,7 @@ def visualizer_rllib(args): if multiagent: state_init = {} # map the agent id to its policy - policy_map_fn = config['multiagent']['policy_mapping_fn'].func + policy_map_fn = config['multiagent']['policy_mapping_fn'] size = config['model']['lstm_cell_size'] for key in config['multiagent']['policies'].keys(): state_init[key] = [np.zeros(size, np.float32), From 39fe260e63aaafeceb2d8c982eeebcb8faf582a6 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 13 Jun 2020 18:27:14 -0700 Subject: [PATCH 248/438] Modify time-space diagram plotting (#969) * use pandas for data reshaping * fix flow params assertion * modify plotting for all i210 lanes * generalize plotting to all networks, update tests --- flow/visualize/time_space_diagram.py | 667 +++++++----------- tests/fast_tests/test_files/i210_emission.csv | 2 +- tests/fast_tests/test_visualizers.py | 404 ++++++----- 3 files changed, 455 insertions(+), 618 deletions(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index 
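The heart of this patch is a pandas reshaping step: each vehicle's consecutive samples are paired into line segments that matplotlib can draw directly. A minimal standalone sketch of the idea follows; the toy dataframe and its values are illustrative, and only the column names (id, time_step, distance, next_time, next_pos) come from the patched module:

    import pandas as pd

    # Toy trajectory: one vehicle sampled at three time steps.
    df = pd.DataFrame({
        'id': ['veh_0', 'veh_0', 'veh_0'],
        'time_step': [0.0, 0.5, 1.0],
        'distance': [0.0, 10.0, 21.0],
    })

    # Pair each sample with the next sample of the same vehicle.
    df[['next_pos', 'next_time']] = df.groupby('id')[['distance', 'time_step']].shift(-1)
    df = df[df['next_time'].notna()]

    # Each remaining row becomes one [[t0, d0], [t1, d1]] line segment,
    # ready for matplotlib's LineCollection.
    segs = df[['time_step', 'distance', 'next_time', 'next_pos']].values.reshape((len(df), 2, 2))
    # segs -> [[[0.0, 0.0], [0.5, 10.0]], [[0.5, 10.0], [1.0, 21.0]]]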
diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py
index a08ecdf0f..bc26ad855 100644
--- a/flow/visualize/time_space_diagram.py
+++ b/flow/visualize/time_space_diagram.py
@@ -20,7 +20,7 @@
 from flow.networks import RingNetwork, FigureEightNetwork, MergeNetwork, I210SubNetwork

 import argparse
-import csv
+from collections import defaultdict
 try:
     from matplotlib import pyplot as plt
 except ImportError:
@@ -30,6 +30,7 @@
 from matplotlib.collections import LineCollection
 import matplotlib.colors as colors
 import numpy as np
+import pandas as pd


 # networks that can be plotted by this method
@@ -41,47 +42,46 @@
 ]


-def import_data_from_emission(fp):
-    r"""Import relevant data from the predefined emission (.csv) file.
+def import_data_from_trajectory(fp, params=dict()):
+    r"""Import and preprocess data from the Flow trajectory (.csv) file.

     Parameters
     ----------
     fp : str
         file path (for the .csv formatted file)
+    params : dict
+        flow-specific parameters, including:
+
+        * "network" (str): name of the network that was used when generating
+          the emission file. Must be one of the network names mentioned in
+          ACCEPTABLE_NETWORKS,
+        * "net_params" (flow.core.params.NetParams): network-specific
+          parameters. This is used to collect the lengths of various network
+          links.

     Returns
     -------
-    dict of dict
-        Key = "veh_id": name of the vehicle \n Elements:
-
-        * "time": time step at every sample
-        * "edge": edge ID at every sample
-        * "pos": relative position at every sample
-        * "vel": speed at every sample
+    pd.DataFrame
     """
-    # initialize all output variables
-    veh_id, t, edge, rel_pos, vel, lane = [], [], [], [], [], []
-
-    # import relevant data from emission file
-    for record in csv.DictReader(open(fp)):
-        veh_id.append(record['id'])
-        t.append(record['time'])
-        edge.append(record['edge_id'])
-        rel_pos.append(record['relative_position'])
-        vel.append(record['speed'])
-        lane.append(record['lane_number'])
-
-    # we now want to separate data by vehicle ID
-    ret = {key: {'time': [], 'edge': [], 'pos': [], 'vel': [], 'lane': []}
-           for key in np.unique(veh_id)}
-    for i in range(len(veh_id)):
-        ret[veh_id[i]]['time'].append(float(t[i]))
-        ret[veh_id[i]]['edge'].append(edge[i])
-        ret[veh_id[i]]['pos'].append(float(rel_pos[i]))
-        ret[veh_id[i]]['vel'].append(float(vel[i]))
-        ret[veh_id[i]]['lane'].append(float(lane[i]))
+    # Read trajectory csv into pandas dataframe
+    df = pd.read_csv(fp)

-    return ret
+    # Convert column names for backwards compatibility with emission csv files
+    column_conversions = {
+        'time': 'time_step',
+        'lane_number': 'lane_id',
+    }
+    df = df.rename(columns=column_conversions)
+    if 'distance' not in df.columns:
+        df['distance'] = _get_abs_pos(df, params)
+
+    # Compute line segment ends by shifting dataframe by 1 row
+    df[['next_pos', 'next_time']] = df.groupby('id')[['distance', 'time_step']].shift(-1)
+
+    # Remove nans from data
+    df = df[df['next_time'].notna()]
+
+    return df


 def get_time_space_data(data, params):
@@ -89,13 +89,8 @@

     Parameters
     ----------
-    data : dict of dict
-        Key = "veh_id": name of the vehicle \n Elements:
-
-        * "time": time step at every sample
-        * "edge": edge ID at every sample
-        * "pos": relative position at every sample
-        * "vel": speed at every sample
+    data : pd.DataFrame
+        cleaned dataframe of the trajectory data
     params : dict
         flow-specific parameters, including:

         * "network" (str): name of the network that was used when generating
           the emission file. Must be one of the network names mentioned in
           ACCEPTABLE_NETWORKS,
         * "net_params" (flow.core.params.NetParams): network-specific
           parameters. This is used to collect the lengths of various network
           links.

     Returns
     -------
-    as_array
-        n_steps x n_veh matrix specifying the absolute position of every
-        vehicle at every time step. Set to zero if the vehicle is not present
-        in the network at that time step.
-    as_array
-        n_steps x n_veh matrix specifying the speed of every vehicle at every
-        time step. Set to zero if the vehicle is not present in the network at
-        that time step.
-    as_array
-        a (n_steps,) vector representing the unique time steps in the
-        simulation
+    ndarray (or dict of ndarray)
+        3d array (n_segments x 2 x 2) containing segments to be plotted.
+        every inner 2d array is comprised of two 1d arrays representing
+        [start time, start distance] and [end time, end distance] pairs.
+
+        in the case of I210, the nested arrays are wrapped into a dict,
+        keyed on the lane number, so that each lane can be plotted
+        separately.

     Raises
     ------
@@ -127,7 +119,7 @@
     """
     # check that the network is appropriate
     assert params['network'] in ACCEPTABLE_NETWORKS, \
-        'Network must be one of: ' + ', '.join(ACCEPTABLE_NETWORKS)
+        'Network must be one of: ' + ', '.join([network.__name__ for network in ACCEPTABLE_NETWORKS])

     # switcher used to compute the positions based on the type of network
     switcher = {
@@ -137,22 +129,16 @@
         I210SubNetwork: _i210_subnetwork
     }

-    # Collect a list of all the unique times.
-    all_time = []
-    for veh_id in data.keys():
-        all_time.extend(data[veh_id]['time'])
-    all_time = np.sort(np.unique(all_time))
-
     # Get the function from switcher dictionary
     func = switcher[params['network']]

     # Execute the function
-    pos, speed, all_time = func(data, params, all_time)
+    segs, data = func(data)

-    return pos, speed, all_time
+    return segs, data


-def _merge(data, params, all_time):
+def _merge(data):
     r"""Generate position and speed data for the merge.

     This only includes vehicles on the main highway, and not on the adjacent
     on-ramp.

     Parameters
     ----------
-    data : dict of dict
-        Key = "veh_id": name of the vehicle \n Elements:
-
-        * "time": time step at every sample
-        * "edge": edge ID at every sample
-        * "pos": relative position at every sample
-        * "vel": speed at every sample
-    params : dict
-        flow-specific parameters
-    all_time : array_like
-        a (n_steps,) vector representing the unique time steps in the
-        simulation
+    data : pd.DataFrame
+        cleaned dataframe of the trajectory data

     Returns
     -------
-    as_array
-        n_steps x n_veh matrix specifying the absolute position of every
-        vehicle at every time step. Set to zero if the vehicle is not present
-        in the network at that time step.
-    as_array
-        n_steps x n_veh matrix specifying the speed of every vehicle at every
-        time step. Set to zero if the vehicle is not present in the network at
-        that time step.
+    ndarray
+        3d array (n_segments x 2 x 2) containing segments to be plotted.
+        every inner 2d array is comprised of two 1d arrays representing
+        [start time, start distance] and [end time, end distance] pairs.
+    pd.DataFrame
+        modified trajectory dataframe
     """
-    # import network data from flow params
-    inflow_edge_len = 100
-    premerge = params['net'].additional_params['pre_merge_length']
-    postmerge = params['net'].additional_params['post_merge_length']
-
-    # generate edge starts
-    edgestarts = {
-        'inflow_highway': 0,
-        'left': inflow_edge_len + 0.1,
-        'center': inflow_edge_len + premerge + 22.6,
-        'inflow_merge': inflow_edge_len + premerge + postmerge + 22.6,
-        'bottom': 2 * inflow_edge_len + premerge + postmerge + 22.7,
-        ':left_0': inflow_edge_len,
-        ':center_0': inflow_edge_len + premerge + 0.1,
-        ':center_1': inflow_edge_len + premerge + 0.1,
-        ':bottom_0': 2 * inflow_edge_len + premerge + postmerge + 22.6
-    }
+    # Omit the on-ramp edges; only main-highway vehicles are plotted
+    omit_edges = {'inflow_merge', 'bottom', ':bottom_0'}
+    data = data[~data['edge_id'].isin(omit_edges)]
+
+    segs = data[['time_step', 'distance', 'next_time', 'next_pos']].values.reshape((len(data), 2, 2))
+
+    return segs, data
+

-    # compute the absolute position
-    for veh_id in data.keys():
-        data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'],
-                                               data[veh_id]['pos'], edgestarts)
-
-    # prepare the speed and absolute position in a way that is compatible with
-    # the space-time diagram, and compute the number of vehicles at each step
-    pos = np.zeros((all_time.shape[0], len(data.keys())))
-    speed = np.zeros((all_time.shape[0], len(data.keys())))
-    for i, veh_id in enumerate(sorted(data.keys())):
-        for spd, abs_pos, ti, edge in zip(data[veh_id]['vel'],
-                                          data[veh_id]['abs_pos'],
-                                          data[veh_id]['time'],
-                                          data[veh_id]['edge']):
-            # avoid vehicles outside the main highway
-            if edge in ['inflow_merge', 'bottom', ':bottom_0']:
-                continue
-            ind = np.where(ti == all_time)[0]
-            pos[ind, i] = abs_pos
-            speed[ind, i] = spd
-
-    return pos, speed, all_time
-
-
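A quick sanity check of the edge filter above, as a standalone sketch; 'left' and 'center' are main-highway edges from the merge network, and the frame itself is illustrative:

    import pandas as pd

    df = pd.DataFrame({'edge_id': ['left', 'inflow_merge', 'center', 'bottom']})
    omit_edges = {'inflow_merge', 'bottom', ':bottom_0'}

    # ~isin() keeps only the rows whose edge is NOT in the omitted set,
    # i.e. the vehicles on the main highway.
    main = df[~df['edge_id'].isin(omit_edges)]
    print(main['edge_id'].tolist())  # -> ['left', 'center']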
-def _ring_road(data, params, all_time):
+def _ring_road(data):
     r"""Generate position and speed data for the ring road.

     Vehicles that reach the top of the plot simply return to the bottom and
     continue.

     Parameters
     ----------
-    data : dict of dict
-        Key = "veh_id": name of the vehicle \n Elements:
-
-        * "time": time step at every sample
-        * "edge": edge ID at every sample
-        * "pos": relative position at every sample
-        * "vel": speed at every sample
-    params : dict
-        flow-specific parameters
-    all_time : array_like
-        a (n_steps,) vector representing the unique time steps in the
-        simulation
+    data : pd.DataFrame
+        cleaned dataframe of the trajectory data

     Returns
     -------
-    as_array
-        n_steps x n_veh matrix specifying the absolute position of every
-        vehicle at every time step. Set to zero if the vehicle is not present
-        in the network at that time step.
-    as_array
-        n_steps x n_veh matrix specifying the speed of every vehicle at every
-        time step. Set to zero if the vehicle is not present in the network at
-        that time step.
+    ndarray
+        3d array (n_segments x 2 x 2) containing segments to be plotted.
+        every inner 2d array is comprised of two 1d arrays representing
+        [start time, start distance] and [end time, end distance] pairs.
+ pd.DataFrame + unmodified trajectory dataframe """ - # import network data from flow params - ring_length = params['net'].additional_params["length"] - junction_length = 0.1 # length of inter-edge junctions - - edgestarts = { - "bottom": 0, - ":right_0": 0.25 * ring_length, - "right": 0.25 * ring_length + junction_length, - ":top_0": 0.5 * ring_length + junction_length, - "top": 0.5 * ring_length + 2 * junction_length, - ":left_0": 0.75 * ring_length + 2 * junction_length, - "left": 0.75 * ring_length + 3 * junction_length, - ":bottom_0": ring_length + 3 * junction_length - } - - # compute the absolute position - for veh_id in data.keys(): - data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'], - data[veh_id]['pos'], edgestarts) + segs = data[['time_step', 'distance', 'next_time', 'next_pos']].values.reshape((len(data), 2, 2)) - # create the output variables - pos = np.zeros((all_time.shape[0], len(data.keys()))) - speed = np.zeros((all_time.shape[0], len(data.keys()))) - for i, veh_id in enumerate(sorted(data.keys())): - for spd, abs_pos, ti in zip(data[veh_id]['vel'], - data[veh_id]['abs_pos'], - data[veh_id]['time']): - ind = np.where(ti == all_time)[0] - pos[ind, i] = abs_pos - speed[ind, i] = spd + return segs, data - return pos, speed, all_time +def _i210_subnetwork(data): + r"""Generate time and position data for the i210 subnetwork. -def _i210_subnetwork(data, params, all_time): - r"""Generate position and speed data for the i210 subnetwork. - - We only look at the second to last lane of edge 119257908#1-AddedOnRampEdge + We generate plots for all lanes, so the segments are wrapped in + a dictionary. Parameters ---------- - data : dict of dict - Key = "veh_id": name of the vehicle \n Elements: - - * "time": time step at every sample - * "edge": edge ID at every sample - * "pos": relative position at every sample - * "vel": speed at every sample - params : dict - flow-specific parameters - all_time : array_like - a (n_steps,) vector representing the unique time steps in the - simulation + data : pd.DataFrame + cleaned dataframe of the trajectory data Returns ------- - as_array - n_steps x n_veh matrix specifying the absolute position of every - vehicle at every time step. Set to zero if the vehicle is not present - in the network at that time step. - as_array - n_steps x n_veh matrix specifying the speed of every vehicle at every - time step. Set to zero if the vehicle is not present in the network at - that time step. + dict of ndarray + dictionary of 3d array (n_segments x 2 x 2) containing segments + to be plotted. the dictionary is keyed on lane numbers, with the + values being the 3d array representing the segments. every inner + 2d array is comprised of two 1d arrays representing + [start time, start distance] and [end time, end distance] pairs. 
+ pd.DataFrame + modified trajectory dataframe """ - # import network data from flow params - # - # edge_starts = {"119257908#0": 0, - # "119257908#1-AddedOnRampEdge": 686.98} - desired_lane = 1 - edge_starts = {"119257914": 0, - "119257908#0": 61.58, - "119257908#1-AddedOnRampEdge": 686.98 + 61.58} - # edge_starts = {"119257908#0": 0} - # edge_starts = {"119257908#1-AddedOnRampEdge": 0} - # desired_lane = 5 - - # compute the absolute position - for veh_id in data.keys(): - data[veh_id]['abs_pos'] = _get_abs_pos_1_edge(data[veh_id]['edge'], - data[veh_id]['pos'], - edge_starts) - - # create the output variables - # TODO(@ev) handle subsampling better than this - low_time = int(0 / params['sim'].sim_step) - high_time = int(1600 / params['sim'].sim_step) - all_time = all_time[low_time:high_time] - - # track only vehicles that were around during this time period - observed_row_list = [] - pos = np.zeros((all_time.shape[0], len(data.keys()))) - speed = np.zeros((all_time.shape[0], len(data.keys()))) - for i, veh_id in enumerate(sorted(data.keys())): - for spd, abs_pos, ti, edge, lane in zip(data[veh_id]['vel'], - data[veh_id]['abs_pos'], - data[veh_id]['time'], - data[veh_id]['edge'], - data[veh_id]['lane']): - # avoid vehicles not on the relevant edges. Also only check the second to - # last lane - if edge not in edge_starts.keys() or ti not in all_time or lane != desired_lane: - continue - else: - if i not in observed_row_list: - observed_row_list.append(i) - ind = np.where(ti == all_time)[0] - pos[ind, i] = abs_pos - speed[ind, i] = spd - - pos = pos[:, observed_row_list] - speed = speed[:, observed_row_list] - - return pos, speed, all_time - - -def _figure_eight(data, params, all_time): + # Omit ghost edges + omit_edges = {'ghost0', '119257908#3'} + data.loc[:, :] = data[~data['edge_id'].isin(omit_edges)] + + # Reset lane numbers that are offset by ramp lanes + offset_edges = set(data[data['lane_id'] == 5]['edge_id'].unique()) + data.loc[data['edge_id'].isin(offset_edges), 'lane_id'] -= 1 + + segs = dict() + for lane, df in data.groupby('lane_id'): + segs[lane] = df[['time_step', 'distance', 'next_time', 'next_pos']].values.reshape((len(df), 2, 2)) + + return segs, data + + +def _figure_eight(data): r"""Generate position and speed data for the figure eight. The vehicles traveling towards the intersection from one side will be @@ -383,137 +238,165 @@ def _figure_eight(data, params, all_time): Parameters ---------- - data : dict of dict - Key = "veh_id": name of the vehicle \n Elements: - - * "time": time step at every sample - * "edge": edge ID at every sample - * "pos": relative position at every sample - * "vel": speed at every sample - params : dict - flow-specific parameters - all_time : array_like - a (n_steps,) vector representing the unique time steps in the - simulation + data : pd.DataFrame + cleaned dataframe of the trajectory data Returns ------- - as_array - n_steps x n_veh matrix specifying the absolute position of every - vehicle at every time step. Set to zero if the vehicle is not present - in the network at that time step. - as_array - n_steps x n_veh matrix specifying the speed of every vehicle at every - time step. Set to zero if the vehicle is not present in the network at - that time step. + ndarray + 3d array (n_segments x 2 x 2) containing segments to be plotted. + every inner 2d array is comprised of two 1d arrays representing + [start time, start distance] and [end time, end distance] pairs. 
+ pd.DataFrame + unmodified trajectory dataframe """ - # import network data from flow params - net_params = params['net'] - ring_radius = net_params.additional_params['radius_ring'] - ring_edgelen = ring_radius * np.pi / 2. - intersection = 2 * ring_radius - junction = 2.9 + 3.3 * net_params.additional_params['lanes'] - inner = 0.28 - - # generate edge starts - edgestarts = { - 'bottom': inner, - 'top': intersection / 2 + junction + inner, - 'upper_ring': intersection + junction + 2 * inner, - 'right': intersection + 3 * ring_edgelen + junction + 3 * inner, - 'left': 1.5 * intersection + 3 * ring_edgelen + 2 * junction + 3 * inner, - 'lower_ring': 2 * intersection + 3 * ring_edgelen + 2 * junction + 4 * inner, - ':bottom_0': 0, - ':center_1': intersection / 2 + inner, - ':top_0': intersection + junction + inner, - ':right_0': intersection + 3 * ring_edgelen + junction + 2 * inner, - ':center_0': 1.5 * intersection + 3 * ring_edgelen + junction + 3 * inner, - ':left_0': 2 * intersection + 3 * ring_edgelen + 2 * junction + 3 * inner, - # for aimsun - 'bottom_to_top': intersection / 2 + inner, - 'right_to_left': junction + 3 * inner, - } + segs = data[['time_step', 'distance', 'next_time', 'next_pos']].values.reshape((len(data), 2, 2)) + + return segs, data + - # compute the absolute position - for veh_id in data.keys(): - data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'], - data[veh_id]['pos'], edgestarts) - - # create the output variables - pos = np.zeros((all_time.shape[0], len(data.keys()))) - speed = np.zeros((all_time.shape[0], len(data.keys()))) - for i, veh_id in enumerate(sorted(data.keys())): - for spd, abs_pos, ti in zip(data[veh_id]['vel'], - data[veh_id]['abs_pos'], - data[veh_id]['time']): - ind = np.where(ti == all_time)[0] - pos[ind, i] = abs_pos - speed[ind, i] = spd - - # reorganize data for space-time plot - figure_eight_len = 6 * ring_edgelen + 2 * intersection + 2 * junction + 10 * inner - intersection_loc = [edgestarts[':center_1'] + intersection / 2, - edgestarts[':center_0'] + intersection / 2] - pos[pos < intersection_loc[0]] += figure_eight_len - pos[np.logical_and(pos > intersection_loc[0], pos < intersection_loc[1])] \ - += - intersection_loc[1] - pos[pos > intersection_loc[1]] = \ - - pos[pos > intersection_loc[1]] + figure_eight_len + intersection_loc[0] - - return pos, speed, all_time - - -def _get_abs_pos(edge, rel_pos, edgestarts): +def _get_abs_pos(df, params): """Compute the absolute positions from edges and relative positions. This is the variable we will ultimately use to plot individual vehicles. 
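    For networks without a predefined edge-start table, the offsets fall
    back to zero (via the defaultdict fallback in the body below).
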
Parameters
    ----------
    df : pd.DataFrame
        dataframe of trajectory data
    params : dict
        flow-specific parameters

    Returns
    -------
    pd.Series
        the absolute position for every sample
    """
+    if params['network'] == MergeNetwork:
+        inflow_edge_len = 100
+        premerge = params['net'].additional_params['pre_merge_length']
+        postmerge = params['net'].additional_params['post_merge_length']
+
+        # generate edge starts
+        edgestarts = {
+            'inflow_highway': 0,
+            'left': inflow_edge_len + 0.1,
+            'center': inflow_edge_len + premerge + 22.6,
+            'inflow_merge': inflow_edge_len + premerge + postmerge + 22.6,
+            'bottom': 2 * inflow_edge_len + premerge + postmerge + 22.7,
+            ':left_0': inflow_edge_len,
+            ':center_0': inflow_edge_len + premerge + 0.1,
+            ':center_1': inflow_edge_len + premerge + 0.1,
+            ':bottom_0': 2 * inflow_edge_len + premerge + postmerge + 22.6
+        }
+    elif params['network'] == RingNetwork:
+        ring_length = params['net'].additional_params["length"]
+        junction_length = 0.1  # length of inter-edge junctions
+
+        edgestarts = {
+            "bottom": 0,
+            ":right_0": 0.25 * ring_length,
+            "right": 0.25 * ring_length + junction_length,
+            ":top_0": 0.5 * ring_length + junction_length,
+            "top": 0.5 * ring_length + 2 * junction_length,
+            ":left_0": 0.75 * ring_length + 2 * junction_length,
+            "left": 0.75 * ring_length + 3 * junction_length,
+            ":bottom_0": ring_length + 3 * junction_length
+        }
+    elif params['network'] == FigureEightNetwork:
+        net_params = params['net']
+        ring_radius = net_params.additional_params['radius_ring']
+        ring_edgelen = ring_radius * np.pi / 2.
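+        # The intersection spans the full ring diameter; the junction and
+        # inner terms approximate the lengths of SUMO's internal edges.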
+ intersection = 2 * ring_radius + junction = 2.9 + 3.3 * net_params.additional_params['lanes'] + inner = 0.28 + + # generate edge starts + edgestarts = { + 'bottom': inner, + 'top': intersection / 2 + junction + inner, + 'upper_ring': intersection + junction + 2 * inner, + 'right': intersection + 3 * ring_edgelen + junction + 3 * inner, + 'left': 1.5 * intersection + 3 * ring_edgelen + 2 * junction + 3 * inner, + 'lower_ring': 2 * intersection + 3 * ring_edgelen + 2 * junction + 4 * inner, + ':bottom_0': 0, + ':center_1': intersection / 2 + inner, + ':top_0': intersection + junction + inner, + ':right_0': intersection + 3 * ring_edgelen + junction + 2 * inner, + ':center_0': 1.5 * intersection + 3 * ring_edgelen + junction + 3 * inner, + ':left_0': 2 * intersection + 3 * ring_edgelen + 2 * junction + 3 * inner, + # for aimsun + 'bottom_to_top': intersection / 2 + inner, + 'right_to_left': junction + 3 * inner, + } + else: + edgestarts = defaultdict(float) + + ret = df.apply(lambda x: x['relative_position'] + edgestarts[x['edge_id']], axis=1) + + if params['network'] == FigureEightNetwork: + # reorganize data for space-time plot + figure_eight_len = 6 * ring_edgelen + 2 * intersection + 2 * junction + 10 * inner + intersection_loc = [edgestarts[':center_1'] + intersection / 2, + edgestarts[':center_0'] + intersection / 2] + ret.loc[ret < intersection_loc[0]] += figure_eight_len + ret.loc[(ret > intersection_loc[0]) & (ret < intersection_loc[1])] += -intersection_loc[1] + ret.loc[ret > intersection_loc[1]] = \ + - ret.loc[ret > intersection_loc[1]] + figure_eight_len + intersection_loc[0] return ret -def _get_abs_pos_1_edge(edges, rel_pos, edge_starts): - """Compute the absolute positions from a subset of edges. +def plot_tsd(ax, df, segs, args, lane=None): + """Plot the time-space diagram. - This is the variable we will ultimately use to plot individual vehicles. + Take the pre-processed segments and other meta-data, then plot all the line segments. 
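+    Segments are drawn with a matplotlib LineCollection and colored by
+    vehicle speed, using the module's colormap and min/max speed bounds.
+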
Parameters ---------- - edges : list of str - list of edges at every time step - rel_pos : list of float - list of relative positions at every time step - edge_starts : dict - the absolute starting position of every edge + ax : matplotlib.axes.Axes + figure axes that will be plotted on + df : pd.DataFrame + data used for axes bounds and speed coloring + segs : list of list of lists + line segments to be plotted, where each segment is a list of two [x,y] pairs + args : dict + parsed arguments + lane : int, optional + lane number to be shown in plot title Returns ------- - list of float - the absolute positive for every sample + None """ - ret = [] - for edge_i, pos_i in zip(edges, rel_pos): - if edge_i in edge_starts.keys(): - ret.append(pos_i + edge_starts[edge_i]) - else: - ret.append(-1) - return ret + norm = plt.Normalize(args.min_speed, args.max_speed) + + xmin = max(df['time_step'].min(), args.start) + xmax = min(df['time_step'].max(), args.stop) + xbuffer = (xmax - xmin) * 0.025 # 2.5% of range + ymin, ymax = df['distance'].min(), df['distance'].max() + ybuffer = (ymax - ymin) * 0.025 # 2.5% of range + + ax.set_xlim(xmin - xbuffer, xmax + xbuffer) + ax.set_ylim(ymin - ybuffer, ymax + ybuffer) + + lc = LineCollection(segs, cmap=my_cmap, norm=norm) + lc.set_array(df['speed'].values) + lc.set_linewidth(1) + ax.add_collection(lc) + ax.autoscale() + + if lane: + ax.set_title('Time-Space Diagram: Lane {}'.format(lane), fontsize=25) + else: + ax.set_title('Time-Space Diagram', fontsize=25) + ax.set_ylabel('Position (m)', fontsize=20) + ax.set_xlabel('Time (s)', fontsize=20) + plt.xticks(fontsize=18) + plt.yticks(fontsize=18) + + cbar = plt.colorbar(lc, ax=ax, norm=norm) + cbar.set_label('Velocity (m/s)', fontsize=20) + cbar.ax.tick_params(labelsize=18) if __name__ == '__main__': @@ -525,8 +408,8 @@ def _get_abs_pos_1_edge(edges, rel_pos, edge_starts): '.json') # required arguments - parser.add_argument('emission_path', type=str, - help='path to the csv file.') + parser.add_argument('trajectory_path', type=str, + help='path to the Flow trajectory csv file.') parser.add_argument('flow_params', type=str, help='path to the flow_params json file.') @@ -553,12 +436,6 @@ def _get_abs_pos_1_edge(edges, rel_pos, edge_starts): module = __import__("examples.exp_configs.non_rl", fromlist=[args.flow_params]) flow_params = getattr(module, args.flow_params).flow_params - # import data from the emission.csv file - emission_data = import_data_from_emission(args.emission_path) - - # compute the position and speed for all vehicles at all times - pos, speed, time = get_time_space_data(emission_data, flow_params) - # some plotting parameters cdict = { 'red': ((0, 0, 0), (0.2, 1, 1), (0.6, 1, 1), (1, 0, 0)), @@ -567,64 +444,34 @@ def _get_abs_pos_1_edge(edges, rel_pos, edge_starts): } my_cmap = colors.LinearSegmentedColormap('my_colormap', cdict, 1024) - # perform plotting operation - fig = plt.figure(figsize=(16, 9)) - ax = plt.axes() - norm = plt.Normalize(args.min_speed, args.max_speed) - cols = [] + # Read trajectory csv into pandas dataframe + traj_df = import_data_from_trajectory(args.trajectory_path, flow_params) - xmin = max(time[0], args.start) - xmax = min(time[-1], args.stop) - xbuffer = (xmax - xmin) * 0.025 # 2.5% of range - ymin, ymax = np.amin(pos), np.amax(pos) - ybuffer = (ymax - ymin) * 0.025 # 2.5% of range + # Convert df data into segments for plotting + segs, traj_df = get_time_space_data(traj_df, flow_params) - ax.set_xlim(xmin - xbuffer, xmax + xbuffer) - ax.set_ylim(ymin - ybuffer, ymax + 
ybuffer) + if flow_params['network'] == I210SubNetwork: + nlanes = traj_df['lane_id'].nunique() + fig = plt.figure(figsize=(16, 9*nlanes)) - for indx_car in range(pos.shape[1]): - unique_car_pos = pos[:, indx_car] - - if flow_params['network'] == I210SubNetwork: - indices = np.where(pos[:, indx_car] != 0)[0] - unique_car_speed = speed[indices, indx_car] - points = np.array([time[indices], pos[indices, indx_car]]).T.reshape(-1, 1, 2) - else: - - # discontinuity from wraparound - disc = np.where(np.abs(np.diff(unique_car_pos)) >= 10)[0] + 1 - unique_car_time = np.insert(time, disc, np.nan) - unique_car_pos = np.insert(unique_car_pos, disc, np.nan) - unique_car_speed = np.insert(speed[:, indx_car], disc, np.nan) - # - points = np.array( - [unique_car_time, unique_car_pos]).T.reshape(-1, 1, 2) - segments = np.concatenate([points[:-1], points[1:]], axis=1) - lc = LineCollection(segments, cmap=my_cmap, norm=norm) - - # Set the values used for color mapping - lc.set_array(unique_car_speed) - lc.set_linewidth(1.75) - cols.append(lc) - - plt.title(args.title, fontsize=25) - plt.ylabel('Position (m)', fontsize=20) - plt.xlabel('Time (s)', fontsize=20) - - for col in cols: - line = ax.add_collection(col) - cbar = plt.colorbar(line, ax=ax, norm=norm) - cbar.set_label('Velocity (m/s)', fontsize=20) - cbar.ax.tick_params(labelsize=18) + for lane, df in traj_df.groupby('lane_id'): + ax = plt.subplot(nlanes, 1, lane+1) - plt.xticks(fontsize=18) - plt.yticks(fontsize=18) + plot_tsd(ax, df, segs[lane], args, lane) + else: + # perform plotting operation + fig = plt.figure(figsize=(16, 9)) + ax = plt.axes() + + plot_tsd(ax, traj_df, segs, args) ########################################################################### # Note: For MergeNetwork only # if flow_params['network'] == 'MergeNetwork': # - plt.plot(time, [0] * pos.shape[0], linewidth=3, color="white") # - plt.plot(time, [-0.1] * pos.shape[0], linewidth=3, color="white") # + plt.plot([df['time_step'].min(), df['time_step'].max()], + [0, 0], linewidth=3, color="white") # + plt.plot([df['time_step'].min(), df['time_step'].max()], + [-0.1, -0.1], linewidth=3, color="white") # ########################################################################### plt.show() diff --git a/tests/fast_tests/test_files/i210_emission.csv b/tests/fast_tests/test_files/i210_emission.csv index d43c115a4..ec63cf9cf 100644 --- a/tests/fast_tests/test_files/i210_emission.csv +++ b/tests/fast_tests/test_files/i210_emission.csv @@ -1,4 +1,4 @@ -x,time,edge_id,eclass,type,PMx,speed,angle,CO,CO2,electricity,noise,lane_number,NOx,relative_position,route,y,id,fuel,HC,waiting +x,time,edge_id,eclass,type,PMx,speed,angle,CO,CO2,electricity,noise,lane_number,NOx,distance,route,y,id,fuel,HC,waiting 485.04,0.8,119257914,HBEFA3/PC_G_EU4,human,0.05,23.0,119.74,3.32,3793.12,0.0,70.29,1,1.17,5.1,route119257914_0,1068.18,flow_00.0,1.63,0.11,0.0 500.91,1.6,119257914,HBEFA3/PC_G_EU4,human,0.0,22.84,119.74,0.0,0.0,0.0,69.9,1,0.0,23.37,route119257914_0,1059.12,flow_00.0,0.0,0.0,0.0 517.1,2.4,119257914,HBEFA3/PC_G_EU4,human,0.15,23.31,119.74,78.83,7435.5,0.0,71.61,1,2.88,42.02,route119257914_0,1049.87,flow_00.0,3.2,0.54,0.0 diff --git a/tests/fast_tests/test_visualizers.py b/tests/fast_tests/test_visualizers.py index 7af413909..d2f4a20a4 100644 --- a/tests/fast_tests/test_visualizers.py +++ b/tests/fast_tests/test_visualizers.py @@ -91,236 +91,226 @@ def test_capacity_diagram_generator(self): np.testing.assert_array_almost_equal(std_outflows, expected_stds) def 
test_time_space_diagram_figure_eight(self): - # check that the exported data matches the expected emission file data - fig8_emission_data = { - 'idm_3': {'pos': [27.25, 28.25, 30.22, 33.17], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.99, 1.98, 2.95], - 'edge': ['upper_ring', 'upper_ring', 'upper_ring', - 'upper_ring'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_4': {'pos': [56.02, 57.01, 58.99, 61.93], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.99, 1.98, 2.95], - 'edge': ['upper_ring', 'upper_ring', 'upper_ring', - 'upper_ring'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_5': {'pos': [84.79, 85.78, 87.76, 90.7], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.99, 1.98, 2.95], - 'edge': ['upper_ring', 'upper_ring', 'upper_ring', - 'upper_ring'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_2': {'pos': [28.77, 29.76, 1.63, 4.58], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.99, 1.97, 2.95], - 'edge': ['top', 'top', 'upper_ring', 'upper_ring'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_13': {'pos': [106.79, 107.79, 109.77, 112.74], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.99, 1.98, 2.96], - 'edge': ['lower_ring', 'lower_ring', 'lower_ring', - 'lower_ring'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_9': {'pos': [22.01, 23.0, 24.97, 27.92], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.99, 1.97, 2.95], - 'edge': ['left', 'left', 'left', 'left'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_6': {'pos': [113.56, 114.55, 116.52, 119.47], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.99, 1.97, 2.95], - 'edge': ['upper_ring', 'upper_ring', 'upper_ring', - 'upper_ring'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_8': {'pos': [29.44, 0.28, 2.03, 4.78], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.84, 1.76, 2.75], - 'edge': ['right', ':center_0', ':center_0', - ':center_0'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_12': {'pos': [78.03, 79.02, 80.99, 83.94], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.99, 1.98, 2.95], - 'edge': ['lower_ring', 'lower_ring', 'lower_ring', - 'lower_ring'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_10': {'pos': [20.49, 21.48, 23.46, 26.41], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.99, 1.98, 2.95], - 'edge': ['lower_ring', 'lower_ring', 'lower_ring', - 'lower_ring'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_11': {'pos': [49.26, 50.25, 52.23, 55.17], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.99, 1.98, 2.95], - 'edge': ['lower_ring', 'lower_ring', 'lower_ring', - 'lower_ring'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_1': {'pos': [0.0, 0.99, 2.97, 5.91], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.99, 1.98, 2.95], - 'edge': ['top', 'top', 'top', 'top'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_7': {'pos': [0.67, 1.66, 3.64, 6.58], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 0.99, 1.97, 2.94], - 'edge': ['right', 'right', 'right', 'right'], - 'lane': [0.0, 0.0, 0.0, 0.0]}, - 'idm_0': {'pos': [0.0, 1.0, 2.98, 5.95], - 'time': [1.0, 2.0, 3.0, 4.0], - 'vel': [0.0, 1.0, 1.99, 2.97], - 'edge': ['bottom', 'bottom', 'bottom', 'bottom'], - 'lane': [0.0, 0.0, 0.0, 0.0]} - } dir_path = os.path.dirname(os.path.realpath(__file__)) - actual_emission_data = tsd.import_data_from_emission( - os.path.join(dir_path, 'test_files/fig8_emission.csv')) - self.assertDictEqual(fig8_emission_data, actual_emission_data) - - # test get_time_space_data for figure eight networks flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/fig8.json')) - pos, speed, _ = tsd.get_time_space_data( - actual_emission_data, flow_params) - - expected_pos = np.array( - 
[[60, 23.8, 182.84166941, 154.07166941, 125.30166941, 96.54166941, - -203.16166941, -174.40166941, -145.63166941, -116.86166941, - -88.09166941, -59.33, -30.56, -1.79], - [59, 22.81, 181.85166941, 153.08166941, 124.31166941, 95.54166941, - -202.17166941, -173.40166941, -144.64166941, -115.87166941, - -87.10166941, -58.34, -29.72, -0.8], - [57.02, 20.83, 179.87166941, 151.10166941, 122.34166941, - 93.56166941, -200.02166941, -171.43166941, -142.66166941, - -113.89166941, -85.13166941, -56.36, -27.97, 208.64166941]] + emission_data = tsd.import_data_from_trajectory( + os.path.join(dir_path, 'test_files/fig8_emission.csv'), flow_params) + + segs, _ = tsd.get_time_space_data(emission_data, flow_params) + + expected_segs = np.array([ + [[1., 60.], [2., 59.]], + [[2., 59.], [3., 57.02]], + [[3., 57.02], [4., 54.05]], + [[1., 23.8], [2., 22.81]], + [[2., 22.81], [3., 20.83]], + [[3., 20.83], [4., 17.89]], + [[1., 182.84166941], [2., 181.85166941]], + [[2., 181.85166941], [3., 179.87166941]], + [[3., 179.87166941], [4., 176.92166941]], + [[1., 154.07166941], [2., 153.08166941]], + [[2., 153.08166941], [3., 151.10166941]], + [[3., 151.10166941], [4., 148.16166941]], + [[1., 125.30166941], [2., 124.31166941]], + [[2., 124.31166941], [3., 122.34166941]], + [[3., 122.34166941], [4., 119.39166941]], + [[1., 96.54166941], [2., 95.54166941]], + [[2., 95.54166941], [3., 93.56166941]], + [[3., 93.56166941], [4., 90.59166941]], + [[1., -203.16166941], [2., -202.17166941]], + [[2., -202.17166941], [3., -200.02166941]], + [[3., -200.02166941], [4., -197.07166941]], + [[1., -174.40166941], [2., -173.40166941]], + [[2., -173.40166941], [3., -171.43166941]], + [[3., -171.43166941], [4., -168.48166941]], + [[1., -145.63166941], [2., -144.64166941]], + [[2., -144.64166941], [3., -142.66166941]], + [[3., -142.66166941], [4., -139.72166941]], + [[1., -116.86166941], [2., -115.87166941]], + [[2., -115.87166941], [3., -113.89166941]], + [[3., -113.89166941], [4., -110.95166941]], + [[1., -88.09166941], [2., -87.10166941]], + [[2., -87.10166941], [3., -85.13166941]], + [[3., -85.13166941], [4., -82.18166941]], + [[1., -59.33], [2., -58.34]], + [[2., -58.34], [3., -56.36]], + [[3., -56.36], [4., -53.42]], + [[1., -30.56], [2., -29.72]], + [[2., -29.72], [3., -27.97]], + [[3., -27.97], [4., -25.22]], + [[1., -1.79], [2., -0.8]], + [[2., -0.8], [3., 208.64166941]], + [[3., 208.64166941], [4., 205.69166941]]] ) - expected_speed = np.array([ - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, - 0.99, 0.84, 0.99], - [1.99, 1.98, 1.98, 1.98, 1.98, 1.98, 1.97, 1.98, 1.98, 1.98, 1.97, - 1.97, 1.76, 1.97] - ]) - np.testing.assert_array_almost_equal(pos[:-1, :], expected_pos) - np.testing.assert_array_almost_equal(speed[:-1, :], expected_speed) + np.testing.assert_array_almost_equal(segs, expected_segs) def test_time_space_diagram_merge(self): dir_path = os.path.dirname(os.path.realpath(__file__)) - emission_data = tsd.import_data_from_emission( - os.path.join(dir_path, 'test_files/merge_emission.csv')) - flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/merge.json')) - pos, speed, _ = tsd.get_time_space_data(emission_data, flow_params) - - expected_pos = np.array( - [[4.86, 180.32, 361.32, 547.77, 0], - [4.88, 180.36, 361.36, 547.8, 0], - [4.95, 180.43, 361.44, 547.87, 0], - [5.06, 180.54, 361.56, 547.98, 0], - [5.21, 180.68, 361.72, 548.12, 0], - [5.4, 180.86, 0, 0, 0]] - ) - expected_speed = np.array( - [[0, 0, 0, 0, 0], - [0.15, 0.17, 0.19, 0.14, 
0], - [0.35, 0.37, 0.39, 0.34, 0], - [0.54, 0.57, 0.59, 0.54, 0], - [0.74, 0.7, 0.79, 0.71, 0], - [0.94, 0.9, 0, 0, 0]] + emission_data = tsd.import_data_from_trajectory( + os.path.join(dir_path, 'test_files/merge_emission.csv'), flow_params) + + segs, _ = tsd.get_time_space_data(emission_data, flow_params) + + expected_segs = np.array([ + [[2.0000e-01, 7.2949e+02], [4.0000e-01, 7.2953e+02]], + [[4.0000e-01, 7.2953e+02], [6.0000e-01, 7.2961e+02]], + [[6.0000e-01, 7.2961e+02], [8.0000e-01, 7.2973e+02]], + [[8.0000e-01, 7.2973e+02], [1.0000e+00, 7.2988e+02]]] ) - np.testing.assert_array_almost_equal(pos, expected_pos) - np.testing.assert_array_almost_equal(speed, expected_speed) + np.testing.assert_array_almost_equal(segs, expected_segs) def test_time_space_diagram_I210(self): dir_path = os.path.dirname(os.path.realpath(__file__)) - emission_data = tsd.import_data_from_emission( - os.path.join(dir_path, 'test_files/i210_emission.csv')) - module = __import__("examples.exp_configs.non_rl", fromlist=["i210_subnetwork"]) flow_params = getattr(module, "i210_subnetwork").flow_params - pos, speed, _ = tsd.get_time_space_data(emission_data, flow_params) - - expected_pos = np.array( - [[5.1, 0., 0.], - [23.37, 0., 0.], - [42.02, 5.1, 0.], - [61.21, 22.97, 0.], - [80.45, 40.73, 5.1], - [101.51, 0., 0.]] - ) - expected_speed = np.array( - [[23., 0., 0.], - [22.84, 0., 0.], - [23.31, 23., 0.], - [23.98, 22.33, 0.], - [24.25, 22.21, 23.], - [26.33, 0., 0.]] - ) - - np.testing.assert_array_almost_equal(pos, expected_pos) - np.testing.assert_array_almost_equal(speed, expected_speed) + emission_data = tsd.import_data_from_trajectory( + os.path.join(dir_path, 'test_files/i210_emission.csv'), flow_params) + + segs, _ = tsd.get_time_space_data(emission_data, flow_params) + + expected_segs = { + 1: np.array([ + [[0.8, 5.1], [1.6, 23.37]], + [[1.6, 23.37], [2.4, 42.02]], + [[2.4, 42.02], [3.2, 61.21]], + [[3.2, 61.21], [4., 18.87]], + [[4., 18.87], [4.8, 39.93]], + [[2.4, 5.1], [3.2, 22.97]], + [[3.2, 22.97], [4., 40.73]]] + ), + 2: np.array([ + [[2.4, 5.1], [3.2, 23.98]], + [[3.2, 23.98], [4., 43.18]]] + ), + 3: np.array([ + [[0.8, 5.1], [1.6, 23.72]], + [[1.6, 23.72], [2.4, 43.06]], + [[2.4, 43.06], [3.2, 1.33]], + [[3.2, 1.33], [4., 21.65]], + [[4., 21.65], [4.8, 43.46]], + [[2.4, 5.1], [3.2, 23.74]], + [[3.2, 23.74], [4., 42.38]]] + ), + 4: np.array([ + [[2.4, 5.1], [3.2, 23.6]], + [[3.2, 23.6], [4., 42.46]]] + )} + + for lane, expected_seg in expected_segs.items(): + np.testing.assert_array_almost_equal(segs[lane], expected_seg) def test_time_space_diagram_ring_road(self): dir_path = os.path.dirname(os.path.realpath(__file__)) - emission_data = tsd.import_data_from_emission( - os.path.join(dir_path, 'test_files/ring_230_emission.csv')) - flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/ring_230.json')) - pos, speed, _ = tsd.get_time_space_data(emission_data, flow_params) - - expected_pos = np.array( - [[0.0000e+00, 9.5500e+00, 9.5550e+01, 1.0510e+02, 1.1465e+02, - 1.2429e+02, 1.3384e+02, 1.4338e+02, 1.5293e+02, 1.6247e+02, - 1.7202e+02, 1.8166e+02, 1.9090e+01, 1.9121e+02, 2.0075e+02, - 2.8640e+01, 3.8180e+01, 4.7730e+01, 5.7270e+01, 6.6920e+01, - 7.6460e+01, 8.6010e+01], - [1.0000e-02, 9.5500e+00, 9.5560e+01, 1.0511e+02, 1.1465e+02, - 1.2430e+02, 1.3384e+02, 1.4339e+02, 1.5294e+02, 1.6248e+02, - 1.7203e+02, 1.8167e+02, 1.9100e+01, 1.9122e+02, 2.0076e+02, - 2.8640e+01, 3.8190e+01, 4.7740e+01, 5.7280e+01, 6.6930e+01, - 7.6470e+01, 8.6020e+01], - [2.0000e-02, 9.5700e+00, 9.5580e+01, 
1.0512e+02, 1.1467e+02, - 1.2431e+02, 1.3386e+02, 1.4341e+02, 1.5295e+02, 1.6250e+02, - 1.7204e+02, 1.8169e+02, 1.9110e+01, 1.9123e+02, 2.0078e+02, - 2.8660e+01, 3.8210e+01, 4.7750e+01, 5.7300e+01, 6.6940e+01, - 7.6490e+01, 8.6030e+01], - [5.0000e-02, 9.5900e+00, 9.5600e+01, 1.0515e+02, 1.1469e+02, - 1.2434e+02, 1.3388e+02, 1.4343e+02, 1.5297e+02, 1.6252e+02, - 1.7207e+02, 1.8171e+02, 1.9140e+01, 1.9126e+02, 2.0081e+02, - 2.8680e+01, 3.8230e+01, 4.7770e+01, 5.7320e+01, 6.6970e+01, - 7.6510e+01, 8.6060e+01], - [8.0000e-02, 9.6200e+00, 9.5630e+01, 1.0518e+02, 1.1472e+02, - 1.2437e+02, 1.3391e+02, 1.4346e+02, 1.5301e+02, 1.6255e+02, - 1.7210e+02, 1.8174e+02, 1.9170e+01, 1.9129e+02, 2.0085e+02, - 2.8710e+01, 3.8260e+01, 4.7810e+01, 5.7350e+01, 6.7000e+01, - 7.6540e+01, 8.6090e+01], - [1.2000e-01, 9.6600e+00, 9.5670e+01, 1.0522e+02, 1.1476e+02, - 1.2441e+02, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, - 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, - 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, - 0.0000e+00, 0.0000e+00]] + emission_data = tsd.import_data_from_trajectory( + os.path.join(dir_path, 'test_files/ring_230_emission.csv'), flow_params) + + segs, _ = tsd.get_time_space_data(emission_data, flow_params) + + expected_segs = np.array([ + [[1.0000e-01, 0.0000e+00], [2.0000e-01, 1.0000e-02]], + [[2.0000e-01, 1.0000e-02], [3.0000e-01, 2.0000e-02]], + [[3.0000e-01, 2.0000e-02], [4.0000e-01, 5.0000e-02]], + [[4.0000e-01, 5.0000e-02], [5.0000e-01, 8.0000e-02]], + [[5.0000e-01, 8.0000e-02], [6.0000e-01, 1.2000e-01]], + [[1.0000e-01, 9.5500e+00], [2.0000e-01, 9.5500e+00]], + [[2.0000e-01, 9.5500e+00], [3.0000e-01, 9.5700e+00]], + [[3.0000e-01, 9.5700e+00], [4.0000e-01, 9.5900e+00]], + [[4.0000e-01, 9.5900e+00], [5.0000e-01, 9.6200e+00]], + [[5.0000e-01, 9.6200e+00], [6.0000e-01, 9.6600e+00]], + [[1.0000e-01, 9.5550e+01], [2.0000e-01, 9.5560e+01]], + [[2.0000e-01, 9.5560e+01], [3.0000e-01, 9.5580e+01]], + [[3.0000e-01, 9.5580e+01], [4.0000e-01, 9.5600e+01]], + [[4.0000e-01, 9.5600e+01], [5.0000e-01, 9.5630e+01]], + [[5.0000e-01, 9.5630e+01], [6.0000e-01, 9.5670e+01]], + [[1.0000e-01, 1.0510e+02], [2.0000e-01, 1.0511e+02]], + [[2.0000e-01, 1.0511e+02], [3.0000e-01, 1.0512e+02]], + [[3.0000e-01, 1.0512e+02], [4.0000e-01, 1.0515e+02]], + [[4.0000e-01, 1.0515e+02], [5.0000e-01, 1.0518e+02]], + [[5.0000e-01, 1.0518e+02], [6.0000e-01, 1.0522e+02]], + [[1.0000e-01, 1.1465e+02], [2.0000e-01, 1.1465e+02]], + [[2.0000e-01, 1.1465e+02], [3.0000e-01, 1.1467e+02]], + [[3.0000e-01, 1.1467e+02], [4.0000e-01, 1.1469e+02]], + [[4.0000e-01, 1.1469e+02], [5.0000e-01, 1.1472e+02]], + [[5.0000e-01, 1.1472e+02], [6.0000e-01, 1.1476e+02]], + [[1.0000e-01, 1.2429e+02], [2.0000e-01, 1.2430e+02]], + [[2.0000e-01, 1.2430e+02], [3.0000e-01, 1.2431e+02]], + [[3.0000e-01, 1.2431e+02], [4.0000e-01, 1.2434e+02]], + [[4.0000e-01, 1.2434e+02], [5.0000e-01, 1.2437e+02]], + [[5.0000e-01, 1.2437e+02], [6.0000e-01, 1.2441e+02]], + [[1.0000e-01, 1.3384e+02], [2.0000e-01, 1.3384e+02]], + [[2.0000e-01, 1.3384e+02], [3.0000e-01, 1.3386e+02]], + [[3.0000e-01, 1.3386e+02], [4.0000e-01, 1.3388e+02]], + [[4.0000e-01, 1.3388e+02], [5.0000e-01, 1.3391e+02]], + [[1.0000e-01, 1.4338e+02], [2.0000e-01, 1.4339e+02]], + [[2.0000e-01, 1.4339e+02], [3.0000e-01, 1.4341e+02]], + [[3.0000e-01, 1.4341e+02], [4.0000e-01, 1.4343e+02]], + [[4.0000e-01, 1.4343e+02], [5.0000e-01, 1.4346e+02]], + [[1.0000e-01, 1.5293e+02], [2.0000e-01, 1.5294e+02]], + [[2.0000e-01, 1.5294e+02], [3.0000e-01, 1.5295e+02]], + [[3.0000e-01, 
1.5295e+02], [4.0000e-01, 1.5297e+02]], + [[4.0000e-01, 1.5297e+02], [5.0000e-01, 1.5301e+02]], + [[1.0000e-01, 1.6247e+02], [2.0000e-01, 1.6248e+02]], + [[2.0000e-01, 1.6248e+02], [3.0000e-01, 1.6250e+02]], + [[3.0000e-01, 1.6250e+02], [4.0000e-01, 1.6252e+02]], + [[4.0000e-01, 1.6252e+02], [5.0000e-01, 1.6255e+02]], + [[1.0000e-01, 1.7202e+02], [2.0000e-01, 1.7203e+02]], + [[2.0000e-01, 1.7203e+02], [3.0000e-01, 1.7204e+02]], + [[3.0000e-01, 1.7204e+02], [4.0000e-01, 1.7207e+02]], + [[4.0000e-01, 1.7207e+02], [5.0000e-01, 1.7210e+02]], + [[1.0000e-01, 1.8166e+02], [2.0000e-01, 1.8167e+02]], + [[2.0000e-01, 1.8167e+02], [3.0000e-01, 1.8169e+02]], + [[3.0000e-01, 1.8169e+02], [4.0000e-01, 1.8171e+02]], + [[4.0000e-01, 1.8171e+02], [5.0000e-01, 1.8174e+02]], + [[1.0000e-01, 1.9090e+01], [2.0000e-01, 1.9100e+01]], + [[2.0000e-01, 1.9100e+01], [3.0000e-01, 1.9110e+01]], + [[3.0000e-01, 1.9110e+01], [4.0000e-01, 1.9140e+01]], + [[4.0000e-01, 1.9140e+01], [5.0000e-01, 1.9170e+01]], + [[1.0000e-01, 1.9121e+02], [2.0000e-01, 1.9122e+02]], + [[2.0000e-01, 1.9122e+02], [3.0000e-01, 1.9123e+02]], + [[3.0000e-01, 1.9123e+02], [4.0000e-01, 1.9126e+02]], + [[4.0000e-01, 1.9126e+02], [5.0000e-01, 1.9129e+02]], + [[1.0000e-01, 2.0075e+02], [2.0000e-01, 2.0076e+02]], + [[2.0000e-01, 2.0076e+02], [3.0000e-01, 2.0078e+02]], + [[3.0000e-01, 2.0078e+02], [4.0000e-01, 2.0081e+02]], + [[4.0000e-01, 2.0081e+02], [5.0000e-01, 2.0085e+02]], + [[1.0000e-01, 2.8640e+01], [2.0000e-01, 2.8640e+01]], + [[2.0000e-01, 2.8640e+01], [3.0000e-01, 2.8660e+01]], + [[3.0000e-01, 2.8660e+01], [4.0000e-01, 2.8680e+01]], + [[4.0000e-01, 2.8680e+01], [5.0000e-01, 2.8710e+01]], + [[1.0000e-01, 3.8180e+01], [2.0000e-01, 3.8190e+01]], + [[2.0000e-01, 3.8190e+01], [3.0000e-01, 3.8210e+01]], + [[3.0000e-01, 3.8210e+01], [4.0000e-01, 3.8230e+01]], + [[4.0000e-01, 3.8230e+01], [5.0000e-01, 3.8260e+01]], + [[1.0000e-01, 4.7730e+01], [2.0000e-01, 4.7740e+01]], + [[2.0000e-01, 4.7740e+01], [3.0000e-01, 4.7750e+01]], + [[3.0000e-01, 4.7750e+01], [4.0000e-01, 4.7770e+01]], + [[4.0000e-01, 4.7770e+01], [5.0000e-01, 4.7810e+01]], + [[1.0000e-01, 5.7270e+01], [2.0000e-01, 5.7280e+01]], + [[2.0000e-01, 5.7280e+01], [3.0000e-01, 5.7300e+01]], + [[3.0000e-01, 5.7300e+01], [4.0000e-01, 5.7320e+01]], + [[4.0000e-01, 5.7320e+01], [5.0000e-01, 5.7350e+01]], + [[1.0000e-01, 6.6920e+01], [2.0000e-01, 6.6930e+01]], + [[2.0000e-01, 6.6930e+01], [3.0000e-01, 6.6940e+01]], + [[3.0000e-01, 6.6940e+01], [4.0000e-01, 6.6970e+01]], + [[4.0000e-01, 6.6970e+01], [5.0000e-01, 6.7000e+01]], + [[1.0000e-01, 7.6460e+01], [2.0000e-01, 7.6470e+01]], + [[2.0000e-01, 7.6470e+01], [3.0000e-01, 7.6490e+01]], + [[3.0000e-01, 7.6490e+01], [4.0000e-01, 7.6510e+01]], + [[4.0000e-01, 7.6510e+01], [5.0000e-01, 7.6540e+01]], + [[1.0000e-01, 8.6010e+01], [2.0000e-01, 8.6020e+01]], + [[2.0000e-01, 8.6020e+01], [3.0000e-01, 8.6030e+01]], + [[3.0000e-01, 8.6030e+01], [4.0000e-01, 8.6060e+01]], + [[4.0000e-01, 8.6060e+01], [5.0000e-01, 8.6090e+01]]] ) - expected_speed = np.array([ - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, - 0.08, 0.08, 0.08, 0.1, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08], - [0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, - 0.16, 0.16, 0.16, 0.2, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16], - [0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, - 0.23, 0.23, 0.23, 0.29, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23], - [0.31, 0.31, 0.31, 0.31, 0.31, 
0.31, 0.31, 0.31, 0.31, 0.31, 0.31,
-             0.31, 0.31, 0.31, 0.39, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31],
-            [0.41, 0.41, 0.41, 0.41, 0.41, 0.41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-             0, 0, 0, 0, 0, 0]
-        ])
-
-        np.testing.assert_array_almost_equal(pos, expected_pos)
-        np.testing.assert_array_almost_equal(speed, expected_speed)
+
+        np.testing.assert_array_almost_equal(segs, expected_segs)

     def test_plot_ray_results(self):
         dir_path = os.path.dirname(os.path.realpath(__file__))

From 5b38dc491c45b2a0861d84e02aae508b054df243 Mon Sep 17 00:00:00 2001
From: Pengyuan Zhou
Date: Mon, 15 Jun 2020 01:51:36 +0300
Subject: [PATCH 249/438] perhaps it means to ignore instead of add? (#966)

---
 flow/core/kernel/simulation/traci.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py
index 2cd109024..8d51b8e25 100644
--- a/flow/core/kernel/simulation/traci.py
+++ b/flow/core/kernel/simulation/traci.py
@@ -100,7 +100,7 @@ def start_simulation(self, network, sim_params):
         if sim_params.use_ballistic:
             sumo_call.append("--step-method.ballistic")

-        # add step logs (if requested)
+        # ignore step logs (if requested)
         if sim_params.no_step_log:
             sumo_call.append("--no-step-log")
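The one-line change above only corrects a comment: "--no-step-log" suppresses SUMO's per-step console output, so the flag is appended when step logs should be ignored, not added. A condensed sketch of the surrounding flag assembly, under the assumption that sim_params is the usual SumoParams object (the base command and parameter values here are illustrative):

    from flow.core.params import SumoParams

    sim_params = SumoParams(use_ballistic=True, no_step_log=True)  # illustrative
    sumo_call = ["sumo", "-c", "network.sumo.cfg"]  # illustrative base command

    # use ballistic position updates rather than Euler integration
    if sim_params.use_ballistic:
        sumo_call.append("--step-method.ballistic")

    # "--no-step-log" turns per-step logging OFF, hence "ignore" rather than "add"
    if sim_params.no_step_log:
        sumo_call.append("--no-step-log")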
-""" -from flow.controllers import BandoFTLController +"""Example of an open network with human-driven vehicles.""" +from flow.controllers import IDMController from flow.core.params import EnvParams from flow.core.params import NetParams from flow.core.params import InitialConfig @@ -11,15 +7,21 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams +from flow.core.params import SumoCarFollowingParams from flow.networks import HighwayNetwork from flow.envs import TestEnv from flow.networks.highway import ADDITIONAL_NET_PARAMS -TRAFFIC_SPEED = 11 -END_SPEED = 16 -TRAFFIC_FLOW = 2056 -HORIZON = 3600 -INCLUDE_NOISE = False +# the speed of vehicles entering the network +TRAFFIC_SPEED = 24.1 +# the maximum speed at the downstream boundary edge +END_SPEED = 6.0 +# the inflow rate of vehicles +TRAFFIC_FLOW = 2215 +# the simulation time horizon (in steps) +HORIZON = 1500 +# whether to include noise in the car-following models +INCLUDE_NOISE = True additional_net_params = ADDITIONAL_NET_PARAMS.copy() additional_net_params.update({ @@ -31,28 +33,30 @@ "speed_limit": 30, # number of edges to divide the highway into "num_edges": 2, - # whether to include a ghost edge of length 500m. This edge is provided a - # different speed limit. + # whether to include a ghost edge. This edge is provided a different speed + # limit. "use_ghost_edge": True, # speed limit for the ghost edge - "ghost_speed_limit": END_SPEED + "ghost_speed_limit": END_SPEED, + # length of the downstream ghost edge with the reduced speed limit + "boundary_cell_length": 300, }) vehicles = VehicleParams() vehicles.add( "human", - num_vehicles=0, + acceleration_controller=(IDMController, { + 'a': 1.3, + 'b': 2.0, + 'noise': 0.3 if INCLUDE_NOISE else 0.0 + }), + car_following_params=SumoCarFollowingParams( + min_gap=0.5 + ), lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", + model="SL2015", + lc_sublane=2.0, ), - acceleration_controller=(BandoFTLController, { - 'alpha': .5, - 'beta': 20.0, - 'h_st': 12.0, - 'h_go': 50.0, - 'v_max': 30.0, - 'noise': 1.0 if INCLUDE_NOISE else 0.0, - }), ) inflows = InFlows() @@ -64,8 +68,6 @@ depart_speed=TRAFFIC_SPEED, name="idm_highway_inflow") -# SET UP FLOW PARAMETERS - flow_params = dict( # name of the experiment exp_tag='highway-single', @@ -82,14 +84,15 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, - warmup_steps=0, - sims_per_step=1, + warmup_steps=500, + sims_per_step=3, ), # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( - sim_step=0.5, + sim_step=0.4, render=False, + use_ballistic=True, restart_instance=False ), diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index dd85c56cf..eda037068 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -1,9 +1,9 @@ """I-210 subnetwork example.""" import os - import numpy as np -from flow.controllers.car_following_models import IDMController +from flow.controllers import IDMController +from flow.controllers import I210Router from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -15,7 +15,49 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# create the base vehicle type that will be used for inflows +# 
=========================================================================== # +# Specify some configurable constants. # +# =========================================================================== # + +# whether to include the upstream ghost edge in the network +WANT_GHOST_CELL = True +# whether to include the downstream slow-down edge in the network +WANT_DOWNSTREAM_BOUNDARY = True +# whether to include vehicles on the on-ramp +ON_RAMP = True +# the inflow rate of vehicles (in veh/hr) +INFLOW_RATE = 5 * 2215 +# the speed of inflowing vehicles from the main edge (in m/s) +INFLOW_SPEED = 24.1 + +# =========================================================================== # +# Specify the path to the network template. # +# =========================================================================== # + +if WANT_DOWNSTREAM_BOUNDARY: + NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_" + "downstream.xml") +elif WANT_GHOST_CELL: + NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml") +else: + NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/test2.net.xml") + +# If the ghost cell is not being used, remove it from the initial edges that +# vehicles can be placed on. +edges_distribution = EDGES_DISTRIBUTION.copy() +if not WANT_GHOST_CELL: + edges_distribution.remove("ghost0") + +# =========================================================================== # +# Specify vehicle-specific information and inflows. # +# =========================================================================== # + vehicles = VehicleParams() vehicles.add( "human", @@ -24,35 +66,39 @@ lane_change_mode="strategic", ), acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 + "a": 1.3, + "b": 2.0, + "noise": 0.3, }), + routing_controller=(I210Router, {}) if ON_RAMP else None, ) inflow = InFlows() # main highway inflow.add( veh_type="human", - edge="119257914", - vehs_per_hour=8378, - departLane="random", - departSpeed=23) + edge="ghost0" if WANT_GHOST_CELL else "119257914", + vehs_per_hour=INFLOW_RATE, + departLane="best", + departSpeed=INFLOW_SPEED) # on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321, -# departLane="random", -# departSpeed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421, -# departLane="random", -# departSpeed=20) - -NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") +if ON_RAMP: + inflow.add( + veh_type="human", + edge="27414345", + vehs_per_hour=500, + departLane="random", + departSpeed=10) + inflow.add( + veh_type="human", + edge="27414342#0", + vehs_per_hour=500, + departLane="random", + departSpeed=10) + +# =========================================================================== # +# Generate the flow_params dict with all relevant simulation information. 
# +# =========================================================================== # flow_params = dict( # name of the experiment @@ -69,7 +115,7 @@ # simulation-related parameters sim=SumoParams( - sim_step=0.5, + sim_step=0.4, render=False, color_by_speed=True, use_ballistic=True @@ -77,14 +123,18 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=4500, + horizon=10000, ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, - template=NET_TEMPLATE + template=NET_TEMPLATE, + additional_params={ + "on_ramp": ON_RAMP, + "ghost_edge": WANT_GHOST_CELL, + } ), # vehicles to be placed in the network at the start of a rollout (see @@ -94,10 +144,14 @@ # parameters specifying the positioning of vehicles upon initialization/ # reset (see flow.core.params.InitialConfig) initial=InitialConfig( - edges_distribution=EDGES_DISTRIBUTION, + edges_distribution=edges_distribution, ), ) +# =========================================================================== # +# Specify custom callable that is logged during simulation runtime. # +# =========================================================================== # + edge_id = "119257908#1-AddedOnRampEdge" custom_callables = { "avg_merge_speed": lambda env: np.nan_to_num(np.mean( diff --git a/examples/exp_configs/non_rl/i210_subnetwork_sweep.py b/examples/exp_configs/non_rl/i210_subnetwork_sweep.py deleted file mode 100644 index 28cba81ce..000000000 --- a/examples/exp_configs/non_rl/i210_subnetwork_sweep.py +++ /dev/null @@ -1,151 +0,0 @@ -"""I-210 subnetwork example. - -In this case flow_params is a list of dicts. This is to test the effects of -multiple human-driver model parameters on the flow traffic. -""" -from collections import OrderedDict -from copy import deepcopy -import itertools -import os -import numpy as np - -from flow.core.params import SumoParams -from flow.core.params import EnvParams -from flow.core.params import NetParams -from flow.core.params import SumoLaneChangeParams -from flow.core.params import VehicleParams -from flow.core.params import InitialConfig -from flow.core.params import InFlows -import flow.config as config -from flow.envs import TestEnv -from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION - -# the default parameters for all lane change parameters -default_dict = { - "lane_change_mode": "strategic", - "model": "LC2013", - "lc_strategic": 1.0, - "lc_cooperative": 1.0, - "lc_speed_gain": 1.0, - "lc_keep_right": 1.0, - "lc_look_ahead_left": 2.0, - "lc_speed_gain_right": 1.0, - "lc_sublane": 1.0, - "lc_pushy": 0, - "lc_pushy_gap": 0.6, - "lc_assertive": 1, - "lc_accel_lat": 1.0 -} - -# values to sweep through for some lane change parameters -sweep_dict = OrderedDict({ - "lc_strategic": [1.0, 2.0, 4.0, 8.0], - "lc_cooperative": [1.0, 2.0], - "lc_look_ahead_left": [2.0, 4.0] -}) - -# Create a list of possible lane change parameter combinations. -all_names = sorted(sweep_dict) -combinations = itertools.product(*(sweep_dict[name] for name in all_names)) -combination_list = list(combinations) -res = [] -for val in combination_list: - curr_dict = {} - for elem, name in zip(val, all_names): - curr_dict[name] = elem - res.append(curr_dict) - -# Create a list of all possible flow_params dictionaries to sweep through the -# different lane change parameters. -flow_params = [] - -for lane_change_dict in res: - # no vehicles in the network. 
The lane change parameters of inflowing
-    # vehicles are updated here.
-    vehicles = VehicleParams()
-    update_dict = deepcopy(default_dict)
-    update_dict.update(lane_change_dict)
-    vehicles.add(
-        "human",
-        num_vehicles=0,
-        lane_change_params=SumoLaneChangeParams(**update_dict)
-    )
-
-    inflow = InFlows()
-    # main highway
-    inflow.add(
-        veh_type="human",
-        edge="119257914",
-        vehs_per_hour=8378,
-        # probability=1.0,
-        departLane="random",
-        departSpeed=20)
-    # on ramp
-    inflow.add(
-        veh_type="human",
-        edge="27414345",
-        vehs_per_hour=321,
-        departLane="random",
-        departSpeed=20)
-    inflow.add(
-        veh_type="human",
-        edge="27414342#0",
-        vehs_per_hour=421,
-        departLane="random",
-        departSpeed=20)
-
-    NET_TEMPLATE = os.path.join(
-        config.PROJECT_PATH,
-        "examples/exp_configs/templates/sumo/test2.net.xml")
-
-    params = dict(
-        # name of the experiment
-        exp_tag='I-210_subnetwork',
-
-        # name of the flow environment the experiment is running on
-        env_name=TestEnv,
-
-        # name of the network class the experiment is running on
-        network=I210SubNetwork,
-
-        # simulator that is used by the experiment
-        simulator='traci',
-
-        # simulation-related parameters
-        sim=SumoParams(
-            sim_step=0.8,
-            render=True,
-            color_by_speed=True
-        ),
-
-        # environment related parameters (see flow.core.params.EnvParams)
-        env=EnvParams(
-            horizon=4500,  # one hour of run time
-        ),
-
-        # network-related parameters (see flow.core.params.NetParams and the
-        # network's documentation or ADDITIONAL_NET_PARAMS component)
-        net=NetParams(
-            inflows=inflow,
-            template=NET_TEMPLATE
-        ),
-
-        # vehicles to be placed in the network at the start of a rollout (see
-        # flow.core.params.VehicleParams)
-        veh=vehicles,
-
-        # parameters specifying the positioning of vehicles upon
-        # initialization/reset (see flow.core.params.InitialConfig)
-        initial=InitialConfig(
-            edges_distribution=EDGES_DISTRIBUTION,
-        ),
-    )
-
-    # Store the next flow_params dict.
-    flow_params.append(params)
-
-
-custom_callables = {
-    "avg_merge_speed": lambda env: np.mean(env.k.vehicle.get_speed(
-        env.k.vehicle.get_ids_by_edge("119257908#1-AddedOnRampEdge")))
-}
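The file deleted above swept lane-change parameters by generating one flow_params dict per parameter combination. A condensed sketch of that sweep pattern, reusing the deleted file's names (only the sweep scaffolding is shown; the Flow setup around it is omitted):

    import itertools
    from collections import OrderedDict

    sweep_dict = OrderedDict({
        "lc_strategic": [1.0, 2.0, 4.0, 8.0],
        "lc_cooperative": [1.0, 2.0],
        "lc_look_ahead_left": [2.0, 4.0],
    })

    # one dict of lane-change overrides per point in the Cartesian product
    # (4 * 2 * 2 = 16 combinations)
    all_names = sorted(sweep_dict)
    res = [dict(zip(all_names, combo))
           for combo in itertools.product(*(sweep_dict[n] for n in all_names))]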
diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py
index 94f709ff4..a6d194708 100644
--- a/examples/exp_configs/rl/multiagent/multiagent_i210.py
+++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py
@@ -35,6 +35,10 @@
 # percentage of autonomous vehicles compared to human vehicles on highway
 PENETRATION_RATE = 10

+# TODO: temporary fix
+edges_distribution = EDGES_DISTRIBUTION.copy()
+edges_distribution.remove("ghost0")
+
 # SET UP PARAMETERS FOR THE ENVIRONMENT
 additional_env_params = ADDITIONAL_ENV_PARAMS.copy()
 additional_env_params.update({
@@ -145,7 +149,11 @@
     # network's documentation or ADDITIONAL_NET_PARAMS component)
     net=NetParams(
         inflows=inflow,
-        template=NET_TEMPLATE
+        template=NET_TEMPLATE,
+        additional_params={
+            "on_ramp": False,
+            "ghost_edge": False
+        }
     ),

     # vehicles to be placed in the network at the start of a rollout (see
@@ -155,7 +163,7 @@
     # parameters specifying the positioning of vehicles upon initialization/
     # reset (see flow.core.params.InitialConfig)
     initial=InitialConfig(
-        edges_distribution=EDGES_DISTRIBUTION,
+        edges_distribution=edges_distribution,
     ),
 )

diff --git a/examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml b/examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml
new file mode 100644
index 000000000..66e5a1131
--- /dev/null
+++ b/examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml
@@ -0,0 +1,5719 @@
[5,719 added lines of SUMO network XML omitted: the markup was stripped during text extraction, leaving only bare "+" diff markers.]

diff --git a/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml
new file mode 100644
index 000000000..10d4d8d45
--- /dev/null
+++ b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml
@@ -0,0 +1,5719 @@
[5,719 added lines of SUMO network XML omitted: the markup was stripped during text extraction, leaving only bare "+" diff markers.]
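The two controller files below register and define the new I210Router. For reference, a routing controller of this kind is attached per vehicle type through VehicleParams, as the i210_subnetwork config earlier in this patch does; a minimal sketch (the IDM parameter values are illustrative, and the import only works once this patch is applied):

    from flow.controllers import IDMController, I210Router
    from flow.core.params import VehicleParams

    vehicles = VehicleParams()
    vehicles.add(
        "human",
        num_vehicles=0,
        # car-following model for the human-driven vehicles
        acceleration_controller=(IDMController, {"a": 1.3, "b": 2.0}),
        # forces vehicles in lanes 3-5 of the off-ramp edge back onto the
        # mainline route
        routing_controller=(I210Router, {}),
    )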
diff --git a/flow/controllers/__init__.py b/flow/controllers/__init__.py
index 4dfcf05b7..a61d16980 100755
--- a/flow/controllers/__init__.py
+++ b/flow/controllers/__init__.py
@@ -28,7 +28,7 @@
 # routing controllers
 from flow.controllers.base_routing_controller import BaseRouter
 from flow.controllers.routing_controllers import ContinuousRouter, \
-    GridRouter, BayBridgeRouter
+    GridRouter, BayBridgeRouter, I210Router

 __all__ = [
     "RLController", "BaseController", "BaseLaneChangeController", "BaseRouter",
@@ -36,5 +36,6 @@
     "IDMController", "SimCarFollowingController", "FollowerStopper",
     "PISaturation", "StaticLaneChanger", "SimLaneChangeController",
     "ContinuousRouter", "GridRouter", "BayBridgeRouter", "LACController",
-    "GippsController", "NonLocalFollowerStopper", "BandoFTLController"
+    "GippsController", "NonLocalFollowerStopper", "BandoFTLController",
+    "I210Router"
 ]

diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py
index e6ccdde78..02aa34cb4 100755
--- a/flow/controllers/routing_controllers.py
+++ b/flow/controllers/routing_controllers.py
@@ -124,3 +124,29 @@ def choose_route(self, env):
             new_route = super().choose_route(env)

         return new_route
+
+
+class I210Router(ContinuousRouter):
+    """Assists in choosing routes in select cases for the I-210 sub-network.
+
+    Extension to the Continuous Router.
+
+    Usage
+    -----
+    See base class for usage example.
+    """
+
+    def choose_route(self, env):
+        """See parent class."""
+        edge = env.k.vehicle.get_edge(self.veh_id)
+        lane = env.k.vehicle.get_lane(self.veh_id)
+
+        # vehicles on this edge in lanes 3, 4, and 5 are not going to be able
+        # to make it out in time
+        if edge == "119257908#1-AddedOffRampEdge" and lane in [5, 4, 3]:
+            new_route = env.available_routes[
+                "119257908#1-AddedOffRampEdge"][0][0]
+        else:
+            new_route = super().choose_route(env)
+
+        return new_route

diff --git a/flow/networks/highway.py b/flow/networks/highway.py
index 7e9c18ad5..e48331cf9 100644
--- a/flow/networks/highway.py
+++ b/flow/networks/highway.py
@@ -14,11 +14,13 @@
     "speed_limit": 30,
     # number of edges to divide the highway into
     "num_edges": 1,
-    # whether to include a ghost edge of length 500m. This edge is provided a
-    # different speed limit.
+    # whether to include a ghost edge. This edge is provided a different speed
+    # limit.
     "use_ghost_edge": False,
     # speed limit for the ghost edge
     "ghost_speed_limit": 25,
+    # length of the downstream ghost edge with the reduced speed limit
+    "boundary_cell_length": 500
 }


@@ -34,9 +36,11 @@ class HighwayNetwork(Network):
     * **lanes** : number of lanes in the highway
     * **speed_limit** : max speed limit of the highway
     * **num_edges** : number of edges to divide the highway into
-    * **use_ghost_edge** : whether to include a ghost edge of length 500m. This
-      edge is provided a different speed limit.
+    * **use_ghost_edge** : whether to include a ghost edge. This edge is
+      provided a different speed limit.
     * **ghost_speed_limit** : speed limit for the ghost edge
+    * **boundary_cell_length** : length of the downstream ghost edge with the
+      reduced speed limit

     Usage
     -----
@@ -70,8 +74,6 @@ def __init__(self,
             if p not in net_params.additional_params:
                 raise KeyError('Network parameter "{}" not supplied'.format(p))

-        self.end_length = 500
-
         super().__init__(name, vehicles, net_params, initial_config,
                          traffic_lights)

@@ -80,6 +82,7 @@ def specify_nodes(self, net_params):
         length = net_params.additional_params["length"]
         num_edges = net_params.additional_params.get("num_edges", 1)
         segment_lengths = np.linspace(0, length, num_edges+1)
+        end_length = net_params.additional_params["boundary_cell_length"]

         nodes = []
         for i in range(num_edges+1):
@@ -92,7 +95,7 @@
         if self.net_params.additional_params["use_ghost_edge"]:
             nodes += [{
                 "id": "edge_{}".format(num_edges + 1),
-                "x": length + self.end_length,
+                "x": length + end_length,
                 "y": 0
             }]

@@ -103,6 +106,7 @@ def specify_edges(self, net_params):
         length = net_params.additional_params["length"]
         num_edges = net_params.additional_params.get("num_edges", 1)
         segment_length = length/float(num_edges)
+        end_length = net_params.additional_params["boundary_cell_length"]

         edges = []
         for i in range(num_edges):
@@ -120,7 +124,7 @@
                 "type": "highway_end",
                 "from": "edge_{}".format(num_edges),
                 "to": "edge_{}".format(num_edges + 1),
-                "length": self.end_length
+                "length": end_length
             }]

         return edges
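The highway.py change above replaces the hard-coded self.end_length = 500 with the configurable "boundary_cell_length" parameter. A minimal sketch of the resulting node geometry, with illustrative values that match the scenario tests later in this patch:

    import numpy as np

    length, num_edges, end_length = 1000, 2, 300  # illustrative values

    # evenly spaced boundary nodes along the main highway
    xs = list(np.linspace(0, length, num_edges + 1))  # [0.0, 500.0, 1000.0]

    # the downstream ghost node sits one boundary cell past the highway
    xs.append(length + end_length)                     # 1300.0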
- """ + def specify_routes(self, net_params): + """See parent class.""" rts = { - # Main highway "119257914": [ - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) - # (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 17 / 8378) - ], - # "119257908#0": [ - # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOnRampEdge": [ - # (["119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1": [ - # (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOffRampEdge": [ - # (["119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#2": [ - # (["119257908#2", "119257908#3"], 1), - # ], - # "119257908#3": [ - # (["119257908#3"], 1), - # ], - # - # # On-ramp - # "27414345": [ - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 9 / 321), - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 9 / 321), - # ], - # "27414342#0": [ - # (["27414342#0", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 20 / 421), - # (["27414342#0", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 20 / 421), - # ], - # "27414342#1-AddedOnRampEdge": [ - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 0.5), - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - # ], - # - # # Off-ramp - # "173381935": [ - # (["173381935"], 1), - # ], + (["119257914", + "119257908#0", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + ] } + if net_params.additional_params["ghost_edge"]: + rts.update({ + "ghost0": [ + (["ghost0", + "119257914", + "119257908#0", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1), + ], + }) + + if net_params.additional_params["on_ramp"]: + rts.update({ + # Main highway + "119257908#0": [ + (["119257908#0", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + 
], + "119257908#1-AddedOnRampEdge": [ + (["119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + ], + "119257908#1": [ + (["119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + ], + "119257908#1-AddedOffRampEdge": [ + (["119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + ], + "119257908#2": [ + (["119257908#2", + "119257908#3"], 1), + ], + "119257908#3": [ + (["119257908#3"], 1), + ], + + # On-ramp + "27414345": [ + (["27414345", + "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1 - 9 / 321), + (["27414345", + "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "173381935"], 9 / 321), + ], + "27414342#0": [ + (["27414342#0", + "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1 - 20 / 421), + (["27414342#0", + "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "173381935"], 20 / 421), + ], + "27414342#1-AddedOnRampEdge": [ + (["27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 0.5), + (["27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "173381935"], 0.5), + ], + + # Off-ramp + "173381935": [ + (["173381935"], 1), + ], + }) + return rts diff --git a/flow/networks/ring.py b/flow/networks/ring.py index de4d17503..ceef22a78 100755 --- a/flow/networks/ring.py +++ b/flow/networks/ring.py @@ -37,7 +37,7 @@ class RingNetwork(Network): >>> from flow.core.params import NetParams >>> from flow.core.params import VehicleParams >>> from flow.core.params import InitialConfig - >>> from flow.scenarios import RingNetwork + >>> from flow.networks import RingNetwork >>> >>> network = RingNetwork( >>> name='ring_road', diff --git a/tests/fast_tests/test_scenarios.py b/tests/fast_tests/test_scenarios.py index d72a50b17..5fccdcb3b 100644 --- a/tests/fast_tests/test_scenarios.py +++ b/tests/fast_tests/test_scenarios.py @@ -5,8 +5,11 @@ from flow.networks import BottleneckNetwork, FigureEightNetwork, \ TrafficLightGridNetwork, HighwayNetwork, RingNetwork, MergeNetwork, \ MiniCityNetwork, MultiRingNetwork +from flow.networks import I210SubNetwork from tests.setup_scripts import highway_exp_setup +import flow.config as config + __all__ = [ "MultiRingNetwork", "MiniCityNetwork" ] @@ -97,7 +100,8 @@ def test_additional_net_params(self): "speed_limit": 30, "num_edges": 1, "use_ghost_edge": False, - "ghost_speed_limit": 25 + "ghost_speed_limit": 25, + "boundary_cell_length": 300, } ) ) @@ -116,7 +120,8 @@ def test_ghost_edge(self): "speed_limit": 30, "num_edges": 1, "use_ghost_edge": False, - "ghost_speed_limit": 25 + "ghost_speed_limit": 25, + "boundary_cell_length": 300, }) ) env.reset() @@ -131,7 +136,7 @@ def test_ghost_edge(self): self.assertEqual(env.k.network.speed_limit("highway_0"), 30) # =================================================================== # - # With a ghost edge # + # With a ghost edge (300m, 25m/s) # # =================================================================== # # create the 
network
@@ -142,13 +147,14 @@ def test_ghost_edge(self):
                 "speed_limit": 30,
                 "num_edges": 1,
                 "use_ghost_edge": True,
-                "ghost_speed_limit": 25
+                "ghost_speed_limit": 25,
+                "boundary_cell_length": 300,
             })
         )
         env.reset()
 
         # check the network length
-        self.assertEqual(env.k.network.length(), 1500.1)
+        self.assertEqual(env.k.network.length(), 1300.1)
 
         # check the edge list
         self.assertEqual(env.k.network.get_edge_list(),
@@ -158,6 +164,35 @@ def test_ghost_edge(self):
         self.assertEqual(env.k.network.speed_limit("highway_0"), 30)
         self.assertEqual(env.k.network.speed_limit("highway_end"), 25)
 
+        # =================================================================== #
+        #                 With a ghost edge (500m, 10m/s)                     #
+        # =================================================================== #
+
+        # create the network
+        env, _, _ = highway_exp_setup(
+            net_params=NetParams(additional_params={
+                "length": 1000,
+                "lanes": 4,
+                "speed_limit": 30,
+                "num_edges": 1,
+                "use_ghost_edge": True,
+                "ghost_speed_limit": 10,
+                "boundary_cell_length": 500,
+            })
+        )
+        env.reset()
+
+        # check the network length
+        self.assertEqual(env.k.network.length(), 1500.1)
+
+        # check the edge list
+        self.assertEqual(env.k.network.get_edge_list(),
+                         ["highway_0", "highway_end"])
+
+        # check the speed limits of the edges
+        self.assertEqual(env.k.network.speed_limit("highway_0"), 30)
+        self.assertEqual(env.k.network.speed_limit("highway_end"), 10)
+
 
 class TestRingNetwork(unittest.TestCase):
 
@@ -219,6 +254,150 @@ def test_additional_net_params(self):
         )
 
 
+class TestI210SubNetwork(unittest.TestCase):
+
+    """Tests I210SubNetwork in flow/networks/i210_subnetwork.py."""
+
+    def test_additional_net_params(self):
+        """Ensures that not returning the correct params leads to an error."""
+        self.assertTrue(
+            test_additional_params(
+                network_class=I210SubNetwork,
+                additional_params={
+                    "on_ramp": False,
+                    "ghost_edge": False,
+                }
+            )
+        )
+
+    def test_specify_routes(self):
+        """Validates that the routes are properly specified for the network.
+
+        This is done simply by checking the initial edges that routes are
+        specified from, which alternate based on the choice of network
+        configuration.
+
+        This method tests the routes for the following cases:
+
+        1. on_ramp = False, ghost_edge = False
+        2. on_ramp = True, ghost_edge = False
+        3. on_ramp = False, ghost_edge = True
+        4.
on_ramp = True, ghost_edge = True
+        """
+        # test case 1
+        network = I210SubNetwork(
+            name='test-3',
+            vehicles=VehicleParams(),
+            net_params=NetParams(
+                template=os.path.join(
+                    config.PROJECT_PATH,
+                    "examples/exp_configs/templates/sumo/test2.net.xml"
+                ),
+                additional_params={
+                    "on_ramp": False,
+                    "ghost_edge": False,
+                },
+            ),
+        )
+
+        self.assertEqual(
+            ['119257914'],
+            sorted(list(network.specify_routes(network.net_params).keys()))
+        )
+
+        del network
+
+        # test case 2
+        network = I210SubNetwork(
+            name='test-3',
+            vehicles=VehicleParams(),
+            net_params=NetParams(
+                template=os.path.join(
+                    config.PROJECT_PATH,
+                    "examples/exp_configs/templates/sumo/test2.net.xml"
+                ),
+                additional_params={
+                    "on_ramp": True,
+                    "ghost_edge": False,
+                },
+            ),
+        )
+
+        self.assertEqual(
+            ['119257908#0',
+             '119257908#1',
+             '119257908#1-AddedOffRampEdge',
+             '119257908#1-AddedOnRampEdge',
+             '119257908#2',
+             '119257908#3',
+             '119257914',
+             '173381935',
+             '27414342#0',
+             '27414342#1-AddedOnRampEdge',
+             '27414345'],
+            sorted(list(network.specify_routes(network.net_params).keys()))
+        )
+
+        del network
+
+        # test case 3
+        network = I210SubNetwork(
+            name='test-3',
+            vehicles=VehicleParams(),
+            net_params=NetParams(
+                template=os.path.join(
+                    config.PROJECT_PATH,
+                    "examples/exp_configs/templates/sumo/test2.net.xml"
+                ),
+                additional_params={
+                    "on_ramp": False,
+                    "ghost_edge": True,
+                },
+            ),
+        )
+
+        self.assertEqual(
+            ['119257914', 'ghost0'],
+            sorted(list(network.specify_routes(network.net_params).keys()))
+        )
+
+        del network
+
+        # test case 4
+        network = I210SubNetwork(
+            name='test-3',
+            vehicles=VehicleParams(),
+            net_params=NetParams(
+                template=os.path.join(
+                    config.PROJECT_PATH,
+                    "examples/exp_configs/templates/sumo/test2.net.xml"
+                ),
+                additional_params={
+                    "on_ramp": True,
+                    "ghost_edge": True,
+                },
+            ),
+        )
+
+        self.assertEqual(
+            ['119257908#0',
+             '119257908#1',
+             '119257908#1-AddedOffRampEdge',
+             '119257908#1-AddedOnRampEdge',
+             '119257908#2',
+             '119257908#3',
+             '119257914',
+             '173381935',
+             '27414342#0',
+             '27414342#1-AddedOnRampEdge',
+             '27414345',
+             'ghost0'],
+            sorted(list(network.specify_routes(network.net_params).keys()))
+        )
+
+        del network
+
+
 ###############################################################################
 #                              Utility methods                                #
 ###############################################################################
 
diff --git a/tests/fast_tests/test_vehicles.py b/tests/fast_tests/test_vehicles.py
index 1ae2d1cf0..7e1405007 100644
--- a/tests/fast_tests/test_vehicles.py
+++ b/tests/fast_tests/test_vehicles.py
@@ -261,6 +261,7 @@ def test_no_junctions_highway(self):
             "num_edges": 1,
             "use_ghost_edge": False,
             "ghost_speed_limit": 25,
+            "boundary_cell_length": 300,
         }
         net_params = NetParams(additional_params=additional_net_params)
         vehicles = VehicleParams()
@@ -335,6 +336,7 @@ def test_no_junctions_highway(self):
             "num_edges": 1,
             "use_ghost_edge": False,
             "ghost_speed_limit": 25,
+            "boundary_cell_length": 300,
         }
         net_params = NetParams(additional_params=additional_net_params)
         vehicles = VehicleParams()
@@ -405,6 +407,7 @@ def test_no_junctions_highway(self):
             "num_edges": 3,
             "use_ghost_edge": False,
             "ghost_speed_limit": 25,
+            "boundary_cell_length": 300,
         }
         net_params = NetParams(additional_params=additional_net_params)
         vehicles = VehicleParams()
@@ -474,6 +477,7 @@ def test_no_junctions_highway(self):
             "num_edges": 3,
             "use_ghost_edge": False,
             "ghost_speed_limit": 25,
+            "boundary_cell_length": 300,
         }
         net_params = NetParams(additional_params=additional_net_params)
         vehicles =
VehicleParams() diff --git a/tests/setup_scripts.py b/tests/setup_scripts.py index ac88d2e42..343bad906 100644 --- a/tests/setup_scripts.py +++ b/tests/setup_scripts.py @@ -346,6 +346,7 @@ def highway_exp_setup(sim_params=None, "num_edges": 1, "use_ghost_edge": False, "ghost_speed_limit": 25, + "boundary_cell_length": 300, } net_params = NetParams(additional_params=additional_net_params) From d9470cbff0845c84d8c78699bdc3ba6ba57ea9f0 Mon Sep 17 00:00:00 2001 From: AboudyKreidieh Date: Mon, 15 Jun 2020 12:12:08 -0700 Subject: [PATCH 251/438] some cleanup --- .../exp_configs/non_rl/i210_subnetwork.py | 164 ++++-------------- .../rl/multiagent/multiagent_i210.py | 6 +- 2 files changed, 36 insertions(+), 134 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index d4f0800ad..8b572d39f 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -28,7 +28,7 @@ # whether to include the downstream slow-down edge in the network WANT_DOWNSTREAM_BOUNDARY = True # whether to include vehicles on the on-ramp -ON_RAMP = True +ON_RAMP = False # the inflow rate of vehicles (in veh/hr) INFLOW_RATE = 2050 # the speed of inflowing vehicles from the main edge (in m/s) @@ -42,59 +42,6 @@ # steps to run before follower-stopper is allowed to take control WARMUP_STEPS = 600 -highway_start_edge = "ghost0" if WANT_GHOST_CELL else "119257914" -accel_data = (IDMController, {'a': 1.3, 'b': 2.0, 'noise': 0.3}) - -vehicles = VehicleParams() - -inflow = InFlows() - -if ON_RAMP: - vehicles.add( - "human", - num_vehicles=0, - color="white", - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=accel_data, - routing_controller=(I210Router, {}) - ) - if PENETRATION_RATE > 0.0: - vehicles.add( - "av", - num_vehicles=0, - color="red", - acceleration_controller=(FollowerStopper, { - "v_des": V_DES, - "no_control_edges": ["ghost0", "119257908#3"] - }), - routing_controller=(I210Router, {}) - ) - - # inflow.add( - # veh_type="human", - # edge=highway_start_edge, - # vehs_per_hour=inflow_rate, - # departLane="best", - # departSpeed=inflow_speed) - - lane_list = ['0', '1', '2', '3', '4'] - - for lane in lane_list: - inflow.add( - veh_type="human", - edge=highway_start_edge, - vehs_per_hour=int(INFLOW_RATE * (1 - PENETRATION_RATE)), - departLane=lane, - departSpeed=INFLOW_SPEED) - - inflow.add( - veh_type="human", - edge="27414345", - vehs_per_hour=int(500 * (1 - PENETRATION_RATE)) - ) - # =========================================================================== # # Specify the path to the network template. 
# # =========================================================================== # @@ -124,6 +71,7 @@ # =========================================================================== # vehicles = VehicleParams() + vehicles.add( "human", num_vehicles=0, @@ -138,96 +86,54 @@ routing_controller=(I210Router, {}) if ON_RAMP else None, ) +vehicles.add( + "av", + num_vehicles=0, + color="red", + acceleration_controller=(FollowerStopper, { + "v_des": V_DES, + "no_control_edges": ["ghost0", "119257908#3"] + }), + routing_controller=(I210Router, {}) +) + inflow = InFlows() + # main highway -inflow.add( - veh_type="human", - edge="ghost0" if WANT_GHOST_CELL else "119257914", - vehs_per_hour=INFLOW_RATE, - departLane="best", - departSpeed=INFLOW_SPEED) +highway_start_edge = "ghost0" if WANT_GHOST_CELL else "119257914" + +for lane in [0, 1, 2, 3, 4]: + inflow.add( + veh_type="human", + edge=highway_start_edge, + vehs_per_hour=INFLOW_RATE * (1 - PENETRATION_RATE), + departLane=lane, + departSpeed=INFLOW_SPEED) + + if PENETRATION_RATE > 0.0: + inflow.add( + veh_type="av", + edge=highway_start_edge, + vehs_per_hour=INFLOW_RATE * PENETRATION_RATE, + departLane=lane, + departSpeed=INFLOW_SPEED) + # on ramp if ON_RAMP: inflow.add( veh_type="human", edge="27414345", - vehs_per_hour=500, - departLane="random", - departSpeed=10) + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), + departSpeed=10, + ) if PENETRATION_RATE > 0.0: - for lane in [0, 1, 2, 3, 4]: - inflow.add( - veh_type="av", - edge=highway_start_edge, - vehs_per_hour=int(INFLOW_RATE * PENETRATION_RATE), - departLane=lane, - departSpeed=INFLOW_SPEED) - inflow.add( veh_type="av", edge="27414345", vehs_per_hour=int(500 * PENETRATION_RATE), departLane="random", departSpeed=10) - inflow.add( - veh_type="av", - edge="27414342#0", - vehs_per_hour=int(500 * PENETRATION_RATE), - departLane="random", - departSpeed=10) - -else: - # create the base vehicle type that will be used for inflows - vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=accel_data, - ) - if PENETRATION_RATE > 0.0: - vehicles.add( - "av", - color="red", - num_vehicles=0, - acceleration_controller=(FollowerStopper, { - "v_des": V_DES, - "no_control_edges": ["ghost0", "119257908#3"] - }), - ) - - # If you want to turn off the fail safes uncomment this: - - # vehicles.add( - # 'human', - # num_vehicles=0, - # lane_change_params=SumoLaneChangeParams( - # lane_change_mode='strategic', - # ), - # acceleration_controller=accel_data, - # car_following_params=SumoCarFollowingParams(speed_mode='19') - # ) - - lane_list = ['0', '1', '2', '3', '4'] - - for lane in lane_list: - inflow.add( - veh_type="human", - edge=highway_start_edge, - vehs_per_hour=int(INFLOW_RATE * (1 - PENETRATION_RATE)), - departLane=lane, - departSpeed=INFLOW_SPEED) - - if PENETRATION_RATE > 0.0: - for lane in lane_list: - inflow.add( - veh_type="av", - edge=highway_start_edge, - vehs_per_hour=int(INFLOW_RATE * PENETRATION_RATE), - departLane=lane, - departSpeed=INFLOW_SPEED) # =========================================================================== # # Generate the flow_params dict with all relevant simulation information. 
# diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index fe5c71f63..028e5bc7c 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -24,7 +24,7 @@ from flow.utils.registry import make_create_env # SET UP PARAMETERS FOR THE SIMULATION -WANT_GHOST_CELL = False +WANT_GHOST_CELL = True # WANT_DOWNSTREAM_BOUNDARY = True ON_RAMP = False PENETRATION_RATE = 0.10 @@ -51,10 +51,6 @@ edges_distribution = EDGES_DISTRIBUTION highway_start_edge = "119257914" -# TODO: temporary fix -edges_distribution = EDGES_DISTRIBUTION.copy() -edges_distribution.remove("ghost0") - # SET UP PARAMETERS FOR THE ENVIRONMENT additional_env_params = ADDITIONAL_ENV_PARAMS.copy() additional_env_params.update({ From 1f6ceee2208b0af7b9458157beecc7c225fc0f04 Mon Sep 17 00:00:00 2001 From: AboudyKreidieh Date: Mon, 15 Jun 2020 12:22:04 -0700 Subject: [PATCH 252/438] removed unused simulation --- examples/exp_configs/non_rl/straight_road.py | 134 ------------------- 1 file changed, 134 deletions(-) delete mode 100644 examples/exp_configs/non_rl/straight_road.py diff --git a/examples/exp_configs/non_rl/straight_road.py b/examples/exp_configs/non_rl/straight_road.py deleted file mode 100644 index 1669bb896..000000000 --- a/examples/exp_configs/non_rl/straight_road.py +++ /dev/null @@ -1,134 +0,0 @@ -"""Multi-agent highway with ramps example. - -Trains a non-constant number of agents, all sharing the same policy, on the -highway with ramps network. -""" -import numpy as np - -from flow.controllers import IDMController -from flow.controllers.velocity_controllers import FollowerStopper -from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ - VehicleParams, SumoParams, SumoLaneChangeParams -from flow.core.rewards import miles_per_gallon -from flow.networks import HighwayNetwork -from flow.envs import TestEnv -from flow.networks.highway import ADDITIONAL_NET_PARAMS - - -# SET UP PARAMETERS FOR THE SIMULATION - -# number of steps per rollout -HORIZON = 2000 - -# inflow rate on the highway in vehicles per hour -HIGHWAY_INFLOW_RATE = 10800 / 5 -# percentage of autonomous vehicles compared to human vehicles on highway -PENETRATION_RATE = 0.0 - - -# SET UP PARAMETERS FOR THE NETWORK - -additional_net_params = ADDITIONAL_NET_PARAMS.copy() -additional_net_params.update({ - # length of the highway - "length": 2000, - # number of lanes - "lanes": 1, - # speed limit for all edges - "speed_limit": 30, - # number of edges to divide the highway into - "num_edges": 2 -}) - -# CREATE VEHICLE TYPES AND INFLOWS - -vehicles = VehicleParams() -inflows = InFlows() - -# human vehicles -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), -) - -if PENETRATION_RATE > 0.0: - vehicles.add( - "av", - num_vehicles=0, - acceleration_controller=(FollowerStopper, {"v_des": 12.0}), - ) - -# add human vehicles on the highway -# add human vehicles on the highway -inflows.add( - veh_type="human", - edge="highway_0", - vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), - depart_lane="free", - depart_speed="23", - name="idm_highway_inflow") - -if PENETRATION_RATE > 0.0: - inflows.add( - veh_type="av", - edge="highway_0", - vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), - depart_lane="free", - depart_speed="23", - 
name="av_highway_inflow") - -# SET UP FLOW PARAMETERS - -flow_params = dict( - # name of the experiment - exp_tag='multiagent_highway', - - # name of the flow environment the experiment is running on - env_name=TestEnv, - - # name of the network class the experiment is running on - network=HighwayNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - warmup_steps=400, - sims_per_step=1, - ), - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.5, - render=False, - restart_instance=False - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflows, - additional_params=additional_net_params - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig(), -) - -custom_callables = { - "avg_speed": lambda env: np.nan_to_num(np.mean( - env.k.vehicle.get_speed(env.k.vehicle.get_ids_by_edge(['highway_0', 'highway_1'])))), - "mpg": lambda env: miles_per_gallon(env, env.k.vehicle.get_ids(), gain=1.0) - -} From 9e16b19092a14e0912cefd687a209e5aaba79a3f Mon Sep 17 00:00:00 2001 From: Aboudy Kreidieh Date: Mon, 15 Jun 2020 21:08:20 -0700 Subject: [PATCH 253/438] I210 updated (#970) * updated i210 simulation * increased the time horizon * added edge starts --- .../exp_configs/non_rl/i210_subnetwork.py | 27 +++--- .../i210_with_ghost_cell_with_downstream.xml | 14 +-- flow/networks/i210_subnetwork.py | 88 +++++++++++++++++++ 3 files changed, 112 insertions(+), 17 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index eda037068..b0c58c894 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -24,11 +24,15 @@ # whether to include the downstream slow-down edge in the network WANT_DOWNSTREAM_BOUNDARY = True # whether to include vehicles on the on-ramp -ON_RAMP = True +ON_RAMP = False # the inflow rate of vehicles (in veh/hr) -INFLOW_RATE = 5 * 2215 +INFLOW_RATE = 2050 # the speed of inflowing vehicles from the main edge (in m/s) -INFLOW_SPEED = 24.1 +INFLOW_SPEED = 25.5 +# horizon over which to run the env +HORIZON = 1500 +# steps to run before follower-stopper is allowed to take control +WARMUP_STEPS = 600 # =========================================================================== # # Specify the path to the network template. 
#
@@ -75,12 +79,13 @@
 
 inflow = InFlows()
 # main highway
-inflow.add(
-    veh_type="human",
-    edge="ghost0" if WANT_GHOST_CELL else "119257914",
-    vehs_per_hour=INFLOW_RATE,
-    departLane="best",
-    departSpeed=INFLOW_SPEED)
+for lane in [0, 1, 2, 3, 4]:
+    inflow.add(
+        veh_type="human",
+        edge="ghost0" if WANT_GHOST_CELL else "119257914",
+        vehs_per_hour=INFLOW_RATE,
+        departLane=lane,
+        departSpeed=INFLOW_SPEED)
 # on ramp
 if ON_RAMP:
     inflow.add(
@@ -123,7 +128,9 @@
 
     # environment related parameters (see flow.core.params.EnvParams)
     env=EnvParams(
-        horizon=10000,
+        horizon=HORIZON,
+        warmup_steps=WARMUP_STEPS,
+        sims_per_step=3
     ),
 
     # network-related parameters (see flow.core.params.NetParams and the
diff --git a/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml
index 10d4d8d45..ee508b730 100644
--- a/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml
+++ b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml
@@ -3501,11 +3501,11 @@
 
-    
-    
-    
-    
-    
+    
+    
+    
+    
+    
 
@@ -4727,8 +4727,8 @@
 
-    
-    
+    
+    
 
diff --git a/flow/networks/i210_subnetwork.py b/flow/networks/i210_subnetwork.py
index b86a0dc8a..f4315b07f 100644
--- a/flow/networks/i210_subnetwork.py
+++ b/flow/networks/i210_subnetwork.py
@@ -65,6 +65,26 @@ def __init__(self,
             if p not in net_params.additional_params:
                 raise KeyError('Network parameter "{}" not supplied'.format(p))
 
+        # The length of each edge and junction is a fixed term that can be
+        # found in the xml file.
+        self.length_with_ghost_edge = [
+            ("ghost0", 573.08),
+            (":300944378_0", 0.30),
+            ("119257914", 61.28),
+            (":300944379_0", 0.31),
+            ("119257908#0", 696.97),
+            (":300944436_0", 2.87),
+            ("119257908#1-AddedOnRampEdge", 97.20),
+            (":119257908#1-AddedOnRampNode_0", 3.24),
+            ("119257908#1", 239.68),
+            (":119257908#1-AddedOffRampNode_0", 3.24),
+            ("119257908#1-AddedOffRampEdge", 98.50),
+            (":1686591010_1", 5.46),
+            ("119257908#2", 576.61),
+            (":1842086610_1", 4.53),
+            ("119257908#3", 17.49),
+        ]
+
         super(I210SubNetwork, self).__init__(
             name=name,
             vehicles=vehicles,
@@ -196,3 +216,71 @@ def specify_routes(self, net_params):
             })
 
         return rts
+
+    def specify_edge_starts(self):
+        """See parent class."""
+        if self.net_params.additional_params["ghost_edge"]:
+            # Collect the names of all the edges.
+            edge_names = [
+                e[0] for e in self.length_with_ghost_edge
+                if not e[0].startswith(":")
+            ]
+
+            edge_starts = []
+            for edge in edge_names:
+                # Find the position of the edge in the list of tuples.
+                edge_pos = next(
+                    i for i in range(len(self.length_with_ghost_edge))
+                    if self.length_with_ghost_edge[i][0] == edge
+                )
+
+                # Sum of lengths until the edge is reached to compute the
+                # starting position of the edge.
+                edge_starts.append((
+                    edge,
+                    sum(e[1] for e in self.length_with_ghost_edge[:edge_pos])
+                ))
+
+        elif self.net_params.additional_params["on_ramp"]:
+            # TODO: this will be incorporated in the future, if needed.
+            edge_starts = []
+
+        else:
+            # TODO: this will be incorporated in the future, if needed.
+            edge_starts = []
+
+        return edge_starts
+
+    def specify_internal_edge_starts(self):
+        """See parent class."""
+        if self.net_params.additional_params["ghost_edge"]:
+            # Collect the names of all the junctions.
+            edge_names = [
+                e[0] for e in self.length_with_ghost_edge
+                if e[0].startswith(":")
+            ]
+
+            edge_starts = []
+            for edge in edge_names:
+                # Find the position of the edge in the list of tuples.
+                edge_pos = next(
+                    i for i in range(len(self.length_with_ghost_edge))
+                    if self.length_with_ghost_edge[i][0] == edge
+                )
+
+                # Sum of lengths until the edge is reached to compute the
+                # starting position of the edge.
+                edge_starts.append((
+                    edge,
+                    sum(e[1] for e in self.length_with_ghost_edge[:edge_pos])
+                ))
+
+        elif self.net_params.additional_params["on_ramp"]:
+            # TODO: this will be incorporated in the future, if needed.
+            edge_starts = []
+
+        else:
+            # TODO: this will be incorporated in the future, if needed.
+            edge_starts = []
+
+        return edge_starts
From 7fd11b172e4a6e964f9cad3a41757e97960b72cf Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Mon, 15 Jun 2020 22:03:14 -0700
Subject: [PATCH 254/438] Add queries for safety metrics reporting

---
 flow/data_pipeline/query.py | 66 +++++++++++++++++++++++++++++++++++--
 1 file changed, 64 insertions(+), 2 deletions(-)

diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index 184c7217a..9d701eec1 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -9,6 +9,9 @@
         "POWER_DEMAND_MODEL_DENOISED_ACCEL",
         "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"
     ],
+    "fact_safety_metrics": [
+        "FACT_SAFETY_METRICS"
+    ],
     "fact_network_throughput_agg": [
         "FACT_NETWORK_THROUGHPUT_AGG"
     ],
@@ -17,6 +20,22 @@
     ]
     },
     "fact_energy_trace": {},
+    "fact_safety_metrics": {
+        "fact_safety_metrics_agg": [
+            "FACT_SAFETY_METRICS_AGG"
+        ]
+    },
+    # @brent: are these needed? Is there a race condition here that may break things?
+    # "fact_safety_metrics_agg": {
+    #     "leaderboard_chart": [
+    #         "LEADERBOARD_CHART"
+    #     ]
+    # },
+    # "fact_network_throughput_agg": {
+    #     "leaderboard_chart": [
+    #         "LEADERBOARD_CHART"
+    #     ]
+    # }
     "POWER_DEMAND_MODEL_DENOISED_ACCEL": {
         "fact_vehicle_fuel_efficiency_agg": [
             "FACT_VEHICLE_FUEL_EFFICIENCY_AGG"
@@ -50,6 +69,8 @@
 tables = [
     "fact_vehicle_trace",
     "fact_energy_trace",
+    "fact_safety_metrics",
+    "fact_safety_metrics_agg",
     "fact_network_throughput_agg",
     "fact_network_inflows_outflows",
     "fact_vehicle_fuel_efficiency_agg",
@@ -179,6 +200,39 @@ class QueryStrings(Enum):
                                    'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL',
                                    'denoised_speed_cte'))
 
+    FACT_SAFETY_METRICS = """
+        SELECT
+            vt.id,
+            vt.time_step,
+            COALESCE((
+                value_lower_left*(headway_upper-headway)*(rel_speed_upper-leader_rel_speed) +
+                value_lower_right*(headway-headway_lower)*(rel_speed_upper-leader_rel_speed) +
+                value_upper_left*(headway_upper-headway)*(leader_rel_speed-rel_speed_lower) +
+                value_upper_right*(headway-headway_lower)*(leader_rel_speed-rel_speed_lower)
+            ) / ((headway_upper-headway_lower)*(rel_speed_upper-rel_speed_lower)), 200) AS safety_value,
+            vt.source_id
+        FROM fact_vehicle_trace vt
+        LEFT OUTER JOIN fact_safety_matrix sm ON 1 = 1
+            AND vt.leader_rel_speed BETWEEN sm.rel_speed_lower AND sm.rel_speed_upper
+            AND vt.headway BETWEEN sm.headway_lower AND sm.headway_upper
+        WHERE 1 = 1
+            AND vt.date = \'{{date}}\'
+            AND vt.partition_name = \'{{partition}}\'
+        ;
+    """
+
+    FACT_SAFETY_METRICS_AGG = """
+        SELECT
+            source_id,
+            SUM(CASE WHEN safety_value > 0 THEN 1 ELSE 0) * 100 / COUNT() safety_rate,
+            MAX(safety_value) AS safety_value_max
+        FROM fact_safety_metrics
+        WHERE 1 = 1
+            AND date = \'{{date}}\'
+            AND partition_name = \'{{partition}}\'
+        GROUP BY 1
+    """
+
     FACT_NETWORK_THROUGHPUT_AGG = """
         WITH min_time AS (
             SELECT
@@ -272,13 +326,19 @@ class QueryStrings(Enum):
             e.energy_model_id,
             e.efficiency_meters_per_joules,
             19972 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon,
-            t.throughput_per_hour
+            t.throughput_per_hour,
+
s.safety_rate, + s.safety_value_max FROM fact_network_throughput_agg AS t JOIN fact_network_fuel_efficiency_agg AS e ON 1 = 1 AND e.date = \'{date}\' AND e.partition_name = \'{partition}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' AND t.source_id = e.source_id AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + JOIN fact_safety_metrics_agg AS s ON 1 = 1 + AND s.dat = \'{date}\' + AND s.partition_name = \'{partition}_FACT_SAFETY_METRICS_AGG\' + AND t.source_id = s.source_id WHERE 1 = 1 AND t.date = \'{date}\' AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' @@ -536,7 +596,9 @@ class QueryStrings(Enum): l.energy_model_id, l.efficiency_meters_per_joules, l.efficiency_miles_per_gallon, - l.throughput_per_hour + l.throughput_per_hour, + l.safety_rate, + l.safety_value_max FROM leaderboard_chart AS l, metadata_table AS m WHERE 1 = 1 AND l.source_id = m.source_id From c7937ffc01b589d39ce949420d88b1e6fbcb8db2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 15 Jun 2020 22:04:52 -0700 Subject: [PATCH 255/438] fix typo --- flow/data_pipeline/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 9d701eec1..1ecaffb30 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -336,7 +336,7 @@ class QueryStrings(Enum): AND t.source_id = e.source_id AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' JOIN fact_safety_metrics_agg AS s ON 1 = 1 - AND s.dat = \'{date}\' + AND s.date = \'{date}\' AND s.partition_name = \'{partition}_FACT_SAFETY_METRICS_AGG\' AND t.source_id = s.source_id WHERE 1 = 1 From d523b743d9a0596169650f7397e7b352355f7d77 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 15 Jun 2020 22:07:10 -0700 Subject: [PATCH 256/438] filter warmup steps and ghost edges from safety calculation --- flow/data_pipeline/query.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 1ecaffb30..bd8cc8bc9 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -218,6 +218,8 @@ class QueryStrings(Enum): WHERE 1 = 1 AND vt.date = \'{{date}}\' AND vt.partition_name = \'{{partition}}\' + AND vt.time_step >= {start_filter} + AND vt.{loc_filter} ; """ From 946938a9704e99a0cfc1a2958ca6a1043c2dfbdd Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 15 Jun 2020 22:11:50 -0700 Subject: [PATCH 257/438] invert safety_rate definition --- flow/data_pipeline/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index bd8cc8bc9..bc406ae17 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -226,7 +226,7 @@ class QueryStrings(Enum): FACT_SAFETY_METRICS_AGG = """ SELECT source_id, - SUM(CASE WHEN safety_value > 0 THEN 1 ELSE 0) * 100 / COUNT() safety_rate, + SUM(CASE WHEN safety_value < 0 THEN 1 ELSE 0) * 100 / COUNT() safety_rate, MAX(safety_value) AS safety_value_max FROM fact_safety_metrics WHERE 1 = 1 From 5ac982892a8600bd9b371b2e0f77e191836bdb89 Mon Sep 17 00:00:00 2001 From: Aboudy Kreidieh Date: Thu, 18 Jun 2020 11:21:37 -0700 Subject: [PATCH 258/438] cleanup to the multi-agent trainer (#971) - make the multi-agent trainer pretty - fix minor bug in Experiment.py where gen_emission had to be set or else it errored out --- .../rl/multiagent/multiagent_i210.py | 259 ++++++++---------- flow/core/experiment.py | 7 +- 2 files changed, 115 insertions(+), 151 deletions(-) diff --git 
a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 96fc78cd2..e5b5b5d81 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -18,51 +18,70 @@ from flow.core.params import InFlows from flow.core.params import VehicleParams from flow.core.params import SumoParams +from flow.core.params import SumoCarFollowingParams from flow.core.params import SumoLaneChangeParams from flow.core.rewards import energy_consumption from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS from flow.utils.registry import make_create_env +from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# SET UP PARAMETERS FOR THE SIMULATION -WANT_GHOST_CELL = True -# WANT_DOWNSTREAM_BOUNDARY = True +# =========================================================================== # +# Specify some configurable constants. # +# =========================================================================== # + +# whether to include the downstream slow-down edge in the network as well as a ghost cell at the upstream edge +WANT_BOUNDARY_CONDITIONS = True +# whether to include vehicles on the on-ramp ON_RAMP = False +# the inflow rate of vehicles (in veh/hr) +INFLOW_RATE = 2050 +# the speed of inflowing vehicles from the main edge (in m/s) +INFLOW_SPEED = 25.5 +# fraction of vehicles that are RL vehicles. 0.10 corresponds to 10% PENETRATION_RATE = 0.10 -V_DES = 7.0 -HORIZON = 1000 +# desired speed of the vehicles in the network +V_DES = 5.0 +# horizon over which to run the env +HORIZON = 1500 +# steps to run before follower-stopper is allowed to take control WARMUP_STEPS = 600 - -inflow_rate = 2050 -inflow_speed = 25.5 - -accel_data = (IDMController, {'a': 1.3, 'b': 2.0, 'noise': 0.3}) - -VEH_PER_HOUR_BASE_119257914 = 10800 -VEH_PER_HOUR_BASE_27414345 = 321 -VEH_PER_HOUR_BASE_27414342 = 421 - -if WANT_GHOST_CELL: - from flow.networks.i210_subnetwork_ghost_cell import I210SubNetworkGhostCell, EDGES_DISTRIBUTION - - edges_distribution = EDGES_DISTRIBUTION - highway_start_edge = 'ghost0' +# whether to turn off the fail safes for the human-driven vehicles +ALLOW_COLLISIONS = False + +# =========================================================================== # +# Specify the path to the network template. # +# =========================================================================== # + +if WANT_BOUNDARY_CONDITIONS: + NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_" + "downstream.xml") else: - from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION - edges_distribution = EDGES_DISTRIBUTION - highway_start_edge = "119257914" + NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/test2.net.xml") +edges_distribution = EDGES_DISTRIBUTION.copy() + +# =========================================================================== # +# Set up parameters for the environment. # +# =========================================================================== # -# SET UP PARAMETERS FOR THE ENVIRONMENT additional_env_params = ADDITIONAL_ENV_PARAMS.copy() additional_env_params.update({ 'max_accel': 2.6, 'max_decel': 4.5, - # configure the observation space. Look at the I210MultiEnv class for more info. + + # configure the observation space. Look at the I210MultiEnv class for more + # info. 
'lead_obs': True, # whether to add in a reward for the speed of nearby vehicles "local_reward": True, - # whether to use the MPG reward. Otherwise, defaults to a target velocity reward + # whether to use the MPG reward. Otherwise, defaults to a target velocity + # reward "mpg_reward": False, - # whether to use the MPJ reward. Otherwise, defaults to a target velocity reward + # whether to use the MPJ reward. Otherwise, defaults to a target velocity + # reward "mpj_reward": False, # how many vehicles to look back for the MPG reward "look_back_length": 1, @@ -74,7 +93,8 @@ # which edges we shouldn't apply control on "no_control_edges": ["ghost0", "119257908#3"], - # whether to add a slight reward for opening up a gap that will be annealed out N iterations in + # whether to add a slight reward for opening up a gap that will be annealed + # out N iterations in "headway_curriculum": False, # how many timesteps to anneal the headway curriculum over "headway_curriculum_iters": 100, @@ -96,144 +116,85 @@ "penalize_accel": True }) -# CREATE VEHICLE TYPES AND INFLOWS -# no vehicles in the network -vehicles = VehicleParams() - -inflow = InFlows() +# =========================================================================== # +# Specify vehicle-specific information and inflows. # +# =========================================================================== # +# create the base vehicle types that will be used for inflows +vehicles = VehicleParams() if ON_RAMP: vehicles.add( "human", num_vehicles=0, - color="white", + routing_controller=(I210Router, {}), + acceleration_controller=(IDMController, { + 'a': 1.3, + 'b': 2.0, + 'noise': 0.3 + }), + car_following_params=SumoCarFollowingParams( + speed_mode=19 if ALLOW_COLLISIONS else 'right_of_way' + ), lane_change_params=SumoLaneChangeParams( lane_change_mode="strategic", ), - acceleration_controller=accel_data, - routing_controller=(I210Router, {}) ) - if PENETRATION_RATE > 0.0: - vehicles.add( - "av", - num_vehicles=0, - color="red", - acceleration_controller=(RLController, {}), - routing_controller=(I210Router, {}) - ) - - # inflow.add( - # veh_type="human", - # edge=highway_start_edge, - # vehs_per_hour=inflow_rate, - # departLane="best", - # departSpeed=inflow_speed) - - lane_list = ['0', '1', '2', '3', '4'] - - for lane in lane_list: - inflow.add( - veh_type="human", - edge=highway_start_edge, - vehs_per_hour=int(inflow_rate * (1 - PENETRATION_RATE)), - departLane=lane, - departSpeed=inflow_speed) +else: + vehicles.add( + "human", + num_vehicles=0, + acceleration_controller=(IDMController, { + 'a': 1.3, + 'b': 2.0, + 'noise': 0.3 + }), + car_following_params=SumoCarFollowingParams( + speed_mode=19 if ALLOW_COLLISIONS else 'right_of_way' + ), + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + ) +vehicles.add( + "av", + num_vehicles=0, + acceleration_controller=(RLController, {}), +) +inflow = InFlows() +for lane in [0, 1, 2, 3, 4]: + # Add the inflows from the main highway. 
inflow.add( veh_type="human", - edge="27414345", - vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), - departLane="random", - departSpeed=10) + edge="119257914", + vehs_per_hour=int(INFLOW_RATE * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=INFLOW_SPEED) inflow.add( - veh_type="human", - edge="27414342#0", - vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), - departLane="random", - departSpeed=10) - - if PENETRATION_RATE > 0.0: - for lane in lane_list: - inflow.add( - veh_type="av", - edge=highway_start_edge, - vehs_per_hour=int(inflow_rate * PENETRATION_RATE), - departLane=lane, - departSpeed=inflow_speed) - + veh_type="av", + edge="119257914", + vehs_per_hour=int(INFLOW_RATE * PENETRATION_RATE), + departLane=lane, + departSpeed=INFLOW_SPEED) + + # Add the inflows from the on-ramps. + if ON_RAMP: inflow.add( - veh_type="av", + veh_type="human", edge="27414345", - vehs_per_hour=int(500 * PENETRATION_RATE), + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), departLane="random", departSpeed=10) inflow.add( - veh_type="av", + veh_type="human", edge="27414342#0", - vehs_per_hour=int(500 * PENETRATION_RATE), + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), departLane="random", departSpeed=10) -else: - # create the base vehicle type that will be used for inflows - vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=accel_data, - ) - if PENETRATION_RATE > 0.0: - vehicles.add( - "av", - color="red", - num_vehicles=0, - acceleration_controller=(RLController, {}), - ) - - # If you want to turn off the fail safes uncomment this: - - # vehicles.add( - # 'human', - # num_vehicles=0, - # lane_change_params=SumoLaneChangeParams( - # lane_change_mode='strategic', - # ), - # acceleration_controller=accel_data, - # car_following_params=SumoCarFollowingParams(speed_mode='19') - # ) - - lane_list = ['0', '1', '2', '3', '4'] - - for lane in lane_list: - inflow.add( - veh_type="human", - edge=highway_start_edge, - vehs_per_hour=int(inflow_rate * (1 - PENETRATION_RATE)), - departLane=lane, - departSpeed=inflow_speed) - - if PENETRATION_RATE > 0.0: - for lane in lane_list: - inflow.add( - veh_type="av", - edge=highway_start_edge, - vehs_per_hour=int(inflow_rate * PENETRATION_RATE), - departLane=lane, - departSpeed=inflow_speed) - - -network_xml_file = "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml" - -# network_xml_file = "examples/exp_configs/templates/sumo/i210_with_congestion.xml" - -NET_TEMPLATE = os.path.join(config.PROJECT_PATH, network_xml_file) - -if WANT_GHOST_CELL: - network = I210SubNetworkGhostCell -else: - network = I210SubNetwork +# =========================================================================== # +# Generate the flow_params dict with all relevant simulation information. # +# =========================================================================== # flow_params = dict( # name of the experiment @@ -243,7 +204,7 @@ env_name=I210MultiEnv, # name of the network class the experiment is running on - network=network, + network=I210SubNetwork, # simulator that is used by the experiment simulator='traci', @@ -274,7 +235,7 @@ template=NET_TEMPLATE, additional_params={ "on_ramp": ON_RAMP, - "ghost_edge": WANT_GHOST_CELL + "ghost_edge": WANT_BOUNDARY_CONDITIONS } ), @@ -289,14 +250,16 @@ ), ) -# SET UP RLLIB MULTI-AGENT FEATURES +# =========================================================================== # +# Set up rllib multi-agent features. 
# +# =========================================================================== # create_env, env_name = make_create_env(params=flow_params, version=0) # register as rllib env register_env(env_name, create_env) -# multiagent configuration +# multi-agent configuration test_env = create_env() obs_space = test_env.observation_space act_space = test_env.action_space diff --git a/flow/core/experiment.py b/flow/core/experiment.py index df8992c20..7f5352c25 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -164,8 +164,9 @@ def rl_actions(*_): metadata['is_baseline'].append(str(is_baseline)) dir_path = self.env.sim_params.emission_path - trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) + if not dir_path is None: + trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) + metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) for i in range(num_runs): ret = 0 @@ -189,7 +190,7 @@ def rl_actions(*_): extra_info["run_id"].extend(['run_{}'.format(i)] * len(veh_ids)) # write to disk every 100 steps - if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0: + if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0 and not dir_path is None: write_dict_to_csv(trajectory_table_path, extra_info, not j) extra_info.clear() From 57469374e24d413762f531006ed797758a4e1e60 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 18 Jun 2020 15:34:17 -0700 Subject: [PATCH 259/438] Metadata Configuration (#957) --- .gitignore | 2 + flow/core/experiment.py | 25 +++--- flow/core/util.py | 67 +++++++-------- flow/data_pipeline/README.md | 12 +++ flow/data_pipeline/data_pipeline.py | 45 +++++++++- flow/data_pipeline/lambda_function.py | 4 +- flow/data_pipeline/leaderboard_utils.py | 7 +- flow/data_pipeline/query.py | 108 ++++++++++++++++++------ flow/envs/base.py | 10 +++ flow/visualize/i210_replay.py | 13 +-- flow/visualize/visualizer_rllib.py | 56 ++++++++++++ 11 files changed, 267 insertions(+), 82 deletions(-) create mode 100644 flow/data_pipeline/README.md diff --git a/.gitignore b/.gitignore index 29d788c27..6d9ff7a71 100644 --- a/.gitignore +++ b/.gitignore @@ -111,3 +111,5 @@ flow.ang *.ang.old *.sang +# local configuration file for data pipeline +**/data_pipeline_config diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 7f5352c25..ceb8c7b61 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,6 +1,6 @@ """Contains an experiment class for running simulations.""" from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info +from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration from flow.data_pipeline.leaderboard_utils import network_name_translate from collections import defaultdict from datetime import datetime, timezone @@ -158,12 +158,18 @@ def rl_actions(*_): cur_datetime = datetime.now(timezone.utc) cur_date = cur_datetime.date().isoformat() cur_time = cur_datetime.time().isoformat() + # collecting information for metadata table metadata['source_id'].append(source_id) metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(self.env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(is_baseline)) + name, strategy = get_configuration() + metadata['submitter_name'].append(name) + 
metadata['strategy'].append(strategy) + + if convert_to_csv and self.env.simulator == "traci": + dir_path = self.env.sim_params.emission_path - dir_path = self.env.sim_params.emission_path if not dir_path is None: trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) @@ -172,6 +178,8 @@ def rl_actions(*_): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} + run_id = "run_{}".format(i) + self.env.pipeline_params = (extra_info, source_id, run_id) state = self.env.reset() for j in range(num_steps): t0 = time.time() @@ -185,9 +193,7 @@ def rl_actions(*_): ret += reward # collect additional information for the data pipeline - get_extra_info(self.env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend([source_id] * len(veh_ids)) - extra_info["run_id"].extend(['run_{}'.format(i)] * len(veh_ids)) + get_extra_info(self.env.k.vehicle, extra_info, veh_ids, source_id, run_id) # write to disk every 100 steps if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0 and not dir_path is None: @@ -230,8 +236,7 @@ def rl_actions(*_): emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv - # FIXME(@Brent): produce seg fault with large CSV - # emission_to_csv(emission_path) + emission_to_csv(emission_path) # Delete the .xml version of the emission file. os.remove(emission_path) @@ -241,12 +246,12 @@ def rl_actions(*_): if to_aws: upload_to_s3('circles.data.pipeline', - 'metadata_table/date={0}/partition_name={1}_METADATA/' - '{1}_METADATA.csv'.format(cur_date, source_id), + 'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date, + source_id), metadata_table_path) upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id), trajectory_table_path, - {'network': metadata['network'][0]}) + {'network': metadata['network'][0], 'is_baseline': metadata['is_baseline'][0]}) return info_dict diff --git a/flow/core/util.py b/flow/core/util.py index 1821a76a5..cd269e6af 100755 --- a/flow/core/util.py +++ b/flow/core/util.py @@ -47,42 +47,39 @@ def emission_to_csv(emission_path, output_path=None): path to the csv file that will be generated, default is the same directory as the emission file, with the same name """ - parser = etree.XMLParser(recover=True) - tree = ElementTree.parse(emission_path, parser=parser) - root = tree.getroot() - - # parse the xml data into a dict + context = etree.iterparse(emission_path, recover=True) out_data = [] - for time in root.findall('timestep'): - t = float(time.attrib['time']) - - for car in time: - out_data.append(dict()) - try: - out_data[-1]['time'] = t - out_data[-1]['CO'] = float(car.attrib['CO']) - out_data[-1]['y'] = float(car.attrib['y']) - out_data[-1]['CO2'] = float(car.attrib['CO2']) - out_data[-1]['electricity'] = float(car.attrib['electricity']) - out_data[-1]['type'] = car.attrib['type'] - out_data[-1]['id'] = car.attrib['id'] - out_data[-1]['eclass'] = car.attrib['eclass'] - out_data[-1]['waiting'] = float(car.attrib['waiting']) - out_data[-1]['NOx'] = float(car.attrib['NOx']) - out_data[-1]['fuel'] = float(car.attrib['fuel']) - out_data[-1]['HC'] = float(car.attrib['HC']) - out_data[-1]['x'] = float(car.attrib['x']) - out_data[-1]['route'] = car.attrib['route'] - out_data[-1]['relative_position'] = float(car.attrib['pos']) - out_data[-1]['noise'] = float(car.attrib['noise']) - 
out_data[-1]['angle'] = float(car.attrib['angle']) - out_data[-1]['PMx'] = float(car.attrib['PMx']) - out_data[-1]['speed'] = float(car.attrib['speed']) - out_data[-1]['edge_id'] = car.attrib['lane'].rpartition('_')[0] - out_data[-1]['lane_number'] = car.attrib['lane'].\ - rpartition('_')[-1] - except KeyError: - del out_data[-1] + for event, elem in context: + if elem.tag == "timestep": + t = float(elem.attrib['time']) + for car in elem: + out_data.append(dict()) + try: + out_data[-1]['time'] = t + out_data[-1]['CO'] = float(car.attrib['CO']) + out_data[-1]['y'] = float(car.attrib['y']) + out_data[-1]['CO2'] = float(car.attrib['CO2']) + out_data[-1]['electricity'] = float(car.attrib['electricity']) + out_data[-1]['type'] = car.attrib['type'] + out_data[-1]['id'] = car.attrib['id'] + out_data[-1]['eclass'] = car.attrib['eclass'] + out_data[-1]['waiting'] = float(car.attrib['waiting']) + out_data[-1]['NOx'] = float(car.attrib['NOx']) + out_data[-1]['fuel'] = float(car.attrib['fuel']) + out_data[-1]['HC'] = float(car.attrib['HC']) + out_data[-1]['x'] = float(car.attrib['x']) + out_data[-1]['route'] = car.attrib['route'] + out_data[-1]['relative_position'] = float(car.attrib['pos']) + out_data[-1]['noise'] = float(car.attrib['noise']) + out_data[-1]['angle'] = float(car.attrib['angle']) + out_data[-1]['PMx'] = float(car.attrib['PMx']) + out_data[-1]['speed'] = float(car.attrib['speed']) + out_data[-1]['edge_id'] = car.attrib['lane'].rpartition('_')[0] + out_data[-1]['lane_number'] = car.attrib['lane']. \ + rpartition('_')[-1] + except KeyError: + del out_data[-1] + elem.clear() # sort the elements of the dictionary by the vehicle id out_data = sorted(out_data, key=lambda k: k['id']) diff --git a/flow/data_pipeline/README.md b/flow/data_pipeline/README.md new file mode 100644 index 000000000..65aeb8d49 --- /dev/null +++ b/flow/data_pipeline/README.md @@ -0,0 +1,12 @@ +To run a simulation with output stored locally only: + + `python simulate.py EXP_CONFIG --gen_emission` + +To run a simulation and upload output to pipeline: + + `python simulate.py EXP_CONFIG --to_aws` + +To run a simulation, upload output to pipeline, and mark it as baseline: + + `python simulate.py EXP_CONFIG --to_aws --is_baseline` + diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 50c2c8422..74070cc7a 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -4,6 +4,8 @@ from flow.data_pipeline.query import QueryStrings from time import time from datetime import date +import csv +from io import StringIO def generate_trajectory_table(data_path, extra_info, partition_name): @@ -77,7 +79,7 @@ def upload_to_s3(bucket_name, bucket_key, file_path, metadata={}): return -def get_extra_info(veh_kernel, extra_info, veh_ids): +def get_extra_info(veh_kernel, extra_info, veh_ids, source_id, run_id): """Get all the necessary information for the trajectory output from flow.""" for vid in veh_ids: extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) @@ -103,6 +105,32 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["edge_id"].append(veh_kernel.get_edge(vid)) extra_info["lane_id"].append(veh_kernel.get_lane(vid)) extra_info["distance"].append(veh_kernel.get_distance(vid)) + extra_info["relative_position"].append(veh_kernel.get_position(vid)) + extra_info["source_id"].append(source_id) + extra_info["run_id"].append(run_id) + + +def get_configuration(): + """Get configuration for the metadata table.""" + try: + config_df = 
pd.read_csv('./data_pipeline_config') + except FileNotFoundError: + config_df = pd.DataFrame(data={"submitter_name": [""], "strategy": [""]}) + + if not config_df['submitter_name'][0]: + name = input("Please enter your name:").strip() + while not name: + name = input("Please enter a non-empty name:").strip() + config_df['submitter_name'] = [name] + + strategy = input( + "Please enter strategy name (current: \"{}\"):".format(config_df["strategy"][0])).strip() + if strategy: + config_df['strategy'] = [strategy] + + config_df.to_csv('./data_pipeline_config', index=False) + + return config_df['submitter_name'][0], config_df['strategy'][0] def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): @@ -114,6 +142,21 @@ def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): s3.delete_object(Bucket=bucket, Key=key) +def update_baseline(s3, baseline_network, baseline_source_id): + obj = s3.get_object(Bucket='circles.data.pipeline', Key='baseline_table/baselines.csv')['Body'] + original_str = obj.read().decode() + reader = csv.DictReader(StringIO(original_str)) + new_str = StringIO() + writer = csv.DictWriter(new_str, fieldnames=['network', 'source_id']) + writer.writeheader() + writer.writerow({'network': baseline_network, 'source_id': baseline_source_id}) + for row in reader: + if row['network'] != baseline_network: + writer.writerow(row) + s3.put_object(Bucket='circles.data.pipeline', Key='baseline_table/baselines.csv', + Body=new_str.getvalue().replace('\r', '').encode()) + + class AthenaQuery: """Class used to run queries. diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 00cf0fba5..f7a32d5db 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -1,7 +1,7 @@ """lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus -from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data +from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data, update_baseline from flow.data_pipeline.query import tags, tables, network_using_edge from flow.data_pipeline.query import X_FILTER, EDGE_FILTER, WARMUP_STEPS, HORIZON_STEPS @@ -48,6 +48,8 @@ def lambda_handler(event, context): if 'network' in response["Metadata"]: if response["Metadata"]['network'] in network_using_edge: loc_filter = EDGE_FILTER + if 'is_baseline' in response['Metadata'] and response['Metadata']['is_baseline'] == 'True': + update_baseline(s3, response["Metadata"]['network'], source_id) query_dict = tags[table] diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index afc2fd8bc..dd7055f8b 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -5,7 +5,8 @@ from io import StringIO -network_name_map = {"highway-single": "Single-Lane Straight Road", +network_name_map = {"highway": "Single-Lane Straight Road", + "highway_single": "Single-Lane Straight Road", "ring": "Single-Lane Ring Road", "I-210_subnetwork": "I-210 without Ramps", "I_210_subnetwork": "I-210 without Ramps"} @@ -64,10 +65,12 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] names = [key_to_name(k) for k in keys] existing_results = os.listdir("./result/{}".format(table_name)) + updated = False for index in range(len(keys)): if names[index] not in 
existing_results: + updated = True s3.download_file(bucket, keys[index], "./result/{}/{}".format(table_name, names[index])) - if table_name == "leaderboard_chart_agg": + if table_name == "leaderboard_chart_agg" and updated: for p in existing_results: os.remove("./result/{}/{}".format(table_name, p)) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 184c7217a..d538e7d62 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -65,26 +65,46 @@ X_FILTER = "x BETWEEN 500 AND 2300" -EDGE_FILTER = "edge_id <> ANY (VALUES 'ghost0', '119257908#3')" +EDGE_FILTER = "edge_id <> ALL (VALUES 'ghost0', '119257908#3')" WARMUP_STEPS = 600 * 3 * 0.4 HORIZON_STEPS = 1000 * 3 * 0.4 -VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ +VEHICLE_POWER_DEMAND_TACOMA_FINAL_SELECT = """ SELECT id, time_step, speed, acceleration, road_grade, - GREATEST(0, 1200 * speed * (( + GREATEST(0, 2041 * speed * (( CASE WHEN acceleration > 0 THEN 1 WHEN acceleration < 0 THEN 0 ELSE 0.5 - END * (1 - {0}) + {0}) * acceleration + 9.81 * SIN(road_grade) - ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3)) AS power, + END * (1 - {0}) + {0}) * acceleration + 9.807 * SIN(road_grade) + ) + 2041 * 9.807 * 0.0027 * speed + 0.5 * 1.225 * 3.2 * 0.4 * POW(speed,3)) AS power, + \'{1}\' AS energy_model_id, + source_id + FROM {2} + ORDER BY id, time_step + """ + +VEHICLE_POWER_DEMAND_PRIUS_FINAL_SELECT = """ + SELECT + id, + time_step, + speed, + acceleration, + road_grade, + GREATEST(-2.8 * speed, 1663 * speed * (( + CASE + WHEN acceleration > 0 THEN 1 + WHEN acceleration < 0 THEN 0 + ELSE 0.5 + END * (1 - {0}) + {0}) * acceleration + 9.807 * SIN(road_grade) + ) + 1663 * 9.807 * 0.007 * speed + 0.5 * 1.225 * 2.4 * 0.24 * POW(speed,3)) AS power, \'{1}\' AS energy_model_id, source_id FROM {2} @@ -122,9 +142,9 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, - 'POWER_DEMAND_MODEL', - 'regular_cte')) + {}""".format(VEHICLE_POWER_DEMAND_TACOMA_FINAL_SELECT.format(1, + 'POWER_DEMAND_MODEL', + 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -142,9 +162,9 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL', - 'denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_TACOMA_FINAL_SELECT.format(1, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL', + 'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -175,9 +195,9 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', - 'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_TACOMA_FINAL_SELECT.format(1, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) FACT_NETWORK_THROUGHPUT_AGG = """ WITH min_time AS ( @@ -305,6 +325,7 @@ class QueryStrings(Enum): FROM min_max_time_step WHERE 1 = 1 AND min_time_step >= {start_filter} + AND min_time_step < {stop_filter} GROUP BY 1, 2 ), outflows AS ( SELECT @@ -313,11 +334,14 @@ class QueryStrings(Enum): 60 * COUNT(DISTINCT id) AS outflow_rate FROM min_max_time_step WHERE 1 = 1 + AND max_time_step >= {start_filter} AND max_time_step < {stop_filter} GROUP BY 1, 2 ) SELECT - COALESCE(i.time_step, o.time_step) AS time_step, + 
COALESCE(i.time_step, o.time_step) - MIN(COALESCE(i.time_step, o.time_step)) + OVER (PARTITION BY COALESCE(i.source_id, o.source_id) + ORDER BY COALESCE(i.time_step, o.time_step) ASC) AS time_step, COALESCE(i.source_id, o.source_id) AS source_id, COALESCE(i.inflow_rate, 0) AS inflow_rate, COALESCE(o.outflow_rate, 0) AS outflow_rate @@ -434,7 +458,8 @@ class QueryStrings(Enum): SELECT vt.id, vt.source_id, - vt.time_step, + vt.time_step - FIRST_VALUE(vt.time_step) + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS time_step, energy_model_id, et.speed, et.acceleration, @@ -528,17 +553,44 @@ class QueryStrings(Enum): ;""" LEADERBOARD_CHART_AGG = """ + WITH agg AS ( + SELECT + l.date AS submission_date, + m.submission_time, + l.source_id, + m.submitter_name, + m.strategy, + m.network, + m.is_baseline, + l.energy_model_id, + l.efficiency_meters_per_joules, + l.efficiency_miles_per_gallon, + l.throughput_per_hour, + b.source_id AS baseline_source_id + FROM leaderboard_chart AS l, metadata_table AS m, baseline_table as b + WHERE 1 = 1 + AND l.source_id = m.source_id + AND m.network = b.network + AND (m.is_baseline='False' + OR (m.is_baseline='True' + AND m.source_id = b.source_id)) + ) SELECT - l.date AS submission_date, - l.source_id, - m.network, - m.is_baseline, - l.energy_model_id, - l.efficiency_meters_per_joules, - l.efficiency_miles_per_gallon, - l.throughput_per_hour - FROM leaderboard_chart AS l, metadata_table AS m - WHERE 1 = 1 - AND l.source_id = m.source_id - ORDER BY l.date, m.submission_time, l.source_id ASC + agg.submission_date, + agg.source_id, + agg.submitter_name, + agg.strategy, + agg.network, + agg.is_baseline, + agg.energy_model_id, + agg.efficiency_meters_per_joules, + agg.efficiency_miles_per_gallon, + 100 * (1 - baseline.efficiency_miles_per_gallon / agg.efficiency_miles_per_gallon) AS percent_improvement, + agg.throughput_per_hour + FROM agg + JOIN agg AS baseline ON 1 = 1 + AND agg.network = baseline.network + AND baseline.is_baseline = 'True' + AND agg.baseline_source_id = baseline.source_id + ORDER BY agg.submission_date, agg.submission_time ASC ;""" diff --git a/flow/envs/base.py b/flow/envs/base.py index 1e739faba..9dec30025 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -26,6 +26,8 @@ from flow.core.kernel import Kernel from flow.utils.exceptions import FatalFlowError +from flow.data_pipeline.data_pipeline import get_extra_info + class Env(gym.Env, metaclass=ABCMeta): """Base environment class. 
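The percent_improvement column defined in the LEADERBOARD_CHART_AGG query above joins every submission back to the baseline row for the same network, so with a baseline of 20 miles per gallon and a submission of 25 the expression evaluates to 100 * (1 - 20 / 25) = 20, a 20 percent improvement, while a submission worse than its baseline comes out negative. A toy version of the self-join, with table and column names shortened for illustration:

    import pandas as pd

    # two leaderboard rows on the same network: a baseline and a submission
    agg = pd.DataFrame({
        'source_id':   ['base', 'subm'],
        'network':     ['ring', 'ring'],
        'is_baseline': ['True', 'False'],
        'mpg':         [20.0, 25.0],
    })

    # join every row to its network's baseline, mirroring the SQL self-join
    merged = agg.merge(agg[agg['is_baseline'] == 'True'], on='network', suffixes=('', '_base'))
    merged['percent_improvement'] = 100 * (1 - merged['mpg_base'] / merged['mpg'])
    print(merged[['source_id', 'percent_improvement']])  # 'subm' row -> 20.0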
@@ -578,6 +580,14 @@ def reset(self):
         # perform (optional) warm-up steps before training
         for _ in range(self.env_params.warmup_steps):
             observation, _, _, _ = self.step(rl_actions=None)
+            # collect data for pipeline during the warmup period
+            try:
+                extra_info, source_id, run_id = self.pipeline_params
+                veh_ids = self.k.vehicle.get_ids()
+                get_extra_info(self.k.vehicle, extra_info, veh_ids, source_id, run_id)
+            # In case the attribute `pipeline_params` is not added to this instance
+            except AttributeError as e:
+                pass

         # render a frame
         self.render(reset=True)

diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py
index b2e22d5b3..fb6792c11 100644
--- a/flow/visualize/i210_replay.py
+++ b/flow/visualize/i210_replay.py
@@ -32,7 +32,7 @@
 from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS
 from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables

-from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info
+from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration
 from flow.data_pipeline.leaderboard_utils import network_name_translate

 import uuid
@@ -221,6 +221,9 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
         metadata['submission_time'].append(cur_time)
         metadata['network'].append(network_name_translate(env.network.name.split('_20')[0]))
         metadata['is_baseline'].append(str(args.is_baseline))
+        name, strategy = get_configuration()
+        metadata['submitter_name'].append(name)
+        metadata['strategy'].append(strategy)

     i = 0
     while i < args.num_rollouts:
@@ -231,6 +234,8 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
         completed_vehicle_avg_energy = {}
         completed_vehicle_travel_time = {}
         custom_vals = {key: [] for key in custom_callables.keys()}
+        run_id = "run_{}".format(i)
+        env.pipeline_params = (extra_info, source_id, run_id)
         state = env.reset()
         initial_vehicles = set(env.k.vehicle.get_ids())
         for _ in range(env_params.horizon):
@@ -260,10 +265,8 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
             veh_ids = env.k.vehicle.get_ids()
             vel.append(np.mean(env.k.vehicle.get_speed(veh_ids)))

-            # Collect information from flow for the trajectory output
-            get_extra_info(env.k.vehicle, extra_info, veh_ids)
-            extra_info["source_id"].extend([source_id] * len(veh_ids))
-            extra_info["run_id"].extend(['run_{}'.format(i)] * len(veh_ids))
+            # collect additional information for the data pipeline
+            get_extra_info(env.k.vehicle, extra_info, veh_ids, source_id, run_id)
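The warmup hook above makes trajectory collection strictly opt-in: nothing is recorded unless a caller first attaches a pipeline_params tuple to the environment instance, which is exactly what the replay loop in this same commit does before every reset. A minimal sketch of that handshake, reusing the accumulator layout from this patch and assuming an env instance is in scope:

    from collections import defaultdict
    import uuid

    extra_info = defaultdict(lambda: [])            # one list of values per output column
    source_id = 'flow_{}'.format(uuid.uuid4().hex)  # one id per submission
    env.pipeline_params = (extra_info, source_id, 'run_0')
    env.reset()  # warmup steps are now logged through get_extra_info as well

             # Compute the results for the custom callables.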
             for (key, lambda_func) in custom_callables.items():

diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py
index 0ab658f75..059cabbbd 100644
--- a/flow/visualize/visualizer_rllib.py
+++ b/flow/visualize/visualizer_rllib.py
@@ -33,6 +33,11 @@
 from flow.utils.rllib import get_rllib_config
 from flow.utils.rllib import get_rllib_pkl

+from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration
+from flow.data_pipeline.leaderboard_utils import network_name_translate
+from collections import defaultdict
+from datetime import datetime, timezone
+import uuid

 EXAMPLE_USAGE = """
 example usage:
     python ./visualizer_rllib.py /ray_results/experiment_dir/result_dir 1
@@ -207,6 +212,23 @@ def visualizer_rllib(args):
     if not sim_params.restart_instance:
         env.restart_simulation(sim_params=sim_params, render=sim_params.render)

+    # data pipeline
+    extra_info = defaultdict(lambda: [])
+    source_id = 'flow_{}'.format(uuid.uuid4().hex)
+    metadata = defaultdict(lambda: [])
+    # collect current time
+    cur_datetime = datetime.now(timezone.utc)
+    cur_date = cur_datetime.date().isoformat()
+    cur_time = cur_datetime.time().isoformat()
+    # collecting information for metadata table
+    metadata['source_id'].append(source_id)
+    metadata['submission_time'].append(cur_time)
+    metadata['network'].append(network_name_translate(env.network.name.split('_20')[0]))
+    metadata['is_baseline'].append(str(args.is_baseline))
+    name, strategy = get_configuration()
+    metadata['submitter_name'].append(name)
+    metadata['strategy'].append(strategy)
+
     # Simulate and collect metrics
     final_outflows = []
     final_inflows = []
@@ -216,6 +238,8 @@ def visualizer_rllib(args):
     std_speed = []
     for i in range(args.num_rollouts):
         vel = []
+        run_id = "run_{}".format(i)
+        env.pipeline_params = (extra_info, source_id, run_id)
         state = env.reset()
         if multiagent:
             ret = {key: [0] for key in rets.keys()}
@@ -246,6 +270,10 @@ def visualizer_rllib(args):
             else:
                 action = agent.compute_action(state)
             state, reward, done, _ = env.step(action)
+
+            # collect data for data pipeline
+            get_extra_info(vehicles, extra_info, vehicles.get_ids(), source_id, run_id)
+
             if multiagent:
                 for actor, rew in reward.items():
                     ret[policy_map_fn(actor)][0] += rew
@@ -341,6 +369,22 @@ def visualizer_rllib(args):
         # delete the .xml version of the emission file
         os.remove(emission_path)

+    # generate datapipeline output
+    trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id))
+    metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id))
+    write_dict_to_csv(trajectory_table_path, extra_info, True)
+    write_dict_to_csv(metadata_table_path, metadata, True)
+
+    if args.to_aws:
+        upload_to_s3('circles.data.pipeline',
+                     'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date,
                                                                                                    source_id),
+                     metadata_table_path)
+        upload_to_s3('circles.data.pipeline',
+                     'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id),
+                     trajectory_table_path,
+                     {'network': metadata['network'][0]})
+

 def create_parser():
     """Create the parser to capture CLI arguments."""
@@ -394,6 +438,18 @@ def create_parser():
         '--horizon',
         type=int,
         help='Specifies the horizon.')
+    parser.add_argument(
+        '--is_baseline',
+        action='store_true',
+        help='Specifies whether this is a baseline run'
+    )
+    parser.add_argument(
+        '--to_aws',
+        type=str, nargs='?', default=None, const="default",
+        help='Specifies the name of the partition to store the output '
+             'file on S3. 
Passing a non-None value for this argument '
+             'automatically sets gen_emission to True.'
+    )
     return parser

From 25e623ab6d3d0a8baf22ac91d2c68f2711127194 Mon Sep 17 00:00:00 2001
From: akashvelu
Date: Thu, 18 Jun 2020 17:55:55 -0700
Subject: [PATCH 260/438] timespace diagram merge bug fix

---
 flow/utils/rllib.py                  |   3 +-
 flow/visualize/time_space_diagram.py | 122 ++-------------------------
 2 files changed, 9 insertions(+), 116 deletions(-)

diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py
index e3404a61f..4d2d8553f 100644
--- a/flow/utils/rllib.py
+++ b/flow/utils/rllib.py
@@ -95,7 +95,8 @@ def get_flow_params(config):
     if type(config) == dict:
         flow_params = json.loads(config['env_config']['flow_params'])
     else:
-        flow_params = json.load(open(config, 'r'))
+        config = json.load(open(config, 'r'))
+        flow_params = json.loads(config['env_config']['flow_params'])

     # reinitialize the vehicles class from stored data
     veh = VehicleParams()
diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py
index fe66c1089..cc0f388bd 100644
--- a/flow/visualize/time_space_diagram.py
+++ b/flow/visualize/time_space_diagram.py
@@ -169,31 +169,7 @@ def _merge(data):

     return segs, data

-    # compute the absolute position
-    for veh_id in data.keys():
-        data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'],
-                                               data[veh_id]['pos'], edgestarts)
-
-    # prepare the speed and absolute position in a way that is compatible with
-    # the space-time diagram, and compute the number of vehicles at each step
-    pos = np.zeros((all_time.shape[0], len(data.keys())))
-    speed = np.zeros((all_time.shape[0], len(data.keys())))
-    for i, veh_id in enumerate(sorted(data.keys())):
-        for spd, abs_pos, ti, edge in zip(data[veh_id]['vel'],
-                                          data[veh_id]['abs_pos'],
-                                          data[veh_id]['time'],
-                                          data[veh_id]['edge']):
-            # avoid vehicles outside the main highway
-            if edge in ['inflow_merge', 'bottom', ':bottom_0']:
-                continue
-            ind = np.where(ti == all_time)[0]
-            pos[ind, i] = abs_pos
-            speed[ind, i] = spd
-
-    return pos, speed, all_time
-
-
-def _highway(data, params, all_time):
+def _highway(data):
     r"""Generate position and speed data for the highway subnetwork.

     Parameters
     ----------
     data : dict of dict
         Key = "veh_id": name of the vehicle \n Elements:

         * "time": time step at every sample
         * "edge": edge ID at every sample
         * "pos": relative position at every sample
         * "vel": speed at every sample
     params : dict
         flow-specific parameters
     all_time : array_like
         a (n_steps,) vector representing the unique time steps in the
         simulation

     Returns
     -------
     as_array
         n_steps x n_veh matrix specifying the absolute position of every
         vehicle at every time step. Set to zero if the vehicle is not present
         in the network at that time step.
     as_array
         n_steps x n_veh matrix specifying the speed of every vehicle at every
         time step. Set to zero if the vehicle is not present in the network at
         that time step.
     """
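The bug fix here finishes moving _highway onto the flat trajectory DataFrame: each row already carries the current and the next (time, position) sample, so the plotted line segments fall out of a single reshape in the new body added below. A toy illustration of that reshape, with made-up column values:

    import pandas as pd

    df = pd.DataFrame({'time_step': [0.0, 0.5], 'distance': [10.0, 21.0],
                       'next_time': [0.5, 1.0], 'next_pos': [21.0, 33.0]})
    segs = df[['time_step', 'distance', 'next_time', 'next_pos']].values.reshape((len(df), 2, 2))
    # segs[0] -> [[0.0, 10.0], [0.5, 21.0]]: one (time, position) -> (next_time, next_pos) segment

-    junction_length = 0.1
-    length = params['net'].additional_params["length"]
-    num_edges = params['net'].additional_params.get("num_edges", 1)
-    edge_starts = {}
-    # Add the main edges.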
- edge_starts.update({ - "highway_{}".format(i): i * (length / num_edges + junction_length) - for i in range(num_edges) - }) - - if params['net'].additional_params["use_ghost_edge"]: - edge_starts.update({"highway_end": length + num_edges * junction_length}) - - edge_starts.update({ - ":edge_{}".format(i + 1): (i + 1) * length / num_edges + i * junction_length - for i in range(num_edges - 1) - }) - - if params['net'].additional_params["use_ghost_edge"]: - edge_starts.update({ - ":edge_{}".format(num_edges): length + (num_edges - 1) * junction_length - }) - - # compute the absolute position - for veh_id in data.keys(): - data[veh_id]['abs_pos'] = _get_abs_pos_1_edge(data[veh_id]['edge'], - data[veh_id]['pos'], - edge_starts) - - # track only vehicles that were around during this time period - # create the output variables - pos = np.zeros((all_time.shape[0], len(data.keys()))) - speed = np.zeros((all_time.shape[0], len(data.keys()))) - observed_row_list = [] - for i, veh_id in enumerate(sorted(data.keys())): - for spd, abs_pos, ti, edge, lane in zip(data[veh_id]['vel'], - data[veh_id]['abs_pos'], - data[veh_id]['time'], - data[veh_id]['edge'], - data[veh_id]['lane']): - # avoid vehicles not on the relevant edges. Also only check the - # second to last lane - if edge not in edge_starts.keys() or ti not in all_time: - continue - else: - if i not in observed_row_list: - observed_row_list.append(i) - ind = np.where(ti == all_time)[0] - pos[ind, i] = abs_pos - speed[ind, i] = spd - - pos = pos[:, observed_row_list] - speed = speed[:, observed_row_list] - - return pos, speed, all_time + data.loc[:, :] = data[(data['distance'] > 500)] + data.loc[:, :] = data[(data['distance'] < 2300)] + segs = data[['time_step', 'distance', 'next_time', 'next_pos']].values.reshape((len(data), 2, 2)) + + return segs, data def _ring_road(data, params, all_time): @@ -566,6 +492,7 @@ def plot_tsd(ax, df, segs, args, lane=None): for lane, df in traj_df.groupby('lane_id'): ax = plt.subplot(nlanes, 1, lane+1) + plot_tsd(ax, df, segs[lane], args, lane) else: # perform plotting operation @@ -574,41 +501,6 @@ def plot_tsd(ax, df, segs, args, lane=None): plot_tsd(ax, traj_df, segs, args) - for indx_car in range(pos.shape[1]): - unique_car_pos = pos[:, indx_car] - - if flow_params['network'] == I210SubNetwork or flow_params['network'] == HighwayNetwork: - indices = np.where(pos[:, indx_car] != 0)[0] - unique_car_speed = speed[indices, indx_car] - points = np.array([time[indices], pos[indices, indx_car]]).T.reshape(-1, 1, 2) - else: - - # discontinuity from wraparound - disc = np.where(np.abs(np.diff(unique_car_pos)) >= 10)[0] + 1 - unique_car_time = np.insert(time, disc, np.nan) - unique_car_pos = np.insert(unique_car_pos, disc, np.nan) - unique_car_speed = np.insert(speed[:, indx_car], disc, np.nan) - # - points = np.array( - [unique_car_time, unique_car_pos]).T.reshape(-1, 1, 2) - segments = np.concatenate([points[:-1], points[1:]], axis=1) - lc = LineCollection(segments, cmap=my_cmap, norm=norm) - - # Set the values used for color mapping - lc.set_array(unique_car_speed) - lc.set_linewidth(1.75) - cols.append(lc) - - plt.title(args.title, fontsize=25) - plt.ylabel('Position (m)', fontsize=20) - plt.xlabel('Time (s)', fontsize=20) - - for col in cols: - line = ax.add_collection(col) - cbar = plt.colorbar(line, ax=ax, norm=norm) - cbar.set_label('Velocity (m/s)', fontsize=20) - cbar.ax.tick_params(labelsize=18) - ########################################################################### # Note: For MergeNetwork only # if 
flow_params['network'] == 'MergeNetwork': # From 2e76a4c1e02112aab24dcf21be6c9dcb9ea7187b Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 18 Jun 2020 20:13:40 -0700 Subject: [PATCH 261/438] reduce time-bins to 10s --- flow/data_pipeline/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d538e7d62..a60b524fd 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -494,7 +494,7 @@ class QueryStrings(Enum): ), binned_cumulative_energy AS ( SELECT source_id, - CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, + CAST(time_step/10 AS INTEGER) * 10 AS time_seconds_bin, AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, @@ -512,7 +512,7 @@ class QueryStrings(Enum): SELECT DISTINCT source_id, id, - CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, + CAST(time_step/10 AS INTEGER) * 10 AS time_seconds_bin, FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 ORDER BY time_step ASC) AS energy_start, From 0c7de605ca86e28ecc731a2d93797151217b9d79 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 18 Jun 2020 20:15:30 -0700 Subject: [PATCH 262/438] reduce time-bins in more places --- flow/data_pipeline/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index a60b524fd..beccf9c99 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -514,10 +514,10 @@ class QueryStrings(Enum): id, CAST(time_step/10 AS INTEGER) * 10 AS time_seconds_bin, FIRST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 + OVER (PARTITION BY id, CAST(time_step/10 AS INTEGER) * 10 ORDER BY time_step ASC) AS energy_start, LAST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 + OVER (PARTITION BY id, CAST(time_step/10 AS INTEGER) * 10 ORDER BY time_step ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( From 71dee847ee5e8d59c4a09193b0d5639895c0cafc Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 18 Jun 2020 21:16:32 -0700 Subject: [PATCH 263/438] docstring fix --- flow/visualize/time_space_diagram.py | 35 ++++++++++++---------------- 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index cc0f388bd..d8ae7cd73 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -170,31 +170,26 @@ def _merge(data): return segs, data def _highway(data): - r"""Generate position and speed data for the highway subnetwork. + r"""Generate time and position data for the highway. + + We generate plots for all lanes, so the segments are wrapped in + a dictionary. Parameters ---------- - data : dict of dict - Key = "veh_id": name of the vehicle \n Elements: - * "time": time step at every sample - * "edge": edge ID at every sample - * "pos": relative position at every sample - * "vel": speed at every sample - params : dict - flow-specific parameters - all_time : array_like - a (n_steps,) vector representing the unique time steps in the - simulation + data : pd.DataFrame + cleaned dataframe of the trajectory data + Returns ------- - as_array - n_steps x n_veh matrix specifying the absolute position of every - vehicle at every time step. Set to zero if the vehicle is not present - in the network at that time step. 
- as_array - n_steps x n_veh matrix specifying the speed of every vehicle at every - time step. Set to zero if the vehicle is not present in the network at - that time step. + dict of ndarray + dictionary of 3d array (n_segments x 2 x 2) containing segments + to be plotted. the dictionary is keyed on lane numbers, with the + values being the 3d array representing the segments. every inner + 2d array is comprised of two 1d arrays representing + [start time, start distance] and [end time, end distance] pairs. + pd.DataFrame + modified trajectory dataframe """ data.loc[:, :] = data[(data['distance'] > 500)] data.loc[:, :] = data[(data['distance'] < 2300)] From dfb1c0790f57603a098ef1b5745c992c6231f89e Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 18 Jun 2020 21:29:38 -0700 Subject: [PATCH 264/438] add query to count vehicles in domain at every timestep --- flow/data_pipeline/query.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d538e7d62..e1c3faa31 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -14,9 +14,13 @@ ], "fact_network_inflows_outflows": [ "FACT_NETWORK_INFLOWS_OUTFLOWS" + ], + "fact_vehicle_counts_by_time": [ + "FACT_VEHICLE_COUNTS_BY_TIME" ] }, "fact_energy_trace": {}, + "fact_vehicle_counts_by_time": {}, "POWER_DEMAND_MODEL_DENOISED_ACCEL": { "fact_vehicle_fuel_efficiency_agg": [ "FACT_VEHICLE_FUEL_EFFICIENCY_AGG" @@ -552,6 +556,22 @@ class QueryStrings(Enum): ORDER BY time_seconds_bin ASC ;""" + FACT_VEHICLE_COUNTS_BY_TIME = """ + SELECT + vt.source_id, + vt.time_step - FIRST_VALUE(vt.time_step) + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS time_step, + COUNT(DISTINCT vt.id) AS vehicle_counts + FROM fact_vehicle_trace vt + WHERE 1 = 1 + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND vt.{loc_filter} + AND vt.time_step >= {start_filter} + GROUP BY 1, 2 + ; + """ + LEADERBOARD_CHART_AGG = """ WITH agg AS ( SELECT From 23c55fefe168dfc7b95645ae328b27fabef5f35f Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 18 Jun 2020 21:31:59 -0700 Subject: [PATCH 265/438] fix typo in window function --- flow/data_pipeline/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index e1c3faa31..61dda1212 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -560,7 +560,7 @@ class QueryStrings(Enum): SELECT vt.source_id, vt.time_step - FIRST_VALUE(vt.time_step) - OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS time_step, + OVER (PARTITION BY vt.source_id ORDER BY vt.time_step ASC) AS time_step, COUNT(DISTINCT vt.id) AS vehicle_counts FROM fact_vehicle_trace vt WHERE 1 = 1 From 7ac4c32f4b9c0eed92a9ca0369f97c6c05f7f0c6 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 10:44:25 -0700 Subject: [PATCH 266/438] implement _get_abs_pos() for HighwayNetwork --- flow/visualize/time_space_diagram.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index d8ae7cd73..93a3d713f 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -360,6 +360,8 @@ def _get_abs_pos(df, params): 'bottom_to_top': intersection / 2 + inner, 'right_to_left': junction + 3 * inner, } + elif params['network'] == HighwayNetwork: + return df['x'] else: edgestarts = defaultdict(float) From 
5de54b7999410856dc193fd24dec44ce0fc64ae8 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 11:40:49 -0700 Subject: [PATCH 267/438] remove trailing whitespaces --- flow/data_pipeline/query.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d538e7d62..d303341cf 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -571,9 +571,9 @@ class QueryStrings(Enum): WHERE 1 = 1 AND l.source_id = m.source_id AND m.network = b.network - AND (m.is_baseline='False' - OR (m.is_baseline='True' - AND m.source_id = b.source_id)) + AND (m.is_baseline='False' + OR (m.is_baseline='True' + AND m.source_id = b.source_id)) ) SELECT agg.submission_date, From 38a6d707148bfc5b89281ae6f26ddbb900d541c6 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 11:41:58 -0700 Subject: [PATCH 268/438] remove unused import --- flow/core/util.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flow/core/util.py b/flow/core/util.py index cd269e6af..c0c31f811 100755 --- a/flow/core/util.py +++ b/flow/core/util.py @@ -4,7 +4,6 @@ import errno import os from lxml import etree -from xml.etree import ElementTree def makexml(name, nsl): From 379104893c3bfdfe5d9ef56f620cc0d56b324dcb Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 11:49:34 -0700 Subject: [PATCH 269/438] fix flake8 issues --- flow/core/experiment.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index ceb8c7b61..8ede367a7 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -170,7 +170,7 @@ def rl_actions(*_): if convert_to_csv and self.env.simulator == "traci": dir_path = self.env.sim_params.emission_path - if not dir_path is None: + if dir_path: trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) @@ -196,7 +196,7 @@ def rl_actions(*_): get_extra_info(self.env.k.vehicle, extra_info, veh_ids, source_id, run_id) # write to disk every 100 steps - if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0 and not dir_path is None: + if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0 and dir_path: write_dict_to_csv(trajectory_table_path, extra_info, not j) extra_info.clear() @@ -230,17 +230,6 @@ def rl_actions(*_): # wait a short period of time to ensure the xml file is readable time.sleep(0.1) - # collect the location of the emission file - emission_filename = \ - "{0}-emission.xml".format(self.env.network.name) - emission_path = os.path.join(dir_path, emission_filename) - - # convert the emission file into a csv - emission_to_csv(emission_path) - - # Delete the .xml version of the emission file. 
-            os.remove(emission_path)
-
+
         write_dict_to_csv(trajectory_table_path, extra_info)
         write_dict_to_csv(metadata_table_path, metadata, True)

From ed0135748100bfbd81f5a9f42deba934e72e80c4 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Fri, 19 Jun 2020 11:51:19 -0700
Subject: [PATCH 270/438] remove unused error variable

---
 flow/envs/base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/flow/envs/base.py b/flow/envs/base.py
index 9dec30025..8a36d6a47 100644
--- a/flow/envs/base.py
+++ b/flow/envs/base.py
@@ -586,7 +586,7 @@ def reset(self):
                 veh_ids = self.k.vehicle.get_ids()
                 get_extra_info(self.k.vehicle, extra_info, veh_ids, source_id, run_id)
             # In case the attribute `pipeline_params` is not added to this instance
-            except AttributeError as e:
+            except AttributeError:
                 pass

         # render a frame

From b9fd3be9393f5bed6f0b83a6550cae7136747468 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Fri, 19 Jun 2020 11:51:58 -0700
Subject: [PATCH 271/438] add expected blank line before function

---
 flow/visualize/time_space_diagram.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py
index 93a3d713f..e9c43e0ed 100644
--- a/flow/visualize/time_space_diagram.py
+++ b/flow/visualize/time_space_diagram.py
@@ -169,6 +169,7 @@ def _merge(data):

     return segs, data

+
 def _highway(data):
     r"""Generate time and position data for the highway.

From 62ee8a04fe04180dba0958708bc106d9a7ca7eee Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Fri, 19 Jun 2020 11:58:36 -0700
Subject: [PATCH 272/438] add specified exception to try

---
 flow/algorithms/centralized_PPO.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/flow/algorithms/centralized_PPO.py b/flow/algorithms/centralized_PPO.py
index 57fdd7e33..d30442773 100644
--- a/flow/algorithms/centralized_PPO.py
+++ b/flow/algorithms/centralized_PPO.py
@@ -228,7 +228,8 @@ def centralized_critic_postprocessing(policy,
         try:
             central_obs_batch = np.hstack(
                 (sample_batch["obs"], np.hstack(central_obs_list)))
-        except:
+        except Exception as e:
+            print('Error in centralized PPO: ', e)
             # TODO(@ev) this is a bug and needs to be fixed
             central_obs_batch = sample_batch["obs"]
         max_vf_agents = policy.model.max_num_agents

From 7824d88317c84df8de4e93e57773ba217e1638e5 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Fri, 19 Jun 2020 13:11:21 -0700
Subject: [PATCH 273/438] implement HighwayNetwork for Time-Space Diagrams
 (#979)

* implement HighwayNetwork for Time-Space Diagrams

* fix flake8

* PR fixes

* update docstrings

Co-authored-by: AboudyKreidieh
---
 flow/visualize/time_space_diagram.py | 41 ++++++++++++++++++++++++----
 1 file changed, 36 insertions(+), 5 deletions(-)

diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py
index bc26ad855..3c7ab8b21 100644
--- a/flow/visualize/time_space_diagram.py
+++ b/flow/visualize/time_space_diagram.py
@@ -17,7 +17,7 @@
     python time_space_diagram.py .csv .json
 """
 from flow.utils.rllib import get_flow_params
-from flow.networks import RingNetwork, FigureEightNetwork, MergeNetwork, I210SubNetwork
+from flow.networks import RingNetwork, FigureEightNetwork, MergeNetwork, I210SubNetwork, HighwayNetwork

 import argparse
 from collections import defaultdict
@@ -38,7 +38,8 @@
     RingNetwork,
     FigureEightNetwork,
     MergeNetwork,
-    I210SubNetwork
+    I210SubNetwork,
+    HighwayNetwork
 ]


@@ -103,7 +104,7 @@ def get_time_space_data(data, params):

     Returns
     -------
-    ndarray (or dict of ndarray)
+    ndarray (or dict < str, 
np.ndarray >) 3d array (n_segments x 2 x 2) containing segments to be plotted. every inner 2d array is comprised of two 1d arrays representing [start time, start distance] and [end time, end distance] pairs. @@ -126,7 +127,8 @@ def get_time_space_data(data, params): RingNetwork: _ring_road, MergeNetwork: _merge, FigureEightNetwork: _figure_eight, - I210SubNetwork: _i210_subnetwork + I210SubNetwork: _i210_subnetwork, + HighwayNetwork: _highway, } # Get the function from switcher dictionary @@ -167,6 +169,33 @@ def _merge(data): return segs, data +def _highway(data): + r"""Generate time and position data for the highway. + + We generate plots for all lanes, so the segments are wrapped in + a dictionary. + + Parameters + ---------- + data : pd.DataFrame + cleaned dataframe of the trajectory data + + Returns + ------- + ndarray + 3d array (n_segments x 2 x 2) containing segments to be plotted. + every inner 2d array is comprised of two 1d arrays representing + [start time, start distance] and [end time, end distance] pairs. + pd.DataFrame + modified trajectory dataframe + """ + data.loc[:, :] = data[(data['distance'] > 500)] + data.loc[:, :] = data[(data['distance'] < 2300)] + segs = data[['time_step', 'distance', 'next_time', 'next_pos']].values.reshape((len(data), 2, 2)) + + return segs, data + + def _ring_road(data): r"""Generate position and speed data for the ring road. @@ -205,7 +234,7 @@ def _i210_subnetwork(data): Returns ------- - dict of ndarray + dict < str, np.ndarray > dictionary of 3d array (n_segments x 2 x 2) containing segments to be plotted. the dictionary is keyed on lane numbers, with the values being the 3d array representing the segments. every inner @@ -329,6 +358,8 @@ def _get_abs_pos(df, params): 'bottom_to_top': intersection / 2 + inner, 'right_to_left': junction + 3 * inner, } + elif params['network'] == HighwayNetwork: + return df['x'] else: edgestarts = defaultdict(float) From 01676d9e8a29449b1fc9a3e5ad028eb0ff36f6e2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 16:09:36 -0700 Subject: [PATCH 274/438] correct some docstring inconsistencies --- flow/visualize/time_space_diagram.py | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index e9c43e0ed..d194597e4 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -142,7 +142,7 @@ def get_time_space_data(data, params): def _merge(data): - r"""Generate position and speed data for the merge. + r"""Generate time and position data for the merge. This only include vehicles on the main highway, and not on the adjacent on-ramp. @@ -173,9 +173,6 @@ def _merge(data): def _highway(data): r"""Generate time and position data for the highway. - We generate plots for all lanes, so the segments are wrapped in - a dictionary. - Parameters ---------- data : pd.DataFrame @@ -183,11 +180,9 @@ def _highway(data): Returns ------- - dict of ndarray - dictionary of 3d array (n_segments x 2 x 2) containing segments - to be plotted. the dictionary is keyed on lane numbers, with the - values being the 3d array representing the segments. every inner - 2d array is comprised of two 1d arrays representing + ndarray + 3d array (n_segments x 2 x 2) containing segments to be plotted. + every inner 2d array is comprised of two 1d arrays representing [start time, start distance] and [end time, end distance] pairs. 
pd.DataFrame modified trajectory dataframe @@ -199,8 +194,8 @@ def _highway(data): return segs, data -def _ring_road(data, params, all_time): - r"""Generate position and speed data for the ring road. +def _ring_road(data): + r"""Generate time and position data for the ring road. Vehicles that reach the top of the plot simply return to the bottom and continue. @@ -237,7 +232,7 @@ def _i210_subnetwork(data): Returns ------- - dict of ndarray + dict < str, np.ndarray > dictionary of 3d array (n_segments x 2 x 2) containing segments to be plotted. the dictionary is keyed on lane numbers, with the values being the 3d array representing the segments. every inner @@ -262,7 +257,7 @@ def _i210_subnetwork(data): def _figure_eight(data): - r"""Generate position and speed data for the figure eight. + r"""Generate time and position data for the figure eight. The vehicles traveling towards the intersection from one side will be plotted from the top downward, while the vehicles from the other side will From dd24fb06094d6c0bca33bca86095b82a468cb13f Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 20 Jun 2020 22:38:01 -0700 Subject: [PATCH 275/438] Add query to produce max score line in leaderboard --- flow/data_pipeline/query.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d303341cf..a7a0b794f 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -44,6 +44,11 @@ "leaderboard_chart_agg": [ "LEADERBOARD_CHART_AGG" ] + }, + "leaderboard_chart_agg": { + "fact_top_scores": [ + "FACT_TOP_SCORES" + ] } } @@ -594,3 +599,30 @@ class QueryStrings(Enum): AND agg.baseline_source_id = baseline.source_id ORDER BY agg.submission_date, agg.submission_time ASC ;""" + + FACT_TOP_SCORES = """ + WITH curr_max AS ( + SELECT + network, + submission_date, + 1000 * MAX(efficiency_meters_per_joules) + OVER (PARTITION BY network ORDER BY submission_date ASC + ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS max_score + FROM leaderboard_chart_agg + WHERE 1 = 1 + AND is_baseline = FALSE + ), prev_max AS ( + SELECT + network, + submission_date, + LAG(max_score, 1) OVER (PARTITION BY network ORDER BY submission_date ASC) AS max_score + FROM curr_max + ), unioned AS ( + SELECT * FROM curr_max + UNION ALL + SELECT * FROM prev_max + ) + SELECT DISTINCT * + FROM unioned + ORDER BY 1, 2, 3 + ;""" From 4e6a9b2f62d2a1617c796ef051865e784430675c Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 21 Jun 2020 22:07:37 -0700 Subject: [PATCH 276/438] Add I210 edgestarts --- flow/visualize/time_space_diagram.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index d194597e4..ad8443da3 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -358,6 +358,22 @@ def _get_abs_pos(df, params): } elif params['network'] == HighwayNetwork: return df['x'] + elif params['network'] == I210SubNetwork: + edgestarts = { + '119257914': -5.0999999999995795, + '119257908#0': 56.49000000018306, + ':300944379_0': 56.18000000000016, + ':300944436_0': 753.4599999999871, + '119257908#1-AddedOnRampEdge': 756.3299999991157, + ':119257908#1-AddedOnRampNode_0': 853.530000000022, + '119257908#1': 856.7699999997207, + ':119257908#1-AddedOffRampNode_0': 1096.4499999999707, + '119257908#1-AddedOffRampEdge': 1099.6899999995558, + ':1686591010_1': 1198.1899999999541, + '119257908#2': 1203.6499999994803, + 
':1842086610_1': 1780.2599999999056, + '119257908#3': 1784.7899999996537, + } else: edgestarts = defaultdict(float) From 3a5508cdf7ed9ab002294d4130ec9c04d4c2766f Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Mon, 22 Jun 2020 10:02:36 -0700 Subject: [PATCH 277/438] Replace strategic mode with the new name, sumo_default --- examples/exp_configs/non_rl/i210_subnetwork.py | 2 +- examples/exp_configs/rl/multiagent/multiagent_i210.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 671e18d5a..5c07aadf0 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -76,7 +76,7 @@ "human", num_vehicles=0, lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", + lane_change_mode="sumo_default", ), acceleration_controller=(IDMController, { "a": 1.3, diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 433489780..f468d37c6 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -136,7 +136,7 @@ speed_mode=19 if ALLOW_COLLISIONS else 'right_of_way' ), lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", + lane_change_mode="sumo_default", ), ) else: @@ -152,7 +152,7 @@ speed_mode=19 if ALLOW_COLLISIONS else 'right_of_way' ), lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", + lane_change_mode="sumo_default", ), ) vehicles.add( From 641f724a9fa661b95075f877c2c1fe7dd10ca939 Mon Sep 17 00:00:00 2001 From: Aboudy Kreidieh Date: Mon, 22 Jun 2020 10:24:57 -0700 Subject: [PATCH 278/438] fixed h-baselines bug (#982) * fixed h-baselines bug * potential bug fix --- examples/train.py | 37 ++++--------------------------- tests/fast_tests/test_examples.py | 10 ++++----- 2 files changed, 9 insertions(+), 38 deletions(-) diff --git a/examples/train.py b/examples/train.py index 1b2f22476..5f8edbb22 100644 --- a/examples/train.py +++ b/examples/train.py @@ -213,13 +213,10 @@ def train_rllib(submodule, flags): run_experiments({flow_params["exp_tag"]: exp_config}) -def train_h_baselines(flow_params, args, multiagent): +def train_h_baselines(env_name, args, multiagent): """Train policies using SAC and TD3 with h-baselines.""" from hbaselines.algorithms import OffPolicyRLAlgorithm from hbaselines.utils.train import parse_options, get_hyperparameters - from hbaselines.envs.mixed_autonomy import FlowEnv - - flow_params = deepcopy(flow_params) # Get the command-line arguments that are relevant here args = parse_options(description="", example_usage="", args=args) @@ -227,31 +224,6 @@ def train_h_baselines(flow_params, args, multiagent): # the base directory that the logged data will be stored in base_dir = "training_data" - # Create the training environment. - env = FlowEnv( - flow_params, - multiagent=multiagent, - shared=args.shared, - maddpg=args.maddpg, - render=args.render, - version=0 - ) - - # Create the evaluation environment. 
- if args.evaluate: - eval_flow_params = deepcopy(flow_params) - eval_flow_params['env'].evaluate = True - eval_env = FlowEnv( - eval_flow_params, - multiagent=multiagent, - shared=args.shared, - maddpg=args.maddpg, - render=args.render_eval, - version=1 - ) - else: - eval_env = None - for i in range(args.n_training): # value of the next seed seed = args.seed + i @@ -299,8 +271,8 @@ def train_h_baselines(flow_params, args, multiagent): # Create the algorithm object. alg = OffPolicyRLAlgorithm( policy=policy, - env=env, - eval_env=eval_env, + env="flow:{}".format(env_name), + eval_env="flow:{}".format(env_name) if args.evaluate else None, **hp ) @@ -393,8 +365,7 @@ def main(args): elif flags.rl_trainer.lower() == "stable-baselines": train_stable_baselines(submodule, flags) elif flags.rl_trainer.lower() == "h-baselines": - flow_params = submodule.flow_params - train_h_baselines(flow_params, args, multiagent) + train_h_baselines(flags.exp_config, args, multiagent) else: raise ValueError("rl_trainer should be either 'rllib', 'h-baselines', " "or 'stable-baselines'.") diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index 0b385f28a..b5faf6517 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -229,11 +229,11 @@ class TestHBaselineExamples(unittest.TestCase): confirming that it runs. """ @staticmethod - def run_exp(flow_params, multiagent): + def run_exp(env_name, multiagent): train_h_baselines( - flow_params=flow_params, + env_name=env_name, args=[ - flow_params["env_name"].__name__, + env_name, "--initial_exploration_steps", "1", "--total_steps", "10" ], @@ -241,10 +241,10 @@ def run_exp(flow_params, multiagent): ) def test_singleagent_ring(self): - self.run_exp(singleagent_ring.copy(), multiagent=False) + self.run_exp("singleagent_ring", multiagent=False) def test_multiagent_ring(self): - self.run_exp(multiagent_ring.copy(), multiagent=True) + self.run_exp("multiagent_ring", multiagent=True) class TestRllibExamples(unittest.TestCase): From af0b4f68dcadb542b2b472c1d1d59e9adafa86a1 Mon Sep 17 00:00:00 2001 From: Kathy Jang Date: Mon, 22 Jun 2020 12:30:42 -0700 Subject: [PATCH 279/438] Replicated changes in 867. Done bug (#980) * Replicated changes in 867. 
Changes only made to traci * Aimsun changes minus reset --- flow/core/kernel/vehicle/aimsun.py | 17 +++++++++++++++++ flow/core/kernel/vehicle/traci.py | 7 +++++-- flow/envs/multiagent/base.py | 2 +- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/flow/core/kernel/vehicle/aimsun.py b/flow/core/kernel/vehicle/aimsun.py index ce0d026e5..16c94558a 100644 --- a/flow/core/kernel/vehicle/aimsun.py +++ b/flow/core/kernel/vehicle/aimsun.py @@ -65,6 +65,7 @@ def __init__(self, # number of vehicles to exit the network for every time-step self._num_arrived = [] self._arrived_ids = [] + self._arrived_rl_ids = [] # contains conversion from Flow-ID to Aimsun-ID self._id_aimsun2flow = {} @@ -174,11 +175,17 @@ def update(self, reset): added_vehicles = self.kernel_api.get_entered_ids() exited_vehicles = self.kernel_api.get_exited_ids() + # keep track of arrived rl vehicles + arrived_rl_ids = [] + # add the new vehicles if they should be tracked for aimsun_id in added_vehicles: veh_type = self.kernel_api.get_vehicle_type_name(aimsun_id) if veh_type in self.tracked_vehicle_types: self._add_departed(aimsun_id) + if aimsun_id in self.get_rl_ids(): + arrived_rl_ids.append(aimsun_id) + self._arrived_rl_ids.append(arrived_rl_ids) # remove the exited vehicles if they were tracked if not reset: @@ -639,6 +646,16 @@ def get_arrived_ids(self): """See parent class.""" raise NotImplementedError + def get_arrived_rl_ids(self, k=1): + """See parent class.""" + if len(self._arrived_rl_ids) > 0: + arrived = [] + for arr in self._arrived_rl_ids[-k:]: + arrived.extend(arr) + return arrived + else: + return 0 + def get_departed_ids(self): """See parent class.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 134bac49f..6f119b7bb 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -521,10 +521,13 @@ def get_arrived_ids(self): """See parent class.""" return self._arrived_ids - def get_arrived_rl_ids(self): + def get_arrived_rl_ids(self, k=1): """See parent class.""" if len(self._arrived_rl_ids) > 0: - return self._arrived_rl_ids[-1] + arrived = [] + for arr in self._arrived_rl_ids[-k:]: + arrived.extend(arr) + return arrived else: return 0 diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index ec95474c6..2d9c3cd78 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -122,7 +122,7 @@ def step(self, rl_actions): else: reward = self.compute_reward(rl_actions, fail=crash) - for rl_id in self.k.vehicle.get_arrived_rl_ids(): + for rl_id in self.k.vehicle.get_arrived_rl_ids(self.env_params.sims_per_step): done[rl_id] = True reward[rl_id] = 0 states[rl_id] = np.zeros(self.observation_space.shape[0]) From 3725e834d4b2d08f0010305cebd3e056a1026a8e Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 22 Jun 2020 19:10:36 -0700 Subject: [PATCH 280/438] remove trailing whitespace --- flow/data_pipeline/query.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 717d49a02..fae7355c0 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -225,9 +225,9 @@ class QueryStrings(Enum): vt.id, vt.time_step, COALESCE(( - value_lower_left*(headway_upper-headway)*(rel_speed_upper-leader_rel_speed) + - value_lower_right*(headway-headway_lower)*(rel_speed_upper-leader_rel_speed) + - value_upper_left*(headway_upper-headway)*(leader_rel_speed-rel_speed_lower) + + 
value_lower_left*(headway_upper-headway)*(rel_speed_upper-leader_rel_speed) + + value_lower_right*(headway-headway_lower)*(rel_speed_upper-leader_rel_speed) + + value_upper_left*(headway_upper-headway)*(leader_rel_speed-rel_speed_lower) + value_upper_right*(headway-headway_lower)*(leader_rel_speed-rel_speed_lower) ) / ((headway_upper-headway_lower)*(rel_speed_upper-rel_speed_lower)), 200) AS safety_value, vt.source_id From f87f67a817112525eb6401d3803f9860040680c8 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 22 Jun 2020 19:18:51 -0700 Subject: [PATCH 281/438] fix CASE syntax error --- flow/data_pipeline/query.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index fae7355c0..02149f204 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -25,17 +25,6 @@ "FACT_SAFETY_METRICS_AGG" ] }, - # @brent: are these needed? Is there a race condition here that may break things? - # "fact_safety_metrics_agg": { - # "leaderboard_chart": [ - # "LEADERBOARD_CHART" - # ] - # }, - # "fact_network_throughput_agg": { - # "leaderboard_chart": [ - # "LEADERBOARD_CHART" - # ] - # } "POWER_DEMAND_MODEL_DENOISED_ACCEL": { "fact_vehicle_fuel_efficiency_agg": [ "FACT_VEHICLE_FUEL_EFFICIENCY_AGG" @@ -246,7 +235,7 @@ class QueryStrings(Enum): FACT_SAFETY_METRICS_AGG = """ SELECT source_id, - SUM(CASE WHEN safety_value < 0 THEN 1 ELSE 0) * 100 / COUNT() safety_rate, + SUM(CASE WHEN safety_value < 0 THEN 1 ELSE 0 END) * 100 / COUNT() safety_rate, MAX(safety_value) AS safety_value_max FROM fact_safety_metrics WHERE 1 = 1 From 54ce4ec4e66d23646ab4d2f7c75a278ee81c07c7 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 18 Jun 2020 20:13:40 -0700 Subject: [PATCH 282/438] reduce time-bins to 10s --- flow/data_pipeline/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 02149f204..d3c80e496 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -545,7 +545,7 @@ class QueryStrings(Enum): ), binned_cumulative_energy AS ( SELECT source_id, - CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, + CAST(time_step/10 AS INTEGER) * 10 AS time_seconds_bin, AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, @@ -563,7 +563,7 @@ class QueryStrings(Enum): SELECT DISTINCT source_id, id, - CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, + CAST(time_step/10 AS INTEGER) * 10 AS time_seconds_bin, FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 ORDER BY time_step ASC) AS energy_start, From be5b853b0a59ba8c676692b50f979654ee86a2a1 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 18 Jun 2020 20:15:30 -0700 Subject: [PATCH 283/438] reduce time-bins in more places --- flow/data_pipeline/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d3c80e496..480c945c5 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -565,10 +565,10 @@ class QueryStrings(Enum): id, CAST(time_step/10 AS INTEGER) * 10 AS time_seconds_bin, FIRST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 + OVER (PARTITION BY id, CAST(time_step/10 AS INTEGER) * 10 ORDER BY time_step ASC) AS energy_start, LAST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(time_step/60 AS 
INTEGER) * 60 + OVER (PARTITION BY id, CAST(time_step/10 AS INTEGER) * 10 ORDER BY time_step ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( From e3de3db5168fe0c03742a856dda173da4e5b4147 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 22 Jun 2020 20:28:44 -0700 Subject: [PATCH 284/438] fix groupby/window fn error --- flow/data_pipeline/query.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index b5452206c..fbc258af0 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -608,18 +608,25 @@ class QueryStrings(Enum): ;""" FACT_VEHICLE_COUNTS_BY_TIME = """ + WITH counts AS ( + SELECT + vt.source_id, + vt.time_step, + COUNT(DISTINCT vt.id) AS vehicle_counts + FROM fact_vehicle_trace vt + WHERE 1 = 1 + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND vt.{loc_filter} + AND vt.time_step >= {start_filter} + GROUP BY 1, 2 + ) SELECT - vt.source_id, - vt.time_step - FIRST_VALUE(vt.time_step) - OVER (PARTITION BY vt.source_id ORDER BY vt.time_step ASC) AS time_step, - COUNT(DISTINCT vt.id) AS vehicle_counts - FROM fact_vehicle_trace vt - WHERE 1 = 1 - AND vt.date = \'{date}\' - AND vt.partition_name = \'{partition}\' - AND vt.{loc_filter} - AND vt.time_step >= {start_filter} - GROUP BY 1, 2 + source_id, + time_step - FIRST_VALUE(time_step) + OVER (PARTITION BY source_id ORDER BY time_step ASC) AS time_step, + vehicle_counts + FROM counts ; """ From 7f406cbfbe187874ffc090ffd33414bcf74464be Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 22 Jun 2020 21:12:51 -0700 Subject: [PATCH 285/438] fix is_baseline data type --- flow/data_pipeline/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index a7a0b794f..cce4d984d 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -610,7 +610,7 @@ class QueryStrings(Enum): ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS max_score FROM leaderboard_chart_agg WHERE 1 = 1 - AND is_baseline = FALSE + AND is_baseline = 'False' ), prev_max AS ( SELECT network, From 5d897afd644c94467d6b017050bb9882742d4aaa Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 22 Jun 2020 22:18:55 -0700 Subject: [PATCH 286/438] change schema, vehicle_counts -> vehicle_count --- flow/data_pipeline/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 21cf05cca..22a0e734e 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -617,7 +617,7 @@ class QueryStrings(Enum): SELECT vt.source_id, vt.time_step, - COUNT(DISTINCT vt.id) AS vehicle_counts + COUNT(DISTINCT vt.id) AS vehicle_count FROM fact_vehicle_trace vt WHERE 1 = 1 AND vt.date = \'{date}\' @@ -630,7 +630,7 @@ class QueryStrings(Enum): source_id, time_step - FIRST_VALUE(time_step) OVER (PARTITION BY source_id ORDER BY time_step ASC) AS time_step, - vehicle_counts + vehicle_count FROM counts ; """ From 7ddf890fef982bb2da7562774cf2fe2b6ba5b2c9 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 23 Jun 2020 18:14:09 -0700 Subject: [PATCH 287/438] fix some query bugs --- flow/data_pipeline/lambda_function.py | 10 ++++++---- flow/data_pipeline/query.py | 14 ++++++++++---- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index f7a32d5db..97f625eab 
100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -2,7 +2,7 @@ import boto3 from urllib.parse import unquote_plus from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data, update_baseline -from flow.data_pipeline.query import tags, tables, network_using_edge +from flow.data_pipeline.query import tags, tables, network_using_edge, summary_tables from flow.data_pipeline.query import X_FILTER, EDGE_FILTER, WARMUP_STEPS, HORIZON_STEPS s3 = boto3.client('s3') @@ -31,7 +31,7 @@ def lambda_handler(event, context): queryEngine.repair_partition(table, query_date, partition) # delete obsolete data - if table == "leaderboard_chart_agg": + if table in summary_tables: delete_obsolete_data(s3, key, table) # add table that need to start a query to list @@ -43,12 +43,14 @@ def lambda_handler(event, context): stop_filter = WARMUP_STEPS + HORIZON_STEPS for bucket, key, table, query_date, partition in records: source_id = "flow_{}".format(partition.split('_')[1]) - response = s3.head_object(Bucket=bucket, Key=key) + metadata_key = "fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv".format(query_date, source_id) + response = s3.head_object(Bucket=bucket, Key=metadata_key) loc_filter = X_FILTER if 'network' in response["Metadata"]: if response["Metadata"]['network'] in network_using_edge: loc_filter = EDGE_FILTER - if 'is_baseline' in response['Metadata'] and response['Metadata']['is_baseline'] == 'True': + if table == 'fact_vehicle_trace' \ + and 'is_baseline' in response['Metadata'] and response['Metadata']['is_baseline'] == 'True': update_baseline(s3, response["Metadata"]['network'], source_id) query_dict = tags[table] diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 22a0e734e..302048632 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -67,6 +67,7 @@ tables = [ "fact_vehicle_trace", "fact_energy_trace", + "fact_vehicle_counts_by_time", "fact_safety_metrics", "fact_safety_metrics_agg", "fact_network_throughput_agg", @@ -77,9 +78,12 @@ "fact_network_fuel_efficiency_agg", "leaderboard_chart", "leaderboard_chart_agg", + "fact_top_scores", "metadata_table" ] +summary_tables = ["leaderboard_chart_agg", "fact_top_scores"] + network_using_edge = ["I-210 without Ramps"] X_FILTER = "x BETWEEN 500 AND 2300" @@ -234,8 +238,8 @@ class QueryStrings(Enum): AND vt.leader_rel_speed BETWEEN sm.rel_speed_lower AND sm.rel_speed_upper AND vt.headway BETWEEN sm.headway_lower AND sm.headway_upper WHERE 1 = 1 - AND vt.date = \'{{date}}\' - AND vt.partition_name = \'{{partition}}\' + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' AND vt.time_step >= {start_filter} AND vt.{loc_filter} ; @@ -248,8 +252,8 @@ class QueryStrings(Enum): MAX(safety_value) AS safety_value_max FROM fact_safety_metrics WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}_FACT_SAFETY_METRICS\' GROUP BY 1 """ @@ -649,6 +653,8 @@ class QueryStrings(Enum): l.efficiency_meters_per_joules, l.efficiency_miles_per_gallon, l.throughput_per_hour, + l.safety_rate, + l.safety_value_max, b.source_id AS baseline_source_id FROM leaderboard_chart AS l, metadata_table AS m, baseline_table as b WHERE 1 = 1 From e0087e6c92983d371c9f5f8609c8e8a708d2b50d Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 29 Jun 2020 21:37:56 -0700 Subject: [PATCH 288/438] first attempt at use experiment in visualize_rllib --- flow/core/experiment.py | 6 +- 
flow/visualize/visualizer_rllib.py | 364 +++++++++++++++-------------- 2 files changed, 197 insertions(+), 173 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 8ede367a7..8a273c3e6 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -78,7 +78,11 @@ def __init__(self, flow_params, custom_callables=None): self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. - create_env, _ = make_create_env(flow_params) + create_env, env_name = make_create_env(flow_params) + + # record env_name and create_env, need it to register for ray + self.env_name = env_name + self.create_env = create_env # Create the environment. self.env = create_env() diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 059cabbbd..fd1309d4b 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -39,6 +39,8 @@ from datetime import datetime, timezone import uuid +from flow.core.experiment import Experiment + EXAMPLE_USAGE = """ example usage: python ./visualizer_rllib.py /ray_results/experiment_dir/result_dir 1 @@ -137,8 +139,9 @@ def visualizer_rllib(args): sim_params.save_render = True # Create and register a gym+rllib env - create_env, env_name = make_create_env(params=flow_params, version=0) - register_env(env_name, create_env) + # create_env, env_name = make_create_env(params=flow_params, version=0) + exp = Experiment(flow_params) + register_env(exp.env_name, exp.create_env) # check if the environment is a single or multiagent environment, and # get the right address accordingly @@ -163,23 +166,24 @@ def visualizer_rllib(args): env_params.horizon = args.horizon # create the agent that will be used to compute the actions - agent = agent_cls(env=env_name, config=config) + agent = agent_cls(env=exp.env_name, config=config) checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num agent.restore(checkpoint) if hasattr(agent, "local_evaluator") and \ os.environ.get("TEST_FLAG") != 'True': - env = agent.local_evaluator.env + exp.env = agent.local_evaluator.env else: - env = gym.make(env_name) + pass + # env = gym.make(env_name) # reroute on exit is a training hack, it should be turned off at test time. 
- if hasattr(env, "reroute_on_exit"): - env.reroute_on_exit = False + if hasattr(exp.env, "reroute_on_exit"): + exp.env.reroute_on_exit = False if args.render_mode == 'sumo_gui': - env.sim_params.render = True # set to True after initializing agent and env + exp.env.sim_params.render = True # set to True after initializing agent and env if multiagent: rets = {} @@ -210,180 +214,196 @@ def visualizer_rllib(args): # if restart_instance, don't restart here because env.reset will restart later if not sim_params.restart_instance: - env.restart_simulation(sim_params=sim_params, render=sim_params.render) + exp.env.restart_simulation(sim_params=sim_params, render=sim_params.render) # data pipeline - extra_info = defaultdict(lambda: []) - source_id = 'flow_{}'.format(uuid.uuid4().hex) - metadata = defaultdict(lambda: []) - # collect current time - cur_datetime = datetime.now(timezone.utc) - cur_date = cur_datetime.date().isoformat() - cur_time = cur_datetime.time().isoformat() - # collecting information for metadata table - metadata['source_id'].append(source_id) - metadata['submission_time'].append(cur_time) - metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) - metadata['is_baseline'].append(str(args.is_baseline)) - name, strategy = get_configuration() - metadata['submitter_name'].append(name) - metadata['strategy'].append(strategy) - - # Simulate and collect metrics - final_outflows = [] - final_inflows = [] - mpg = [] - mpj = [] - mean_speed = [] - std_speed = [] - for i in range(args.num_rollouts): - vel = [] - run_id = "run_{}".format(i) - env.pipeline_params = (extra_info, source_id, run_id) - state = env.reset() + # extra_info = defaultdict(lambda: []) + # source_id = 'flow_{}'.format(uuid.uuid4().hex) + # metadata = defaultdict(lambda: []) + # # collect current time + # cur_datetime = datetime.now(timezone.utc) + # cur_date = cur_datetime.date().isoformat() + # cur_time = cur_datetime.time().isoformat() + # # collecting information for metadata table + # metadata['source_id'].append(source_id) + # metadata['submission_time'].append(cur_time) + # metadata['network'].append(network_name_translate(exp.env.network.name.split('_20')[0])) + # metadata['is_baseline'].append(str(args.is_baseline)) + # name, strategy = get_configuration() + # metadata['submitter_name'].append(name) + # metadata['strategy'].append(strategy) + + def rl_action(state): if multiagent: - ret = {key: [0] for key in rets.keys()} - else: - ret = 0 - for _ in range(env_params.horizon): - vehicles = env.unwrapped.k.vehicle - speeds = vehicles.get_speed(vehicles.get_ids()) - - # only include non-empty speeds - if speeds: - vel.append(np.mean(speeds)) - - mpg.append(miles_per_gallon(env.unwrapped, vehicles.get_ids(), gain=1.0)) - mpj.append(miles_per_megajoule(env.unwrapped, vehicles.get_ids(), gain=1.0)) - - if multiagent: - action = {} - for agent_id in state.keys(): - if use_lstm: - action[agent_id], state_init[agent_id], logits = \ - agent.compute_action( + action = {} + for agent_id in state.keys(): + if use_lstm: + action[agent_id], state_init[agent_id], logits = \ + agent.compute_action( state[agent_id], state=state_init[agent_id], policy_id=policy_map_fn(agent_id)) - else: - action[agent_id] = agent.compute_action( - state[agent_id], policy_id=policy_map_fn(agent_id)) - else: - action = agent.compute_action(state) - state, reward, done, _ = env.step(action) - - # collect data for data pipeline - get_extra_info(vehicles, extra_info, vehicles.get_ids(), source_id, run_id) - - if 
multiagent: - for actor, rew in reward.items(): - ret[policy_map_fn(actor)][0] += rew - else: - ret += reward - if multiagent and done['__all__']: - break - if not multiagent and done: - break - - if multiagent: - for key in rets.keys(): - rets[key].append(ret[key]) - else: - rets.append(ret) - outflow = vehicles.get_outflow_rate(500) - final_outflows.append(outflow) - inflow = vehicles.get_inflow_rate(500) - final_inflows.append(inflow) - if np.all(np.array(final_inflows) > 1e-5): - throughput_efficiency = [x / y for x, y in - zip(final_outflows, final_inflows)] + else: + action[agent_id] = agent.compute_action( + state[agent_id], policy_id=policy_map_fn(agent_id)) else: - throughput_efficiency = [0] * len(final_inflows) - mean_speed.append(np.mean(vel)) - std_speed.append(np.std(vel)) - if multiagent: - for agent_id, rew in rets.items(): - print('Round {}, Return: {} for agent {}'.format( - i, ret, agent_id)) - else: - print('Round {}, Return: {}'.format(i, ret)) - - print('==== Summary of results ====') - print("Return:") - print(mean_speed) - if multiagent: - for agent_id, rew in rets.items(): - print('For agent', agent_id) - print(rew) - print('Average, std return: {}, {} for agent {}'.format( - np.mean(rew), np.std(rew), agent_id)) - else: - print(rets) - print('Average, std: {}, {}'.format( - np.mean(rets), np.std(rets))) - - print("\nSpeed, mean (m/s):") - print(mean_speed) - print('Average, std: {}, {}'.format(np.mean(mean_speed), np.std( - mean_speed))) - - print('Average, std miles per gallon: {}, {}'.format(np.mean(mpg), np.std(mpg))) - - print('Average, std miles per megajoule: {}, {}'.format(np.mean(mpj), np.std(mpj))) - - # Compute arrival rate of vehicles in the last 500 sec of the run - print("\nOutflows (veh/hr):") - print(final_outflows) - print('Average, std: {}, {}'.format(np.mean(final_outflows), - np.std(final_outflows))) - # Compute departure rate of vehicles in the last 500 sec of the run - print("Inflows (veh/hr):") - print(final_inflows) - print('Average, std: {}, {}'.format(np.mean(final_inflows), - np.std(final_inflows))) - # Compute throughput efficiency in the last 500 sec of the - print("Throughput efficiency (veh/hr):") - print(throughput_efficiency) - print('Average, std: {}, {}'.format(np.mean(throughput_efficiency), - np.std(throughput_efficiency))) + action = agent.compute_action(state) + return action + exp.run(num_runs=args.num_rollouts, rl_actions=rl_action) + # Simulate and collect metrics + # final_outflows = [] + # final_inflows = [] + # mpg = [] + # mpj = [] + # mean_speed = [] + # std_speed = [] + # for i in range(args.num_rollouts): + # vel = [] + # run_id = "run_{}".format(i) + # env.pipeline_params = (extra_info, source_id, run_id) + # state = env.reset() + # if multiagent: + # ret = {key: [0] for key in rets.keys()} + # else: + # ret = 0 + # for _ in range(env_params.horizon): + # vehicles = env.unwrapped.k.vehicle + # speeds = vehicles.get_speed(vehicles.get_ids()) + # + # # only include non-empty speeds + # if speeds: + # vel.append(np.mean(speeds)) + # + # mpg.append(miles_per_gallon(env.unwrapped, vehicles.get_ids(), gain=1.0)) + # mpj.append(miles_per_megajoule(env.unwrapped, vehicles.get_ids(), gain=1.0)) + # + # if multiagent: + # action = {} + # for agent_id in state.keys(): + # if use_lstm: + # action[agent_id], state_init[agent_id], logits = \ + # agent.compute_action( + # state[agent_id], state=state_init[agent_id], + # policy_id=policy_map_fn(agent_id)) + # else: + # action[agent_id] = agent.compute_action( + # state[agent_id], 
policy_id=policy_map_fn(agent_id)) + # else: + # action = agent.compute_action(state) + # state, reward, done, _ = env.step(action) + # + # # collect data for data pipeline + # get_extra_info(vehicles, extra_info, vehicles.get_ids(), source_id, run_id) + # + # if multiagent: + # for actor, rew in reward.items(): + # ret[policy_map_fn(actor)][0] += rew + # else: + # ret += reward + # if multiagent and done['__all__']: + # break + # if not multiagent and done: + # break + # + # if multiagent: + # for key in rets.keys(): + # rets[key].append(ret[key]) + # else: + # rets.append(ret) + # outflow = vehicles.get_outflow_rate(500) + # final_outflows.append(outflow) + # inflow = vehicles.get_inflow_rate(500) + # final_inflows.append(inflow) + # if np.all(np.array(final_inflows) > 1e-5): + # throughput_efficiency = [x / y for x, y in + # zip(final_outflows, final_inflows)] + # else: + # throughput_efficiency = [0] * len(final_inflows) + # mean_speed.append(np.mean(vel)) + # std_speed.append(np.std(vel)) + # if multiagent: + # for agent_id, rew in rets.items(): + # print('Round {}, Return: {} for agent {}'.format( + # i, ret, agent_id)) + # else: + # print('Round {}, Return: {}'.format(i, ret)) + + # print('==== Summary of results ====') + # print("Return:") + # print(mean_speed) + # if multiagent: + # for agent_id, rew in rets.items(): + # print('For agent', agent_id) + # print(rew) + # print('Average, std return: {}, {} for agent {}'.format( + # np.mean(rew), np.std(rew), agent_id)) + # else: + # print(rets) + # print('Average, std: {}, {}'.format( + # np.mean(rets), np.std(rets))) + # + # print("\nSpeed, mean (m/s):") + # print(mean_speed) + # print('Average, std: {}, {}'.format(np.mean(mean_speed), np.std( + # mean_speed))) + # + # print('Average, std miles per gallon: {}, {}'.format(np.mean(mpg), np.std(mpg))) + # + # print('Average, std miles per megajoule: {}, {}'.format(np.mean(mpj), np.std(mpj))) + # + # # Compute arrival rate of vehicles in the last 500 sec of the run + # print("\nOutflows (veh/hr):") + # print(final_outflows) + # print('Average, std: {}, {}'.format(np.mean(final_outflows), + # np.std(final_outflows))) + # # Compute departure rate of vehicles in the last 500 sec of the run + # print("Inflows (veh/hr):") + # print(final_inflows) + # print('Average, std: {}, {}'.format(np.mean(final_inflows), + # np.std(final_inflows))) + # # Compute throughput efficiency in the last 500 sec of the + # print("Throughput efficiency (veh/hr):") + # print(throughput_efficiency) + # print('Average, std: {}, {}'.format(np.mean(throughput_efficiency), + # np.std(throughput_efficiency))) # terminate the environment - env.unwrapped.terminate() + # env.unwrapped.terminate() # if prompted, convert the emission file into a csv file - if args.gen_emission: - time.sleep(0.1) - - dir_path = os.path.dirname(os.path.realpath(__file__)) - emission_filename = '{0}-emission.xml'.format(env.network.name) - - emission_path = \ - '{0}/test_time_rollout/{1}'.format(dir_path, emission_filename) - - # convert the emission file into a csv file - emission_to_csv(emission_path) - - # print the location of the emission csv file - emission_path_csv = emission_path[:-4] + ".csv" - print("\nGenerated emission file at " + emission_path_csv) - - # delete the .xml version of the emission file - os.remove(emission_path) - - # generate datapipeline output - trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) - 
write_dict_to_csv(trajectory_table_path, extra_info, True) - write_dict_to_csv(metadata_table_path, metadata, True) - - if args.to_aws: - upload_to_s3('circles.data.pipeline', - 'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date, - source_id), - metadata_table_path) - upload_to_s3('circles.data.pipeline', - 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id), - trajectory_table_path, - {'network': metadata['network'][0]}) + # if args.gen_emission: + # time.sleep(0.1) + # + # dir_path = os.path.dirname(os.path.realpath(__file__)) + # emission_filename = '{0}-emission.xml'.format(env.network.name) + # + # emission_path = \ + # '{0}/test_time_rollout/{1}'.format(dir_path, emission_filename) + # + # # convert the emission file into a csv file + # emission_to_csv(emission_path) + # + # # print the location of the emission csv file + # emission_path_csv = emission_path[:-4] + ".csv" + # print("\nGenerated emission file at " + emission_path_csv) + # + # # delete the .xml version of the emission file + # os.remove(emission_path) + # + # # generate datapipeline output + # trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) + # metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) + # write_dict_to_csv(trajectory_table_path, extra_info, True) + # write_dict_to_csv(metadata_table_path, metadata, True) + # + # if args.to_aws: + # upload_to_s3('circles.data.pipeline', + # 'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date, + # source_id), + # metadata_table_path) + # upload_to_s3('circles.data.pipeline', + # 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id), + # trajectory_table_path, + # {'network': metadata['network'][0]}) def create_parser(): From 97cfdee02f53ac84bf8069cb66b2cee2fea71ce5 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 1 Jul 2020 17:39:20 -0700 Subject: [PATCH 289/438] grey out warmup period and ghost cells --- flow/visualize/time_space_diagram.py | 50 +++++++++++++++++++--------- 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index ad8443da3..c5703b11e 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -27,7 +27,8 @@ import matplotlib matplotlib.use('TkAgg') from matplotlib import pyplot as plt -from matplotlib.collections import LineCollection +from matplotlib.collections import LineCollection, PatchCollection +from matplotlib.patches import Rectangle import matplotlib.colors as colors import numpy as np import pandas as pd @@ -187,8 +188,6 @@ def _highway(data): pd.DataFrame modified trajectory dataframe """ - data.loc[:, :] = data[(data['distance'] > 500)] - data.loc[:, :] = data[(data['distance'] < 2300)] segs = data[['time_step', 'distance', 'next_time', 'next_pos']].values.reshape((len(data), 2, 2)) return segs, data @@ -241,10 +240,6 @@ def _i210_subnetwork(data): pd.DataFrame modified trajectory dataframe """ - # Omit ghost edges - omit_edges = {'ghost0', '119257908#3'} - data.loc[:, :] = data[~data['edge_id'].isin(omit_edges)] - # Reset lane numbers that are offset by ramp lanes offset_edges = set(data[data['lane_id'] == 5]['edge_id'].unique()) data.loc[data['edge_id'].isin(offset_edges), 'lane_id'] -= 1 @@ -391,7 +386,7 @@ def _get_abs_pos(df, params): return ret -def plot_tsd(ax, df, segs, args, lane=None): +def plot_tsd(ax, df, segs, args, 
lane=None, ghost_edges=None, ghost_bounds=None): """Plot the time-space diagram. Take the pre-processed segments and other meta-data, then plot all the line segments. @@ -408,6 +403,10 @@ def plot_tsd(ax, df, segs, args, lane=None): parsed arguments lane : int, optional lane number to be shown in plot title + ghost_edges : list or set of str + ghost edge names to be greyed out, default None + ghost_bounds : tuple + lower and upper bounds of domain, excluding ghost edges, default None Returns ------- @@ -415,8 +414,7 @@ def plot_tsd(ax, df, segs, args, lane=None): """ norm = plt.Normalize(args.min_speed, args.max_speed) - xmin = max(df['time_step'].min(), args.start) - xmax = min(df['time_step'].max(), args.stop) + xmin, xmax = df['time_step'].min(), df['time_step'].max() xbuffer = (xmax - xmin) * 0.025 # 2.5% of range ymin, ymax = df['distance'].min(), df['distance'].max() ybuffer = (ymax - ymin) * 0.025 # 2.5% of range @@ -430,6 +428,25 @@ def plot_tsd(ax, df, segs, args, lane=None): ax.add_collection(lc) ax.autoscale() + rects = [] + if ghost_edges: + y_domain_min = df[~df['edge_id'].isin(ghost_edges)]['distance'].min() + y_domain_max = df[~df['edge_id'].isin(ghost_edges)]['distance'].max() + rects.append(Rectangle((xmin, y_domain_min), args.start - xmin, y_domain_max - y_domain_min)) + rects.append(Rectangle((xmin, ymin), xmax - xmin, y_domain_min - ymin)) + rects.append(Rectangle((xmin, y_domain_min + y_domain_max), xmax - xmin, ymax - (y_domain_min + y_domain_max))) + elif ghost_bounds: + rects.append(Rectangle((xmin, ghost_bounds[0]), args.start - xmin, ghost_bounds[1] - ghost_bounds[0])) + rects.append(Rectangle((xmin, ymin), xmax - xmin, ghost_bounds[0] - ymin)) + rects.append(Rectangle((xmin, ghost_bounds[0] + ghost_bounds[1]), xmax - xmin, ymax - (ghost_bounds[0] + ghost_bounds[1]))) + else: + rects.append(Rectangle((xmin, ymin), args.start - xmin, ymax - ymin)) + + if rects: + pc = PatchCollection(rects, facecolor='grey', alpha=0.5, edgecolor=None) + pc.set_zorder(20) + ax.add_collection(pc) + if lane: ax.set_title('Time-Space Diagram: Lane {}'.format(lane), fontsize=25) else: @@ -469,8 +486,6 @@ def plot_tsd(ax, df, segs, args, lane=None): help='The minimum speed in the color range.') parser.add_argument('--start', type=float, default=0, help='initial time (in sec) in the plot.') - parser.add_argument('--stop', type=float, default=float('inf'), - help='final time (in sec) in the plot.') args = parser.parse_args() @@ -502,13 +517,17 @@ def plot_tsd(ax, df, segs, args, lane=None): for lane, df in traj_df.groupby('lane_id'): ax = plt.subplot(nlanes, 1, lane+1) - plot_tsd(ax, df, segs[lane], args, lane) + plot_tsd(ax, df, segs[lane], args, int(lane+1), ghost_edges={'ghost0', '119257908#3'}) + plt.tight_layout() else: # perform plotting operation fig = plt.figure(figsize=(16, 9)) ax = plt.axes() - plot_tsd(ax, traj_df, segs, args) + if flow_params['network'] == HighwayNetwork: + plot_tsd(ax, traj_df, segs, args, ghost_bounds=(500, 2300)) + else: + plot_tsd(ax, traj_df, segs, args) ########################################################################### # Note: For MergeNetwork only # @@ -519,4 +538,5 @@ def plot_tsd(ax, df, segs, args, lane=None): [-0.1, -0.1], linewidth=3, color="white") # ########################################################################### - plt.show() + outfile = args.trajectory_path.replace('csv', 'png') + plt.savefig(outfile) From e22189ebfc9c4f818fda2d26ec88e46cd2dbed88 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 1 Jul 2020 18:58:57 -0700 
Subject: [PATCH 290/438] fix rectangle positioning for both networks --- flow/visualize/time_space_diagram.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index c5703b11e..ec5c3d7da 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -434,11 +434,11 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) y_domain_max = df[~df['edge_id'].isin(ghost_edges)]['distance'].max() rects.append(Rectangle((xmin, y_domain_min), args.start - xmin, y_domain_max - y_domain_min)) rects.append(Rectangle((xmin, ymin), xmax - xmin, y_domain_min - ymin)) - rects.append(Rectangle((xmin, y_domain_min + y_domain_max), xmax - xmin, ymax - (y_domain_min + y_domain_max))) + rects.append(Rectangle((xmin, y_domain_max), xmax - xmin, ymax - y_domain_max)) elif ghost_bounds: rects.append(Rectangle((xmin, ghost_bounds[0]), args.start - xmin, ghost_bounds[1] - ghost_bounds[0])) rects.append(Rectangle((xmin, ymin), xmax - xmin, ghost_bounds[0] - ymin)) - rects.append(Rectangle((xmin, ghost_bounds[0] + ghost_bounds[1]), xmax - xmin, ymax - (ghost_bounds[0] + ghost_bounds[1]))) + rects.append(Rectangle((xmin, ghost_bounds[1]), xmax - xmin, ymax - ghost_bounds[1])) else: rects.append(Rectangle((xmin, ymin), args.start - xmin, ymax - ymin)) From c830f7815bc38abc9a8c7050a1b54085459ebb00 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 1 Jul 2020 23:00:29 -0700 Subject: [PATCH 291/438] Reward options in I210-dev Add accel penalty, stop penalty, mpg reward, and ability to compute reward for any vehicles upstream of you (i.e. make you less greedy and more social) --- .../exp_configs/non_rl/i210_subnetwork.py | 11 +- .../rl/multiagent/multiagent_i210.py | 77 +++++---- .../rl/multiagent/multiagent_straight_road.py | 5 +- examples/train.py | 60 +++---- flow/algorithms/centralized_PPO.py | 72 +++----- flow/algorithms/custom_ppo.py | 20 ++- flow/controllers/car_following_models.py | 1 + flow/controllers/velocity_controllers.py | 2 - flow/core/experiment.py | 4 +- flow/core/kernel/vehicle/aimsun.py | 17 ++ flow/core/kernel/vehicle/traci.py | 7 +- flow/core/rewards.py | 2 +- flow/envs/multiagent/base.py | 2 +- flow/envs/multiagent/i210.py | 62 +++---- flow/networks/i210_subnetwork_ghost_cell.py | 162 ------------------ flow/visualize/i210_replay.py | 11 +- flow/visualize/time_space_diagram.py | 35 +--- flow/visualize/transfer/util.py | 15 +- tests/fast_tests/test_examples.py | 10 +- 19 files changed, 203 insertions(+), 372 deletions(-) delete mode 100644 flow/networks/i210_subnetwork_ghost_cell.py diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 5c07aadf0..73e49caef 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -31,6 +31,8 @@ ON_RAMP = False # the inflow rate of vehicles (in veh/hr) INFLOW_RATE = 2050 +# on-ramp inflow_rate +ON_RAMP_INFLOW_RATE = 500 # the speed of inflowing vehicles from the main edge (in m/s) INFLOW_SPEED = 25.5 # fraction of vehicles that are follower-stoppers. 
0.10 corresponds to 10% @@ -123,7 +125,7 @@ inflow.add( veh_type="human", edge="27414345", - vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), + vehs_per_hour=int(ON_RAMP_INFLOW_RATE * (1 - PENETRATION_RATE)), departSpeed=10, ) @@ -131,7 +133,7 @@ inflow.add( veh_type="av", edge="27414345", - vehs_per_hour=int(500 * PENETRATION_RATE), + vehs_per_hour=int(ON_RAMP_INFLOW_RATE * PENETRATION_RATE), departLane="random", departSpeed=10) @@ -209,11 +211,6 @@ def valid_ids(env, veh_ids): env.k.vehicle.get_speed(valid_ids(env, env.k.vehicle.get_ids())))), "avg_outflow": lambda env: np.nan_to_num( env.k.vehicle.get_outflow_rate(120)), - # # we multiply by 5 to account for the vehicle length and by 1000 to - # # convert into veh/km - # "avg_density": lambda env: 5 * 1000 * len(env.k.vehicle.get_ids_by_edge( - # edge_id)) / (env.k.network.edge_length(edge_id) - # * env.k.network.num_lanes(edge_id)), "mpg": lambda env: miles_per_gallon( env, valid_ids(env, env.k.vehicle.get_ids()), gain=1.0), "mpj": lambda env: miles_per_megajoule( diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index f468d37c6..a1c2e4f25 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -35,14 +35,16 @@ ON_RAMP = False # the inflow rate of vehicles (in veh/hr) INFLOW_RATE = 2050 +# the inflow rate on the on-ramp (in veh/hr) +ON_RAMP_INFLOW_RATE = 500 # the speed of inflowing vehicles from the main edge (in m/s) INFLOW_SPEED = 25.5 # fraction of vehicles that are RL vehicles. 0.10 corresponds to 10% -PENETRATION_RATE = 0.10 +PENETRATION_RATE = 0.05 # desired speed of the vehicles in the network V_DES = 5.0 # horizon over which to run the env -HORIZON = 1500 +HORIZON = 1000 # steps to run before follower-stopper is allowed to take control WARMUP_STEPS = 600 # whether to turn off the fail safes for the human-driven vehicles @@ -83,11 +85,11 @@ # whether to use the MPJ reward. Otherwise, defaults to a target velocity # reward "mpj_reward": False, - # how many vehicles to look back for the MPG reward - "look_back_length": 1, + # how many vehicles to look back for any reward + "look_back_length": 3, # whether to reroute vehicles once they have exited - "reroute_on_exit": True, - 'target_velocity': 8.0, + "reroute_on_exit": False, + 'target_velocity': 5.0, # how many AVs there can be at once (this is only for centralized critics) "max_num_agents": 10, # which edges we shouldn't apply control on @@ -108,12 +110,14 @@ # how many timesteps to anneal the headway curriculum over "speed_curriculum_iters": 20, # weight of the headway reward - "speed_reward_gain": 0.5, + "speed_reward_gain": 5.0, # penalize stopped vehicles - "penalize_stops": True, + "penalize_stops": False, + "stop_penalty": 0.01, # penalize accels - "penalize_accel": True + "penalize_accel": False, + "accel_penalty": (1 / 400.0) }) # =========================================================================== # @@ -163,32 +167,47 @@ inflow = InFlows() for lane in [0, 1, 2, 3, 4]: - # Add the inflows from the main highway. - inflow.add( - veh_type="human", - edge="119257914", - vehs_per_hour=int(INFLOW_RATE * (1 - PENETRATION_RATE)), - departLane=lane, - departSpeed=INFLOW_SPEED) - inflow.add( - veh_type="av", - edge="119257914", - vehs_per_hour=int(INFLOW_RATE * PENETRATION_RATE), - departLane=lane, - departSpeed=INFLOW_SPEED) + if WANT_BOUNDARY_CONDITIONS: + # Add the inflows from the main highway. 
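
For concreteness, the penetration-rate split used by all of the inflow blocks below works out as follows (a worked example with this config's constants; rates apply per lane on the main edge):

    INFLOW_RATE = 2050        # total veh/hr entering each lane
    PENETRATION_RATE = 0.05   # 5% of vehicles are AVs

    human_rate = int(INFLOW_RATE * (1 - PENETRATION_RATE))  # 1947 veh/hr
    av_rate = int(INFLOW_RATE * PENETRATION_RATE)           # 102 veh/hr
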
+ inflow.add( + veh_type="human", + edge="ghost0", + vehs_per_hour=int(INFLOW_RATE * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=INFLOW_SPEED) + inflow.add( + veh_type="av", + edge="ghost0", + vehs_per_hour=int(INFLOW_RATE * PENETRATION_RATE), + departLane=lane, + departSpeed=INFLOW_SPEED) + else: + # Add the inflows from the main highway. + inflow.add( + veh_type="human", + edge="119257914", + vehs_per_hour=int(INFLOW_RATE * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=INFLOW_SPEED) + inflow.add( + veh_type="av", + edge="119257914", + vehs_per_hour=int(INFLOW_RATE * PENETRATION_RATE), + departLane=lane, + departSpeed=INFLOW_SPEED) # Add the inflows from the on-ramps. if ON_RAMP: inflow.add( veh_type="human", edge="27414345", - vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), + vehs_per_hour=int(ON_RAMP_INFLOW_RATE * (1 - PENETRATION_RATE)), departLane="random", departSpeed=10) inflow.add( veh_type="human", edge="27414342#0", - vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), + vehs_per_hour=int(ON_RAMP_INFLOW_RATE * (1 - PENETRATION_RATE)), departLane="random", departSpeed=10) @@ -225,7 +244,7 @@ sims_per_step=3, warmup_steps=WARMUP_STEPS, additional_params=additional_env_params, - done_at_exit=False + done_at_exit=not additional_env_params["reroute_on_exit"] ), # network-related parameters (see flow.core.params.NetParams and the @@ -278,8 +297,8 @@ def policy_mapping_fn(_): "avg_speed": lambda env: np.mean([speed for speed in env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]), "avg_outflow": lambda env: np.nan_to_num(env.k.vehicle.get_outflow_rate(120)), - "avg_energy": lambda env: -1*energy_consumption(env, 0.1), - "avg_per_step_energy": lambda env: -1*energy_consumption(env, 0.1) / env.k.vehicle.num_vehicles - if env.k.vehicle.num_vehicles > 0 - else 0, + "avg_energy": lambda env: -1 * energy_consumption(env, 0.1), + "avg_per_step_energy": lambda env: -1 * energy_consumption(env, 0.1) / env.k.vehicle.num_vehicles + if env.k.vehicle.num_vehicles > 0 + else 0, } diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py index 5816d3fe7..73460d656 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py +++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py @@ -86,9 +86,12 @@ # penalize stopped vehicles "penalize_stops": True, + "stop_penalty": 0.05, # penalize accels - "penalize_accel": True + "penalize_accel": True, + "accel_penalty": 0.05, + }) diff --git a/examples/train.py b/examples/train.py index 112b7fa0d..d062fd39a 100644 --- a/examples/train.py +++ b/examples/train.py @@ -113,7 +113,6 @@ def run_model_stablebaseline(flow_params, """ from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv from stable_baselines import PPO2 - if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -186,6 +185,7 @@ def setup_exps_rllib(flow_params, config["train_batch_size"] = horizon * n_rollouts config["gamma"] = 0.995 # discount rate config["use_gae"] = True + config["no_done_at_end"] = False config["lambda"] = 0.97 config["kl_target"] = 0.02 config["num_sgd_iter"] = 10 @@ -218,8 +218,8 @@ def setup_exps_rllib(flow_params, config["lr"] = tune.grid_search([5e-4, 5e-5]) elif alg_run == "TD3": - agent_cls = get_agent_class(alg_run) - config = deepcopy(agent_cls._default_config) + alg_run = get_agent_class(alg_run) + config = 
deepcopy(alg_run._default_config) config["num_workers"] = n_cpus config["horizon"] = horizon @@ -242,6 +242,9 @@ def on_episode_start(info): episode.user_data["avg_energy"] = [] episode.user_data["avg_mpg"] = [] episode.user_data["avg_mpj"] = [] + episode.user_data["num_cars"] = [] + episode.user_data["avg_accel_human"] = [] + episode.user_data["avg_accel_avs"] = [] def on_episode_step(info): episode = info["episode"] @@ -271,6 +274,15 @@ def on_episode_step(info): episode.user_data["avg_speed_avs"].append(av_speed) episode.user_data["avg_mpg"].append(miles_per_gallon(env, veh_ids, gain=1.0)) episode.user_data["avg_mpj"].append(miles_per_megajoule(env, veh_ids, gain=1.0)) + episode.user_data["num_cars"].append(len(env.k.vehicle.get_ids())) + episode.user_data["avg_accel_human"].append(np.nan_to_num(np.mean( + [np.abs((env.k.vehicle.get_speed(veh_id) - env.k.vehicle.get_previous_speed(veh_id))/env.sim_step) for + veh_id in veh_ids if veh_id in env.k.vehicle.previous_speeds.keys()] + ))) + episode.user_data["avg_accel_avs"].append(np.nan_to_num(np.mean( + [np.abs((env.k.vehicle.get_speed(veh_id) - env.k.vehicle.get_previous_speed(veh_id))/env.sim_step) for + veh_id in rl_ids if veh_id in env.k.vehicle.previous_speeds.keys()] + ))) def on_episode_end(info): episode = info["episode"] @@ -278,12 +290,14 @@ def on_episode_end(info): episode.custom_metrics["avg_speed"] = avg_speed avg_speed_avs = np.mean(episode.user_data["avg_speed_avs"]) episode.custom_metrics["avg_speed_avs"] = avg_speed_avs + episode.custom_metrics["avg_accel_avs"] = np.mean(episode.user_data["avg_accel_avs"]) episode.custom_metrics["avg_energy_per_veh"] = np.mean(episode.user_data["avg_energy"]) episode.custom_metrics["avg_mpg_per_veh"] = np.mean(episode.user_data["avg_mpg"]) episode.custom_metrics["avg_mpj_per_veh"] = np.mean(episode.user_data["avg_mpj"]) + episode.custom_metrics["num_cars"] = np.mean(episode.user_data["num_cars"]) def on_train_result(info): - """Store the mean score of the episode, and adjust the number of adversaries.""" + """Store the mean score of the episode, and increment or decrement the iteration number for curriculum.""" trainer = info["trainer"] trainer.workers.foreach_worker( lambda ev: ev.foreach_env( @@ -361,13 +375,10 @@ def trial_str_creator(trial): tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) -def train_h_baselines(flow_params, args, multiagent): +def train_h_baselines(env_name, args, multiagent): """Train policies using SAC and TD3 with h-baselines.""" from hbaselines.algorithms import OffPolicyRLAlgorithm from hbaselines.utils.train import parse_options, get_hyperparameters - from hbaselines.envs.mixed_autonomy import FlowEnv - - flow_params = deepcopy(flow_params) # Get the command-line arguments that are relevant here args = parse_options(description="", example_usage="", args=args) @@ -375,31 +386,6 @@ def train_h_baselines(flow_params, args, multiagent): # the base directory that the logged data will be stored in base_dir = "training_data" - # Create the training environment. - env = FlowEnv( - flow_params, - multiagent=multiagent, - shared=args.shared, - maddpg=args.maddpg, - render=args.render, - version=0 - ) - - # Create the evaluation environment. 
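
The h-baselines path no longer constructs FlowEnv objects by hand; the environment is resolved from a registry string instead. A minimal sketch of the resulting entry point, mirroring the call further down in this diff (the exp_config name is hypothetical, and policy/hp stand in for the values selected earlier in the function):

    from hbaselines.algorithms import OffPolicyRLAlgorithm

    evaluate = False  # set True to also build an evaluation env
    alg = OffPolicyRLAlgorithm(
        policy=policy,                   # policy class chosen earlier (assumed)
        env="flow:multiagent_i210",      # hypothetical "flow:<exp_config>" name
        eval_env="flow:multiagent_i210" if evaluate else None,
        **hp                             # hyperparameters dict (assumed)
    )
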
- if args.evaluate: - eval_flow_params = deepcopy(flow_params) - eval_flow_params['env'].evaluate = True - eval_env = FlowEnv( - eval_flow_params, - multiagent=multiagent, - shared=args.shared, - maddpg=args.maddpg, - render=args.render_eval, - version=1 - ) - else: - eval_env = None - for i in range(args.n_training): # value of the next seed seed = args.seed + i @@ -447,8 +433,8 @@ def train_h_baselines(flow_params, args, multiagent): # Create the algorithm object. alg = OffPolicyRLAlgorithm( policy=policy, - env=env, - eval_env=eval_env, + env="flow:{}".format(env_name), + eval_env="flow:{}".format(env_name) if args.evaluate else None, **hp ) @@ -468,7 +454,6 @@ def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" from stable_baselines.common.vec_env import DummyVecEnv from stable_baselines import PPO2 - flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] @@ -541,8 +526,7 @@ def main(args): elif flags.rl_trainer.lower() == "stable-baselines": train_stable_baselines(submodule, flags) elif flags.rl_trainer.lower() == "h-baselines": - flow_params = submodule.flow_params - train_h_baselines(flow_params, args, multiagent) + train_h_baselines(flags.exp_config, args, multiagent) else: raise ValueError("rl_trainer should be either 'rllib', 'h-baselines', " "or 'stable-baselines'.") diff --git a/flow/algorithms/centralized_PPO.py b/flow/algorithms/centralized_PPO.py index d30442773..133d7c8bf 100644 --- a/flow/algorithms/centralized_PPO.py +++ b/flow/algorithms/centralized_PPO.py @@ -1,14 +1,14 @@ +"""An example of customizing PPO to leverage a centralized critic.""" + from __future__ import absolute_import from __future__ import division from __future__ import print_function -"""An example of customizing PPO to leverage a centralized critic.""" - import argparse import numpy as np from ray.rllib.agents.ppo.ppo import PPOTrainer -from flow.algorithms.custom_ppo import CustomPPOTFPolicy +from flow.algorithms.custom_ppo import CustomPPOTFPolicy, KLCoeffMixin from ray.rllib.evaluation.postprocessing import compute_advantages, \ Postprocessing from ray.rllib.policy.sample_batch import SampleBatch @@ -65,14 +65,17 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, self.register_variables(self.central_vf.variables) def forward(self, input_dict, state, seq_lens): + """Run forward inference.""" return self.model.forward(input_dict, state, seq_lens) def central_value_function(self, central_obs): + """Compute the centralized value function.""" return tf.reshape( self.central_vf( [central_obs]), [-1]) def value_function(self): + """Compute the normal value function; this is only here to make the code run.""" return self.model.value_function() # not used @@ -145,23 +148,27 @@ def __init__(self, @override(RecurrentTFModelV2) def forward_rnn(self, inputs, state, seq_lens): + """Forward inference on the RNN.""" model_out, self._value_out, h, c = self.model( [inputs, seq_lens] + state) return model_out, [h, c] @override(ModelV2) def get_initial_state(self): + """Set up the initial RNN state.""" return [ np.zeros(self.cell_size, np.float32), np.zeros(self.cell_size, np.float32), ] def central_value_function(self, central_obs): + """Compute the central value function.""" return tf.reshape( self.central_vf( [central_obs]), [-1]) def value_function(self): + """Compute the normal value function; this is only here to make the code run.""" return tf.reshape(self._value_out, [-1]) # not used @@ 
-175,18 +182,18 @@ def __init__(self): ) def compute_central_vf(self, central_obs): + """Run forward inference on the model.""" feed_dict = { self.get_placeholder(CENTRAL_OBS): central_obs, } return self.get_session().run(self.central_value_function, feed_dict) -# Grabs the opponent obs/act and includes it in the experience train_batch, -# and computes GAE using the central vf predictions. def centralized_critic_postprocessing(policy, sample_batch, other_agent_batches=None, episode=None): + """Find all other agents that overlapped with you and stack their obs to be passed to the central VF.""" if policy.loss_initialized(): assert other_agent_batches is not None @@ -207,13 +214,6 @@ def centralized_critic_postprocessing(policy, agent_id: other_agent_batches[agent_id][1]["obs"].copy() for agent_id in other_agent_batches.keys() } - # padded_agent_obs = {agent_id: - # overlap_and_pad_agent( - # time_span, - # rel_agent_time, - # other_obs[agent_id]) - # for agent_id, - # rel_agent_time in rel_agents.items()} padded_agent_obs = { agent_id: fill_missing( agent_time, @@ -229,7 +229,7 @@ def centralized_critic_postprocessing(policy, central_obs_batch = np.hstack( (sample_batch["obs"], np.hstack(central_obs_list))) except Exception as e: - print(‘Error in centralized PPO: ’, e) + print("Error in centralized PPO: ", e) # TODO(@ev) this is a bug and needs to be fixed central_obs_batch = sample_batch["obs"] max_vf_agents = policy.model.max_num_agents @@ -287,6 +287,7 @@ def time_overlap(time_span, agent_time): def fill_missing(agent_time, other_agent_time, obs): + """Pad the obs to the appropriate length for agents that don't overlap perfectly in time.""" # shortcut, the two overlap perfectly if np.sum(agent_time == other_agent_time) == agent_time.shape[0]: return obs @@ -315,15 +316,9 @@ def overlap_and_pad_agent(time_span, agent_time, obs): assert time_overlap(time_span, agent_time) print(time_span) print(agent_time) - if time_span[0] == 7 or agent_time[0] == 7: - import ipdb - ipdb.set_trace() # FIXME(ev) some of these conditions can be combined # no padding needed if agent_time[0] == time_span[0] and agent_time[1] == time_span[1]: - if obs.shape[0] < 200: - import ipdb - ipdb.set_trace() return obs # agent enters before time_span starts and exits before time_span end if agent_time[0] < time_span[0] and agent_time[1] < time_span[1]: @@ -332,9 +327,6 @@ def overlap_and_pad_agent(time_span, agent_time, obs): overlap_obs = obs[non_overlap_time:] padding = np.zeros((missing_time, obs.shape[1])) obs_concat = np.concatenate((overlap_obs, padding)) - if obs_concat.shape[0] < 200: - import ipdb - ipdb.set_trace() return obs_concat # agent enters after time_span starts and exits after time_span ends elif agent_time[0] > time_span[0] and agent_time[1] > time_span[1]: @@ -343,9 +335,6 @@ def overlap_and_pad_agent(time_span, agent_time, obs): missing_time = agent_time[0] - time_span[0] padding = np.zeros((missing_time, obs.shape[1])) obs_concat = np.concatenate((padding, overlap_obs)) - if obs_concat.shape[0] < 200: - import ipdb - ipdb.set_trace() return obs_concat # agent time is entirely contained in time_span elif agent_time[0] >= time_span[0] and agent_time[1] <= time_span[1]: @@ -358,9 +347,6 @@ def overlap_and_pad_agent(time_span, agent_time, obs): if missing_right > 0: padding = np.zeros((missing_right, obs.shape[1])) obs_concat = np.concatenate((obs_concat, padding)) - if obs_concat.shape[0] < 200: - import ipdb - ipdb.set_trace() return obs_concat # agent time totally contains time_span elif 
agent_time[0] <= time_span[0] and agent_time[1] >= time_span[1]: @@ -371,14 +357,11 @@ def overlap_and_pad_agent(time_span, agent_time, obs): overlap_obs = overlap_obs[non_overlap_left:] if non_overlap_right > 0: overlap_obs = overlap_obs[:-non_overlap_right] - if overlap_obs.shape[0] < 200: - import ipdb - ipdb.set_trace() return overlap_obs -# Copied from PPO but optimizing the central value function def loss_with_central_critic(policy, model, dist_class, train_batch): + """Set up the PPO loss but replace the VF loss with the centralized VF loss.""" CentralizedValueMixin.__init__(policy) logits, state = model.from_batch(train_batch) @@ -409,6 +392,8 @@ def loss_with_central_critic(policy, model, dist_class, train_batch): class PPOLoss(object): + """Object containing the PPO loss function.""" + def __init__(self, action_space, dist_class, @@ -472,6 +457,7 @@ def __init__(self, model_config : dict, optional model config for use in specifying action distributions. """ + def reduce_mean_valid(t): return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) @@ -508,28 +494,13 @@ def reduce_mean_valid(t): def new_ppo_surrogate_loss(policy, model, dist_class, train_batch): + """Return the PPO loss with the centralized value function.""" loss = loss_with_central_critic(policy, model, dist_class, train_batch) return loss -class KLCoeffMixin(object): - def __init__(self, config): - # KL Coefficient - self.kl_coeff_val = config["kl_coeff"] - self.kl_target = config["kl_target"] - self.kl_coeff = tf.get_variable( - initializer=tf.constant_initializer(self.kl_coeff_val), - name="kl_coeff", - shape=(), - trainable=False, - dtype=tf.float32) - - def update_kl(self, blah): - pass - - def setup_mixins(policy, obs_space, action_space, config): - # copied from PPO + """Construct additional classes that add on to PPO.""" KLCoeffMixin.__init__(policy, config) EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"], @@ -541,7 +512,7 @@ def setup_mixins(policy, obs_space, action_space, config): def central_vf_stats(policy, train_batch, grads): - # Report the explained variance of the central value function. 
+ """Report the explained variance of the centralized value function.""" return { "vf_explained_var": explained_variance( train_batch[Postprocessing.VALUE_TARGETS], @@ -550,6 +521,7 @@ def central_vf_stats(policy, train_batch, grads): def kl_and_loss_stats(policy, train_batch): + """Trianing stats to pass to the tensorboard.""" return { "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), "cur_lr": tf.cast(policy.cur_lr, tf.float64), diff --git a/flow/algorithms/custom_ppo.py b/flow/algorithms/custom_ppo.py index 65291f1d4..47a4459aa 100644 --- a/flow/algorithms/custom_ppo.py +++ b/flow/algorithms/custom_ppo.py @@ -1,4 +1,4 @@ -"""PPO but we add in the outflow after the reward to the final reward.""" +"""PPO but without the adaptive KL term that RLlib added.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function @@ -15,11 +15,11 @@ from ray.rllib.utils.explained_variance import explained_variance from ray.rllib.utils.tf_ops import make_tf_callable from ray.rllib.utils import try_import_tf + from ray.rllib.agents.trainer_template import build_trainer from ray.rllib.agents.ppo.ppo import choose_policy_optimizer, DEFAULT_CONFIG from ray.rllib.agents.ppo.ppo import warn_about_bad_reward_scales - tf = try_import_tf() logger = logging.getLogger(__name__) @@ -29,6 +29,8 @@ class PPOLoss(object): + """PPO Loss object.""" + def __init__(self, action_space, dist_class, @@ -92,6 +94,7 @@ def __init__(self, model_config : dict, optional model config for use in specifying action distributions. """ + def reduce_mean_valid(t): return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) @@ -127,6 +130,7 @@ def reduce_mean_valid(t): def ppo_surrogate_loss(policy, model, dist_class, train_batch): + """Construct and return the PPO loss.""" logits, state = model.from_batch(train_batch) action_dist = dist_class(logits, model) @@ -163,6 +167,7 @@ def ppo_surrogate_loss(policy, model, dist_class, train_batch): def kl_and_loss_stats(policy, train_batch): + """Return statistics for the tensorboard.""" return { "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), "cur_lr": tf.cast(policy.cur_lr, tf.float64), @@ -216,6 +221,7 @@ def postprocess_ppo_gae(policy, def clip_gradients(policy, optimizer, loss): + """If grad_clip is not None, clip the gradients.""" variables = policy.model.trainable_variables() if policy.config["grad_clip"] is not None: grads_and_vars = optimizer.compute_gradients(loss, variables) @@ -229,6 +235,8 @@ def clip_gradients(policy, optimizer, loss): class ValueNetworkMixin(object): + """Construct the value function.""" + def __init__(self, obs_space, action_space, config): if config["use_gae"]: @@ -242,7 +250,7 @@ def value(ob, prev_action, prev_reward, *state): [prev_reward]), "is_training": tf.convert_to_tensor(False), }, [tf.convert_to_tensor([s]) for s in state], - tf.convert_to_tensor([1])) + tf.convert_to_tensor([1])) return self.model.value_function()[0] else: @@ -255,11 +263,13 @@ def value(ob, prev_action, prev_reward, *state): def setup_config(policy, obs_space, action_space, config): + """Add additional custom options from the config.""" # auto set the model option for layer sharing config["model"]["vf_share_layers"] = config["vf_share_layers"] def setup_mixins(policy, obs_space, action_space, config): + """Construct additional classes that add on to PPO.""" KLCoeffMixin.__init__(policy, config) ValueNetworkMixin.__init__(policy, obs_space, action_space, config) EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"], @@ 
-268,6 +278,8 @@ def setup_mixins(policy, obs_space, action_space, config): class KLCoeffMixin(object): + """Update the KL Coefficient. This is intentionally disabled to match the PPO paper better.""" + def __init__(self, config): # KL Coefficient self.kl_coeff_val = config["kl_coeff"] @@ -280,6 +292,7 @@ def __init__(self, config): dtype=tf.float32) def update_kl(self, blah): + """Disabled to match the PPO paper better.""" pass @@ -300,6 +313,7 @@ def update_kl(self, blah): def validate_config(config): + """Check that the config is set up properly.""" if config["entropy_coeff"] < 0: raise DeprecationWarning("entropy_coeff must be >= 0") if isinstance(config["entropy_coeff"], int): diff --git a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py index 42c9b2a9b..280c94d37 100755 --- a/flow/controllers/car_following_models.py +++ b/flow/controllers/car_following_models.py @@ -647,6 +647,7 @@ def __init__(self, def get_accel(self, env): """See parent class.""" + # without generating waves. lead_id = env.k.vehicle.get_leader(self.veh_id) if not lead_id: # no car ahead if self.want_max_accel: diff --git a/flow/controllers/velocity_controllers.py b/flow/controllers/velocity_controllers.py index 86868c5f7..4d8bfec79 100644 --- a/flow/controllers/velocity_controllers.py +++ b/flow/controllers/velocity_controllers.py @@ -122,8 +122,6 @@ def get_accel(self, env): or env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1])) \ or (self.no_control_edges is not None and len(self.no_control_edges) > 0 and edge in self.no_control_edges): - # TODO(@evinitsky) put back - # or env.k.vehicle.get_edge(self.veh_id) in self.no_control_edges: return None else: # compute the acceleration from the desired velocity diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 8ede367a7..1274280ba 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -169,8 +169,6 @@ def rl_actions(*_): if convert_to_csv and self.env.simulator == "traci": dir_path = self.env.sim_params.emission_path - - if dir_path: trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) @@ -196,7 +194,7 @@ def rl_actions(*_): get_extra_info(self.env.k.vehicle, extra_info, veh_ids, source_id, run_id) # write to disk every 100 steps - if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0 and dir_path: + if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0: write_dict_to_csv(trajectory_table_path, extra_info, not j) extra_info.clear() diff --git a/flow/core/kernel/vehicle/aimsun.py b/flow/core/kernel/vehicle/aimsun.py index ce0d026e5..16c94558a 100644 --- a/flow/core/kernel/vehicle/aimsun.py +++ b/flow/core/kernel/vehicle/aimsun.py @@ -65,6 +65,7 @@ def __init__(self, # number of vehicles to exit the network for every time-step self._num_arrived = [] self._arrived_ids = [] + self._arrived_rl_ids = [] # contains conversion from Flow-ID to Aimsun-ID self._id_aimsun2flow = {} @@ -174,11 +175,17 @@ def update(self, reset): added_vehicles = self.kernel_api.get_entered_ids() exited_vehicles = self.kernel_api.get_exited_ids() + # keep track of arrived rl vehicles + arrived_rl_ids = [] + # add the new vehicles if they should be tracked for aimsun_id in added_vehicles: veh_type = self.kernel_api.get_vehicle_type_name(aimsun_id) if veh_type in self.tracked_vehicle_types: self._add_departed(aimsun_id) + if aimsun_id in self.get_rl_ids(): + 
arrived_rl_ids.append(aimsun_id) + self._arrived_rl_ids.append(arrived_rl_ids) # remove the exited vehicles if they were tracked if not reset: @@ -639,6 +646,16 @@ def get_arrived_ids(self): """See parent class.""" raise NotImplementedError + def get_arrived_rl_ids(self, k=1): + """See parent class.""" + if len(self._arrived_rl_ids) > 0: + arrived = [] + for arr in self._arrived_rl_ids[-k:]: + arrived.extend(arr) + return arrived + else: + return 0 + def get_departed_ids(self): """See parent class.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 2a6a4da12..0fa1e6e17 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -524,10 +524,13 @@ def get_arrived_ids(self): """See parent class.""" return self._arrived_ids - def get_arrived_rl_ids(self): + def get_arrived_rl_ids(self, k=1): """See parent class.""" if len(self._arrived_rl_ids) > 0: - return self._arrived_rl_ids[-1] + arrived = [] + for arr in self._arrived_rl_ids[-k:]: + arrived.extend(arr) + return arrived else: return 0 diff --git a/flow/core/rewards.py b/flow/core/rewards.py index 0f234da7e..6462b0a8c 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -322,7 +322,7 @@ def energy_consumption(env, gain=.001): rho = 1.225 # air density (kg/m^3) A = 2.6 # vehicle cross sectional area (m^2) for veh_id in env.k.vehicle.get_ids(): - if veh_id not in env.k.vehicle.previous_speeds: + if veh_id not in env.k.vehicle.previous_speeds.keys(): continue speed = env.k.vehicle.get_speed(veh_id) prev_speed = env.k.vehicle.get_previous_speed(veh_id) diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index 881461d63..77a3d2c12 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -128,7 +128,7 @@ def step(self, rl_actions): reward = self.compute_reward(rl_actions, fail=crash) if self.env_params.done_at_exit: - for rl_id in self.k.vehicle.get_arrived_rl_ids(): + for rl_id in self.k.vehicle.get_arrived_rl_ids(self.env_params.sims_per_step): done[rl_id] = True reward[rl_id] = 0 states[rl_id] = -1 * np.ones(self.observation_space.shape[0]) diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index ad301a3f5..450a0269c 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -1,4 +1,5 @@ """Environment for training vehicles to reduce congestion in the I210.""" + from gym.spaces import Box import numpy as np @@ -94,9 +95,11 @@ def __init__(self, env_params, sim_params, network, simulator='traci'): # penalize stops self.penalize_stops = env_params.additional_params["penalize_stops"] + self.stop_penalty = env_params.additional_params["stop_penalty"] # penalize accel self.penalize_accel = env_params.additional_params.get("penalize_accel", False) + self.accel_penalty = env_params.additional_params["accel_penalty"] @property def observation_space(self): @@ -142,16 +145,9 @@ def _apply_rl_actions(self, rl_actions): if rl_actions: for rl_id, actions in rl_actions.items(): accel = actions[0] - - # lane_change_softmax = np.exp(actions[1:4]) - # lane_change_softmax /= np.sum(lane_change_softmax) - # lane_change_action = np.random.choice([-1, 0, 1], - # p=lane_change_softmax) id_list.append(rl_id) accel_list.append(accel) self.k.vehicle.apply_acceleration(id_list, accel_list) - # self.k.vehicle.apply_lane_change(rl_id, lane_change_action) - # print('time to apply actions is ', time() - t) def in_control_range(self, veh_id): """Return if a veh_id is on an edge 
that is allowed to be controlled. @@ -194,6 +190,7 @@ def compute_reward(self, rl_actions, **kwargs): rewards = {} valid_ids = [rl_id for rl_id in self.k.vehicle.get_rl_ids() if self.in_control_range(rl_id)] + valid_human_ids = [veh_id for veh_id in self.k.vehicle.get_ids() if self.in_control_range(veh_id)] if self.env_params.additional_params["local_reward"]: des_speed = self.env_params.additional_params["target_velocity"] @@ -220,26 +217,27 @@ def compute_reward(self, rl_actions, **kwargs): else: break else: - speeds = [] - follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id)) - if follow_speed >= 0: - speeds.append(follow_speed) - if self.k.vehicle.get_speed(rl_id) >= 0: - speeds.append(self.k.vehicle.get_speed(rl_id)) - if len(speeds) > 0: - # rescale so the critic can estimate it quickly - rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 - for speed in speeds]) / (des_speed ** 2) + follow_id = rl_id + for i in range(self.look_back_length + 1): + if follow_id not in ["", None]: + follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(follow_id)) + reward = (des_speed - min(np.abs(follow_speed - des_speed), des_speed)) ** 2 + reward /= ((des_speed ** 2) * self.look_back_length) + rewards[rl_id] += reward + else: + break + follow_id = self.k.vehicle.get_follower(follow_id) + else: if self.mpg_reward: - reward = np.nan_to_num(miles_per_gallon(self, self.k.vehicle.get_ids(), gain=1.0)) / 100.0 + reward = np.nan_to_num(miles_per_gallon(self, valid_human_ids, gain=1.0)) / 100.0 else: - speeds = self.k.vehicle.get_speed(self.k.vehicle.get_ids()) + speeds = self.k.vehicle.get_speed(valid_human_ids) des_speed = self.env_params.additional_params["target_velocity"] # rescale so the critic can estimate it quickly if self.reroute_on_exit: reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) - for speed in speeds]) / (des_speed)) + for speed in speeds]) / des_speed) else: reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 for speed in speeds]) / (des_speed ** 2)) @@ -256,10 +254,8 @@ def compute_reward(self, rl_actions, **kwargs): t_headway = max( self.k.vehicle.get_headway(veh_id) / self.k.vehicle.get_speed(veh_id), 0) - # print('time headway is {}, headway is {}'.format(t_headway, self.k.vehicle.get_headway(veh_id))) scaling_factor = max(0, 1 - self.num_training_iters / self.headway_curriculum_iters) penalty += scaling_factor * self.headway_reward_gain * min((t_headway - t_min) / t_min, 0) - # print('penalty is ', penalty) rewards[veh_id] += penalty @@ -274,7 +270,7 @@ def compute_reward(self, rl_actions, **kwargs): follow_id = self.k.vehicle.get_follower(follow_id) if follow_id not in ["", None]: if self.reroute_on_exit: - speed_reward += ((des_speed - np.abs(speed - des_speed))) / (des_speed) + speed_reward += (des_speed - np.abs(speed - des_speed)) / des_speed else: speed_reward += ((des_speed - np.abs(speed - des_speed)) ** 2) / (des_speed ** 2) else: @@ -287,11 +283,11 @@ def compute_reward(self, rl_actions, **kwargs): speed = self.k.vehicle.get_speed(veh_id) if self.penalize_stops: if speed < 1.0: - rewards[veh_id] -= .01 + rewards[veh_id] -= self.stop_penalty if self.penalize_accel and veh_id in self.k.vehicle.previous_speeds: prev_speed = self.k.vehicle.get_previous_speed(veh_id) abs_accel = abs(speed - prev_speed) / self.sim_step - rewards[veh_id] -= abs_accel / 400.0 + rewards[veh_id] -= abs_accel * self.accel_penalty # print('time to get reward is ', time() - t) return rewards @@ -324,8 
+320,6 @@ def additional_command(self): if edge == self.exit_edge and \ (self.k.vehicle.get_position(veh_id) > self.k.network.edge_length(self.exit_edge) - 100) \ and self.k.vehicle.get_leader(veh_id) is None: - # if self.step_counter > 6000: - # import ipdb; ipdb.set_trace() type_id = self.k.vehicle.get_type(veh_id) # remove the vehicle self.k.vehicle.remove(veh_id) @@ -333,8 +327,7 @@ lane = valid_lanes[index] del valid_lanes[index] # reintroduce it at the start of the network - # TODO(@evinitsky) select the lane and speed a bit more cleanly - # Note, the position is 10 so you are not overlapping with the inflow car that is being removed. + # Note, the position is 20 so you are not overlapping with the inflow car that is being removed. # this allows the vehicle to be immediately inserted. try: self.k.vehicle.add( @@ -405,6 +398,17 @@ def step(self, rl_actions): done[rl_id] = True reward[rl_id] = 0 state[rl_id] = -1 * np.ones(self.observation_space.shape[0]) + else: + # you have to catch the vehicles on the exit edge, they have not yet + # received a done when the env terminates + if done['__all__']: + on_exit_edge = [rl_id for rl_id in self.k.vehicle.get_rl_ids() + if self.k.vehicle.get_edge(rl_id) == self.exit_edge] + for rl_id in on_exit_edge: + done[rl_id] = True + reward[rl_id] = 0 + state[rl_id] = -1 * np.ones(self.observation_space.shape[0]) + return state, reward, done, info diff --git a/flow/networks/i210_subnetwork_ghost_cell.py b/flow/networks/i210_subnetwork_ghost_cell.py deleted file mode 100644 index 8a45b4d91..000000000 --- a/flow/networks/i210_subnetwork_ghost_cell.py +++ /dev/null @@ -1,162 +0,0 @@ -"""Contains the I-210 sub-network class.""" - -from flow.networks.base import Network - -EDGES_DISTRIBUTION = [ - # Main highway - "ghost0", - "119257914", - "119257908#0", - "119257908#1-AddedOnRampEdge", - "119257908#1", - "119257908#1-AddedOffRampEdge", - "119257908#2", - "119257908#3", - - # On-ramp - "27414345", - "27414342#0", - "27414342#1-AddedOnRampEdge", - - # Off-ramp - "173381935", -] - - -class I210SubNetworkGhostCell(Network): - """A network used to simulate the I-210 sub-network. - - Usage - ----- - >>> from flow.core.params import NetParams - >>> from flow.core.params import VehicleParams - >>> from flow.core.params import InitialConfig - >>> from flow.networks import I210SubNetwork - >>> - >>> network = I210SubNetwork( - >>> name='I-210_subnetwork', - >>> vehicles=VehicleParams(), - >>> net_params=NetParams() - >>> ) - """ - - def specify_routes(self, net_params): - """See parent class. - - Routes for vehicles moving through the I210. 
- """ - if net_params.additional_params["on_ramp"]: - rts = { - # Main highway - "ghost0": [ - (["ghost0", "119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1 - 17 / 8378), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - 17 / 8378) - ], - "119257914": [ - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1 - 17 / 8378), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - 17 / 8378) - ], - "119257908#0": [ - (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1.0), - # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - ], - "119257908#1-AddedOnRampEdge": [ - (["119257908#1-AddedOnRampEdge", "119257908#1", - "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1.0), - # (["119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - ], - "119257908#1": [ - (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1.0), - # (["119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - ], - "119257908#1-AddedOffRampEdge": [ - (["119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1.0), - # (["119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - ], - "119257908#2": [ - (["119257908#2", "119257908#3"], 1), - ], - "119257908#3": [ - (["119257908#3"], 1), - ], - - # On-ramp - "27414345": [ - (["27414345", "27414342#1-AddedOnRampEdge", - "27414342#1", - "119257908#1-AddedOnRampEdge", "119257908#1", - "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1 - 9 / 321), - (["27414345", "27414342#1-AddedOnRampEdge", - "27414342#1", - "119257908#1-AddedOnRampEdge", "119257908#1", - "119257908#1-AddedOffRampEdge", "173381935"], - 9 / 321), - ], - "27414342#0": [ - (["27414342#0", "27414342#1-AddedOnRampEdge", - "27414342#1", - "119257908#1-AddedOnRampEdge", "119257908#1", - "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1 - 20 / 421), - (["27414342#0", "27414342#1-AddedOnRampEdge", - "27414342#1", - "119257908#1-AddedOnRampEdge", "119257908#1", - "119257908#1-AddedOffRampEdge", "173381935"], - 20 / 421), - ], - "27414342#1-AddedOnRampEdge": [ - (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 0.5), - (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - 0.5), - ], - - # Off-ramp - "173381935": [ - (["173381935"], 1), - ], - } - - else: - rts = { - # Main highway - "ghost0": [ - (["ghost0", "119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1), - ], - } - - return rts diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 162a16121..8a4684a61 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -19,7 +19,7 @@ from 
ray.tune.registry import register_env from flow.core.util import emission_to_csv, ensure_dir -from flow.core.rewards import vehicle_energy_consumption +from flow.core.rewards import veh_energy_consumption from flow.utils.registry import make_create_env from flow.utils.rllib import get_flow_params from flow.utils.rllib import get_rllib_config @@ -142,6 +142,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= if not sim_params.restart_instance: env.restart_simulation(sim_params=sim_params, render=sim_params.render) + # reroute on exit is a training hack, it should be turned off at test time. + if hasattr(env, "reroute_on_exit"): + env.reroute_on_exit = False + if rllib_config: # check if we have a multiagent environment but in a # backwards compatible way @@ -167,7 +171,6 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= rllib_flow_params = get_flow_params(rllib_config) agent_create_env, agent_env_name = make_create_env(params=rllib_flow_params, version=0) register_env(agent_env_name, agent_create_env) - # agent_cls = get_agent_class(config_run) if rllib_config['env_config']['run'] == "": from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel @@ -179,6 +182,8 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= agent_cls = CustomPPOTrainer elif config_run: agent_cls = get_agent_class(config_run) + else: + raise Exception('You forgot to store the algorithm type') # create the agent that will be used to compute the actions agent = agent_cls(env=agent_env_name, config=rllib_config) @@ -303,7 +308,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= per_vehicle_energy_trace[veh_id].append(0) completed_veh_types[veh_id] = env.k.vehicle.get_type(veh_id) else: - per_vehicle_energy_trace[veh_id].append(-1 * vehicle_energy_consumption(env, veh_id)) + per_vehicle_energy_trace[veh_id].append(-1 * veh_energy_consumption(env, veh_id)) if type(done) is dict and done['__all__']: break diff --git a/flow/visualize/time_space_diagram.py index ec5c3d7da..7182e8acf 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -1,16 +1,12 @@ """Generate a time space diagram for some networks. - This method accepts as input a csv file containing the sumo-formatted emission file, and then uses this data to generate a time-space diagram, with the x-axis being the time (in seconds), the y-axis being the position of a vehicle, and color representing the speed of the vehicles. - If the number of simulation steps is too dense, you can plot every nth step in the plot by setting the input `--steps=n`. - Note: This script assumes that the provided network has only one lane on each edge, or one lane on the main highway in the case of MergeNetwork. - Usage ----- :: @@ -46,21 +42,18 @@ def import_data_from_trajectory(fp, params=dict()): r"""Import and preprocess data from the Flow trajectory (.csv) file. - Parameters ---------- fp : str file path (for the .csv formatted file) params : dict flow-specific parameters, including: - * "network" (str): name of the network that was used when generating the emission file. Must be one of the network names mentioned in ACCEPTABLE_NETWORKS, * "net_params" (flow.core.params.NetParams): network-specific parameters. This is used to collect the lengths of various network links.
- Returns ------- pd.DataFrame @@ -88,32 +81,27 @@ def import_data_from_trajectory(fp, params=dict()): def get_time_space_data(data, params): r"""Compute the unique inflows and subsequent outflow statistics. - Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data params : dict flow-specific parameters, including: - * "network" (str): name of the network that was used when generating the emission file. Must be one of the network names mentioned in ACCEPTABLE_NETWORKS, * "net_params" (flow.core.params.NetParams): network-specific parameters. This is used to collect the lengths of various network links. - Returns ------- - ndarray (or dict of ndarray) + ndarray (or dict < str, np.ndarray >) 3d array (n_segments x 2 x 2) containing segments to be plotted. every inner 2d array is comprised of two 1d arrays representing [start time, start distance] and [end time, end distance] pairs. - in the case of I210, the nested arrays are wrapped into a dict, keyed on the lane number, so that each lane can be plotted separately. - Raises ------ AssertionError @@ -123,7 +111,6 @@ def get_time_space_data(data, params): assert params['network'] in ACCEPTABLE_NETWORKS, \ 'Network must be one of: ' + ', '.join([network.__name__ for network in ACCEPTABLE_NETWORKS]) - # switcher used to compute the positions based on the type of network # switcher used to compute the positions based on the type of network switcher = { RingNetwork: _ring_road, @@ -147,12 +134,10 @@ def _merge(data): This only include vehicles on the main highway, and not on the adjacent on-ramp. - Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data - Returns ------- ndarray @@ -174,6 +159,9 @@ def _merge(data): def _highway(data): r"""Generate time and position data for the highway. + We generate plots for all lanes, so the segments are wrapped in + a dictionary. + Parameters ---------- data : pd.DataFrame @@ -198,12 +186,10 @@ def _ring_road(data): Vehicles that reach the top of the plot simply return to the bottom and continue. - Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data - Returns ------- ndarray @@ -220,15 +206,12 @@ def _ring_road(data): def _i210_subnetwork(data): r"""Generate time and position data for the i210 subnetwork. - We generate plots for all lanes, so the segments are wrapped in a dictionary. - Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data - Returns ------- dict < str, np.ndarray > @@ -257,12 +240,10 @@ def _figure_eight(data): The vehicles traveling towards the intersection from one side will be plotted from the top downward, while the vehicles from the other side will be plotted from the bottom upward. - Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data - Returns ------- ndarray @@ -279,16 +260,13 @@ def _figure_eight(data): def _get_abs_pos(df, params): """Compute the absolute positions from edges and relative positions. - This is the variable we will ultimately use to plot individual vehicles. - Parameters ---------- df : pd.DataFrame dataframe of trajectory data params : dict flow-specific parameters - Returns ------- pd.Series @@ -388,9 +366,7 @@ def _get_abs_pos(df, params): def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None): """Plot the time-space diagram. - Take the pre-processed segments and other meta-data, then plot all the line segments. 
- Parameters ---------- ax : matplotlib.axes.Axes @@ -403,11 +379,14 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) parsed arguments lane : int, optional lane number to be shown in plot title +<<<<<<< HEAD +======= ghost_edges : list or set of str ghost edge names to be greyed out, default None ghost_bounds : tuple lower and upper bounds of domain, excluding ghost edges, default None +>>>>>>> 06ff2d970176c51dee5a5be092b85d44e84e6d82 Returns ------- None diff --git a/flow/visualize/transfer/util.py b/flow/visualize/transfer/util.py index 107e6d026..8c933c5a3 100644 --- a/flow/visualize/transfer/util.py +++ b/flow/visualize/transfer/util.py @@ -2,8 +2,7 @@ from copy import deepcopy from flow.core.params import InFlows -from examples.exp_configs.rl.multiagent.multiagent_i210 import VEH_PER_HOUR_BASE_119257914, \ - VEH_PER_HOUR_BASE_27414345, VEH_PER_HOUR_BASE_27414342 +from examples.exp_configs.rl.multiagent.multiagent_i210 import INFLOW_RATE, ON_RAMP_INFLOW_RATE def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False): @@ -27,8 +26,8 @@ def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False): all_inflows = [] inflow_119257914 = dict(veh_type="human", - edge="119257914", - vehs_per_hour=VEH_PER_HOUR_BASE_119257914 * (1 - (pr)) * fr_coef, + edge="ghost0", + vehs_per_hour=INFLOW_RATE * (1 - (pr)) * fr_coef, # probability=1.0, departLane="random", departSpeed=departSpeed) @@ -36,8 +35,8 @@ def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False): if pr > 0.0: inflow_119257914_av = dict(veh_type="av", - edge="119257914", - vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * pr * fr_coef), + edge="ghost0", + vehs_per_hour=int(INFLOW_RATE * pr * fr_coef), # probability=1.0, departLane="random", departSpeed=departSpeed) @@ -46,14 +45,14 @@ def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False): if on_ramp: inflow_27414345 = dict(veh_type="human", edge="27414345", - vehs_per_hour=VEH_PER_HOUR_BASE_27414345 * (1 - (pr)) * fr_coef, + vehs_per_hour=ON_RAMP_INFLOW_RATE * (1 - (pr)) * fr_coef, departLane="random", departSpeed=departSpeed) all_inflows.append(inflow_27414345) if pr > 0.0: inflow_27414342 = dict(veh_type="human", edge="27414342#0", - vehs_per_hour=VEH_PER_HOUR_BASE_27414342 * pr * fr_coef, + vehs_per_hour=ON_RAMP_INFLOW_RATE * pr * fr_coef, departLane="random", departSpeed=departSpeed) all_inflows.append(inflow_27414342) diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index 0b385f28a..b5faf6517 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -229,11 +229,11 @@ class TestHBaselineExamples(unittest.TestCase): confirming that it runs. 
""" @staticmethod - def run_exp(flow_params, multiagent): + def run_exp(env_name, multiagent): train_h_baselines( - flow_params=flow_params, + env_name=env_name, args=[ - flow_params["env_name"].__name__, + env_name, "--initial_exploration_steps", "1", "--total_steps", "10" ], @@ -241,10 +241,10 @@ def run_exp(flow_params, multiagent): ) def test_singleagent_ring(self): - self.run_exp(singleagent_ring.copy(), multiagent=False) + self.run_exp("singleagent_ring", multiagent=False) def test_multiagent_ring(self): - self.run_exp(multiagent_ring.copy(), multiagent=True) + self.run_exp("multiagent_ring", multiagent=True) class TestRllibExamples(unittest.TestCase): From ffec4cb84d58782800256395bd4cdf98ea939b88 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 6 Jul 2020 03:35:32 -0700 Subject: [PATCH 292/438] furthur changes on experiment.py to accomdate RL --- flow/core/experiment.py | 34 ++++++++++++++++++++++++------ flow/visualize/visualizer_rllib.py | 5 ++--- 2 files changed, 30 insertions(+), 9 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 8a273c3e6..e0c8f9d00 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -62,7 +62,7 @@ class can generate csv files from emission files produced by sumo. These the environment object the simulator will run """ - def __init__(self, flow_params, custom_callables=None): + def __init__(self, flow_params, custom_callables=None, use_ray=False): """Instantiate the Experiment class. Parameters @@ -85,14 +85,16 @@ def __init__(self, flow_params, custom_callables=None): self.create_env = create_env # Create the environment. - self.env = create_env() + if not use_ray: + self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.utcnow()))) logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only_query="", is_baseline=False): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only_query="", is_baseline=False, + multiagent=False, rets=None, policy_map_fn=None): """Run the given network for a set number of runs. Parameters @@ -179,7 +181,10 @@ def rl_actions(*_): metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) for i in range(num_runs): - ret = 0 + if rets and multiagent: + ret = {key: [0] for key in rets.keys()} + else: + ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} run_id = "run_{}".format(i) @@ -194,7 +199,11 @@ def rl_actions(*_): # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) - ret += reward + if multiagent: + for actor, rew in reward.items(): + ret[policy_map_fn(actor)][0] += rew + else: + ret += reward # collect additional information for the data pipeline get_extra_info(self.env.k.vehicle, extra_info, veh_ids, source_id, run_id) @@ -208,9 +217,17 @@ def rl_actions(*_): for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) + if multiagent and done['__all__']: + break if type(done) is dict and done['__all__'] or type(done) is not dict and done: break + if multiagent: + for key in rets.keys(): + rets[key].append(ret[key]) + else: + rets.append(ret) + # Store the information from the run in info_dict. 
outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) @@ -219,7 +236,12 @@ def rl_actions(*_): for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) - print("Round {0}, return: {1}".format(i, ret)) + if multiagent: + for agent_id, rew in rets.items(): + print('Round {}, Return: {} for agent {}'.format( + i, rew, agent_id)) + else: + print('Round {}, Return: {}'.format(i, ret)) # Print the averages/std for all variables in the info_dict. diff --git a/flow/visualize/visualizer_rllib.py index fd1309d4b..f1a83d901 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -140,7 +140,7 @@ def visualizer_rllib(args): # Create and register a gym+rllib env # create_env, env_name = make_create_env(params=flow_params, version=0) - exp = Experiment(flow_params) + exp = Experiment(flow_params, use_ray=True) register_env(exp.env_name, exp.create_env) # check if the environment is a single or multiagent environment, and @@ -175,8 +175,7 @@ def visualizer_rllib(args): os.environ.get("TEST_FLAG") != 'True': exp.env = agent.local_evaluator.env else: - pass - # env = gym.make(env_name) + exp.env = gym.make(exp.env_name) # reroute on exit is a training hack, it should be turned off at test time. if hasattr(exp.env, "reroute_on_exit"): From 9f77475abb74e2a32ea4b3461db3ea4ba8fe696f Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 6 Jul 2020 04:37:58 -0700 Subject: [PATCH 293/438] fix some issues with compatibility --- flow/core/experiment.py | 10 +++++----- flow/visualize/visualizer_rllib.py | 4 +++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/flow/core/experiment.py index e0c8f9d00..e44571bdd 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -88,10 +88,10 @@ def __init__(self, flow_params, custom_callables=None, use_ray=False): if not use_ray: self.env = create_env() - logging.info(" Starting experiment {} at {}".format( - self.env.network.name, str(datetime.utcnow()))) + logging.info(" Starting experiment {} at {}".format( + self.env.network.name, str(datetime.utcnow()))) - logging.info("Initializing environment.") + logging.info("Initializing environment.") @@ -140,7 +140,6 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only # used to store info_dict = { - "returns": [], "velocities": [], "outflows": [], } @@ ... # Store the information from the run in info_dict.
outflow = self.env.k.vehicle.get_outflow_rate(int(500)) - info_dict["returns"].append(ret) + if not multiagent: + info_dict["returns"] = rets info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) diff --git a/flow/visualize/visualizer_rllib.py index f1a83d901..e23168a3a 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -193,6 +193,7 @@ def visualizer_rllib(args): else: rets = [] + policy_map_fn = None if config['model']['use_lstm']: use_lstm = True if multiagent: @@ -247,7 +248,8 @@ def rl_action(state): else: action = agent.compute_action(state) return action - exp.run(num_runs=args.num_rollouts, rl_actions=rl_action) + exp.run(num_runs=args.num_rollouts, rl_actions=rl_action, multiagent=multiagent, rets=rets, + policy_map_fn=policy_map_fn) # Simulate and collect metrics # final_outflows = [] # final_inflows = [] From 7736cd8d06174b75986a1aad250b8646343d8677 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 6 Jul 2020 04:39:30 -0700 Subject: [PATCH 294/438] clean up old code in visualizer_rllib --- flow/visualize/visualizer_rllib.py | 173 ----------------- 1 file changed, 173 deletions(-) diff --git a/flow/visualize/visualizer_rllib.py index e23168a3a..a87783273 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -139,7 +139,6 @@ def visualizer_rllib(args): sim_params.save_render = True # Create and register a gym+rllib env - # create_env, env_name = make_create_env(params=flow_params, version=0) exp = Experiment(flow_params, use_ray=True) register_env(exp.env_name, exp.create_env) @@ -216,23 +215,6 @@ def visualizer_rllib(args): if not sim_params.restart_instance: exp.env.restart_simulation(sim_params=sim_params, render=sim_params.render) - # data pipeline - # extra_info = defaultdict(lambda: []) - # source_id = 'flow_{}'.format(uuid.uuid4().hex) - # metadata = defaultdict(lambda: []) - # # collect current time - # cur_datetime = datetime.now(timezone.utc) - # cur_date = cur_datetime.date().isoformat() - # cur_time = cur_datetime.time().isoformat() - # # collecting information for metadata table - # metadata['source_id'].append(source_id) - # metadata['submission_time'].append(cur_time) - # metadata['network'].append(network_name_translate(exp.env.network.name.split('_20')[0])) - # metadata['is_baseline'].append(str(args.is_baseline)) - # name, strategy = get_configuration() - # metadata['submitter_name'].append(name) - # metadata['strategy'].append(strategy) - def rl_action(state): if multiagent: action = {} @@ -250,161 +232,6 @@ def rl_action(state): return action exp.run(num_runs=args.num_rollouts, rl_actions=rl_action, multiagent=multiagent, rets=rets, policy_map_fn=policy_map_fn) - # Simulate and collect metrics - # final_outflows = [] - # final_inflows = [] - # mpg = [] - # mpj = [] - # mean_speed = [] - # std_speed = [] - # for i in range(args.num_rollouts): - # vel = [] - # run_id = "run_{}".format(i) - # env.pipeline_params = (extra_info, source_id, run_id) - # state = env.reset() - # if multiagent: - # ret = {key: [0] for key in rets.keys()} - # else: - # ret = 0 - # for _ in range(env_params.horizon): - # vehicles = env.unwrapped.k.vehicle - # speeds = vehicles.get_speed(vehicles.get_ids()) - # - # # only include non-empty speeds - # if speeds: - # vel.append(np.mean(speeds)) - # - # mpg.append(miles_per_gallon(env.unwrapped, vehicles.get_ids(),
gain=1.0)) - # mpj.append(miles_per_megajoule(env.unwrapped, vehicles.get_ids(), gain=1.0)) - # - # if multiagent: - # action = {} - # for agent_id in state.keys(): - # if use_lstm: - # action[agent_id], state_init[agent_id], logits = \ - # agent.compute_action( - # state[agent_id], state=state_init[agent_id], - # policy_id=policy_map_fn(agent_id)) - # else: - # action[agent_id] = agent.compute_action( - # state[agent_id], policy_id=policy_map_fn(agent_id)) - # else: - # action = agent.compute_action(state) - # state, reward, done, _ = env.step(action) - # - # # collect data for data pipeline - # get_extra_info(vehicles, extra_info, vehicles.get_ids(), source_id, run_id) - # - # if multiagent: - # for actor, rew in reward.items(): - # ret[policy_map_fn(actor)][0] += rew - # else: - # ret += reward - # if multiagent and done['__all__']: - # break - # if not multiagent and done: - # break - # - # if multiagent: - # for key in rets.keys(): - # rets[key].append(ret[key]) - # else: - # rets.append(ret) - # outflow = vehicles.get_outflow_rate(500) - # final_outflows.append(outflow) - # inflow = vehicles.get_inflow_rate(500) - # final_inflows.append(inflow) - # if np.all(np.array(final_inflows) > 1e-5): - # throughput_efficiency = [x / y for x, y in - # zip(final_outflows, final_inflows)] - # else: - # throughput_efficiency = [0] * len(final_inflows) - # mean_speed.append(np.mean(vel)) - # std_speed.append(np.std(vel)) - # if multiagent: - # for agent_id, rew in rets.items(): - # print('Round {}, Return: {} for agent {}'.format( - # i, ret, agent_id)) - # else: - # print('Round {}, Return: {}'.format(i, ret)) - - # print('==== Summary of results ====') - # print("Return:") - # print(mean_speed) - # if multiagent: - # for agent_id, rew in rets.items(): - # print('For agent', agent_id) - # print(rew) - # print('Average, std return: {}, {} for agent {}'.format( - # np.mean(rew), np.std(rew), agent_id)) - # else: - # print(rets) - # print('Average, std: {}, {}'.format( - # np.mean(rets), np.std(rets))) - # - # print("\nSpeed, mean (m/s):") - # print(mean_speed) - # print('Average, std: {}, {}'.format(np.mean(mean_speed), np.std( - # mean_speed))) - # - # print('Average, std miles per gallon: {}, {}'.format(np.mean(mpg), np.std(mpg))) - # - # print('Average, std miles per megajoule: {}, {}'.format(np.mean(mpj), np.std(mpj))) - # - # # Compute arrival rate of vehicles in the last 500 sec of the run - # print("\nOutflows (veh/hr):") - # print(final_outflows) - # print('Average, std: {}, {}'.format(np.mean(final_outflows), - # np.std(final_outflows))) - # # Compute departure rate of vehicles in the last 500 sec of the run - # print("Inflows (veh/hr):") - # print(final_inflows) - # print('Average, std: {}, {}'.format(np.mean(final_inflows), - # np.std(final_inflows))) - # # Compute throughput efficiency in the last 500 sec of the - # print("Throughput efficiency (veh/hr):") - # print(throughput_efficiency) - # print('Average, std: {}, {}'.format(np.mean(throughput_efficiency), - # np.std(throughput_efficiency))) - - # terminate the environment - # env.unwrapped.terminate() - - # if prompted, convert the emission file into a csv file - # if args.gen_emission: - # time.sleep(0.1) - # - # dir_path = os.path.dirname(os.path.realpath(__file__)) - # emission_filename = '{0}-emission.xml'.format(env.network.name) - # - # emission_path = \ - # '{0}/test_time_rollout/{1}'.format(dir_path, emission_filename) - # - # # convert the emission file into a csv file - # emission_to_csv(emission_path) - # - # # print the 
location of the emission csv file - # emission_path_csv = emission_path[:-4] + ".csv" - # print("\nGenerated emission file at " + emission_path_csv) - # - # # delete the .xml version of the emission file - # os.remove(emission_path) - # - # # generate datapipeline output - # trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - # metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) - # write_dict_to_csv(trajectory_table_path, extra_info, True) - # write_dict_to_csv(metadata_table_path, metadata, True) - # - # if args.to_aws: - # upload_to_s3('circles.data.pipeline', - # 'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date, - # source_id), - # metadata_table_path) - # upload_to_s3('circles.data.pipeline', - # 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id), - # trajectory_table_path, - # {'network': metadata['network'][0]}) def create_parser(): From 8d60558803665210ff7566ec3beb6181e7e0f19a Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 6 Jul 2020 05:04:22 -0700 Subject: [PATCH 295/438] remove useless import from visualizer_rllib, incorporate experiment.py into i210_replay --- flow/core/experiment.py | 16 +- flow/visualize/i210_replay.py | 331 ++++++++++++++--------------- flow/visualize/visualizer_rllib.py | 13 +- 3 files changed, 173 insertions(+), 187 deletions(-) diff --git a/flow/core/experiment.py index e44571bdd..d3e241ff8 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -62,7 +62,7 @@ class can generate csv files from emission files produced by sumo. These the environment object the simulator will run """ - def __init__(self, flow_params, custom_callables=None, use_ray=False): + def __init__(self, flow_params, custom_callables=None, register_with_ray=False): """Instantiate the Experiment class. Parameters ---------- @@ -85,7 +85,7 @@ def __init__(self, flow_params, custom_callables=None, use_ray=False): self.create_env = create_env # Create the environment. - if not use_ray: + if not register_with_ray: self.env = create_env() @@ -198,10 +198,10 @@ def rl_actions(*_): # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) - if multiagent: + if rets and multiagent: for actor, rew in reward.items(): ret[policy_map_fn(actor)][0] += rew - else: + elif not multiagent: ret += reward @@ -221,10 +221,10 @@ def rl_actions(*_): if type(done) is dict and done['__all__'] or type(done) is not dict and done: break - if multiagent: + if rets and multiagent: for key in rets.keys(): rets[key].append(ret[key]) - else: + elif not multiagent: rets.append(ret) # Store the information from the run in info_dict. @@ -236,11 +236,11 @@ def rl_actions(*_): for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) - if multiagent: + if rets and multiagent: for agent_id, rew in rets.items(): print('Round {}, Return: {} for agent {}'.format( i, rew, agent_id)) - else: + elif not multiagent: print('Round {}, Return: {}'.format(i, ret)) # Print the averages/std for all variables in the info_dict.
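For reference, a minimal sketch (not part of this patch series) of how the extended Experiment API above can be driven from a standalone script. The my_flow_params object and the single shared "av" policy are hypothetical placeholders; run() mutates the rets dict in place, appending one cumulative return per rollout under each policy key:

from flow.core.experiment import Experiment

def policy_map_fn(agent_id):
    # assumed convention: every agent maps to one shared policy named "av"
    return "av"

rets = {"av": []}  # filled in place by run(), one entry per rollout

exp = Experiment(my_flow_params, register_with_ray=False)
info_dict = exp.run(
    num_runs=2,
    rl_actions=None,  # substitute a trained-policy lookup to evaluate agents
    multiagent=True,
    rets=rets,
    policy_map_fn=policy_map_fn)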
diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 162a16121..49189a12d 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -36,6 +36,8 @@ from flow.data_pipeline.leaderboard_utils import network_name_translate import uuid +from flow.core.experiment import Experiment + EXAMPLE_USAGE = """ example usage: python i210_replay.py -r /ray_results/experiment_dir/result_dir -c 1 @@ -132,15 +134,14 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= env_params.horizon = args.horizon # Create and register a gym+rllib env - create_env, env_name = make_create_env(params=flow_params, version=0) - env = create_env(env_name) + exp = Experiment(flow_params) if args.render_mode == 'sumo_gui': - env.sim_params.render = True # set to True after initializing agent and env + exp.env.sim_params.render = True # set to True after initializing agent and env # if restart_instance, don't restart here because env.reset will restart later if not sim_params.restart_instance: - env.restart_simulation(sim_params=sim_params, render=sim_params.render) + exp.env.restart_simulation(sim_params=sim_params, render=sim_params.render) if rllib_config: # check if we have a multiagent environment but in a @@ -221,176 +222,166 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= }) # reroute on exit is a training hack, it should be turned off at test time. - if hasattr(env, "reroute_on_exit"): - env.reroute_on_exit = False - - # date pipeline - extra_info = defaultdict(lambda: []) - source_id = 'flow_{}'.format(uuid.uuid4().hex) - metadata = defaultdict(lambda: []) - # collect current time - cur_datetime = datetime.now(timezone.utc) - cur_date = cur_datetime.date().isoformat() - cur_time = cur_datetime.time().isoformat() - metadata['source_id'].append(source_id) - metadata['submission_time'].append(cur_time) - metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) - metadata['is_baseline'].append(str(args.is_baseline)) - name, strategy = get_configuration() - metadata['submitter_name'].append(name) - metadata['strategy'].append(strategy) - - i = 0 - while i < args.num_rollouts: - print("Rollout iter", i) - vel = [] - per_vehicle_energy_trace = defaultdict(lambda: []) - completed_veh_types = {} - completed_vehicle_avg_energy = {} - completed_vehicle_travel_time = {} - custom_vals = {key: [] for key in custom_callables.keys()} - run_id = "run_{}".format(i) - env.pipeline_params = (extra_info, source_id, run_id) - state = env.reset() - initial_vehicles = set(env.k.vehicle.get_ids()) - for _ in range(env_params.horizon): - if rllib_config: - if multiagent: - action = {} - for agent_id in state.keys(): - if use_lstm: - action[agent_id], lstm_state[agent_id], _ = \ - agent.compute_action( - state[agent_id], state=lstm_state[agent_id], - policy_id=policy_map_fn(agent_id)) - else: - action[agent_id] = agent.compute_action( - state[agent_id], policy_id=policy_map_fn(agent_id)) - else: + if hasattr(exp.env, "reroute_on_exit"): + exp.env.reroute_on_exit = False + + def rl_action(state): + if rllib_config: + if multiagent: + action = {} + for agent_id in state.keys(): if use_lstm: - raise NotImplementedError + action[agent_id], lstm_state[agent_id], _ = \ + agent.compute_action( + state[agent_id], state=lstm_state[agent_id], + policy_id=policy_map_fn(agent_id)) else: - action = agent.compute_action(state) + action[agent_id] = agent.compute_action( + state[agent_id], 
policy_id=policy_map_fn(agent_id)) else: - action = None - - state, reward, done, _ = env.step(action) - - # Compute the velocity speeds and cumulative returns. - veh_ids = env.k.vehicle.get_ids() - vel.append(np.mean(env.k.vehicle.get_speed(veh_ids))) - - # collect additional information for the data pipeline - get_extra_info(env.k.vehicle, extra_info, veh_ids, source_id, run_id) - - # Compute the results for the custom callables. - for (key, lambda_func) in custom_callables.items(): - custom_vals[key].append(lambda_func(env)) - - for past_veh_id in per_vehicle_energy_trace.keys(): - if past_veh_id not in veh_ids and past_veh_id not in completed_vehicle_avg_energy: - all_trip_energy_distribution[completed_veh_types[past_veh_id]].append( - np.sum(per_vehicle_energy_trace[past_veh_id])) - all_trip_time_distribution[completed_veh_types[past_veh_id]].append( - len(per_vehicle_energy_trace[past_veh_id])) - completed_vehicle_avg_energy[past_veh_id] = np.sum(per_vehicle_energy_trace[past_veh_id]) - completed_vehicle_travel_time[past_veh_id] = len(per_vehicle_energy_trace[past_veh_id]) - - for veh_id in veh_ids: - if veh_id not in initial_vehicles: - if veh_id not in per_vehicle_energy_trace: - # we have to skip the first step's energy calculation - per_vehicle_energy_trace[veh_id].append(0) - completed_veh_types[veh_id] = env.k.vehicle.get_type(veh_id) - else: - per_vehicle_energy_trace[veh_id].append(-1 * vehicle_energy_consumption(env, veh_id)) - - if type(done) is dict and done['__all__']: - break - elif type(done) is not dict and done: - break - elif max_completed_trips is not None and len(completed_vehicle_avg_energy) > max_completed_trips: - break - if env.crash: - print("Crash on iter", i) - else: - # Store the information from the run in info_dict. - outflow = env.k.vehicle.get_outflow_rate(int(500)) - info_dict["velocities"].append(np.mean(vel)) - info_dict["outflows"].append(outflow) - info_dict["avg_trip_energy"].append(np.mean(list(completed_vehicle_avg_energy.values()))) - info_dict["avg_trip_time"].append(np.mean(list(completed_vehicle_travel_time.values()))) - info_dict["total_completed_trips"].append(len(list(completed_vehicle_avg_energy.values()))) - for key in custom_vals.keys(): - info_dict[key].append(np.mean(custom_vals[key])) - i += 1 - - print('======== Summary of results ========') - if args.run_transfer: - print("Transfer test: {}".format(transfer_test.transfer_str)) - print("====================================") - - # Print the averages/std for all variables in the info_dict. 
- for key in info_dict.keys(): - print("Average, std {}: {}, {}".format( - key, np.mean(info_dict[key]), np.std(info_dict[key]))) - - # terminate the environment - env.unwrapped.terminate() - - if output_dir: - ensure_dir(output_dir) - if args.run_transfer: - exp_name = "{}-replay".format(transfer_test.transfer_str) + if use_lstm: + raise NotImplementedError + else: + action = agent.compute_action(state) else: - exp_name = "i210_replay" - replay_out = os.path.join(output_dir, '{}-info.npy'.format(exp_name)) - np.save(replay_out, info_dict) - # if prompted, convert the emission file into a csv file - if args.gen_emission: - emission_filename = '{0}-emission.xml'.format(env.network.name) - time.sleep(0.1) - - emission_path = \ - '{0}/test_time_rollout/{1}'.format(dir_path, emission_filename) - - output_path = os.path.join(output_dir, '{}-emission.csv'.format(exp_name)) - # convert the emission file into a csv file - emission_to_csv(emission_path, output_path=output_path) - - # generate the trajectory output file - trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - write_dict_to_csv(trajectory_table_path, extra_info, True) - metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) - write_dict_to_csv(metadata_table_path, metadata, True) - - # upload to s3 if asked - if args.use_s3: - upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' - '{1}_METADATA.csv'.format(cur_date, source_id), - metadata_table_path) - upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( - cur_date, source_id), - trajectory_table_path, {'network': metadata['network'][0]}) - - # print the location of the emission csv file - print("\nGenerated emission file at " + output_path) - - # delete the .xml version of the emission file - os.remove(emission_path) - - all_trip_energies = os.path.join(output_dir, '{}-all_trip_energies.npy'.format(exp_name)) - np.save(all_trip_energies, dict(all_trip_energy_distribution)) - fig_names, figs = plot_trip_distribution(all_trip_energy_distribution) - - for fig_name, fig in zip(fig_names, figs): - edist_out = os.path.join(output_dir, '{}_energy_distribution.png'.format(fig_name)) - fig.savefig(edist_out) - - # Create the flow_params object - with open(os.path.join(output_dir, exp_name) + '.json', 'w') as outfile: - json.dump(flow_params, outfile, - cls=FlowParamsEncoder, sort_keys=True, indent=4) + action = None + + info_dict = exp.run(num_runs=args.num_rollouts, convert_to_csv=args.gen_emission, to_aws=args.use_s3, + rl_actions=rl_action, multiagent=rllib_config and multiagent) + + # i = 0 + # while i < args.num_rollouts: + # print("Rollout iter", i) + # vel = [] + # per_vehicle_energy_trace = defaultdict(lambda: []) + # completed_veh_types = {} + # completed_vehicle_avg_energy = {} + # completed_vehicle_travel_time = {} + # custom_vals = {key: [] for key in custom_callables.keys()} + # run_id = "run_{}".format(i) + # env.pipeline_params = (extra_info, source_id, run_id) + # state = env.reset() + # initial_vehicles = set(env.k.vehicle.get_ids()) + # for _ in range(env_params.horizon): + # + # + # state, reward, done, _ = env.step(action) + # + # # Compute the velocity speeds and cumulative returns. 
+ # veh_ids = env.k.vehicle.get_ids() + # vel.append(np.mean(env.k.vehicle.get_speed(veh_ids))) + # + # # collect additional information for the data pipeline + # get_extra_info(env.k.vehicle, extra_info, veh_ids, source_id, run_id) + # + # # Compute the results for the custom callables. + # for (key, lambda_func) in custom_callables.items(): + # custom_vals[key].append(lambda_func(env)) + # + # for past_veh_id in per_vehicle_energy_trace.keys(): + # if past_veh_id not in veh_ids and past_veh_id not in completed_vehicle_avg_energy: + # all_trip_energy_distribution[completed_veh_types[past_veh_id]].append( + # np.sum(per_vehicle_energy_trace[past_veh_id])) + # all_trip_time_distribution[completed_veh_types[past_veh_id]].append( + # len(per_vehicle_energy_trace[past_veh_id])) + # completed_vehicle_avg_energy[past_veh_id] = np.sum(per_vehicle_energy_trace[past_veh_id]) + # completed_vehicle_travel_time[past_veh_id] = len(per_vehicle_energy_trace[past_veh_id]) + # + # for veh_id in veh_ids: + # if veh_id not in initial_vehicles: + # if veh_id not in per_vehicle_energy_trace: + # # we have to skip the first step's energy calculation + # per_vehicle_energy_trace[veh_id].append(0) + # completed_veh_types[veh_id] = env.k.vehicle.get_type(veh_id) + # else: + # per_vehicle_energy_trace[veh_id].append(-1 * vehicle_energy_consumption(env, veh_id)) + # + # if type(done) is dict and done['__all__']: + # break + # elif type(done) is not dict and done: + # break + # elif max_completed_trips is not None and len(completed_vehicle_avg_energy) > max_completed_trips: + # break + # if env.crash: + # print("Crash on iter", i) + # else: + # # Store the information from the run in info_dict. + # outflow = env.k.vehicle.get_outflow_rate(int(500)) + # info_dict["velocities"].append(np.mean(vel)) + # info_dict["outflows"].append(outflow) + # info_dict["avg_trip_energy"].append(np.mean(list(completed_vehicle_avg_energy.values()))) + # info_dict["avg_trip_time"].append(np.mean(list(completed_vehicle_travel_time.values()))) + # info_dict["total_completed_trips"].append(len(list(completed_vehicle_avg_energy.values()))) + # for key in custom_vals.keys(): + # info_dict[key].append(np.mean(custom_vals[key])) + # i += 1 + # + # print('======== Summary of results ========') + # if args.run_transfer: + # print("Transfer test: {}".format(transfer_test.transfer_str)) + # print("====================================") + # + # # Print the averages/std for all variables in the info_dict. 
+ # for key in info_dict.keys(): + # print("Average, std {}: {}, {}".format( + # key, np.mean(info_dict[key]), np.std(info_dict[key]))) + # + # # terminate the environment + # env.unwrapped.terminate() + # + # if output_dir: + # ensure_dir(output_dir) + # if args.run_transfer: + # exp_name = "{}-replay".format(transfer_test.transfer_str) + # else: + # exp_name = "i210_replay" + # replay_out = os.path.join(output_dir, '{}-info.npy'.format(exp_name)) + # np.save(replay_out, info_dict) + # # if prompted, convert the emission file into a csv file + # if args.gen_emission: + # emission_filename = '{0}-emission.xml'.format(env.network.name) + # time.sleep(0.1) + # + # emission_path = \ + # '{0}/test_time_rollout/{1}'.format(dir_path, emission_filename) + # + # output_path = os.path.join(output_dir, '{}-emission.csv'.format(exp_name)) + # # convert the emission file into a csv file + # emission_to_csv(emission_path, output_path=output_path) + # + # # generate the trajectory output file + # trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) + # write_dict_to_csv(trajectory_table_path, extra_info, True) + # metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) + # write_dict_to_csv(metadata_table_path, metadata, True) + # + # # upload to s3 if asked + # if args.use_s3: + # upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' + # '{1}_METADATA.csv'.format(cur_date, source_id), + # metadata_table_path) + # upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( + # cur_date, source_id), + # trajectory_table_path, {'network': metadata['network'][0]}) + # + # # print the location of the emission csv file + # print("\nGenerated emission file at " + output_path) + # + # # delete the .xml version of the emission file + # os.remove(emission_path) + # + # all_trip_energies = os.path.join(output_dir, '{}-all_trip_energies.npy'.format(exp_name)) + # np.save(all_trip_energies, dict(all_trip_energy_distribution)) + # fig_names, figs = plot_trip_distribution(all_trip_energy_distribution) + # + # for fig_name, fig in zip(fig_names, figs): + # edist_out = os.path.join(output_dir, '{}_energy_distribution.png'.format(fig_name)) + # fig.savefig(edist_out) + # + # # Create the flow_params object + # with open(os.path.join(output_dir, exp_name) + '.json', 'w') as outfile: + # json.dump(flow_params, outfile, + # cls=FlowParamsEncoder, sort_keys=True, indent=4) return info_dict diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index a87783273..1a6f0ffcd 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -33,12 +33,6 @@ from flow.utils.rllib import get_rllib_config from flow.utils.rllib import get_rllib_pkl -from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration -from flow.data_pipeline.leaderboard_utils import network_name_translate -from collections import defaultdict -from datetime import datetime, timezone -import uuid - from flow.core.experiment import Experiment EXAMPLE_USAGE = """ @@ -139,7 +133,7 @@ def visualizer_rllib(args): sim_params.save_render = True # Create and register a gym+rllib env - exp = Experiment(flow_params, use_ray=True) + exp = Experiment(flow_params, register_with_ray=True) register_env(exp.env_name, exp.create_env) # check if the environment is a single or multiagent environment, and @@ -230,8 +224,9 @@ def rl_action(state): 
else: action = agent.compute_action(state) return action - exp.run(num_runs=args.num_rollouts, rl_actions=rl_action, multiagent=multiagent, rets=rets, - policy_map_fn=policy_map_fn) + + exp.run(num_runs=args.num_rollouts, convert_to_csv=args.gen_emission, to_aws=args.to_aws, + rl_actions=rl_action, multiagent=multiagent, rets=rets, policy_map_fn=policy_map_fn) def create_parser(): From cb74b8cbcadc33d2fe4be56fcc662943ff4542c5 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 6 Jul 2020 16:53:03 -0700 Subject: [PATCH 296/438] fix pydocstyle --- flow/visualize/time_space_diagram.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index 7182e8acf..b3c0e8091 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -1,4 +1,5 @@ """Generate a time space diagram for some networks. + This method accepts as input a csv file containing the sumo-formatted emission file, and then uses this data to generate a time-space diagram, with the x-axis being the time (in seconds), the y-axis being the position of a vehicle, and @@ -42,6 +43,7 @@ def import_data_from_trajectory(fp, params=dict()): r"""Import and preprocess data from the Flow trajectory (.csv) file. + Parameters ---------- fp : str @@ -54,6 +56,7 @@ def import_data_from_trajectory(fp, params=dict()): * "net_params" (flow.core.params.NetParams): network-specific parameters. This is used to collect the lengths of various network links. + Returns ------- pd.DataFrame @@ -81,6 +84,7 @@ def import_data_from_trajectory(fp, params=dict()): def get_time_space_data(data, params): r"""Compute the unique inflows and subsequent outflow statistics. + Parameters ---------- data : pd.DataFrame @@ -93,6 +97,7 @@ def get_time_space_data(data, params): * "net_params" (flow.core.params.NetParams): network-specific parameters. This is used to collect the lengths of various network links. + Returns ------- ndarray (or dict < str, np.ndarray >) @@ -102,6 +107,7 @@ def get_time_space_data(data, params): in the case of I210, the nested arrays are wrapped into a dict, keyed on the lane number, so that each lane can be plotted separately. + Raises ------ AssertionError @@ -134,10 +140,12 @@ def _merge(data): This only include vehicles on the main highway, and not on the adjacent on-ramp. + Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data + Returns ------- ndarray @@ -186,10 +194,12 @@ def _ring_road(data): Vehicles that reach the top of the plot simply return to the bottom and continue. + Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data + Returns ------- ndarray @@ -206,12 +216,15 @@ def _ring_road(data): def _i210_subnetwork(data): r"""Generate time and position data for the i210 subnetwork. + We generate plots for all lanes, so the segments are wrapped in a dictionary. + Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data + Returns ------- dict < str, np.ndarray > @@ -240,10 +253,12 @@ def _figure_eight(data): The vehicles traveling towards the intersection from one side will be plotted from the top downward, while the vehicles from the other side will be plotted from the bottom upward. 
+ Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data + Returns ------- ndarray @@ -260,13 +275,16 @@ def _figure_eight(data): def _get_abs_pos(df, params): """Compute the absolute positions from edges and relative positions. + This is the variable we will ultimately use to plot individual vehicles. + Parameters ---------- df : pd.DataFrame dataframe of trajectory data params : dict flow-specific parameters + Returns ------- pd.Series @@ -366,7 +384,9 @@ def _get_abs_pos(df, params): def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None): """Plot the time-space diagram. + Take the pre-processed segments and other meta-data, then plot all the line segments. + Parameters ---------- ax : matplotlib.axes.Axes @@ -379,14 +399,11 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) parsed arguments lane : int, optional lane number to be shown in plot title -<<<<<<< HEAD -======= ghost_edges : list or set of str ghost edge names to be greyed out, default None ghost_bounds : tuple lower and upper bounds of domain, excluding ghost edges, default None ->>>>>>> 06ff2d970176c51dee5a5be092b85d44e84e6d82 Returns ------- None From eb2416b941bc62110ed4b1a99c80063eb726bdbe Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 6 Jul 2020 16:54:24 -0700 Subject: [PATCH 297/438] add docstring --- flow/data_pipeline/data_pipeline.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 74070cc7a..858640914 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -143,6 +143,7 @@ def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): def update_baseline(s3, baseline_network, baseline_source_id): + """Update the baseline table on S3 if new baseline run is added.""" obj = s3.get_object(Bucket='circles.data.pipeline', Key='baseline_table/baselines.csv')['Body'] original_str = obj.read().decode() reader = csv.DictReader(StringIO(original_str)) From 6ed00e374b6495f5ece0bac1e6bcd2e10be1f4f9 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 6 Jul 2020 16:55:18 -0700 Subject: [PATCH 298/438] remove excess whitespace --- flow/visualize/time_space_diagram.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index b3c0e8091..b1500b48d 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -386,7 +386,7 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) """Plot the time-space diagram. Take the pre-processed segments and other meta-data, then plot all the line segments. 
- + Parameters ---------- ax : matplotlib.axes.Axes From b80e5634be495f85e3e245e61ad2a9681694f8ea Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 6 Jul 2020 18:01:11 -0700 Subject: [PATCH 299/438] only call get_configuration() if to_aws --- flow/core/experiment.py | 7 ++++--- flow/visualize/i210_replay.py | 7 ++++--- flow/visualize/visualizer_rllib.py | 7 ++++--- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 1274280ba..464b0a405 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -163,9 +163,10 @@ def rl_actions(*_): metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(self.env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(is_baseline)) - name, strategy = get_configuration() - metadata['submitter_name'].append(name) - metadata['strategy'].append(strategy) + if to_aws: + name, strategy = get_configuration() + metadata['submitter_name'].append(name) + metadata['strategy'].append(strategy) if convert_to_csv and self.env.simulator == "traci": dir_path = self.env.sim_params.emission_path diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 8a4684a61..c9e820b15 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -241,9 +241,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(args.is_baseline)) - name, strategy = get_configuration() - metadata['submitter_name'].append(name) - metadata['strategy'].append(strategy) + if args.to_aws: + name, strategy = get_configuration() + metadata['submitter_name'].append(name) + metadata['strategy'].append(strategy) i = 0 while i < args.num_rollouts: diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 059cabbbd..261dcbbc3 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -225,9 +225,10 @@ def visualizer_rllib(args): metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(args.is_baseline)) - name, strategy = get_configuration() - metadata['submitter_name'].append(name) - metadata['strategy'].append(strategy) + if args.to_aws: + name, strategy = get_configuration() + metadata['submitter_name'].append(name) + metadata['strategy'].append(strategy) # Simulate and collect metrics final_outflows = [] From 7c9a48ad9a671a5b980934c3e06c20cf5f07401e Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 8 Jul 2020 11:10:45 -0700 Subject: [PATCH 300/438] Energy class for inventorying multiple energy models (#944) * New energy class to inventory multiple energy models Co-authored-by: Joy Carpio --- flow/core/kernel/vehicle/base.py | 16 ++++ flow/core/kernel/vehicle/traci.py | 16 ++++ flow/core/params.py | 21 ++++- flow/core/rewards.py | 139 ++++------------------------ flow/energy_models/base_energy.py | 59 ++++++++++++ flow/energy_models/power_demand.py | 127 +++++++++++++++++++++++++ flow/energy_models/toyota_energy.py | 58 ++++++++++++ tests/fast_tests/test_examples.py | 4 +- 8 files changed, 318 insertions(+), 122 deletions(-) create mode 100644 flow/energy_models/base_energy.py create mode 100644 flow/energy_models/power_demand.py create mode 100644 
flow/energy_models/toyota_energy.py diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index fc1818e58..843ec7eb6 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -363,6 +363,22 @@ def get_fuel_consumption(self, veh_id, error=-1001): """ pass + @abstractmethod + def get_energy_model(self, veh_id, error=""): + """Return the energy model class object of the specified vehicle. + + Parameters + ---------- + veh_id : str or list of str + vehicle id, or list of vehicle ids + error : str + value that is returned if the vehicle is not found + Returns + ------- + subclass of BaseEnergyModel + """ + pass + @abstractmethod def get_speed(self, veh_id, error=-1001): """Return the speed of the specified vehicle. diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 0fa1e6e17..ef401d180 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -292,6 +292,12 @@ def _add_departed(self, veh_id, veh_type): # specify the type self.__vehicles[veh_id]["type"] = veh_type + # specify energy model + energy_model = \ + self.type_parameters[veh_type]["energy_model"] + self.__vehicles[veh_id]["energy_model"] = \ + energy_model[0](veh_id, **energy_model[1]) + car_following_params = \ self.type_parameters[veh_type]["car_following_params"] @@ -549,6 +555,16 @@ def get_fuel_consumption(self, veh_id, error=-1001): return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons + def get_energy_model(self, veh_id, error=""): + """See parent class.""" + if isinstance(veh_id, (list, np.ndarray)): + return [self.get_energy_model(vehID) for vehID in veh_id] + try: + return self.__vehicles.get(veh_id, {'energy_model': error})['energy_model'] + except KeyError: + print("Energy model not specified for vehicle {}".format(veh_id)) + raise + def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): diff --git a/flow/core/params.py b/flow/core/params.py index 94970d614..c6feb5086 100755 --- a/flow/core/params.py +++ b/flow/core/params.py @@ -7,6 +7,10 @@ from flow.controllers.car_following_models import SimCarFollowingController from flow.controllers.rlcontroller import RLController from flow.controllers.lane_change_controllers import SimLaneChangeController +from flow.energy_models.toyota_energy import PriusEnergy +from flow.energy_models.toyota_energy import TacomaEnergy +from flow.energy_models.power_demand import PDMCombustionEngine +from flow.energy_models.power_demand import PDMElectric SPEED_MODES = { @@ -39,6 +43,9 @@ "only_right_drive_safe": 576 } +ENERGY_MODELS = set([PriusEnergy, TacomaEnergy, PDMCombustionEngine, PDMElectric]) +DEFAULT_ENERGY_MODEL = PDMCombustionEngine + # Traffic light defaults PROGRAM_ID = 1 MAX_GAP = 3.0 @@ -262,6 +269,7 @@ def add(self, num_vehicles=0, car_following_params=None, lane_change_params=None, + energy_model=DEFAULT_ENERGY_MODEL, color=None): """Add a sequence of vehicles to the list of vehicles in the network. @@ -298,6 +306,12 @@ def add(self, # FIXME: depends on simulator lane_change_params = SumoLaneChangeParams() + if energy_model not in ENERGY_MODELS: + print('{} for vehicle {} is not a valid energy model. 
Defaulting to {}\n'.format(energy_model, + veh_id, + DEFAULT_ENERGY_MODEL)) + energy_model = DEFAULT_ENERGY_MODEL + type_params = {} type_params.update(car_following_params.controller_params) type_params.update(lane_change_params.controller_params) @@ -311,7 +325,8 @@ def add(self, "routing_controller": routing_controller, "initial_speed": initial_speed, "car_following_params": car_following_params, - "lane_change_params": lane_change_params} + "lane_change_params": lane_change_params, + "energy_model": energy_model} if color: type_params['color'] = color @@ -334,7 +349,9 @@ def add(self, "car_following_params": car_following_params, "lane_change_params": - lane_change_params + lane_change_params, + "energy_model": + energy_model }) # This is used to return the actual headways from the vehicles class. diff --git a/flow/core/rewards.py b/flow/core/rewards.py index 6462b0a8c..b4af4c5bc 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -306,139 +306,40 @@ def punish_rl_lane_changes(env, penalty=1): return total_lane_change_penalty -def energy_consumption(env, gain=.001): - """Calculate power consumption of a vehicle. - - Assumes vehicle is an average sized vehicle. - The power calculated here is the lower bound of the actual power consumed - by a vehicle. - """ - power = 0 - - M = 1200 # mass of average sized vehicle (kg) - g = 9.81 # gravitational acceleration (m/s^2) - Cr = 0.005 # rolling resistance coefficient - Ca = 0.3 # aerodynamic drag coefficient - rho = 1.225 # air density (kg/m^3) - A = 2.6 # vehicle cross sectional area (m^2) - for veh_id in env.k.vehicle.get_ids(): - if veh_id not in env.k.vehicle.previous_speeds.keys(): - continue - speed = env.k.vehicle.get_speed(veh_id) - prev_speed = env.k.vehicle.get_previous_speed(veh_id) - - accel = abs(speed - prev_speed) / env.sim_step - - power += M * speed * accel + M * g * Cr * speed + 0.5 * rho * A * Ca * speed ** 3 - - return -gain * power - - -def veh_energy_consumption(env, veh_id, gain=.001): - """Calculate power consumption of a vehicle. - - Assumes vehicle is an average sized vehicle. - The power calculated here is the lower bound of the actual power consumed - by a vehicle. - """ - power = 0 - - M = 1200 # mass of average sized vehicle (kg) - g = 9.81 # gravitational acceleration (m/s^2) - Cr = 0.005 # rolling resistance coefficient - Ca = 0.3 # aerodynamic drag coefficient - rho = 1.225 # air density (kg/m^3) - A = 2.6 # vehicle cross sectional area (m^2) - - if veh_id not in env.k.vehicle.previous_speeds: - return 0 - - speed = env.k.vehicle.get_speed(veh_id) - prev_speed = env.k.vehicle.get_previous_speed(veh_id) - - accel = abs(speed - prev_speed) / env.sim_step - - power += M * speed * accel + M * g * Cr * speed + 0.5 * rho * A * Ca * speed ** 3 - - return -gain * power - - -def miles_per_megajoule(env, veh_ids=None, gain=.001): - """Calculate miles per mega-joule of either a particular vehicle or the total average of all the vehicles. - - Assumes vehicle is an average sized vehicle. - The power calculated here is the lower bound of the actual power consumed - by a vehicle. +def instantaneous_mpg(env, veh_ids=None, gain=.001): + """Calculate the instantaneous mpg for every simulation step specific to the vehicle type. Parameters ---------- env : flow.envs.Env the environment variable, which contains information on the current state of the system. 
- veh_ids : [list] - list of veh_ids to compute the reward over + veh_ids : [list] or str + list of veh_ids or single veh_id to compute the reward over gain : float scaling factor for the reward """ - mpj = 0 - counter = 0 if veh_ids is None: veh_ids = env.k.vehicle.get_ids() elif not isinstance(veh_ids, list): veh_ids = [veh_ids] - for veh_id in veh_ids: - speed = env.k.vehicle.get_speed(veh_id) - # convert to be positive since the function called is a penalty - power = -veh_energy_consumption(env, veh_id, gain=1.0) - if power > 0 and speed >= 0.1: - counter += 1 - # meters / joule is (v * \delta t) / (power * \delta t) - mpj += speed / power - if counter > 0: - mpj /= counter - - # convert from meters per joule to miles per joule - mpj /= 1609.0 - # convert from miles per joule to miles per megajoule - mpj *= 10 ** 6 - - return mpj * gain - -def miles_per_gallon(env, veh_ids=None, gain=.001): - """Calculate mpg of either a particular vehicle or the total average of all the vehicles. - - Assumes vehicle is an average sized vehicle. - The power calculated here is the lower bound of the actual power consumed - by a vehicle. - - Parameters - ---------- - env : flow.envs.Env - the environment variable, which contains information on the current - state of the system. - veh_ids : [list] - list of veh_ids to compute the reward over - gain : float - scaling factor for the reward - """ - mpg = 0 - counter = 0 - if veh_ids is None: - veh_ids = env.k.vehicle.get_ids() - elif not isinstance(veh_ids, list): - veh_ids = [veh_ids] + cumulative_gallons = 0 + cumulative_distance = 0 for veh_id in veh_ids: - speed = env.k.vehicle.get_speed(veh_id) - gallons_per_s = env.k.vehicle.get_fuel_consumption(veh_id) - if gallons_per_s > 0 and speed >= 0.0: - counter += 1 - # meters / gallon is (v * \delta t) / (gallons_per_s * \delta t) - mpg += speed / gallons_per_s - if counter > 0: - mpg /= counter - - # convert from meters per gallon to miles per gallon - mpg /= 1609.0 + energy_model = env.k.vehicle.get_energy_model(veh_id) + if energy_model != "": + speed = env.k.vehicle.get_speed(veh_id) + accel = env.k.vehicle.get_accel_no_noise_with_failsafe(veh_id) + grade = env.k.vehicle.get_road_grade(veh_id) + gallons_per_hr = energy_model.get_instantaneous_fuel_consumption(accel, speed, grade) + if gallons_per_hr > 0 and speed >= 0.0: + cumulative_gallons += gallons_per_hr + cumulative_distance += speed + + cumulative_gallons /= 3600.0 + cumulative_distance /= 1609.0 + # miles / gallon is (distance_dot * \delta t) / (gallons_dot * \delta t) + mpg = cumulative_distance / cumulative_gallons return mpg * gain diff --git a/flow/energy_models/base_energy.py b/flow/energy_models/base_energy.py new file mode 100644 index 000000000..bf1e16e09 --- /dev/null +++ b/flow/energy_models/base_energy.py @@ -0,0 +1,59 @@ +"""Script containing the base vehicle energy class.""" +from abc import ABCMeta, abstractmethod + + +class BaseEnergyModel(metaclass=ABCMeta): + """Base energy model class. + + Calculate the instantaneous power consumption of a vehicle in + the network. It returns the power in Watts regardless of the + vehicle type: whether EV or Combustion Engine, Toyota Prius or Tacoma + or non-Toyota vehicles. Non-Toyota vehicles are set by default + to be an averaged-size vehicle. 
+ """ + + def __init__(self, kernel): + self.k = kernel + + # 15 kilowatts = 1 gallon/hour conversion factor + self.conversion = 15e3 + + @abstractmethod + def get_instantaneous_power(self, accel, speed, grade): + """Calculate the instantaneous power consumption of a vehicle. + + Must be implemented by child classes. + + Parameters + ---------- + accel : float + Instantaneous acceleration of the vehicle + speed : float + Instantaneous speed of the vehicle + grade : float + Instantaneous road grade of the vehicle + Returns + ------- + float + """ + pass + + def get_instantaneous_fuel_consumption(self, accel, speed, grade): + """Calculate the instantaneous fuel consumption of a vehicle. + + Fuel consumption is reported in gallons per hour, with the conversion + rate of 15kW = 1 gallon/hour. + + Parameters + ---------- + accel : float + Instantaneous acceleration of the vehicle + speed : float + Instantaneous speed of the vehicle + grade : float + Instantaneous road grade of the vehicle + Returns + ------- + float + """ + return self.get_instantaneous_power(accel, speed, grade) * self.conversion diff --git a/flow/energy_models/power_demand.py b/flow/energy_models/power_demand.py new file mode 100644 index 000000000..ddf09b2fc --- /dev/null +++ b/flow/energy_models/power_demand.py @@ -0,0 +1,127 @@ +"""Script containing the vehicle power demand model energy classes.""" +import math +import numpy as np +from flow.energy_models.base_energy import BaseEnergyModel +from abc import ABCMeta, abstractmethod + + +class PowerDemandModel(BaseEnergyModel, metaclass=ABCMeta): + """Vehicle Power Demand base energy model class. + + Calculate power consumption of a vehicle based on physics + derivation. Assumes some vehicle characteristics. The + power calculated here is the lower bound of the actual + power consumed by the vehicle plus a bilinear polynomial + function used as a correction factor. + """ + + def __init__(self, + kernel, + mass=2041, + area=3.2, + rolling_res_coeff=0.0027, + aerodynamic_drag_coeff=0.4, + p1_correction=4598.7155, + p3_correction=975.12719): + self.k = kernel + self.g = 9.807 + self.rho_air = 1.225 + self.gamma = 1 + self.mass = mass + self.cross_area = area + self.rolling_res_coeff = rolling_res_coeff + self.aerodynamic_drag_coeff = aerodynamic_drag_coeff + self.p1_correction = p1_correction + self.p3_correction = p3_correction + + def calculate_power_at_the_wheels(self, accel, speed, grade): + """Calculate the instantaneous power required. + + Parameters + ---------- + accel : float + Instantaneous acceleration of the vehicle + speed : float + Instantaneous speed of the vehicle + grade : float + Instantaneous road grade of the vehicle + Returns + ------- + float + """ + accel_slope_forces = self.mass * speed * ((np.heaviside(accel, 0.5) * (1 - self.gamma) + self.gamma)) * accel + accel_slope_forces += self.g * math.sin(grade) + rolling_friction = self.mass * self.g * self.rolling_res_coeff * speed + air_drag = 0.5 * self.rho_air * self.cross_area * self.aerodynamic_drag_coeff * speed**3 + power = accel_slope_forces + rolling_friction + air_drag + return power + + @abstractmethod + def get_regen_cap(self, accel, speed, grade): + """Set the maximum power retainable from regenerative braking. + + A negative regen cap is interpretted as a positive regenerative power. 
+    @abstractmethod
+    def get_regen_cap(self, accel, speed, grade):
+        """Set the maximum power retainable from regenerative braking.
+
+        A negative regen cap is interpreted as a positive regenerative power.
+
+        Parameters
+        ----------
+        accel : float
+            Instantaneous acceleration of the vehicle
+        speed : float
+            Instantaneous speed of the vehicle
+        grade : float
+            Instantaneous road grade of the vehicle
+        Returns
+        -------
+        float
+        """
+        pass
+
+    def get_power_correction_factor(self, accel, speed, grade):
+        """Calculate the instantaneous power correction of a vehicle.
+
+        Parameters
+        ----------
+        accel : float
+            Instantaneous acceleration of the vehicle
+        speed : float
+            Instantaneous speed of the vehicle
+        grade : float
+            Instantaneous road grade of the vehicle
+        Returns
+        -------
+        float
+        """
+        return self.p1_correction * accel + self.p3_correction * accel * speed
+
+    def get_instantaneous_power(self, accel, speed, grade):
+        """See parent class.
+
+        Apply the regenerative braking cap to the modelled power demand.
+        """
+        regen_cap = self.get_regen_cap(accel, speed, grade)
+        power_at_the_wheels = max(regen_cap, self.calculate_power_at_the_wheels(accel, speed, grade))
+        correction_factor = max(regen_cap, self.get_power_correction_factor(accel, speed, grade))
+        return power_at_the_wheels + correction_factor
+
+
+class PDMCombustionEngine(PowerDemandModel):
+    """Power Demand Model for a combustion engine vehicle."""
+
+    def get_regen_cap(self, accel, speed, grade):
+        """See parent class."""
+        return 0
+
+
+class PDMElectric(PowerDemandModel):
+    """Power Demand Model for an electric vehicle."""
+
+    def __init__(self, kernel):
+        super(PDMElectric, self).__init__(kernel,
+                                          mass=1663,
+                                          area=2.4,
+                                          rolling_res_coeff=0.007,
+                                          aerodynamic_drag_coeff=0.24)
+
+    def get_regen_cap(self, accel, speed, grade):
+        """See parent class."""
+        return -2.8 * speed
diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py
new file mode 100644
index 000000000..d24b41662
--- /dev/null
+++ b/flow/energy_models/toyota_energy.py
@@ -0,0 +1,58 @@
+"""Script containing the Toyota energy classes."""
+import dill as pickle
+import boto3
+from flow.energy_models.base_energy import BaseEnergyModel
+import os
+from abc import ABCMeta, abstractmethod
+
+
+class ToyotaModel(BaseEnergyModel, metaclass=ABCMeta):
+    """Base Toyota Energy model class."""
+
+    def __init__(self, kernel, filename=None):
+        self.k = kernel
+
+        # download file from s3 bucket
+        s3 = boto3.client('s3')
+        s3.download_file('toyota.restricted', filename, 'temp.pkl')
+        with open('temp.pkl', 'rb') as file:
+            self.toyota_energy = pickle.load(file)
+
+        # delete the pickle file once the model has been loaded
+        os.remove('temp.pkl')
+
+    @abstractmethod
+    def get_instantaneous_power(self, accel, speed, grade):
+        """See parent class."""
+        pass
+
+
+class PriusEnergy(ToyotaModel):
+    """Toyota Prius (EV) energy model class."""
+
+    def __init__(self, kernel, soc=0.9):
+        super(PriusEnergy, self).__init__(kernel, filename='prius_ev.pkl')
+        self.soc = soc
+
+    def get_instantaneous_power(self, accel, speed, grade):
+        """See parent class."""
+        socdot = self.toyota_energy(self.soc, accel, speed, grade)
+        self.soc -= socdot * self.k.env.sim_step
+        # FIXME (Joy): convert socdot to power
+        return socdot
+
+
+class TacomaEnergy(ToyotaModel):
+    """Toyota Tacoma energy model class."""
+
+    def __init__(self, kernel):
+        super(TacomaEnergy, self).__init__(kernel, filename='tacoma.pkl')
+
+    def get_instantaneous_power(self, accel, speed, grade):
+        """See parent class."""
+        return self.get_instantaneous_fuel_consumption(accel, speed, grade) * self.conversion
+
+    def get_instantaneous_fuel_consumption(self, accel, speed, grade):
+        """See parent class."""
+        fc = self.toyota_energy(accel, speed,
grade) + return fc diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index b5faf6517..8e871afb4 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -168,6 +168,7 @@ def test_parse_args(self): self.assertDictEqual(vars(args), { 'exp_config': 'exp_config', + 'local_mode': False, 'rl_trainer': 'rllib', 'num_cpus': 1, 'num_steps': 5000, @@ -188,6 +189,7 @@ def test_parse_args(self): self.assertDictEqual(vars(args), { 'checkpoint_path': '5', 'exp_config': 'exp_config', + 'local_mode': False, 'num_cpus': 1, 'num_steps': 3, 'rl_trainer': 'h-baselines', @@ -409,7 +411,7 @@ def run_exp(flow_params, **kwargs): alg_run, env_name, config = setup_rllib_exps(flow_params, 1, 1, **kwargs) try: - ray.init(num_cpus=1) + ray.init(num_cpus=1, local_mode=True) except Exception as e: print("ERROR", e) config['train_batch_size'] = 50 From c319a6c0d9ebf20777f3ebc42fda45947c6fef0b Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 8 Jul 2020 11:13:23 -0700 Subject: [PATCH 301/438] merge custom output and failsafes to master (#981) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * merge custom output and failsafes to master * add write_to_csv() function to master * include pipeline README.md * add data pipeline __init__ * add experiment.py changes * Update simulate.py * Update simulate.py * Update simulate.py * Update experiment.py * Update experiment.py * fix flake8 issues * fix flake8 issues * fixed h-baselines bug * Update experiment.py * potential bug fix * merge custom output and failsafes to master * add write_to_csv() function to master * include pipeline README.md * add data pipeline __init__ * add experiment.py changes * Update simulate.py * Update simulate.py * Update simulate.py * Update experiment.py * Update experiment.py * fix flake8 issues * fix flake8 issues * Update experiment.py * Replicated changes in 867. Done bug (#980) * Replicated changes in 867. 
Changes only made to traci
* Aimsun changes minus reset
* address aboudy comments
* revert change
* change warning print to ValueError message
* update to new update_accel methods
* address brent comments
* fix import typo
* address comments
* add display_warnings boolean
* add get_next_speed() function to base vehicle class
* revert addition of get_next_speed
* merge custom output and failsafes to master
* add write_to_csv() function to master
* include pipeline README.md
* add data pipeline __init__
* add experiment.py changes
* Update simulate.py
* Update simulate.py
* Update simulate.py
* Update experiment.py
* Update experiment.py
* fix flake8 issues
* fix flake8 issues
* Update experiment.py
* add experiment.py changes
* Update simulate.py
* Update simulate.py
* Update simulate.py
* Update experiment.py
* Update experiment.py
* fix flake8 issues
* address aboudy comments
* revert change
* change warning print to ValueError message
* update to new update_accel methods
* address brent comments
* fix import typo
* address comments
* add display_warnings boolean
* add get_next_speed() function to base vehicle class
* revert addition of get_next_speed
* remove duped line from rebase
* Update base_controller.py
* fix some bugs
* revert change to get_feasible_action call signature
* change print syntax to be python3.5 compliant
* add tests for new failsafe features
* fix failsafe unit tests
* fix failsafe unit tests
* fix unittest syntax
* fix typo
* smooth default to True
* rearrange raise exception for test coverage
* some minor fixes
* cleanup
* moved simulation logging to the simulation kernel (#991)
* moved simulation logging to the simulation kernel
* pydocstyle
* PR fixes
* bug fix

Co-authored-by: AboudyKreidieh
Co-authored-by: Kathy Jang
Co-authored-by: Nathan Lichtlé
---
 flow/controllers/base_controller.py           | 169 ++++++++++++++--
 flow/controllers/car_following_models.py      |  52 +++--
 flow/core/experiment.py                       |  27 +--
 flow/core/kernel/simulation/traci.py          | 182 ++++++++++++++++--
 flow/core/kernel/vehicle/base.py              |  35 +++-
 flow/core/kernel/vehicle/traci.py             |  66 ++++++-
 flow/visualize/time_space_diagram.py          |   9 +-
 tests/fast_tests/test_controllers.py          | 169 ++++++++++++++++
 .../fast_tests/test_experiment_base_class.py  |  36 +++-
 9 files changed, 665 insertions(+), 80 deletions(-)

diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py
index cef92d573..3c9985360 100755
--- a/flow/controllers/base_controller.py
+++ b/flow/controllers/base_controller.py
@@ -34,8 +34,12 @@ class BaseController(metaclass=ABCMeta):
         specified to in this model are as desired.
     delay : int
         delay in applying the action (time)
-    fail_safe : str
-        Should be either "instantaneous" or "safe_velocity"
+    fail_safe : list of str or str
+        List of failsafes which can be "instantaneous", "safe_velocity",
+        "feasible_accel", or "obey_speed_limit". The order of applying the
+        failsafes will be based on the order in the list.
+    display_warnings : bool
+        Flag for toggling on/off printing failsafe warnings to screen.
    noise : double
        variance of the gaussian from which to sample a noisy acceleration
    """

@@ -45,6 +49,7 @@ def __init__(self,
                  car_following_params,
                  delay=0,
                  fail_safe=None,
+                 display_warnings=True,
                  noise=0):
         """Instantiate the base class for acceleration behavior."""
         self.veh_id = veh_id
@@ -56,7 +61,29 @@
         self.delay = delay

         # longitudinal failsafe used by the vehicle
-        self.fail_safe = fail_safe
+        if isinstance(fail_safe, str):
+            failsafe_list = [fail_safe]
+        elif isinstance(fail_safe, list) or fail_safe is None:
+            failsafe_list = fail_safe
+        else:
+            raise ValueError("fail_safe should be a string or a list of strings.\n")
+
+        failsafe_map = {
+            'instantaneous': self.get_safe_action_instantaneous,
+            'safe_velocity': self.get_safe_velocity_action,
+            'feasible_accel': lambda _, accel: self.get_feasible_action(accel),
+            'obey_speed_limit': self.get_obey_speed_limit_action
+        }
+        self.failsafes = []
+        if failsafe_list:
+            for check in failsafe_list:
+                if check in failsafe_map:
+                    self.failsafes.append(failsafe_map.get(check))
+                else:
+                    raise ValueError('{} is not a valid failsafe.'.format(check))
+
+        self.display_warnings = display_warnings

         self.max_accel = car_following_params.controller_params['accel']
         # max deaccel should always be a positive
@@ -77,8 +104,8 @@ def get_action(self, env):
         time step.

         This method also augments the controller with the desired level of
-        stochastic noise, and utlizes the "instantaneous" or "safe_velocity"
-        failsafes if requested.
+        stochastic noise, and utilizes the "instantaneous", "safe_velocity",
+        "feasible_accel", and/or "obey_speed_limit" failsafes if requested.

         Parameters
         ----------
@@ -90,6 +117,12 @@
         float
             the modified form of the acceleration
         """
+        # clear the current stored accels of this vehicle to None
+        env.k.vehicle.update_accel(self.veh_id, None, noise=False, failsafe=False)
+        env.k.vehicle.update_accel(self.veh_id, None, noise=False, failsafe=True)
+        env.k.vehicle.update_accel(self.veh_id, None, noise=True, failsafe=False)
+        env.k.vehicle.update_accel(self.veh_id, None, noise=True, failsafe=True)
+
         # this is to avoid abrupt decelerations when a vehicle has just entered
         # a network and its data is still not subscribed
         if len(env.k.vehicle.get_edge(self.veh_id)) == 0:
@@ -107,16 +140,26 @@
         if accel is None:
             return None

+        # store the acceleration without noise to each vehicle
+        # run fail safe if requested
+        env.k.vehicle.update_accel(self.veh_id, accel, noise=False, failsafe=False)
+        accel_no_noise_with_failsafe = accel
+
+        for failsafe in self.failsafes:
+            accel_no_noise_with_failsafe = failsafe(env, accel_no_noise_with_failsafe)
+
+        env.k.vehicle.update_accel(self.veh_id, accel_no_noise_with_failsafe, noise=False, failsafe=True)
+
         # add noise to the accelerations, if requested
         if self.accel_noise > 0:
-            accel += np.random.normal(0, self.accel_noise)
+            accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise)
+        env.k.vehicle.update_accel(self.veh_id, accel, noise=True, failsafe=False)

-        # run the failsafes, if requested
-        if self.fail_safe == 'instantaneous':
-            accel = self.get_safe_action_instantaneous(env, accel)
-        elif self.fail_safe == 'safe_velocity':
-            accel = self.get_safe_velocity_action(env, accel)
+        # run the fail-safes, if requested
+        for failsafe in self.failsafes:
+            accel = failsafe(env, accel)
+        env.k.vehicle.update_accel(self.veh_id, accel, noise=True, failsafe=True)

         return accel
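To illustrate the new interface (a sketch, not taken from the patch; it assumes a standard Flow vehicle configuration), failsafes can now be stacked and are applied in list order:

    from flow.controllers.car_following_models import IDMController
    from flow.core.params import SumoCarFollowingParams, VehicleParams

    vehicles = VehicleParams()
    vehicles.add(
        "human",
        acceleration_controller=(IDMController, {
            # applied left to right: clip to feasible bounds first,
            # then enforce the edge speed limit
            "fail_safe": ["feasible_accel", "obey_speed_limit"],
            "display_warnings": False,  # silence failsafe warning prints
        }),
        car_following_params=SumoCarFollowingParams(speed_mode="aggressive"),
        num_vehicles=10)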
    def get_safe_action_instantaneous(self, env, action):
@@ -162,6 +205,13 @@ def get_safe_action_instantaneous(self, env, action):
                 # if the vehicle will crash into the vehicle ahead of it in the
                 # next time step (assuming the vehicle ahead of it is not
                 # moving), then stop immediately
+                if self.display_warnings:
+                    print(
+                        "=====================================\n"
+                        "Vehicle {} is about to crash. Instantaneous acceleration "
+                        "clipping applied.\n"
+                        "=====================================".format(self.veh_id))
+
                 return -this_vel / sim_step
             else:
                 # if the vehicle is not in danger of crashing, continue with
@@ -223,8 +273,8 @@ def safe_velocity(self, env):
         Returns
         -------
         float
-            maximum safe velocity given a maximum deceleration and delay in
-            performing the breaking action
+            maximum safe velocity given a maximum deceleration, delay in
+            performing the braking action, and speed limit
         """
         lead_id = env.k.vehicle.get_leader(self.veh_id)
         lead_vel = env.k.vehicle.get_speed(lead_id)
@@ -235,4 +285,97 @@
         v_safe = 2 * h / env.sim_step + dv - this_vel * (2 * self.delay)

+        # check for speed limit FIXME: this is not called
+        # this_edge = env.k.vehicle.get_edge(self.veh_id)
+        # edge_speed_limit = env.k.network.speed_limit(this_edge)
+
+        if this_vel > v_safe:
+            if self.display_warnings:
+                print(
+                    "=====================================\n"
+                    "Speed of vehicle {} is greater than safe speed. Safe velocity "
+                    "clipping applied.\n"
+                    "=====================================".format(self.veh_id))
+
         return v_safe
+
+    def get_obey_speed_limit_action(self, env, action):
+        """Perform the "obey_speed_limit" failsafe action.
+
+        Checks if the computed acceleration would put us above edge speed limit.
+        If it would, output the acceleration that would put us at the speed
+        limit velocity.
+
+        Parameters
+        ----------
+        env : flow.envs.Env
+            current environment, which contains information of the state of the
+            network at the current time step
+        action : float
+            requested acceleration action
+
+        Returns
+        -------
+        float
+            the requested action clipped by the speed limit
+        """
+        # check for speed limit
+        this_edge = env.k.vehicle.get_edge(self.veh_id)
+        edge_speed_limit = env.k.network.speed_limit(this_edge)
+
+        this_vel = env.k.vehicle.get_speed(self.veh_id)
+        sim_step = env.sim_step
+
+        if this_vel + action * sim_step > edge_speed_limit:
+            if edge_speed_limit > 0:
+                if self.display_warnings:
+                    print(
+                        "=====================================\n"
+                        "Speed of vehicle {} is greater than speed limit. Obey "
+                        "speed limit clipping applied.\n"
+                        "=====================================".format(self.veh_id))
+                return (edge_speed_limit - this_vel) / sim_step
+            else:
+                return -this_vel / sim_step
+        else:
+            return action
+
+    def get_feasible_action(self, action):
+        """Perform the "feasible_accel" failsafe action.
+
+        Checks if the computed acceleration would put us above maximum
+        acceleration or deceleration. If it would, output the acceleration
+        equal to maximum acceleration or deceleration.
+
+        Parameters
+        ----------
+        action : float
+            requested acceleration action
+
+        Returns
+        -------
+        float
+            the requested action clipped by the feasible acceleration or
+            deceleration.
+        """
+        if action > self.max_accel:
+            action = self.max_accel
+
+            if self.display_warnings:
+                print(
+                    "=====================================\n"
+                    "Acceleration of vehicle {} is greater than the max "
+                    "acceleration.
Feasible acceleration clipping applied.\n" + "=====================================".format(self.veh_id)) + + if action < -self.max_deaccel: + action = -self.max_deaccel + + if self.display_warnings: + print( + "=====================================\n" + "Deceleration of vehicle {} is greater than the max " + "deceleration. Feasible acceleration clipping applied.\n" + "=====================================".format(self.veh_id)) + + return action diff --git a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py index 42c9b2a9b..f5b7399bc 100755 --- a/flow/controllers/car_following_models.py +++ b/flow/controllers/car_following_models.py @@ -56,7 +56,8 @@ def __init__(self, v_des=8, time_delay=0.0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=True): """Instantiate a CFM controller.""" BaseController.__init__( self, @@ -64,7 +65,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id self.k_d = k_d @@ -132,7 +135,8 @@ def __init__(self, v_des=8, time_delay=0.0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=True): """Instantiate a Bilateral car-following model controller.""" BaseController.__init__( self, @@ -140,7 +144,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id self.k_d = k_d @@ -212,7 +218,8 @@ def __init__(self, a=0, time_delay=0.0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=True): """Instantiate a Linear Adaptive Cruise controller.""" BaseController.__init__( self, @@ -220,7 +227,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id self.k_1 = k_1 @@ -289,7 +298,8 @@ def __init__(self, v_max=30, time_delay=0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=True): """Instantiate an Optimal Vehicle Model controller.""" BaseController.__init__( self, @@ -297,7 +307,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id self.v_max = v_max self.alpha = alpha @@ -364,7 +376,8 @@ def __init__(self, h_st=5, time_delay=0.0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=True): """Instantiate a Linear OVM controller.""" BaseController.__init__( self, @@ -372,7 +385,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id # 4.8*1.85 for case I, 3.8*1.85 for case II, per Nakayama self.v_max = v_max @@ -445,6 +460,7 @@ def __init__(self, time_delay=0.0, noise=0, fail_safe=None, + display_warnings=True, car_following_params=None): """Instantiate an IDM controller.""" BaseController.__init__( @@ -453,7 +469,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.v0 = v0 self.T = T self.a = a @@ -546,7 +564,8 @@ def __init__(self, tau=1, delay=0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=True): """Instantiate a Gipps' controller.""" BaseController.__init__( self, @@ -554,8 +573,9 @@ def __init__(self, car_following_params, delay=delay, 
             fail_safe=fail_safe,
-            noise=noise
-        )
+            noise=noise,
+            display_warnings=display_warnings,
+        )

         self.v_desired = v0
         self.acc = acc
@@ -627,7 +647,8 @@
                  want_max_accel=False,
                  time_delay=0,
                  noise=0,
-                 fail_safe=None):
+                 fail_safe=None,
+                 display_warnings=True):
         """Instantiate a Bando controller."""
         BaseController.__init__(
             self,
@@ -636,6 +657,7 @@
             delay=time_delay,
             fail_safe=fail_safe,
             noise=noise,
+            display_warnings=display_warnings,
         )
         self.veh_id = veh_id
         self.v_max = v_max
diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index 69a78cb0e..d97f96582 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -1,10 +1,8 @@
 """Contains an experiment class for running simulations."""
-from flow.core.util import emission_to_csv
 from flow.utils.registry import make_create_env
-import datetime
+from datetime import datetime
 import logging
 import time
-import os
 import numpy as np
@@ -81,7 +79,7 @@ def __init__(self, flow_params, custom_callables=None):
         self.env = create_env()

         logging.info(" Starting experiment {} at {}".format(
-            self.env.network.name, str(datetime.datetime.utcnow())))
+            self.env.network.name, str(datetime.utcnow())))

         logging.info("Initializing environment.")
@@ -170,6 +168,11 @@ def rl_actions(*_):

             print("Round {0}, return: {1}".format(i, ret))

+            # Save emission data at the end of every rollout. This is skipped
+            # by the internal method if no emission path was specified.
+            if self.env.simulator == "traci":
+                self.env.k.simulation.save_emission(run_id=i)
+
         # Print the averages/std for all variables in the info_dict.
         for key in info_dict.keys():
             print("Average, std {}: {}, {}".format(
@@ -179,20 +182,4 @@
         print("steps/second:", np.mean(times))

         self.env.terminate()

-        if convert_to_csv and self.env.simulator == "traci":
-            # wait a short period of time to ensure the xml file is readable
-            time.sleep(0.1)
-
-            # collect the location of the emission file
-            dir_path = self.env.sim_params.emission_path
-            emission_filename = \
-                "{0}-emission.xml".format(self.env.network.name)
-            emission_path = os.path.join(dir_path, emission_filename)
-
-            # convert the emission file into a csv
-            emission_to_csv(emission_path)
-
-            # Delete the .xml version of the emission file.
-            os.remove(emission_path)
-
         return info_dict
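For orientation, an illustrative sketch (not taken from the patch): with an emission path configured, each rollout now yields its own csv via the kernel hook above. Here flow_params stands in for any of the exp_configs dicts.

    from flow.core.experiment import Experiment
    from flow.core.params import SumoParams

    # flow_params is assumed to be one of the example configuration dicts
    flow_params['sim'] = SumoParams(sim_step=0.5, emission_path="./data")

    exp = Experiment(flow_params)
    exp.run(num_runs=2)  # writes <network>-0_emission.csv and <network>-1_emission.csv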
diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py
index 8d51b8e25..387f7b03a 100644
--- a/flow/core/kernel/simulation/traci.py
+++ b/flow/core/kernel/simulation/traci.py
@@ -11,6 +11,7 @@
 import logging
 import subprocess
 import signal
+import csv


 # Number of retries on restarting SUMO before giving up
@@ -21,6 +22,32 @@ class TraCISimulation(KernelSimulation):
     """Sumo simulation kernel.

     Extends flow.core.kernel.simulation.KernelSimulation
+
+    Attributes
+    ----------
+    sumo_proc : subprocess.Popen
+        contains the subprocess.Popen instance used to start traci
+    sim_step : float
+        seconds per simulation step
+    emission_path : str or None
+        Path to the folder in which to create the emissions output. Emissions
+        output is not generated if this value is not specified
+    time : float
+        used to internally keep track of the simulation time
+    stored_data : dict <str, dict <float, dict <str, Any>>>
+        a dict object used to store additional data if an emission file is
+        provided. The first key corresponds to the name of the vehicle, the
+        second corresponds to the time the sample was issued, and the final
+        keys represent the additional data stored at every given time for every
+        vehicle, and consists of the following keys:
+
+        * acceleration (no noise): the accelerations issued to the vehicle,
+          excluding noise
+        * acceleration (requested): the requested acceleration by the vehicle,
+          including noise
+        * acceleration (actual): the actual acceleration by the vehicle,
+          collected by computing the difference between the speeds of the
+          vehicle and dividing it by the sim_step term
     """

     def __init__(self, master_kernel):
@@ -33,8 +60,12 @@ def __init__(self, master_kernel):
             sub-kernels)
         """
         KernelSimulation.__init__(self, master_kernel)
-        # contains the subprocess.Popen instance used to start traci
+
         self.sumo_proc = None
+        self.sim_step = None
+        self.emission_path = None
+        self.time = 0
+        self.stored_data = dict()

     def pass_api(self, kernel_api):
         """See parent class.
@@ -62,10 +93,61 @@ def simulation_step(self):

     def update(self, reset):
         """See parent class."""
-        pass
+        if reset:
+            self.time = 0
+        else:
+            self.time += self.sim_step
+
+        # Collect the additional data to store in the emission file.
+        if self.emission_path is not None:
+            kv = self.master_kernel.vehicle
+            for veh_id in self.master_kernel.vehicle.get_ids():
+                t = round(self.time, 2)
+
+                # some miscellaneous pre-processing
+                position = kv.get_2d_position(veh_id)
+
+                # Make sure dictionaries corresponding to the vehicle and
+                # time are available.
+                if veh_id not in self.stored_data.keys():
+                    self.stored_data[veh_id] = dict()
+                if t not in self.stored_data[veh_id].keys():
+                    self.stored_data[veh_id][t] = dict()
+
+                # Add the speed, position, and lane data.
+                self.stored_data[veh_id][t].update({
+                    "speed": kv.get_speed(veh_id),
+                    "lane_number": kv.get_lane(veh_id),
+                    "edge_id": kv.get_edge(veh_id),
+                    "relative_position": kv.get_position(veh_id),
+                    "x": position[0],
+                    "y": position[1],
+                    "headway": kv.get_headway(veh_id),
+                    "leader_id": kv.get_leader(veh_id),
+                    "follower_id": kv.get_follower(veh_id),
+                    "leader_rel_speed":
+                        kv.get_speed(kv.get_leader(veh_id))
+                        - kv.get_speed(veh_id),
+                    "target_accel_with_noise_with_failsafe":
+                        kv.get_accel(veh_id, noise=True, failsafe=True),
+                    "target_accel_no_noise_no_failsafe":
+                        kv.get_accel(veh_id, noise=False, failsafe=False),
+                    "target_accel_with_noise_no_failsafe":
+                        kv.get_accel(veh_id, noise=True, failsafe=False),
+                    "target_accel_no_noise_with_failsafe":
+                        kv.get_accel(veh_id, noise=False, failsafe=True),
+                    "realized_accel":
+                        kv.get_realized_accel(veh_id),
+                    "road_grade": kv.get_road_grade(veh_id),
+                    "distance": kv.get_distance(veh_id),
+                })

     def close(self):
         """See parent class."""
+        # Save the emission data to a csv.
+        if self.emission_path is not None:
+            self.save_emission()
+
         self.kernel_api.close()

     def check_collision(self):
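To make the nested structure concrete, a hypothetical stored_data snapshot after one sample (values invented purely for illustration):

    stored_data = {
        "human_0": {              # first key: vehicle id
            0.5: {                # second key: simulation time (s)
                "speed": 23.1,
                "edge_id": "119257914",
                "lane_number": 0,
                "realized_accel": 0.4,
                "distance": 11.6,
            },
        },
    }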
@@ -75,10 +157,24 @@
     def start_simulation(self, network, sim_params):
         """Start a sumo simulation instance.

-        This method uses the configuration files created by the network class
-        to initialize a sumo instance. Also initializes a traci connection to
-        interface with sumo from Python.
+        This method performs the following operations:
+
+        1. It collects the simulation step size and the emission path
+           information. If an emission path is specified, it ensures that the
+           path exists.
+        2. It also uses the configuration files created by the network class to
+           initialize a sumo instance.
+        3. Finally, it initializes a traci connection to interface with sumo
+           from Python and returns the connection.
         """
+        # Save the simulation step size (for later use).
+        self.sim_step = sim_params.sim_step
+
+        # Update the emission path term.
+        self.emission_path = sim_params.emission_path
+        if self.emission_path is not None:
+            ensure_dir(self.emission_path)
+
         error = None
         for _ in range(RETRIES_ON_ERROR):
             try:
@@ -109,17 +205,6 @@
                 sumo_call.append("--lateral-resolution")
                 sumo_call.append(str(sim_params.lateral_resolution))

-            # add the emission path to the sumo command (if requested)
-            if sim_params.emission_path is not None:
-                ensure_dir(sim_params.emission_path)
-                emission_out = os.path.join(
-                    sim_params.emission_path,
-                    "{0}-emission.xml".format(network.name))
-                sumo_call.append("--emission-output")
-                sumo_call.append(emission_out)
-            else:
-                emission_out = None
-
             if sim_params.overtake_right:
                 sumo_call.append("--lanechange.overtake-right")
                 sumo_call.append("true")
@@ -146,7 +231,7 @@
             if sim_params.num_clients > 1:
                 logging.info(" Num clients are" + str(sim_params.num_clients))

-            logging.debug(" Emission file: " + str(emission_out))
+            logging.debug(" Emission file: " + str(self.emission_path))
             logging.debug(" Step length: " + str(sim_params.sim_step))

             # Opening the I/O thread to SUMO
@@ -180,3 +265,66 @@ def teardown_sumo(self):
             os.killpg(self.sumo_proc.pid, signal.SIGTERM)
         except Exception as e:
             print("Error during teardown: {}".format(e))
+
+    def save_emission(self, run_id=0):
+        """Save any collected emission data to a csv file.
+
+        If no data was collected, nothing happens. Moreover, any internally
+        stored data by this class is cleared whenever data is stored.
+
+        Parameters
+        ----------
+        run_id : int
+            the rollout number, appended to the name of the emission file. Used
+            to store emission files from multiple rollouts run sequentially.
+        """
+        # If there is no stored data, ignore this operation. This is to ensure
+        # that data isn't deleted if the operation is called twice.
+        if len(self.stored_data) == 0:
+            return
+
+        # Get a csv name for the emission file.
+        name = "{}-{}_emission.csv".format(
+            self.master_kernel.network.network.name, run_id)
+
+        # The name of all stored data-points (excluding id and time)
+        stored_ids = [
+            "x",
+            "y",
+            "speed",
+            "headway",
+            "leader_id",
+            "target_accel_with_noise_with_failsafe",
+            "target_accel_no_noise_no_failsafe",
+            "target_accel_with_noise_no_failsafe",
+            "target_accel_no_noise_with_failsafe",
+            "realized_accel",
+            "road_grade",
+            "edge_id",
+            "lane_number",
+            "distance",
+            "relative_position",
+            "follower_id",
+            "leader_rel_speed",
+        ]
+
+        # Update the stored data to push to the csv file.
+        final_data = {"time": [], "id": []}
+        final_data.update({key: [] for key in stored_ids})
+
+        for veh_id in self.stored_data.keys():
+            for t in self.stored_data[veh_id].keys():
+                final_data['time'].append(t)
+                final_data['id'].append(veh_id)
+                for key in stored_ids:
+                    final_data[key].append(self.stored_data[veh_id][t][key])
+
+        with open(os.path.join(self.emission_path, name), "w") as f:
+            writer = csv.writer(f, delimiter=',')
+            writer.writerow(final_data.keys())
+            writer.writerows(zip(*final_data.values()))
+
+        # Clear all memory from the stored data. This is useful if this
+        # function is called in between resets.
+        self.stored_data.clear()
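Since save_emission() writes a plain csv with a header row, the output can be inspected directly; an illustrative snippet (the file name below is hypothetical):

    import pandas as pd

    df = pd.read_csv("./data/highway-0_emission.csv")  # hypothetical name

    # e.g. mean realized acceleration per vehicle over the rollout
    print(df.groupby("id")["realized_accel"].mean())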
diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py
index d97ade984..a433b8924 100644
--- a/flow/core/kernel/vehicle/base.py
+++ b/flow/core/kernel/vehicle/base.py
@@ -128,15 +128,19 @@ def remove(self, veh_id):
         pass

     @abstractmethod
-    def apply_acceleration(self, veh_id, acc):
+    def apply_acceleration(self, veh_id, acc, smooth=True):
         """Apply the acceleration requested by a vehicle in the simulator.

+        In SUMO, this function uses the slowDown method, which applies
+        smoothing.
+
         Parameters
         ----------
         veh_id : str or list of str
             list of vehicle identifiers
         acc : float or array_like
             requested accelerations from the vehicles
+        smooth : bool
+            whether to apply acceleration smoothly or not, default: True
         """
         pass
@@ -741,3 +745,32 @@ def get_max_speed(self, veh_id, error):
             float
         """
         pass
+
+    ###########################################################################
+    #                        Methods for Datapipeline                         #
+    ###########################################################################
+
+    @abstractmethod
+    def get_accel(self, veh_id, noise=True, failsafe=True):
+        """Return the acceleration of vehicle with veh_id."""
+        pass
+
+    @abstractmethod
+    def update_accel(self, veh_id, accel, noise=True, failsafe=True):
+        """Update the stored acceleration of vehicle with veh_id."""
+        pass
+
+    @abstractmethod
+    def get_2d_position(self, veh_id, error=-1001):
+        """Return (x, y) position of vehicle with veh_id."""
+        pass
+
+    @abstractmethod
+    def get_realized_accel(self, veh_id):
+        """Return the acceleration that the vehicle actually made."""
+        pass
+
+    @abstractmethod
+    def get_road_grade(self, veh_id):
+        """Return the road grade of the vehicle with veh_id."""
+        pass
diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py
index 6f119b7bb..39bfb35da 100644
--- a/flow/core/kernel/vehicle/traci.py
+++ b/flow/core/kernel/vehicle/traci.py
@@ -336,7 +336,8 @@ def _add_departed(self, veh_id, veh_type):
             tc.VAR_POSITION,
             tc.VAR_ANGLE,
             tc.VAR_SPEED_WITHOUT_TRACI,
-            tc.VAR_FUELCONSUMPTION
+            tc.VAR_FUELCONSUMPTION,
+            tc.VAR_DISTANCE
         ])

         self.kernel_api.vehicle.subscribeLeader(veh_id, 2000)
@@ -952,18 +953,22 @@ def _prev_edge_followers(self, veh_id, edge_dict, lane, num_edges):

         return tailway, follower

-    def apply_acceleration(self, veh_ids, acc):
+    def apply_acceleration(self, veh_ids, acc, smooth=True):
         """See parent class."""
-        # to hand the case of a single vehicle
+        # to handle the case of a single vehicle
         if type(veh_ids) == str:
             veh_ids = [veh_ids]
             acc = [acc]

         for i, vid in enumerate(veh_ids):
             if acc[i] is not None and vid in self.get_ids():
+                self.__vehicles[vid]["accel"] = acc[i]
                 this_vel = self.get_speed(vid)
                 next_vel = max([this_vel + acc[i] * self.sim_step, 0])
-                self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3)
+                if smooth:
+                    self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3)
+                else:
+                    self.kernel_api.vehicle.setSpeed(vid, next_vel)

     def apply_lane_change(self, veh_ids, direction):
         """See parent class."""
@@ -993,7 +998,7 @@
             # perform the requested lane action in TraCI
             if target_lane != this_lane:
                 self.kernel_api.vehicle.changeLane(
-                    veh_id, int(target_lane), 100000)
+                    veh_id, int(target_lane), self.sim_step)

                 if veh_id in self.get_rl_ids():
                     self.prev_last_lc[veh_id] = \
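As a usage sketch (illustrative only, assuming env is a running Flow environment), the stored acceleration variants exposed by the getters in the hunk below can be compared against the realized acceleration:

    for veh_id in env.k.vehicle.get_ids():
        requested = env.k.vehicle.get_accel(veh_id, noise=True, failsafe=True)
        clean = env.k.vehicle.get_accel(veh_id, noise=False, failsafe=False)
        realized = env.k.vehicle.get_realized_accel(veh_id)
        if requested is not None and clean is not None:
            # difference attributable to noise and failsafe clipping
            print(veh_id, requested - clean, realized)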
@@ -1013,6 +1018,8 @@ def choose_routes(self, veh_ids, route_choices):

     def get_x_by_id(self, veh_id):
         """See parent class."""
+        if isinstance(veh_id, (list, np.ndarray)):
+            return [self.get_x_by_id(vehID) for vehID in veh_id]
         if self.get_edge(veh_id) == '':
             # occurs when a vehicle crashes or is teleported for some other reason
             return 0.
@@ -1121,3 +1128,52 @@ def get_max_speed(self, veh_id, error=-1001):
     def set_max_speed(self, veh_id, max_speed):
         """See parent class."""
         self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed)
+
+    def get_accel(self, veh_id, noise=True, failsafe=True):
+        """See parent class."""
+        metric_name = 'accel'
+        if noise:
+            metric_name += '_with_noise'
+        else:
+            metric_name += '_no_noise'
+        if failsafe:
+            metric_name += '_with_failsafe'
+        else:
+            metric_name += '_no_failsafe'
+
+        if metric_name not in self.__vehicles[veh_id]:
+            self.__vehicles[veh_id][metric_name] = None
+        return self.__vehicles[veh_id][metric_name]
+
+    def update_accel(self, veh_id, accel, noise=True, failsafe=True):
+        """See parent class."""
+        metric_name = 'accel'
+        if noise:
+            metric_name += '_with_noise'
+        else:
+            metric_name += '_no_noise'
+        if failsafe:
+            metric_name += '_with_failsafe'
+        else:
+            metric_name += '_no_failsafe'
+
+        self.__vehicles[veh_id][metric_name] = accel
+
+    def get_realized_accel(self, veh_id):
+        """See parent class."""
+        if self.get_distance(veh_id) == 0:
+            return 0
+        return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step
+
+    def get_2d_position(self, veh_id, error=-1001):
+        """See parent class."""
+        return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error)
+
+    def get_distance(self, veh_id, error=-1001):
+        """See parent class."""
+        return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_DISTANCE, error)
+
+    def get_road_grade(self, veh_id):
+        """See parent class."""
+        # TODO : Brent
+        return 0
diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py
index 3c7ab8b21..9f3da553d 100644
--- a/flow/visualize/time_space_diagram.py
+++ b/flow/visualize/time_space_diagram.py
@@ -141,7 +141,7 @@ def get_time_space_data(data, params):


 def _merge(data):
-    r"""Generate position and speed data for the merge.
+    r"""Generate time and position data for the merge.

     This only includes vehicles on the main highway, and not on the
     adjacent on-ramp.
@@ -172,9 +172,6 @@ def _highway(data):
 def _highway(data):
     r"""Generate time and position data for the highway.

-    We generate plots for all lanes, so the segments are wrapped in
-    a dictionary.
-
     Parameters
     ----------
     data : pd.DataFrame
@@ -197,7 +194,7 @@


 def _ring_road(data):
-    r"""Generate position and speed data for the ring road.
+    r"""Generate time and position data for the ring road.

     Vehicles that reach the top of the plot simply return to the bottom and
     continue.
@@ -259,7 +256,7 @@ def _i210_subnetwork(data):


 def _figure_eight(data):
-    r"""Generate position and speed data for the figure eight.
+    r"""Generate time and position data for the figure eight.

     The vehicles traveling towards the intersection from one side will be
     plotted from the top downward, while the vehicles from the other side will
diff --git a/tests/fast_tests/test_controllers.py b/tests/fast_tests/test_controllers.py
index 58967cef8..bef765396 100644
--- a/tests/fast_tests/test_controllers.py
+++ b/tests/fast_tests/test_controllers.py
@@ -405,6 +405,175 @@ def test_no_crash_LinearOVM(self):
         self.tearDown_failsafe()


+class TestFeasibleAccelFailsafe(TestInstantaneousFailsafe):
+    """
+    Tests that the feasible accel failsafe of the base acceleration controller
+    does not fail under extreme conditions.
+ """ + + def test_no_crash_OVM(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(OVMController, { + "fail_safe": "feasible_accel" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + self.setUp_failsafe(vehicles=vehicles) + + # run the experiment, see if it fails + self.exp.run(1) + + self.tearDown_failsafe() + + def test_no_crash_LinearOVM(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(LinearOVM, { + "fail_safe": "feasible_accel" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + self.setUp_failsafe(vehicles=vehicles) + + # run the experiment, see if it fails + self.exp.run(1) + + self.tearDown_failsafe() + + +class TestObeySpeedLimitFailsafe(TestInstantaneousFailsafe): + """ + Tests that the obey speed limit failsafe of the base acceleration controller + does not fail under extreme conditions. + """ + + def test_no_crash_OVM(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(OVMController, { + "fail_safe": "obey_speed_limit" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + self.setUp_failsafe(vehicles=vehicles) + + # run the experiment, see if it fails + self.exp.run(1) + + self.tearDown_failsafe() + + def test_no_crash_LinearOVM(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(LinearOVM, { + "fail_safe": "obey_speed_limit" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + self.setUp_failsafe(vehicles=vehicles) + + # run the experiment, see if it fails + self.exp.run(1) + + self.tearDown_failsafe() + + +class TestBrokenFailsafe(TestInstantaneousFailsafe): + """ + Tests that the failsafe logic triggers exceptions when instantiated + incorrectly. 
+ """ + + def test_invalid_failsafe_string(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(OVMController, { + "fail_safe": "default" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + additional_env_params = { + "target_velocity": 8, + "max_accel": 3, + "max_decel": 3, + "sort_vehicles": False + } + env_params = EnvParams(additional_params=additional_env_params) + + additional_net_params = { + "length": 100, + "lanes": 1, + "speed_limit": 30, + "resolution": 40 + } + net_params = NetParams(additional_params=additional_net_params) + + initial_config = InitialConfig(bunching=10) + + # create the environment and network classes, see that it raises ValueError + with self.assertRaises(ValueError): + ring_road_exp_setup(vehicles=vehicles, + env_params=env_params, + net_params=net_params, + initial_config=initial_config) + + self.tearDown_failsafe() + + def test_invalid_failsafe_type(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(LinearOVM, { + "fail_safe": True + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + additional_env_params = { + "target_velocity": 8, + "max_accel": 3, + "max_decel": 3, + "sort_vehicles": False + } + env_params = EnvParams(additional_params=additional_env_params) + + additional_net_params = { + "length": 100, + "lanes": 1, + "speed_limit": 30, + "resolution": 40 + } + net_params = NetParams(additional_params=additional_net_params) + + initial_config = InitialConfig(bunching=10) + + # create the environment and network classes, see that it raises ValueError + with self.assertRaises(ValueError): + ring_road_exp_setup(vehicles=vehicles, + env_params=env_params, + net_params=net_params, + initial_config=initial_config) + + self.tearDown_failsafe() + + class TestStaticLaneChanger(unittest.TestCase): """ Makes sure that vehicles with a static lane-changing controller do not diff --git a/tests/fast_tests/test_experiment_base_class.py b/tests/fast_tests/test_experiment_base_class.py index b3863a77c..458af1027 100644 --- a/tests/fast_tests/test_experiment_base_class.py +++ b/tests/fast_tests/test_experiment_base_class.py @@ -1,6 +1,7 @@ import unittest import os import time +import csv from flow.core.experiment import Experiment from flow.core.params import VehicleParams @@ -168,15 +169,44 @@ def test_convert_to_csv(self): time.sleep(1.0) # check that both the csv file exists and the xml file doesn't. 
- self.assertFalse(os.path.isfile(dir_path + "/{}-emission.xml".format( + self.assertFalse(os.path.isfile(dir_path + "/{}-0_emission.xml".format( exp.env.network.name))) - self.assertTrue(os.path.isfile(dir_path + "/{}-emission.csv".format( + self.assertTrue(os.path.isfile(dir_path + "/{}-0_emission.csv".format( exp.env.network.name))) + # check that the keys within the emission file matches its expected + # values + with open(dir_path + "/{}-0_emission.csv".format( + exp.env.network.name), "r") as f: + reader = csv.reader(f) + header = next(reader) + + self.assertListEqual(header, [ + "time", + "id", + "x", + "y", + "speed", + "headway", + "leader_id", + "target_accel_with_noise_with_failsafe", + "target_accel_no_noise_no_failsafe", + "target_accel_with_noise_no_failsafe", + "target_accel_no_noise_with_failsafe", + "realized_accel", + "road_grade", + "edge_id", + "lane_number", + "distance", + "relative_position", + "follower_id", + "leader_rel_speed", + ]) + time.sleep(0.1) # delete the files - os.remove(os.path.expanduser(dir_path + "/{}-emission.csv".format( + os.remove(os.path.expanduser(dir_path + "/{}-0_emission.csv".format( exp.env.network.name))) From d36da2e5144a40072b11e39f9da6725cc3f4441e Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 8 Jul 2020 11:40:32 -0700 Subject: [PATCH 302/438] add 210 edgestarts for backwards compatibility (#985) * fixed h-baselines bug * potential bug fix * add 210 edgestarts for backwards compatibility * add 210 edgestarts for backwards compatibility * add 210 edgestarts for backwards compatibility * add 210 edgestarts for backwards compatibility * fastforward PR 989 * fix typo Co-authored-by: AboudyKreidieh --- flow/visualize/time_space_diagram.py | 66 +++++++++++++++++++++------- 1 file changed, 51 insertions(+), 15 deletions(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index 9f3da553d..4914fc6a7 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -27,7 +27,8 @@ import matplotlib matplotlib.use('TkAgg') from matplotlib import pyplot as plt -from matplotlib.collections import LineCollection +from matplotlib.collections import LineCollection, PatchCollection +from matplotlib.patches import Rectangle import matplotlib.colors as colors import numpy as np import pandas as pd @@ -186,8 +187,6 @@ def _highway(data): pd.DataFrame modified trajectory dataframe """ - data.loc[:, :] = data[(data['distance'] > 500)] - data.loc[:, :] = data[(data['distance'] < 2300)] segs = data[['time_step', 'distance', 'next_time', 'next_pos']].values.reshape((len(data), 2, 2)) return segs, data @@ -240,10 +239,6 @@ def _i210_subnetwork(data): pd.DataFrame modified trajectory dataframe """ - # Omit ghost edges - omit_edges = {'ghost0', '119257908#3'} - data.loc[:, :] = data[~data['edge_id'].isin(omit_edges)] - # Reset lane numbers that are offset by ramp lanes offset_edges = set(data[data['lane_id'] == 5]['edge_id'].unique()) data.loc[data['edge_id'].isin(offset_edges), 'lane_id'] -= 1 @@ -357,6 +352,22 @@ def _get_abs_pos(df, params): } elif params['network'] == HighwayNetwork: return df['x'] + elif params['network'] == I210SubNetwork: + edgestarts = { + '119257914': -5.0999999999995795, + '119257908#0': 56.49000000018306, + ':300944379_0': 56.18000000000016, + ':300944436_0': 753.4599999999871, + '119257908#1-AddedOnRampEdge': 756.3299999991157, + ':119257908#1-AddedOnRampNode_0': 853.530000000022, + '119257908#1': 856.7699999997207, + ':119257908#1-AddedOffRampNode_0': 
1096.4499999999707, + '119257908#1-AddedOffRampEdge': 1099.6899999995558, + ':1686591010_1': 1198.1899999999541, + '119257908#2': 1203.6499999994803, + ':1842086610_1': 1780.2599999999056, + '119257908#3': 1784.7899999996537, + } else: edgestarts = defaultdict(float) @@ -374,7 +385,7 @@ def _get_abs_pos(df, params): return ret -def plot_tsd(ax, df, segs, args, lane=None): +def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None): """Plot the time-space diagram. Take the pre-processed segments and other meta-data, then plot all the line segments. @@ -391,6 +402,10 @@ def plot_tsd(ax, df, segs, args, lane=None): parsed arguments lane : int, optional lane number to be shown in plot title + ghost_edges : list or set of str + ghost edge names to be greyed out, default None + ghost_bounds : tuple + lower and upper bounds of domain, excluding ghost edges, default None Returns ------- @@ -398,8 +413,7 @@ def plot_tsd(ax, df, segs, args, lane=None): """ norm = plt.Normalize(args.min_speed, args.max_speed) - xmin = max(df['time_step'].min(), args.start) - xmax = min(df['time_step'].max(), args.stop) + xmin, xmax = df['time_step'].min(), df['time_step'].max() xbuffer = (xmax - xmin) * 0.025 # 2.5% of range ymin, ymax = df['distance'].min(), df['distance'].max() ybuffer = (ymax - ymin) * 0.025 # 2.5% of range @@ -413,6 +427,25 @@ def plot_tsd(ax, df, segs, args, lane=None): ax.add_collection(lc) ax.autoscale() + rects = [] + if ghost_edges: + y_domain_min = df[~df['edge_id'].isin(ghost_edges)]['distance'].min() + y_domain_max = df[~df['edge_id'].isin(ghost_edges)]['distance'].max() + rects.append(Rectangle((xmin, y_domain_min), args.start - xmin, y_domain_max - y_domain_min)) + rects.append(Rectangle((xmin, ymin), xmax - xmin, y_domain_min - ymin)) + rects.append(Rectangle((xmin, y_domain_max), xmax - xmin, ymax - y_domain_max)) + elif ghost_bounds: + rects.append(Rectangle((xmin, ghost_bounds[0]), args.start - xmin, ghost_bounds[1] - ghost_bounds[0])) + rects.append(Rectangle((xmin, ymin), xmax - xmin, ghost_bounds[0] - ymin)) + rects.append(Rectangle((xmin, ghost_bounds[1]), xmax - xmin, ymax - ghost_bounds[1])) + else: + rects.append(Rectangle((xmin, ymin), args.start - xmin, ymax - ymin)) + + if rects: + pc = PatchCollection(rects, facecolor='grey', alpha=0.5, edgecolor=None) + pc.set_zorder(20) + ax.add_collection(pc) + if lane: ax.set_title('Time-Space Diagram: Lane {}'.format(lane), fontsize=25) else: @@ -452,8 +485,6 @@ def plot_tsd(ax, df, segs, args, lane=None): help='The minimum speed in the color range.') parser.add_argument('--start', type=float, default=0, help='initial time (in sec) in the plot.') - parser.add_argument('--stop', type=float, default=float('inf'), - help='final time (in sec) in the plot.') args = parser.parse_args() @@ -485,13 +516,17 @@ def plot_tsd(ax, df, segs, args, lane=None): for lane, df in traj_df.groupby('lane_id'): ax = plt.subplot(nlanes, 1, lane+1) - plot_tsd(ax, df, segs[lane], args, lane) + plot_tsd(ax, df, segs[lane], args, int(lane+1), ghost_edges={'ghost0', '119257908#3'}) + plt.tight_layout() else: # perform plotting operation fig = plt.figure(figsize=(16, 9)) ax = plt.axes() - plot_tsd(ax, traj_df, segs, args) + if flow_params['network'] == HighwayNetwork: + plot_tsd(ax, traj_df, segs, args, ghost_bounds=(500, 2300)) + else: + plot_tsd(ax, traj_df, segs, args) ########################################################################### # Note: For MergeNetwork only # @@ -502,4 +537,5 @@ def plot_tsd(ax, df, segs, args, lane=None): 
        [-0.1, -0.1], linewidth=3, color="white")           #
     ###########################################################################

-    plt.show()
+    outfile = args.trajectory_path.replace('csv', 'png')
+    plt.savefig(outfile)

From 5b7e8b27a781d738395b81572c0f386cf3ef955a Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Wed, 8 Jul 2020 11:45:45 -0700
Subject: [PATCH 303/438] Time-Space Diagrams automatically to S3 (#993)

* Add time-space diagram plotting to experiment.py
---
 flow/core/experiment.py              |  15 ++-
 flow/visualize/time_space_diagram.py | 148 ++++++++++++++++---------
 2 files changed, 111 insertions(+), 52 deletions(-)

diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index 464b0a405..a7ac07738 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -2,6 +2,7 @@
 from flow.utils.registry import make_create_env
 from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration
 from flow.data_pipeline.leaderboard_utils import network_name_translate
+from flow.visualize.time_space_diagram import tsd_main
 from collections import defaultdict
 from datetime import datetime, timezone
 import logging
@@ -20,8 +21,8 @@ class Experiment:
     the actions of RL agents in the network, type the following:

         >>> from flow.envs import Env
         >>> flow_params = dict(...)  # see the examples in exp_config
         >>> exp = Experiment(flow_params)  # for some experiment configuration
         >>> exp.run(num_runs=1)

     If you wish to specify the actions of RL agents in the network, this may be
@@ -39,7 +40,7 @@ class can generate csv files from emission files produced by sumo. These
     ``emission_path`` attribute in ``SimParams`` to some path.

         >>> from flow.core.params import SimParams
         >>> flow_params['sim'] = SimParams(emission_path="./data")

     Once you have included this in your environment, run your Experiment object
@@ -233,6 +234,11 @@ def rl_actions(*_):
             write_dict_to_csv(metadata_table_path, metadata, True)

             if to_aws:
+                tsd_main(trajectory_table_path,
+                         {'network': self.env.network.__class__},
+                         min_speed=0,
+                         max_speed=10,
+                         start=self.env.env_params.warmup_steps)
                 upload_to_s3('circles.data.pipeline',
                              'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date, source_id),
@@ -241,5 +247,8 @@ def rl_actions(*_):
                              'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id),
                              trajectory_table_path,
                              {'network': metadata['network'][0], 'is_baseline': metadata['is_baseline'][0]})
+                upload_to_s3('circles.data.pipeline',
+                             'time_space_diagram/date={0}/partition_name={1}/{1}.png'.format(cur_date, source_id),
+                             trajectory_table_path.replace('csv', 'png'))

         return info_dict
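Taken together, the two hunks above mean that every run which pushes data to AWS now also renders and uploads a time-space diagram. The following is a minimal sketch of that call pattern outside of Experiment.run; the file path, date, source id and warmup value are illustrative, the custom upload_to_s3 helper is replaced by a plain boto3 call, and for some networks the flow_params dict would also need to carry net_params for the trajectory import.

    import boto3

    from flow.networks.i210_subnetwork import I210SubNetwork
    from flow.visualize.time_space_diagram import tsd_main

    # hypothetical trajectory csv left behind by a previous run
    trajectory_table_path = './data/flow_123.csv'

    # render the diagram; everything before `start` is greyed out as warmup
    tsd_main(trajectory_table_path,
             {'network': I210SubNetwork},
             min_speed=0,
             max_speed=10,
             start=720)

    # tsd_main saves the png next to the csv, so the upload key mirrors it
    boto3.client('s3').upload_file(
        trajectory_table_path.replace('csv', 'png'),
        'circles.data.pipeline',
        'time_space_diagram/date=2020-07-08/partition_name=flow_123/flow_123.png')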
diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py
index b1500b48d..a9392e21d 100644
--- a/flow/visualize/time_space_diagram.py
+++ b/flow/visualize/time_space_diagram.py
@@ -382,7 +382,7 @@ def _get_abs_pos(df, params):
     return ret


-def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None):
+def plot_tsd(ax, df, segs, cmap, min_speed=0, max_speed=10, start=0, lane=None, ghost_edges=None, ghost_bounds=None):
     """Plot the time-space diagram.

     Take the pre-processed segments and other meta-data, then plot all the line segments.
@@ -395,8 +395,12 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None)
         data used for axes bounds and speed coloring
     segs : list of list of lists
         line segments to be plotted, where each segment is a list of two [x,y] pairs
-    args : dict
-        parsed arguments
+    cmap : colors.LinearSegmentedColormap
+        colormap used to color the line segments by speed
+    min_speed : int or float
+        minimum speed in colorbar
+    max_speed : int or float
+        maximum speed in colorbar
+    start : int or float
+        first time_step that is not greyed out
     lane : int, optional
         lane number to be shown in plot title
     ghost_edges : list or set of str
@@ -408,7 +412,7 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None)
     -------
     None
     """
-    norm = plt.Normalize(args.min_speed, args.max_speed)
+    norm = plt.Normalize(min_speed, max_speed)

     xmin, xmax = df['time_step'].min(), df['time_step'].max()
     xbuffer = (xmax - xmin) * 0.025  # 2.5% of range
@@ -418,7 +422,7 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None)
     ax.set_xlim(xmin - xbuffer, xmax + xbuffer)
     ax.set_ylim(ymin - ybuffer, ymax + ybuffer)

-    lc = LineCollection(segs, cmap=my_cmap, norm=norm)
+    lc = LineCollection(segs, cmap=cmap, norm=norm)
     lc.set_array(df['speed'].values)
     lc.set_linewidth(1)
     ax.add_collection(lc)
@@ -428,15 +432,15 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None)
     if ghost_edges:
         y_domain_min = df[~df['edge_id'].isin(ghost_edges)]['distance'].min()
         y_domain_max = df[~df['edge_id'].isin(ghost_edges)]['distance'].max()
-        rects.append(Rectangle((xmin, y_domain_min), args.start - xmin, y_domain_max - y_domain_min))
+        rects.append(Rectangle((xmin, y_domain_min), start - xmin, y_domain_max - y_domain_min))
         rects.append(Rectangle((xmin, ymin), xmax - xmin, y_domain_min - ymin))
         rects.append(Rectangle((xmin, y_domain_max), xmax - xmin, ymax - y_domain_max))
     elif ghost_bounds:
-        rects.append(Rectangle((xmin, ghost_bounds[0]), args.start - xmin, ghost_bounds[1] - ghost_bounds[0]))
+        rects.append(Rectangle((xmin, ghost_bounds[0]), start - xmin, ghost_bounds[1] - ghost_bounds[0]))
         rects.append(Rectangle((xmin, ymin), xmax - xmin, ghost_bounds[0] - ymin))
         rects.append(Rectangle((xmin, ghost_bounds[1]), xmax - xmin, ymax - ghost_bounds[1]))
     else:
-        rects.append(Rectangle((xmin, ymin), args.start - xmin, ymax - ymin))
+        rects.append(Rectangle((xmin, ymin), start - xmin, ymax - ymin))

     if rects:
         pc = PatchCollection(rects, facecolor='grey', alpha=0.5, edgecolor=None)
         pc.set_zorder(20)
         ax.add_collection(pc)
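The ghost masking in the hunk above is pure matplotlib geometry: three grey rectangles cover the warmup interval and the two ghost-edge strips, drawn over the trajectory segments. A self-contained sketch of that drawing logic, with made-up bounds standing in for the values plot_tsd derives from the dataframe:

    import matplotlib.pyplot as plt
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Rectangle

    fig, ax = plt.subplots()
    xmin, xmax, ymin, ymax = 0.0, 100.0, 0.0, 2500.0   # axes bounds
    start, y_lo, y_hi = 20.0, 500.0, 2300.0            # warmup cutoff, ghost bounds

    rects = [
        # strip left of `start`, limited to the non-ghost distance range
        Rectangle((xmin, y_lo), start - xmin, y_hi - y_lo),
        # full-width strips over the ghost edges at both ends of the domain
        Rectangle((xmin, ymin), xmax - xmin, y_lo - ymin),
        Rectangle((xmin, y_hi), xmax - xmin, ymax - y_hi),
    ]
    pc = PatchCollection(rects, facecolor='grey', alpha=0.5, edgecolor=None)
    pc.set_zorder(20)  # draw the mask over the trajectory segments
    ax.add_collection(pc)
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    fig.savefig('ghost_mask_demo.png')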
@@ -457,41 +461,28 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None)
     cbar.ax.tick_params(labelsize=18)


-if __name__ == '__main__':
-    # create the parser
-    parser = argparse.ArgumentParser(
-        formatter_class=argparse.RawDescriptionHelpFormatter,
-        description='[Flow] Generates time space diagrams for flow networks.',
-        epilog='python time_space_diagram.py </path/to/emission>.csv '
-               '</path/to/flow_params>.json')
-
-    # required arguments
-    parser.add_argument('trajectory_path', type=str,
-                        help='path to the Flow trajectory csv file.')
-    parser.add_argument('flow_params', type=str,
-                        help='path to the flow_params json file.')
-
-    # optional arguments
-    parser.add_argument('--steps', type=int, default=1,
-                        help='rate at which steps are plotted.')
-    parser.add_argument('--title', type=str, default='Time Space Diagram',
-                        help='rate at which steps are plotted.')
-    parser.add_argument('--max_speed', type=int, default=8,
-                        help='The maximum speed in the color range.')
-    parser.add_argument('--min_speed', type=int, default=0,
-                        help='The minimum speed in the color range.')
-    parser.add_argument('--start', type=float, default=0,
-                        help='initial time (in sec) in the plot.')
-
-    args = parser.parse_args()
-
-    # flow_params is imported as a dictionary
-    if '.json' in args.flow_params:
-        flow_params = get_flow_params(args.flow_params)
-    else:
-        module = __import__("examples.exp_configs.non_rl", fromlist=[args.flow_params])
-        flow_params = getattr(module, args.flow_params).flow_params
+def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0):
+    """Prepare and plot the time-space diagram.
+
+    Parameters
+    ----------
+    trajectory_path : str
+        file path (for the .csv formatted file)
+    flow_params : dict
+        flow-specific parameters, including:
+        * "network" (type): the network class that was used when generating
+          the emission file. Must be one of the networks mentioned in
+          ACCEPTABLE_NETWORKS,
+        * "net_params" (flow.core.params.NetParams): network-specific
+          parameters. This is used to collect the lengths of various network
+          links.
+    min_speed : int or float
+        minimum speed in colorbar
+    max_speed : int or float
+        maximum speed in colorbar
+    start : int or float
+        first time_step that is not greyed out
+    """
     # some plotting parameters
     cdict = {
         'red': ((0, 0, 0), (0.2, 1, 1), (0.6, 1, 1), (1, 0, 0)),
@@ -501,29 +492,50 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None)
     my_cmap = colors.LinearSegmentedColormap('my_colormap', cdict, 1024)

     # Read trajectory csv into pandas dataframe
-    traj_df = import_data_from_trajectory(args.trajectory_path, flow_params)
+    traj_df = import_data_from_trajectory(trajectory_path, flow_params)

     # Convert df data into segments for plotting
     segs, traj_df = get_time_space_data(traj_df, flow_params)

     if flow_params['network'] == I210SubNetwork:
         nlanes = traj_df['lane_id'].nunique()
-        fig = plt.figure(figsize=(16, 9*nlanes))
+        plt.figure(figsize=(16, 9*nlanes))

         for lane, df in traj_df.groupby('lane_id'):
             ax = plt.subplot(nlanes, 1, lane+1)
-            plot_tsd(ax, df, segs[lane], args, int(lane+1), ghost_edges={'ghost0', '119257908#3'})
+            plot_tsd(ax=ax,
+                     df=df,
+                     segs=segs[lane],
+                     cmap=my_cmap,
+                     min_speed=min_speed,
+                     max_speed=max_speed,
+                     start=start,
+                     lane=int(lane+1),
+                     ghost_edges={'ghost0', '119257908#3'})
         plt.tight_layout()
     else:
         # perform plotting operation
-        fig = plt.figure(figsize=(16, 9))
+        plt.figure(figsize=(16, 9))
         ax = plt.axes()

         if flow_params['network'] == HighwayNetwork:
-            plot_tsd(ax, traj_df, segs, args, ghost_bounds=(500, 2300))
+            plot_tsd(ax=ax,
+                     df=traj_df,
+                     segs=segs,
+                     cmap=my_cmap,
+                     min_speed=min_speed,
+                     max_speed=max_speed,
+                     start=start,
+                     ghost_bounds=(500, 2300))
         else:
-            plot_tsd(ax, traj_df, segs, args)
+            plot_tsd(ax=ax,
+                     df=traj_df,
+                     segs=segs,
+                     cmap=my_cmap,
+                     min_speed=min_speed,
+                     max_speed=max_speed,
+                     start=start)

     ###########################################################################
     #                       Note: For MergeNetwork only                       #
@@ -534,5 +546,43 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None)
         [-0.1, -0.1], linewidth=3, color="white")           #
     ###########################################################################

-    outfile = args.trajectory_path.replace('csv', 'png')
+    outfile = trajectory_path.replace('csv', 'png')
     plt.savefig(outfile)
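For the I-210 case, tsd_main fans the figure out into one subplot per lane via a pandas groupby, as in the branch above. A toy version of that loop, with a four-row synthetic frame standing in for the imported trajectory data:

    import pandas as pd
    import matplotlib.pyplot as plt

    traj_df = pd.DataFrame({
        'lane_id':   [0, 0, 1, 1],
        'time_step': [0.0, 1.0, 0.0, 1.0],
        'distance':  [0.0, 20.0, 5.0, 30.0],
    })

    nlanes = traj_df['lane_id'].nunique()
    plt.figure(figsize=(16, 9 * nlanes))
    for lane, df in traj_df.groupby('lane_id'):
        ax = plt.subplot(nlanes, 1, lane + 1)   # lanes are 0-indexed
        ax.plot(df['time_step'], df['distance'])
        ax.set_title('Lane {}'.format(int(lane + 1)))
    plt.tight_layout()
    plt.savefig('lanes_demo.png')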
+
+
+if __name__ == '__main__':
+    # create the parser
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        description='[Flow] Generates time space diagrams for flow networks.',
+        epilog='python time_space_diagram.py </path/to/emission>.csv '
+               '</path/to/flow_params>.json')
+
+    # required arguments
+    parser.add_argument('trajectory_path', type=str,
+                        help='path to the Flow trajectory csv file.')
+    parser.add_argument('flow_params', type=str,
+                        help='path to the flow_params json file.')
+
+    # optional arguments
+    parser.add_argument('--steps', type=int, default=1,
+                        help='rate at which steps are plotted.')
+    parser.add_argument('--title', type=str, default='Time Space Diagram',
+                        help='title of the plot.')
+    parser.add_argument('--max_speed', type=int, default=8,
+                        help='The maximum speed in the color range.')
+    parser.add_argument('--min_speed', type=int, default=0,
+                        help='The minimum speed in the color range.')
+    parser.add_argument('--start', type=float, default=0,
+                        help='initial time (in sec) in the plot.')
+
+    args = parser.parse_args()
+
+    # flow_params is imported as a dictionary
+    if '.json' in args.flow_params:
+        flow_params = get_flow_params(args.flow_params)
+    else:
+        module = __import__("examples.exp_configs.non_rl", fromlist=[args.flow_params])
+        flow_params = getattr(module, args.flow_params).flow_params
+
+    tsd_main(args.trajectory_path, flow_params, min_speed=args.min_speed, max_speed=args.max_speed, start=args.start)

From c4ba7adbfe113fe0ce477f937e6e38dd181316e8 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Wed, 8 Jul 2020 12:08:06 -0700
Subject: [PATCH 304/438] Query Prereq Check (#987)

* prereq dict added to query
* prereq checking mechanism implemented, not tested yet
* prereq checking tested
* change to more flexible filter handling
* make safety_rate and safety_max_value floats
* ignore nulls in fact_top_scores
* fix typo
* remove unneeded import
* replace unnecessary use of list with set
* add queries to pre-bin histogram data
* fix the serialization issue with set, convert to list before writing as json
* fix query
* fix query
* fixed query bug

Co-authored-by: liljonnystyle
---
 flow/data_pipeline/data_pipeline.py   |  40 ++++-
 flow/data_pipeline/lambda_function.py |  79 +++++-----
 flow/data_pipeline/query.py           | 215 +++++++++++++++++---------
 3 files changed, 221 insertions(+), 113 deletions(-)

diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py
index 858640914..626c59e39 100644
--- a/flow/data_pipeline/data_pipeline.py
+++ b/flow/data_pipeline/data_pipeline.py
@@ -1,11 +1,13 @@
 """contains class and helper functions for the data pipeline."""
 import pandas as pd
 import boto3
-from flow.data_pipeline.query import QueryStrings
+from botocore.exceptions import ClientError
+from flow.data_pipeline.query import QueryStrings, prerequisites
 from time import time
 from datetime import date
 import csv
 from io import StringIO
+import json


 def generate_trajectory_table(data_path, extra_info, partition_name):
@@ -158,6 +160,42 @@ def update_baseline(s3, baseline_network, baseline_source_id):
                   Body=new_str.getvalue().replace('\r', '').encode())


+def get_completed_queries(s3, source_id):
+    """Return the deserialized set of completed queries from S3."""
+    try:
+        completed_queries_obj = \
+            s3.get_object(Bucket='circles.data.pipeline', Key='lambda_temp/{}'.format(source_id))['Body']
+        completed_queries = json.loads(completed_queries_obj.read().decode('utf-8'))
+    except ClientError as e:
+        if e.response['Error']['Code'] == 'NoSuchKey':
+            completed_queries = set()
+        else:
+            raise
+    return set(completed_queries)
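get_completed_queries and its counterpart put_completed_queries in the next hunk are just a JSON round-trip of per-source sets, stored as lists because sets are not JSON-serializable. The semantics, sketched against an in-memory dict standing in for the S3 bucket (names here are illustrative):

    import json

    fake_s3 = {}  # key -> bytes, stands in for the bucket

    def put_completed(source_id, completed_queries_set):
        body = json.dumps(list(completed_queries_set)).encode('utf-8')
        fake_s3['lambda_temp/{}'.format(source_id)] = body

    def get_completed(source_id):
        raw = fake_s3.get('lambda_temp/{}'.format(source_id))
        return set() if raw is None else set(json.loads(raw.decode('utf-8')))

    put_completed('flow_123', {'FACT_VEHICLE_TRACE'})
    assert get_completed('flow_123') == {'FACT_VEHICLE_TRACE'}
    assert get_completed('flow_999') == set()  # mirrors the NoSuchKey branch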
+
+
+def put_completed_queries(s3, completed_queries):
+    """Put all the completed-query sets into S3 in a serialized JSON format."""
+    for source_id, completed_queries_set in completed_queries.items():
+        completed_queries_list = list(completed_queries_set)
+        completed_queries_json = json.dumps(completed_queries_list)
+        s3.put_object(Bucket='circles.data.pipeline', Key='lambda_temp/{}'.format(source_id),
+                      Body=completed_queries_json.encode('utf-8'))
+
+
+def get_ready_queries(completed_queries, new_query):
+    """Return queries whose prerequisite queries are completed."""
+    readied_queries = []
+    unfinished_queries = prerequisites.keys() - completed_queries
+    updated_completed_queries = completed_queries.copy()
+    updated_completed_queries.add(new_query)
+    for query_name in unfinished_queries:
+        if not prerequisites[query_name][1].issubset(completed_queries):
+            if prerequisites[query_name][1].issubset(updated_completed_queries):
+                readied_queries.append((query_name, prerequisites[query_name][0]))
+    return readied_queries
+
+
 class AthenaQuery:
     """Class used to run queries.

diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py
index 97f625eab..1d813f98b 100644
--- a/flow/data_pipeline/lambda_function.py
+++ b/flow/data_pipeline/lambda_function.py
@@ -1,9 +1,9 @@
 """lambda function on AWS Lambda."""
 import boto3
 from urllib.parse import unquote_plus
-from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data, update_baseline
-from flow.data_pipeline.query import tags, tables, network_using_edge, summary_tables
-from flow.data_pipeline.query import X_FILTER, EDGE_FILTER, WARMUP_STEPS, HORIZON_STEPS
+from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data, update_baseline, \
+    get_ready_queries, get_completed_queries, put_completed_queries
+from flow.data_pipeline.query import tables, network_filters, summary_tables, triggers

 s3 = boto3.client('s3')
 queryEngine = AthenaQuery()
@@ -11,6 +11,8 @@

 def lambda_handler(event, context):
     """Handle S3 put event on AWS Lambda."""
+    # stores the set of completed queries for each source_id
+    completed = {}
     records = []
     # do a pre-sweep to handle tasks other than initalizing a query
     for record in event['Records']:
@@ ... @@
         table = key.split('/')[0]
         if table not in tables:
             continue
-        # delete unwanted metadata files
-        if (key[-9:] == '.metadata'):
-            s3.delete_object(Bucket=bucket, Key=key)
-            continue
-
+        s3.delete_object(Bucket=bucket, Key=(key + '.metadata'))
         # load the partition for newly added table
         query_date = key.split('/')[-3].split('=')[-1]
         partition = key.split('/')[-2].split('=')[-1]
+        source_id = "flow_{}".format(partition.split('_')[1])
+        if table == "fact_vehicle_trace":
+            query_name = "FACT_VEHICLE_TRACE"
+        else:
+            query_name = partition.replace(source_id, "")[1:]
         queryEngine.repair_partition(table, query_date, partition)
-        # delete obsolete data
         if table in summary_tables:
             delete_obsolete_data(s3, key, table)
-        # add table that need to start a query to list
-        if table in tags.keys():
-            records.append((bucket, key, table, query_date, partition))
+        if query_name in triggers:
+            records.append((bucket, key, table, query_name, query_date, partition, source_id))

     # initialize the queries
-    start_filter = WARMUP_STEPS
-    stop_filter = WARMUP_STEPS + HORIZON_STEPS
-    for bucket, key, table, query_date, partition in records:
-        source_id = "flow_{}".format(partition.split('_')[1])
+    for bucket, key, table, query_name, query_date, partition, source_id in records:
+        # retrieve the set of completed queries for this source_id if not already available
+        if source_id not in
completed.keys(): + completed[source_id] = get_completed_queries(s3, source_id) + # if query already recorded before, skip it. This is to tolerate repetitive execution by Lambda + if query_name in completed[source_id]: + continue + # retrieve metadata and use it to determine the right loc_filter metadata_key = "fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv".format(query_date, source_id) response = s3.head_object(Bucket=bucket, Key=metadata_key) - loc_filter = X_FILTER if 'network' in response["Metadata"]: - if response["Metadata"]['network'] in network_using_edge: - loc_filter = EDGE_FILTER + network = response["Metadata"]['network'] + loc_filter = network_filters[network]['loc_filter'] + start_filter = network_filters[network]['warmup_steps'] + stop_filter = network_filters[network]['horizon_steps'] + + # update baseline if needed if table == 'fact_vehicle_trace' \ and 'is_baseline' in response['Metadata'] and response['Metadata']['is_baseline'] == 'True': - update_baseline(s3, response["Metadata"]['network'], source_id) - - query_dict = tags[table] - - # handle different energy models - if table == "fact_energy_trace": - energy_model_id = partition.replace(source_id, "")[1:] - query_dict = tags[energy_model_id] + update_baseline(s3, network, source_id) + readied_queries = get_ready_queries(completed[source_id], query_name) + completed[source_id].add(query_name) # initialize queries and store them at appropriate locations - for table_name, query_list in query_dict.items(): - for query_name in query_list: - result_location = 's3://circles.data.pipeline/{}/date={}/partition_name={}_{}'.format(table_name, - query_date, - source_id, - query_name) - queryEngine.run_query(query_name, - result_location, - query_date, - partition, - loc_filter=loc_filter, - start_filter=start_filter, - stop_filter=stop_filter) + for readied_query_name, table_name in readied_queries: + result_location = 's3://circles.data.pipeline/{}/date={}/partition_name={}_{}'.format(table_name, + query_date, + source_id, + readied_query_name) + queryEngine.run_query(readied_query_name, result_location, query_date, partition, loc_filter=loc_filter, + start_filter=start_filter, stop_filter=stop_filter) + # stores all the updated lists of completed queries back to S3 + put_completed_queries(s3, completed) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 302048632..adc472176 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -1,78 +1,90 @@ """stores all the pre-defined query strings.""" +from collections import defaultdict from enum import Enum # tags for different queries -tags = { - "fact_vehicle_trace": { - "fact_energy_trace": [ - "POWER_DEMAND_MODEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" - ], - "fact_safety_metrics": [ - "FACT_SAFETY_METRICS" - ], - "fact_network_throughput_agg": [ - "FACT_NETWORK_THROUGHPUT_AGG" - ], - "fact_network_inflows_outflows": [ - "FACT_NETWORK_INFLOWS_OUTFLOWS" - ], - "fact_vehicle_counts_by_time": [ - "FACT_VEHICLE_COUNTS_BY_TIME" - ] - }, - "fact_energy_trace": {}, - "fact_vehicle_counts_by_time": {}, - "fact_safety_metrics": { - "fact_safety_metrics_agg": [ - "FACT_SAFETY_METRICS_AGG" - ] - }, - "POWER_DEMAND_MODEL_DENOISED_ACCEL": { - "fact_vehicle_fuel_efficiency_agg": [ - "FACT_VEHICLE_FUEL_EFFICIENCY_AGG" - ], - "fact_network_metrics_by_distance_agg": [ - "FACT_NETWORK_METRICS_BY_DISTANCE_AGG" - ], - "fact_network_metrics_by_time_agg": [ - "FACT_NETWORK_METRICS_BY_TIME_AGG" - ] - }, - 
"POWER_DEMAND_MODEL": {}, - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL": {}, - "fact_vehicle_fuel_efficiency_agg": { - "fact_network_fuel_efficiency_agg": [ - "FACT_NETWORK_FUEL_EFFICIENCY_AGG" - ] - }, - "fact_network_fuel_efficiency_agg": { - "leaderboard_chart": [ - "LEADERBOARD_CHART" - ] - }, - "leaderboard_chart": { - "leaderboard_chart_agg": [ - "LEADERBOARD_CHART_AGG" - ] - }, - "leaderboard_chart_agg": { - "fact_top_scores": [ - "FACT_TOP_SCORES" - ] - } +prerequisites = { + "POWER_DEMAND_MODEL": ( + "fact_energy_trace", {"FACT_VEHICLE_TRACE"} + ), + "POWER_DEMAND_MODEL_DENOISED_ACCEL": ( + "fact_energy_trace", {"FACT_VEHICLE_TRACE"} + ), + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL": ( + "fact_energy_trace", {"FACT_VEHICLE_TRACE"} + ), + "FACT_SAFETY_METRICS": ( + "fact_safety_metrics", {"FACT_VEHICLE_TRACE"} + ), + "FACT_NETWORK_THROUGHPUT_AGG": ( + "fact_network_throughput_agg", {"FACT_VEHICLE_TRACE"} + ), + "FACT_NETWORK_INFLOWS_OUTFLOWS": ( + "fact_network_inflows_outflows", {"FACT_VEHICLE_TRACE"} + ), + "FACT_VEHICLE_COUNTS_BY_TIME": ( + "fact_vehicle_counts_by_time", {"FACT_VEHICLE_TRACE"} + ), + "FACT_VEHICLE_FUEL_EFFICIENCY_AGG": ( + "fact_vehicle_fuel_efficiency_agg", {"FACT_VEHICLE_TRACE", + "POWER_DEMAND_MODEL_DENOISED_ACCEL"} + ), + "FACT_NETWORK_METRICS_BY_DISTANCE_AGG": ( + "fact_network_metrics_by_distance_agg", {"FACT_VEHICLE_TRACE", + "POWER_DEMAND_MODEL_DENOISED_ACCEL"} + ), + "FACT_NETWORK_METRICS_BY_TIME_AGG": ( + "fact_network_metrics_by_time_agg", {"FACT_VEHICLE_TRACE", + "POWER_DEMAND_MODEL_DENOISED_ACCEL"} + ), + "FACT_VEHICLE_FUEL_EFFICIENCY_BINNED": ( + "fact_vehicle_fuel_efficiency_binned", {"FACT_VEHICLE_FUEL_EFFICIENCY_AGG"} + ), + "FACT_NETWORK_FUEL_EFFICIENCY_AGG": ( + "fact_network_fuel_efficiency_agg", {"FACT_VEHICLE_FUEL_EFFICIENCY_AGG"} + ), + "FACT_SAFETY_METRICS_AGG": ( + "fact_safety_metrics_agg", {"FACT_SAFETY_METRICS"} + ), + "FACT_SAFETY_METRICS_BINNED": ( + "fact_safety_metrics_binned", {"FACT_SAFETY_METRICS"} + ), + "LEADERBOARD_CHART": ( + "leaderboard_chart", {"FACT_NETWORK_THROUGHPUT_AGG", + "FACT_NETWORK_FUEL_EFFICIENCY_AGG", + "FACT_SAFETY_METRICS_AGG"} + ), + "LEADERBOARD_CHART_AGG": ( + "leaderboard_chart_agg", {"LEADERBOARD_CHART"} + ), + "FACT_TOP_SCORES": ( + "fact_top_scores", {"LEADERBOARD_CHART_AGG"} + ), } +triggers = [ + "FACT_VEHICLE_TRACE", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "FACT_VEHICLE_FUEL_EFFICIENCY_AGG", + "FACT_SAFETY_METRICS", + "FACT_NETWORK_THROUGHPUT_AGG", + "FACT_NETWORK_FUEL_EFFICIENCY_AGG", + "FACT_SAFETY_METRICS_AGG", + "LEADERBOARD_CHART", + "LEADERBOARD_CHART_AGG" +] + tables = [ "fact_vehicle_trace", "fact_energy_trace", "fact_vehicle_counts_by_time", "fact_safety_metrics", "fact_safety_metrics_agg", + "fact_safety_metrics_binned", "fact_network_throughput_agg", "fact_network_inflows_outflows", "fact_vehicle_fuel_efficiency_agg", + "fact_vehicle_fuel_efficiency_binned", "fact_network_metrics_by_distance_agg", "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", @@ -84,15 +96,16 @@ summary_tables = ["leaderboard_chart_agg", "fact_top_scores"] -network_using_edge = ["I-210 without Ramps"] - -X_FILTER = "x BETWEEN 500 AND 2300" - -EDGE_FILTER = "edge_id <> ALL (VALUES 'ghost0', '119257908#3')" - -WARMUP_STEPS = 600 * 3 * 0.4 - -HORIZON_STEPS = 1000 * 3 * 0.4 +network_filters = defaultdict(lambda: { + 'loc_filter': "x BETWEEN 500 AND 2300", + 'warmup_steps': 500 * 3 * 0.4, + 'horizon_steps': 1000 * 3 * 0.4 + }) +network_filters['I-210 without Ramps'] = { + 'loc_filter': "edge_id <> ALL 
(VALUES 'ghost0', '119257908#3')",
+    'warmup_steps': 600 * 3 * 0.4,
+    'horizon_steps': 1000 * 3 * 0.4
+    }

 VEHICLE_POWER_DEMAND_TACOMA_FINAL_SELECT = """
     SELECT
@@ -231,7 +244,7 @@ class QueryStrings(Enum):
             value_lower_right*(headway-headway_lower)*(rel_speed_upper-leader_rel_speed) +
             value_upper_left*(headway_upper-headway)*(leader_rel_speed-rel_speed_lower) +
             value_upper_right*(headway-headway_lower)*(leader_rel_speed-rel_speed_lower)
-        ) / ((headway_upper-headway_lower)*(rel_speed_upper-rel_speed_lower)), 200) AS safety_value,
+        ) / ((headway_upper-headway_lower)*(rel_speed_upper-rel_speed_lower)), 200.0) AS safety_value,
         vt.source_id
     FROM fact_vehicle_trace vt
     LEFT OUTER JOIN fact_safety_matrix sm ON 1 = 1
@@ -248,13 +261,42 @@ class QueryStrings(Enum):

     FACT_SAFETY_METRICS_AGG = """
         SELECT
             source_id,
-            SUM(CASE WHEN safety_value < 0 THEN 1 ELSE 0 END) * 100 / COUNT() safety_rate,
+            SUM(CASE WHEN safety_value < 0 THEN 1.0 ELSE 0.0 END) * 100.0 / COUNT() safety_rate,
             MAX(safety_value) AS safety_value_max
         FROM fact_safety_metrics
         WHERE 1 = 1
             AND date = \'{date}\'
             AND partition_name = \'{partition}_FACT_SAFETY_METRICS\'
         GROUP BY 1
+        ;
+    """
+
+    FACT_SAFETY_METRICS_BINNED = """
+        WITH unfilter_bins AS (
+            SELECT
+                ROW_NUMBER() OVER() - 51 AS lb,
+                ROW_NUMBER() OVER() - 50 AS ub
+            FROM fact_safety_metrics
+        ), bins AS (
+            SELECT
+                lb,
+                ub
+            FROM unfilter_bins
+            WHERE 1=1
+                AND lb >= -10
+                AND ub <= 10
+        )
+        SELECT
+            CONCAT('[', CAST(bins.lb AS VARCHAR), ', ', CAST(bins.ub AS VARCHAR), ')') AS safety_value_bin,
+            COUNT() AS count
+        FROM bins, fact_safety_metrics fsm
+        WHERE 1 = 1
+            AND fsm.date = \'{date}\'
+            AND fsm.partition_name = \'{partition}_FACT_SAFETY_METRICS\'
+            AND fsm.safety_value >= bins.lb
+            AND fsm.safety_value < bins.ub
+        GROUP BY 1
+        ;
     """

     FACT_NETWORK_THROUGHPUT_AGG = """
         SELECT
@@ -326,6 +368,35 @@ class QueryStrings(Enum):
         ;
     """

+    FACT_VEHICLE_FUEL_EFFICIENCY_BINNED = """
+        WITH unfilter_bins AS (
+            SELECT
+                ROW_NUMBER() OVER() - 1 AS lb,
+                ROW_NUMBER() OVER() AS ub
+            FROM fact_safety_metrics
+        ), bins AS (
+            SELECT
+                lb,
+                ub
+            FROM unfilter_bins
+            WHERE 1=1
+                AND lb >= 0
+                AND ub <= 20
+        )
+        SELECT
+            CONCAT('[', CAST(bins.lb AS VARCHAR), ', ', CAST(bins.ub AS VARCHAR), ')') AS fuel_efficiency_bin,
+            COUNT() AS count
+        FROM bins, fact_vehicle_fuel_efficiency_agg agg
+        WHERE 1 = 1
+            AND agg.date = \'{date}\'
+            AND agg.partition_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\'
+            AND agg.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL'
+            AND 1000 * agg.efficiency_meters_per_joules >= bins.lb
+            AND 1000 * agg.efficiency_meters_per_joules < bins.ub
+        GROUP BY 1
+        ;
+    """
+
     FACT_NETWORK_FUEL_EFFICIENCY_AGG = """
         SELECT
             source_id,
@@ -701,7 +772,7 @@ class QueryStrings(Enum):
         SELECT
             network,
             submission_date,
-            LAG(max_score, 1) OVER (PARTITION BY network ORDER BY submission_date ASC) AS max_score
+            LAG(max_score IGNORE NULLS, 1) OVER (PARTITION BY network ORDER BY submission_date ASC) AS max_score
         FROM curr_max
     ), unioned AS (
         SELECT * FROM curr_max
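The net effect of this patch is that the query DAG is now data: each entry of the prerequisites table names its output table and the set of queries that must finish first, and a query fires on exactly the update that completes that set. A self-contained toy version of the get_ready_queries logic over a two-level slice of that table:

    prerequisites = {
        'FACT_VEHICLE_FUEL_EFFICIENCY_AGG': (
            'fact_vehicle_fuel_efficiency_agg',
            {'FACT_VEHICLE_TRACE', 'POWER_DEMAND_MODEL_DENOISED_ACCEL'}),
        'FACT_NETWORK_FUEL_EFFICIENCY_AGG': (
            'fact_network_fuel_efficiency_agg',
            {'FACT_VEHICLE_FUEL_EFFICIENCY_AGG'}),
    }

    def ready_queries(completed, new_query):
        # a query becomes ready on exactly the update that completes its deps
        updated = completed | {new_query}
        return [(name, table)
                for name, (table, deps) in prerequisites.items()
                if not deps <= completed and deps <= updated]

    # the vehicle-level aggregate becomes ready only once *both* of its
    # prerequisites are in the completed set
    result = ready_queries({'FACT_VEHICLE_TRACE'},
                           'POWER_DEMAND_MODEL_DENOISED_ACCEL')
    assert result == [('FACT_VEHICLE_FUEL_EFFICIENCY_AGG',
                       'fact_vehicle_fuel_efficiency_agg')]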

From b40e4353f2a4736c3eb1e652e48e6497c43b5d5a Mon Sep 17 00:00:00 2001
From: Aboudy Kreidieh
Date: Wed, 8 Jul 2020 12:22:38 -0700
Subject: [PATCH 305/438] Requirements update (#963)

* updated requirements.txt and environment.yml
* Visualizer tests fixes
* remove .func

Co-authored-by: akashvelu
---
 environment.yml                                |  25 ++++++-----
 flow/visualize/visualizer_rllib.py             |   4 +-
 requirements.txt                               |  11 +++--
 .../multi_agent/checkpoint_1/checkpoint-1      | Bin 10209 -> 20358 bytes
 .../checkpoint_1/checkpoint-1.tune_metadata    | Bin 180 -> 210 bytes
 tests/data/rllib_data/multi_agent/params.json  |  40 +++++++++++-------
 tests/data/rllib_data/multi_agent/params.pkl   | Bin 17562 -> 21381 bytes
 .../single_agent/checkpoint_1/checkpoint-1     | Bin 582 -> 26194 bytes
 .../checkpoint_1/checkpoint-1.tune_metadata    | Bin 180 -> 210 bytes
 .../data/rllib_data/single_agent/params.json   |  26 ++++++++----
 tests/data/rllib_data/single_agent/params.pkl  | Bin 6414 -> 6687 bytes
 11 files changed, 64 insertions(+), 42 deletions(-)

diff --git a/environment.yml b/environment.yml
index f57c8d33d..97d9ad6f8 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,18 +1,17 @@
 name: flow

 dependencies:
-  - python==3.6.8
-  - scipy==1.1.0
-  - lxml==4.4.1
-  - six==1.11.0
-  - path.py
-  - python-dateutil==2.7.3
-  - pip>=18.0
-  - tensorflow==1.9.0
-  - cloudpickle==1.2.1
-  - setuptools==41.0.0
-  - plotly==2.4.0
+  - python==3.7.3
   - pip:
+    - scipy==1.1.0
+    - lxml==4.4.1
+    - six==1.11.0
+    - path.py
+    - python-dateutil==2.7.3
+    - pip>=18.0
+    - tensorflow==1.15.2
+    - setuptools==41.0.0
+    - plotly==2.4.0
     - gym==0.14.0
     - pyprind==2.11.2
     - nose2==0.8.0
@@ -21,9 +20,9 @@ dependencies:
     - matplotlib==3.0.0
     - dill
     - lz4
-    - ray==0.7.3
+    - ray==0.8.0
     - setproctitle
     - psutil
     - opencv-python
-    - boto3==1.4.8
+    - boto3==1.10.45
     - redis~=2.10.6

diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py
index 8c38a91c1..c1dd83193 100644
--- a/flow/visualize/visualizer_rllib.py
+++ b/flow/visualize/visualizer_rllib.py
@@ -166,7 +166,7 @@ def visualizer_rllib(args):
     if multiagent:
         rets = {}
         # map the agent id to its policy
-        policy_map_fn = config['multiagent']['policy_mapping_fn'].func
+        policy_map_fn = config['multiagent']['policy_mapping_fn']
         for key in config['multiagent']['policies'].keys():
             rets[key] = []
     else:
@@ -177,7 +177,7 @@ def visualizer_rllib(args):
     if multiagent:
         state_init = {}
         # map the agent id to its policy
-        policy_map_fn = config['multiagent']['policy_mapping_fn'].func
+        policy_map_fn = config['multiagent']['policy_mapping_fn']
         size = config['model']['lstm_cell_size']
         for key in config['multiagent']['policies'].keys():
             state_init[key] = [np.zeros(size, np.float32),
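The visualizer change above follows from the ray bump in this patch: under ray 0.8.0 the multiagent config appears to hold the mapping function as a plain callable rather than a tune.function wrapper, so the .func indirection goes away. A toy config showing the pattern the visualizer now relies on (the dict here is illustrative, not a real RLlib config):

    def policy_mapping_fn(agent_id):
        # every agent maps to the single shared 'av' policy in this sketch
        return 'av'

    config = {'multiagent': {'policies': {'av': None},
                             'policy_mapping_fn': policy_mapping_fn}}

    policy_map_fn = config['multiagent']['policy_mapping_fn']  # no .func needed
    assert policy_map_fn('av_0') == 'av'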
diff --git a/requirements.txt b/requirements.txt
index ccb971a99..a4f6f83f8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,19 +9,24 @@ path.py
 joblib==0.10.3
 python-dateutil==2.7.3
 cached_property
-cloudpickle==1.2.0
 pyglet==1.3.2
 matplotlib==3.1.0
 imutils==0.5.1
 numpydoc
-ray==0.7.3
+ray==0.8.0
 opencv-python
 dill
 lz4
 setproctitle
 psutil
 opencv-python
-boto3==1.4.8
+boto3==1.10.45
 redis~=2.10.6
 pandas==0.24.2
 plotly==2.4.0
+tabulate
+tensorflow==1.15.2
+awscli==1.16.309
+torch==1.4.0
+pytz
+tensorboardX

diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1
index 0693ed4b62a9cabcdbecb267201ea862144f212c..d346e9dc58b39a5b511ced70927eac1d0d32579b 100644
GIT binary patch
[literal 20358 / literal 10209: base85-encoded binary payload omitted]

diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata
7eef2ef15bba26f49eb7e79079714b5c7015bddd..febe7b205e46a15ce78f3248344fddfc47a3eb3e 100644 GIT binary patch delta 147 zcmdnOc!_a>yh3Vfs%f%?foYO~Ns5V?WolxwrMYoZvN4dKY+`6)IMH4^!=N;Ri-Ccm zBr`WPz9hdSF{hB#-68FY_Claw1Rsz{Eyyg+Pf0C?%CdQjM(_fKixbOIQ{oeg<5Mz| mOA6ULBe;QrMTwR1WvNBQnfZBz91*NQ5d%F7J%d8dQau2Xqbj8U delta 117 zcmcb_xP@_oyh4hlr9q;lk*PtFQId&iN}`2{shOprp-HNxMUqi!qUA(;ZLfq^5nK!m z3?-SlsqrQGC5btOtnLm)-yTdlz`zi}2b4)I$SlrJNiBxTvU!U{@B)R46U$Ol;uDMG NQ!", - "Box(3,)", + "Box(6,)", "Box(1,)", {} ] }, - "policies_to_train": [ - "av" - ], - "policy_mapping_fn": "tune.function(.policy_mapping_fn at 0x7fda132e6c80>)" + "policies_to_train": null, + "policy_mapping_fn": "" }, + "no_done_at_end": false, + "no_eager_on_workers": false, "num_cpus_for_driver": 1, "num_cpus_per_worker": 1, "num_envs_per_worker": 1, "num_gpus": 0, "num_gpus_per_worker": 0, - "num_sgd_iter": 30, + "num_sgd_iter": 10, "num_workers": 2, + "object_store_memory": 0, + "object_store_memory_per_worker": 0, "observation_filter": "NoFilter", "optimizer": {}, "output": null, @@ -110,7 +118,7 @@ "sgd_minibatch_size": 128, "shuffle_buffer_size": 0, "shuffle_sequences": true, - "simple_optimizer": true, + "simple_optimizer": false, "soft_horizon": false, "synchronize_filters": true, "tf_session_args": { @@ -126,7 +134,7 @@ "log_device_placement": false }, "timesteps_per_iteration": 0, - "train_batch_size": 60000, + "train_batch_size": 30000, "use_gae": true, "vf_clip_param": 10.0, "vf_loss_coeff": 1.0, diff --git a/tests/data/rllib_data/multi_agent/params.pkl b/tests/data/rllib_data/multi_agent/params.pkl index cd832aa1c3eb1713e608fef452dbe168746e4cfa..192cf7558830fe2e280e383cf7777e9ee669a7f0 100644 GIT binary patch delta 10700 zcmb7K2|SeF`kxv5n(X^dMD|^jEoDMQq%h5DW{eqTk$ptAOrdwQXr-jIC|a~AC0Z0M zT8Jdls#R2||9NL5{ciVv@14(Q&YW}J=RD_ppY1&7ol{nT%&9`^W{Q@kgdhZ;biNr+ zhR%t@tYI)?iA)wBje@$Btsw&y14Bu;KPH|7IuSetHWN#sV{lkDo{V8pQi%LO8&1h% zBIJpDucV0Qu-J?^3>MF(Fz6Tpg~f(uqvjiSi}7Sx3=$hdW-uu!3_8DnCr@W!2n;$A zgJolgbOMxuGC=Lnf_9=7dD&Y^>reX_#$l5%1R|bDGj!Ac=Q9?A$;KoS$rL<|$O6wv zSQ?86tq?H~v9pne%H@=xjUv7%TSurv#08QU)wCwj7;CJE^hB0b9EZlHU}K1MwiS~? zkFmny!4y`3aIY8K>jk%=xuWW7zbP>!N(_fd#1JVlWHv+;T?8E$Juc3nQ%DRZ6eC7c zU}6ar4hsV_gjZ_=jfveL(TQwWtf0eCF#TkkWi_3f)dB%)KbUjShKDSZ6I!iPZlsT0;(t2-HZU z0VPrJQ&b(2#Ym)WUGWTCB+6DADpAy`>s3%jA)V@O$R#3?_E7u`HON}pQCO?XkXD~Y zSA~MA<)k5s{Ml>&d=rNF0;wyr-LCbBwI&kCuc{3D<12B zb#QjIb+&b}cfb+sNp=nnu1-SMR*I4)Q_ybXzfb8xi9;cT33arQ(PJkiz0*%^vEf#>U|Y*eU&)k+Y!)&0^$r=(Q$KFV=cF3j|tgY%G>VP9)Me){JroB`8N3u`7ZexxgUD0r8SeyCwK59 zF&GM+!UovUG$=yH7TE_C>S%Kxk{^)=c#1JZHi(}jEQo9%GSETL;gfrL5;O)-B8ko? 
zCan6SCU`gqJ^v$&c>PNl*`YlRIll%HsqBT3mi7HzIEKd!eV-b@q z5~$2!g6rF|1XF)Bh{BYc1+i?H;GohrQOl7c0T-Ann0D|%`6a)4k&5avUf^6{d8nmL z@at!-DE!O-fo)uss4>M%~6E*)|k7M4zr`msS?E)_fK8 zth^@JX7J7O%122-z{dc~sVkpYDsQnC1sAUtN#{!Q7d#~R3tuuU)%40l4`dZZ(OCoe z0TxD_Ma_PJmA0{H9ctHS1nN-WrP27eE|#gu|Bvd0-uP}6{LEMCK1gKwjHY$+-^ zRN@}r)}tvUCS2F6k+tv0@mJmk68zv z$Mby`*@mLaDBK<>4EA)^{BO1VYj1Y^Q_0Rhm4rA5cm1hEB2u{fPaTqHXT>t%og%`K?p%1cQsDoU*6D&+KLXf5P&W=tyNZZG5s zD&*DhX7sjeE#!kLOev`>NaZTzk6>b8U`R756!2#7W^6AM3@Q}T@Mex+1d7@e3j6u_ zdHn|hFyYNmQYezt>HOpP69N0J%f&Xd@$!CtR!f2@PW{!%5+{&&`n_S&Ix zb`N4J_U>Ie(RP3MHrw?w?E4K>TK1mVT(SSVy2AcfCm?QbqxaA5wEOmb z?+sSj9?M-}yEd<7uU?_EO^@XAy_N-!ta+L`_PVAWv@UPWv8|3Uwe~+$wDeVoAlNdWB38oIrklK}lwAW>xBB?KDogSH20aQX_bQ Z!X-tCnR%&2@x>*HC8>q1wS{b@dH^{&vL65d diff --git a/tests/data/rllib_data/single_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/single_agent/checkpoint_1/checkpoint-1.tune_metadata index e83b72aeaf2291e2f177e78504c94fde7e5a3392..55b72be28978f4b959b001b57aad8683ce0f562a 100644 GIT binary patch delta 105 zcmdnOc!_a>yn>;LiIGWil3_}kiMdH~YHCudrJ;dks!3v6iiufrs?kJyZ7GA&2rdQ& zhLX(O)cBJ8lEj=sR(FTQ4ck^VOl;9ph~Ngw6(v^2m!%dJXXfV>azwBKMGW*T^b86) GOZ5Pp1Rp;D delta 75 zcmcb_xP@_oyh2K{ak8bMiII7tiLptlxw)lqD0&z+vtKr&yE|cKHf@4J z!HdOE8YW&u@vH|At``s9yoeX?>QBf)@CP`XL|Ut}u*|;iy*E4G_x4v_U6|WVJuA$P z4c4RH*-kc$0<=xp17N(jsmrH@W%;$RV2vlZ*N7B2);TShLqb$_1g;}a+R(dEin`yb z>Efn(WJT+nc^q*;1LR;qXoy_Gg*>NMtnca0Nlt45)hQ!w8us=&lOaVe4FNd~@u*Bqb2F}-k@2MNokn245Q zt(4>)W6=;SlDkG(FJF|gu_pV*?A4l2w`~ZU+-4;7YzM1j?e*ln5jL%%u?0H~Bpgo} zSzNJva6AYFsu3?@fPi>)5noRET8=p`i8xBEl7E*U(qGm>5J{&K@!+u0H@A<$kSLOYfGGRxmZ*Q-cjsk-_Tn3NY2YhnNb+pQ!E0kLLCGXQ8jj#X|jFCV+sl rRuVRoR__BANX1FD^bnsKZczi+$R%OV{fGTGCH^t}P}59#cjoFnXm$xn~ZNi9pwnbO1J>F4G@Sy*J# z#7POv%-+lszrUUQi_unK2X_xoaei7!d`5mzW>tRPX&nzxnl9-g4lUW6_ zu_Plk9%S`qS(Y3|ZixLr*BO~jp22!rqBuD^yA1I>alb>G@4CPOr%_h%RmYR{7 zoReA%lq@MKoqUo_d$T0_EXK{3IVLbNo9LNN-pD0Bxt1$r@&+!6$*;LqF&mk3O`aei zz4?}a2os~xrMVD;wi(OSDG7N zo?n!mT2ws6o2iEb#7{3M1#%cBYl&x0l-Cm#C@D(J%!^M-EJ@CYFV3t=o#LDDYNC(Q VW;@ZhjGNz!S2Hq7PR^Fx0st4Wrak}w From bb1f4f5fb06382b609e3ced212b2ac62790e0e96 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 8 Jul 2020 16:39:19 -0700 Subject: [PATCH 306/438] remove extra whitespace --- flow/core/params.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/core/params.py b/flow/core/params.py index c6feb5086..6f3ec2fbc 100755 --- a/flow/core/params.py +++ b/flow/core/params.py @@ -308,8 +308,8 @@ def add(self, if energy_model not in ENERGY_MODELS: print('{} for vehicle {} is not a valid energy model. 
Defaulting to {}\n'.format(energy_model, - veh_id, - DEFAULT_ENERGY_MODEL)) + veh_id, + DEFAULT_ENERGY_MODEL)) energy_model = DEFAULT_ENERGY_MODEL type_params = {} From 9f1a8344c00b863915311d4cd3ae74cee960cf97 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 8 Jul 2020 16:42:33 -0700 Subject: [PATCH 307/438] whitespace linting --- flow/core/experiment.py | 8 ++++---- flow/data_pipeline/query.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index a7ac07738..f29c547ab 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -235,10 +235,10 @@ def rl_actions(*_): if to_aws: tsd_main(trajectory_table_path, - {'network': self.env.network.__class__}, - min_speed=0, - max_speed=10, - start=self.env.env_params.warmup_steps) + {'network': self.env.network.__class__}, + min_speed=0, + max_speed=10, + start=self.env.env_params.warmup_steps) upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date, source_id), diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index adc472176..a57c6e19e 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -278,13 +278,13 @@ class QueryStrings(Enum): ROW_NUMBER() OVER() - 50 AS ub FROM fact_safety_metrics ), bins AS ( - SELECT + SELECT lb, ub FROM unfilter_bins WHERE 1=1 AND lb >= -10 - AND ub <= 10 + AND ub <= 10 ) SELECT CONCAT('[', CAST(bins.lb AS VARCHAR), ', ', CAST(bins.ub AS VARCHAR), ')') AS safety_value_bin, From 220994e38df38aa0ba1b0ae3123b0af4f0332c0d Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 8 Jul 2020 17:30:42 -0700 Subject: [PATCH 308/438] Update energy query with new power demand model (#996) * update tacoma power demand query, meters/Joules -> mpg conversion --- flow/data_pipeline/query.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index a57c6e19e..558488d8e 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -114,13 +114,12 @@ speed, acceleration, road_grade, - GREATEST(0, 2041 * speed * (( - CASE - WHEN acceleration > 0 THEN 1 - WHEN acceleration < 0 THEN 0 - ELSE 0.5 - END * (1 - {0}) + {0}) * acceleration + 9.807 * SIN(road_grade) - ) + 2041 * 9.807 * 0.0027 * speed + 0.5 * 1.225 * 3.2 * 0.4 * POW(speed,3)) AS power, + GREATEST(0, 2041 * acceleration * speed + + 3405.5481762 + + 83.12392997 * speed + + 6.7650718327 * POW(speed,2) + + 0.7041355229 * POW(speed,3) + ) + GREATEST(0, 4598.7155 * accel + 975.12719 * accel * speed) AS power, \'{1}\' AS energy_model_id, source_id FROM {2} @@ -361,7 +360,7 @@ class QueryStrings(Enum): distance_meters, power_watts * time_step_size_seconds AS energy_joules, distance_meters / (power_watts * time_step_size_seconds) AS efficiency_meters_per_joules, - 19972 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon + 33561 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon FROM sub_fact_vehicle_trace WHERE 1 = 1 AND power_watts * time_step_size_seconds != 0 @@ -404,7 +403,7 @@ class QueryStrings(Enum): SUM(distance_meters) AS distance_meters, SUM(energy_joules) AS energy_joules, SUM(distance_meters) / SUM(energy_joules) AS efficiency_meters_per_joules, - 19972 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon + 33561 * SUM(distance_meters) / SUM(energy_joules) AS 
efficiency_miles_per_gallon
         FROM fact_vehicle_fuel_efficiency_agg
         WHERE 1 = 1
             AND date = \'{date}\'
@@ -420,7 +419,7 @@ class QueryStrings(Enum):
             t.source_id,
             e.energy_model_id,
             e.efficiency_meters_per_joules,
-            19972 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon,
+            33561 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon,
             t.throughput_per_hour,
             s.safety_rate,
             s.safety_value_max
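
For reference, the 33561 factor above converts a meters-per-joule
efficiency into miles per gallon. It is consistent with the "15 kilowatts =
1 gallon/hour" convention in flow/energy_models/base_energy.py (one gallon
is then 15 kW x 3600 s = 54 MJ) divided by roughly 1609 meters per mile.
The derivation below is inferred from those constants rather than stated in
the patch, and the names in the sketch are illustrative only.

    # Sanity check of the meters/joule -> miles/gallon factor (inferred,
    # not documented in the patch itself).
    JOULES_PER_GALLON = 15e3 * 3600.0  # "15 kW == 1 gallon/hour" -> 54 MJ
    METERS_PER_MILE = 1609.0

    mpg_factor = JOULES_PER_GALLON / METERS_PER_MILE
    print(round(mpg_factor, 1))  # 33561.2, matching the constant used above
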

From f1ded54309240880cf03a652873aa5bbba227f04 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Wed, 8 Jul 2020 19:48:02 -0700
Subject: [PATCH 309/438] Power-Demand Model fix (#995)

* fix some implementation errors in energy models

* pull i210_dev and fix flake8
---
 flow/energy_models/base_energy.py   |  4 +-
 flow/energy_models/power_demand.py  | 63 +++++++++++++++++++++++------
 flow/energy_models/toyota_energy.py | 15 ++++---
 3 files changed, 59 insertions(+), 23 deletions(-)

diff --git a/flow/energy_models/base_energy.py b/flow/energy_models/base_energy.py
index bf1e16e09..a16c84694 100644
--- a/flow/energy_models/base_energy.py
+++ b/flow/energy_models/base_energy.py
@@ -12,9 +12,7 @@ class BaseEnergyModel(metaclass=ABCMeta):
     to be an averaged-size vehicle.
     """

-    def __init__(self, kernel):
-        self.k = kernel
-
+    def __init__(self):
         # 15 kilowatts = 1 gallon/hour conversion factor
         self.conversion = 15e3

diff --git a/flow/energy_models/power_demand.py b/flow/energy_models/power_demand.py
index ddf09b2fc..d8c6bc9ec 100644
--- a/flow/energy_models/power_demand.py
+++ b/flow/energy_models/power_demand.py
@@ -16,14 +16,12 @@ class PowerDemandModel(BaseEnergyModel, metaclass=ABCMeta):
     """

     def __init__(self,
-                 kernel,
                  mass=2041,
                  area=3.2,
                  rolling_res_coeff=0.0027,
                  aerodynamic_drag_coeff=0.4,
                  p1_correction=4598.7155,
                  p3_correction=975.12719):
-        self.k = kernel
         self.g = 9.807
         self.rho_air = 1.225
         self.gamma = 1
@@ -31,8 +29,7 @@ def __init__(self,
         self.cross_area = area
         self.rolling_res_coeff = rolling_res_coeff
         self.aerodynamic_drag_coeff = aerodynamic_drag_coeff
-        self.p1_correction = p1_correction
-        self.p3_correction = p3_correction
+        self.power_correction_coeffs = np.array([p1_correction, p3_correction])

     def calculate_power_at_the_wheels(self, accel, speed, grade):
         """Calculate the instantaneous power required.
@@ -91,7 +88,8 @@ def get_power_correction_factor(self, accel, speed, grade):
         -------
         float
         """
-        return self.p1_correction * accel + self.p3_correction * accel * speed
+        state_variables = np.array([accel, accel * speed])
+        return max(0, np.dot(self.power_correction_coeffs, state_variables))

     def get_instantaneous_power(self, accel, speed, grade):
         """See parent class.
@@ -100,27 +98,68 @@ def get_instantaneous_power(self, accel, speed, grade):
         """
         regen_cap = self.get_regen_cap(accel, speed, grade)
         power_at_the_wheels = max(regen_cap, self.calculate_power_at_the_wheels(accel, speed, grade))
-        correction_factor = max(regen_cap, self.get_power_correction_factor(accel, speed, grade))
+        correction_factor = self.get_power_correction_factor(accel, speed, grade)
         return power_at_the_wheels + correction_factor


 class PDMCombustionEngine(PowerDemandModel):
     """Power Demand Model for a combustion engine vehicle."""

+    def __init__(self,
+                 idle_coeff=3405.5481762,
+                 linear_friction_coeff=83.123929917,
+                 quadratic_friction_coeff=6.7650718327,
+                 drag_coeff=0.7041355229,
+                 p1_correction=4598.7155,
+                 p3_correction=975.12719):
+        super(PDMCombustionEngine, self).__init__()
+        self.fuel_consumption_power_coeffs = np.array([idle_coeff,
+                                                       linear_friction_coeff,
+                                                       quadratic_friction_coeff,
+                                                       drag_coeff])
+
     def get_regen_cap(self, accel, speed, grade):
         """See parent class."""
         return 0

+    def calculate_fuel_consumption_power(self, accel, speed, grade):
+        """Calculate the instantaneous power from a fitted function to Toyota Tacoma fuel consumption.
+
+        Parameters
+        ----------
+        accel : float
+            Instantaneous acceleration of the vehicle
+        speed : float
+            Instantaneous speed of the vehicle
+        grade : float
+            Instantaneous road grade of the vehicle
+        Returns
+        -------
+        float
+        """
+        state_variables = np.array([1, speed, speed**2, speed**3])
+        power_0 = np.dot(self.fuel_consumption_power_coeffs, state_variables)
+        return max(self.mass * accel * speed + power_0, 0)
+
+    def get_instantaneous_power(self, accel, speed, grade):
+        """See parent class."""
+        fuel_consumption_power = self.calculate_fuel_consumption_power(accel, speed, grade)
+        power_correction_factor = self.get_power_correction_factor(accel, speed, grade)
+        return fuel_consumption_power + power_correction_factor
+

 class PDMElectric(PowerDemandModel):
     """Power Demand Model for an electric vehicle."""

-    def __init__(self, kernel):
-        super(PDMElectric, self).__init__(kernel,
-                                          mass=1663,
-                                          area=2.4,
-                                          rolling_res_coeff=0.007,
-                                          aerodynamic_drag_coeff=0.24)
+    def __init__(self,
+                 mass=1663,
+                 area=2.4,
+                 rolling_res_coeff=0.007,
+                 aerodynamic_drag_coeff=0.24):
+        super(PDMElectric, self).__init__(mass=mass,
+                                          area=area,
+                                          rolling_res_coeff=rolling_res_coeff,
+                                          aerodynamic_drag_coeff=aerodynamic_drag_coeff)

     def get_regen_cap(self, accel, speed, grade):
         """See parent class."""
diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py
index d24b41662..64036aab6 100644
--- a/flow/energy_models/toyota_energy.py
+++ b/flow/energy_models/toyota_energy.py
@@ -9,9 +9,7 @@
 class ToyotaModel(BaseEnergyModel, metaclass=ABCMeta):
     """Base Toyota Energy model class."""

-    def __init__(self, kernel, filename=None):
-        self.k = kernel
-
+    def __init__(self, filename):
         # download file from s3 bucket
         s3 = boto3.client('s3')
         s3.download_file('toyota.restricted', filename, 'temp.pkl')
@@ -30,14 +28,15 @@ def get_instantaneous_power(self, accel, speed, grade):
 class PriusEnergy(ToyotaModel):
     """Toyota Prius (EV) energy model class."""

-    def __init__(self, kernel, soc=0.9):
-        super(PriusEnergy, self).__init__(kernel, filename='prius_ev.pkl')
+    def __init__(self, sim_step, soc=0.9):
+        super(PriusEnergy, self).__init__(filename='prius_ev.pkl')
+        self.sim_step = sim_step
         self.soc = soc

     def get_instantaneous_power(self, accel, speed, grade):
         """See parent class."""
         socdot = self.toyota_energy(self.soc, accel, speed, grade)
-        self.soc -= socdot * self.k.env.sim_step
+        self.soc -= socdot * self.sim_step
         # FIXME (Joy): convert socdot to power
         return socdot

@@ -45,8 +44,8 @@ def get_instantaneous_power(self, accel, speed, grade):
 class TacomaEnergy(ToyotaModel):
     """Toyota Tacoma energy model class."""

-    def __init__(self, kernel):
-        super(TacomaEnergy, self).__init__(kernel, filename='tacoma.pkl')
+    def __init__(self):
+        super(TacomaEnergy, self).__init__(filename='tacoma.pkl')

     def get_instantaneous_power(self, accel, speed, grade):
         """See parent class."""
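
With the simulation-kernel argument removed by the patch above, the
power-demand models can be exercised in isolation. Below is a minimal usage
sketch, assuming only that the flow package is importable; the inputs are
acceleration in m/s^2, speed in m/s, and road grade (which these models
accept but do not yet use).

    # Standalone check of the refactored power-demand models; a sketch,
    # not part of the patch.
    from flow.energy_models.power_demand import PDMCombustionEngine, PDMElectric

    combustion = PDMCombustionEngine()  # Tacoma-fit coefficients by default
    electric = PDMElectric()            # default EV mass/area/drag parameters

    accel, speed, grade = 0.5, 20.0, 0.0
    print(combustion.get_instantaneous_power(accel, speed, grade))  # watts
    print(electric.get_instantaneous_power(accel, speed, grade))    # watts
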

From f63cc37262c70f1f41cd4785dff2c4c8e758a5e8 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Thu, 9 Jul 2020 14:41:51 -0700
Subject: [PATCH 310/438] convert tacoma fc to gallons per hour

---
 flow/energy_models/power_demand.py  | 3 ++-
 flow/energy_models/toyota_energy.py | 7 ++++---
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/flow/energy_models/power_demand.py b/flow/energy_models/power_demand.py
index d8c6bc9ec..8cb5fd20c 100644
--- a/flow/energy_models/power_demand.py
+++ b/flow/energy_models/power_demand.py
@@ -1,8 +1,9 @@
 """Script containing the vehicle power demand model energy classes."""
+from abc import ABCMeta, abstractmethod
 import math
 import numpy as np
+
 from flow.energy_models.base_energy import BaseEnergyModel
-from abc import ABCMeta, abstractmethod


 class PowerDemandModel(BaseEnergyModel, metaclass=ABCMeta):
diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py
index 64036aab6..ae1ecf9f3 100644
--- a/flow/energy_models/toyota_energy.py
+++ b/flow/energy_models/toyota_energy.py
@@ -1,9 +1,10 @@
 """Script containing the Toyota energy classes."""
+from abc import ABCMeta, abstractmethod
 import dill as pickle
 import boto3
-from flow.energy_models.base_energy import BaseEnergyModel
 import os
-from abc import ABCMeta, abstractmethod
+
+from flow.energy_models.base_energy import BaseEnergyModel


 class ToyotaModel(BaseEnergyModel, metaclass=ABCMeta):
@@ -54,4 +55,4 @@ def get_instantaneous_power(self, accel, speed, grade):
     def get_instantaneous_fuel_consumption(self, accel, speed, grade):
         """See parent class."""
         fc = self.toyota_energy(accel, speed, grade)
-        return fc
+        return fc * 3600.0 / 3217.25
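
The new return value reads as a conversion from a per-second fuel rate to
gallons per hour: 3600.0 moves from seconds to hours, and 3217.25 matches
the mass in grams of one US gallon of fuel at a density near 0.85 kg/L
(3.78541 L x 850 g/L is roughly 3217.6 g). Treating fc as grams per second
is an inference here, not something the patch documents.

    # Inferred unit conversion behind `fc * 3600.0 / 3217.25`; the
    # grams-per-second reading of `fc` is an assumption.
    SECONDS_PER_HOUR = 3600.0
    GRAMS_PER_GALLON = 3.78541 * 850.0  # US gallon at ~0.85 kg/L, ~3217.6 g

    def grams_per_sec_to_gallons_per_hour(fc):
        """Convert a fuel-consumption rate from g/s to gal/hr."""
        return fc * SECONDS_PER_HOUR / GRAMS_PER_GALLON

    print(grams_per_sec_to_gallons_per_hour(1.0))  # ~1.119, i.e. 3600/3217.25
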
""" def __init__(self): diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py index ae1ecf9f3..492304b48 100644 --- a/flow/energy_models/toyota_energy.py +++ b/flow/energy_models/toyota_energy.py @@ -14,8 +14,15 @@ def __init__(self, filename): # download file from s3 bucket s3 = boto3.client('s3') s3.download_file('toyota.restricted', filename, 'temp.pkl') + with open('temp.pkl', 'rb') as file: - self.toyota_energy = pickle.load(file) + try: + self.toyota_energy = pickle.load(file) + except TypeError: + print('Must use Python version 3.6.8 to unpickle') + # delete pickle file + os.remove(file) + raise # delete pickle file os.remove(file) From f5659041f0ff789f0f388252282e7edd4c316b42 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 9 Jul 2020 17:08:48 -0700 Subject: [PATCH 312/438] modify i210_replay --- flow/core/experiment.py | 33 +++++++++++++++++++++++++++++++++ flow/visualize/i210_replay.py | 10 +++++++++- 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index d3e241ff8..62e872ccb 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -2,6 +2,7 @@ from flow.utils.registry import make_create_env from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration from flow.data_pipeline.leaderboard_utils import network_name_translate +from flow.core.rewards import veh_energy_consumption from collections import defaultdict from datetime import datetime, timezone import logging @@ -142,7 +143,13 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only info_dict = { "velocities": [], "outflows": [], + "avg_trip_energy": [], + "avg_trip_time": [], + "total_completed_trips": [] } + all_trip_energy_distribution = defaultdict(lambda: []) + all_trip_time_distribution = defaultdict(lambda: []) + info_dict.update({ key: [] for key in self.custom_callables.keys() }) @@ -185,10 +192,15 @@ def rl_actions(*_): else: ret = 0 vel = [] + per_vehicle_energy_trace = defaultdict(lambda: []) + completed_veh_types = {} + completed_vehicle_avg_energy = {} + completed_vehicle_travel_time = {} custom_vals = {key: [] for key in self.custom_callables.keys()} run_id = "run_{}".format(i) self.env.pipeline_params = (extra_info, source_id, run_id) state = self.env.reset() + initial_vehicles = set(self.env.k.vehicle.get_ids()) for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) @@ -216,6 +228,24 @@ def rl_actions(*_): for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) + for past_veh_id in per_vehicle_energy_trace.keys(): + if past_veh_id not in veh_ids and past_veh_id not in completed_vehicle_avg_energy: + all_trip_energy_distribution[completed_veh_types[past_veh_id]].append( + np.sum(per_vehicle_energy_trace[past_veh_id])) + all_trip_time_distribution[completed_veh_types[past_veh_id]].append( + len(per_vehicle_energy_trace[past_veh_id])) + completed_vehicle_avg_energy[past_veh_id] = np.sum(per_vehicle_energy_trace[past_veh_id]) + completed_vehicle_travel_time[past_veh_id] = len(per_vehicle_energy_trace[past_veh_id]) + + for veh_id in veh_ids: + if veh_id not in initial_vehicles: + if veh_id not in per_vehicle_energy_trace: + # we have to skip the first step's energy calculation + per_vehicle_energy_trace[veh_id].append(0) + completed_veh_types[veh_id] = self.env.k.vehicle.get_type(veh_id) + else: + 
per_vehicle_energy_trace[veh_id].append(-1 * veh_energy_consumption(self.env, veh_id)) + if multiagent and done['__all__']: break if type(done) is dict and done['__all__'] or type(done) is not dict and done: @@ -233,6 +263,9 @@ def rl_actions(*_): info_dict["returns"] = rets info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) + info_dict["avg_trip_energy"].append(np.mean(list(completed_vehicle_avg_energy.values()))) + info_dict["avg_trip_time"].append(np.mean(list(completed_vehicle_travel_time.values()))) + info_dict["total_completed_trips"].append(len(list(completed_vehicle_avg_energy.values()))) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 49189a12d..588dee795 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -484,6 +484,10 @@ def create_parser(): action='store_true', help='specifies whether this is a baseline run' ) + parser.add_argument( + '--exp_config', type=str, + help='Name of the experiment configuration file, as located in ' + 'exp_configs/non_rl.') return parser @@ -502,7 +506,11 @@ def create_parser(): rllib_config = get_rllib_config(rllib_result_dir) - flow_params = deepcopy(I210_MA_DEFAULT_FLOW_PARAMS) + if args.exp_config: + module = __import__("../../examples/exp_configs.non_rl", fromlist=[args.exp_config]) + flow_params = getattr(module, args.exp_config).flow_params + else: + flow_params = deepcopy(I210_MA_DEFAULT_FLOW_PARAMS) if args.multi_node: ray.init(redis_address='localhost:6379') From 97333cff14f2742dd76fe503dd35c0d11e6c06a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nathan=20Lichtl=C3=A9?= Date: Fri, 10 Jul 2020 02:43:56 +0200 Subject: [PATCH 313/438] add --multi_node flag --- examples/train.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index d062fd39a..3bdd751e6 100644 --- a/examples/train.py +++ b/examples/train.py @@ -84,6 +84,9 @@ def parse_args(args): parser.add_argument( '--checkpoint_path', type=str, default=None, help='Directory with checkpoint to restore training from.') + parser.add_argument('--multi_node', action='store_true', + help='Set to true if this will be run in cluster mode.' 
+ 'Relevant for rllib') return parser.parse_known_args(args)[0] @@ -350,7 +353,9 @@ def train_rllib(submodule, flags): def trial_str_creator(trial): return "{}_{}".format(trial.trainable_name, trial.experiment_tag) - if flags.local_mode: + if flags.multi_node: + ray.init(redis_address='localhost:6379') + elif flags.local_mode: ray.init(local_mode=True) else: ray.init() From b8b1ec91461bae846811ab333cb40bc44bbacf6d Mon Sep 17 00:00:00 2001 From: Rchide Date: Fri, 10 Jul 2020 10:35:53 +0200 Subject: [PATCH 314/438] Update sumo_setup files + Hardcode restart_instance=True --- flow/core/params.py | 4 ++-- scripts/setup_libsumo_ubuntu.sh | 19 ------------------- scripts/setup_sumo_osx.sh | 2 +- scripts/setup_sumo_ubuntu1404.sh | 8 +++++--- scripts/setup_sumo_ubuntu1604.sh | 14 +++++++++----- scripts/setup_sumo_ubuntu1804.sh | 13 ++++++++----- 6 files changed, 25 insertions(+), 35 deletions(-) delete mode 100755 scripts/setup_libsumo_ubuntu.sh diff --git a/flow/core/params.py b/flow/core/params.py index dd10b85ca..0f9857b00 100755 --- a/flow/core/params.py +++ b/flow/core/params.py @@ -396,7 +396,7 @@ class SimParams(object): def __init__(self, sim_step=0.1, render=False, - restart_instance=False, + restart_instance=True, emission_path=None, save_render=False, sight_radius=25, @@ -406,7 +406,7 @@ def __init__(self, """Instantiate SimParams.""" self.sim_step = sim_step self.render = render - self.restart_instance = restart_instance + self.restart_instance = True#restart_instance self.emission_path = emission_path self.save_render = save_render self.sight_radius = sight_radius diff --git a/scripts/setup_libsumo_ubuntu.sh b/scripts/setup_libsumo_ubuntu.sh deleted file mode 100755 index 09cee8c08..000000000 --- a/scripts/setup_libsumo_ubuntu.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -echo "Installing system dependencies for SUMO" -sudo apt-get update -sudo apt-get install cmake python g++ libxerces-c-dev libfox-1.6-dev -sudo apt-get install libgdal-dev libproj-dev libgl2ps-dev swig - -echo "Installing sumo binaries and python tools" -mkdir -p $HOME/sumo_binaries -pushd $HOME/sumo_binaries -git clone https://github.com/eclipse/sumo.git -cd sumo -git checkout 2147d155b1 -cmake . 
-make -j$(nproc) -popd - -echo 'export PATH="$PATH:$HOME/sumo_binaries/sumo/bin"' >> ~/.bashrc -echo 'export SUMO_HOME="$HOME/sumo_binaries/sumo"' >> ~/.bashrc -echo 'export PYTHONPATH="$PYTHONPATH:$HOME/sumo_binaries/sumo/tools"' >> ~/.bashrc diff --git a/scripts/setup_sumo_osx.sh b/scripts/setup_sumo_osx.sh index 581e4b880..6f036678f 100755 --- a/scripts/setup_sumo_osx.sh +++ b/scripts/setup_sumo_osx.sh @@ -12,7 +12,7 @@ brew install Caskroom/cask/xquartz autoconf automake pkg-config libtool gdal pro echo "Installing sumo binaries" mkdir -p $HOME/sumo_binaries/bin pushd $HOME/sumo_binaries/bin -wget https://akreidieh.s3.amazonaws.com/sumo/flow-0.4.0/binaries-mac.tar.xz +wget https://flow-sumo.s3-us-west-1.amazonaws.com/libsumo/sumo_binaries_macos.zip tar -xf binaries-mac.tar.xz rm binaries-mac.tar.xz chmod +x * diff --git a/scripts/setup_sumo_ubuntu1404.sh b/scripts/setup_sumo_ubuntu1404.sh index 41d4cb113..ea6487183 100755 --- a/scripts/setup_sumo_ubuntu1404.sh +++ b/scripts/setup_sumo_ubuntu1404.sh @@ -11,10 +11,12 @@ sudo apt-get install -y libfox-1.6-dev echo "Installing sumo binaries" mkdir -p $HOME/sumo_binaries/bin pushd $HOME/sumo_binaries/bin -wget https://akreidieh.s3.amazonaws.com/sumo/flow-0.4.0/binaries-ubuntu1404.tar.xz -tar -xf binaries-ubuntu1404.tar.xz -rm binaries-ubuntu1404.tar.xz +wget https://flow-sumo.s3-us-west-1.amazonaws.com/libsumo/sumo_binaries_ubuntu1404.tar.gz +tar -xf sumo_binaries_ubuntu1404.tar.gz +rm sumo_binaries_ubuntu1404.tar.gz chmod +x * popd +echo '# Added by Sumo / Libsumo instalation' >> ~/.bashrc echo 'export PATH="$HOME/sumo_binaries/bin:$PATH"' >> ~/.bashrc echo 'export SUMO_HOME="$HOME/sumo_binaries/bin"' >> ~/.bashrc +echo 'export PYTHONPATH="$PYTHONPATH:$HOME/sumo_binaries/tools"' >> ~/.bashrc diff --git a/scripts/setup_sumo_ubuntu1604.sh b/scripts/setup_sumo_ubuntu1604.sh index 744a11929..7fe4b2629 100755 --- a/scripts/setup_sumo_ubuntu1604.sh +++ b/scripts/setup_sumo_ubuntu1604.sh @@ -1,4 +1,5 @@ #!/bin/bash + echo "Installing system dependencies for SUMO" sudo apt-get update sudo apt-get install -y cmake swig libgtest-dev python-pygame python-scipy @@ -9,12 +10,15 @@ sudo apt-get install -y python3-dev sudo pip3 install cmake cython echo "Installing sumo binaries" -mkdir -p $HOME/sumo_binaries/bin -pushd $HOME/sumo_binaries/bin -wget https://akreidieh.s3.amazonaws.com/sumo/flow-0.4.0/binaries-ubuntu1604.tar.xz -tar -xf binaries-ubuntu1604.tar.xz -rm binaries-ubuntu1604.tar.xz +cd $HOME +wget https://flow-sumo.s3-us-west-1.amazonaws.com/libsumo/sumo_binaries_ubuntu1804.tar.gz +tar -zxvf sumo_binaries_ubuntu1804.tar.gz +rm sumo_binaries_ubuntu1804.tar.gz +cd sumo_binaries chmod +x * popd + +echo '# Added by Sumo / Libsumo instalation' >> ~/.bashrc echo 'export PATH="$HOME/sumo_binaries/bin:$PATH"' >> ~/.bashrc echo 'export SUMO_HOME="$HOME/sumo_binaries/bin"' >> ~/.bashrc +echo 'export PYTHONPATH="$PYTHONPATH:$HOME/sumo_binaries/tools"' >> ~/.bashrc diff --git a/scripts/setup_sumo_ubuntu1804.sh b/scripts/setup_sumo_ubuntu1804.sh index c9e2a09af..7fe4b2629 100755 --- a/scripts/setup_sumo_ubuntu1804.sh +++ b/scripts/setup_sumo_ubuntu1804.sh @@ -10,12 +10,15 @@ sudo apt-get install -y python3-dev sudo pip3 install cmake cython echo "Installing sumo binaries" -mkdir -p $HOME/sumo_binaries/bin -pushd $HOME/sumo_binaries/bin -wget https://akreidieh.s3.amazonaws.com/sumo/flow-0.4.0/binaries-ubuntu1804.tar.xz -tar -xf binaries-ubuntu1804.tar.xz -rm binaries-ubuntu1804.tar.xz +cd $HOME +wget 
https://flow-sumo.s3-us-west-1.amazonaws.com/libsumo/sumo_binaries_ubuntu1804.tar.gz +tar -zxvf sumo_binaries_ubuntu1804.tar.gz +rm sumo_binaries_ubuntu1804.tar.gz +cd sumo_binaries chmod +x * popd + +echo '# Added by Sumo / Libsumo instalation' >> ~/.bashrc echo 'export PATH="$HOME/sumo_binaries/bin:$PATH"' >> ~/.bashrc echo 'export SUMO_HOME="$HOME/sumo_binaries/bin"' >> ~/.bashrc +echo 'export PYTHONPATH="$PYTHONPATH:$HOME/sumo_binaries/tools"' >> ~/.bashrc From 18f3fd204dfe4ffac1336e4ab038b2392d56b5ca Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 10 Jul 2020 18:33:48 -0700 Subject: [PATCH 315/438] fix minor flake8 --- flow/core/params.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/core/params.py b/flow/core/params.py index 0f9857b00..216202e90 100755 --- a/flow/core/params.py +++ b/flow/core/params.py @@ -406,7 +406,7 @@ def __init__(self, """Instantiate SimParams.""" self.sim_step = sim_step self.render = render - self.restart_instance = True#restart_instance + self.restart_instance = True # restart_instance self.emission_path = emission_path self.save_render = save_render self.sight_radius = sight_radius From 3ac508aaf380c11c0d2ad8ee0bad6ca920fcf905 Mon Sep 17 00:00:00 2001 From: Aboudy Kreidieh Date: Fri, 10 Jul 2020 19:43:48 -0700 Subject: [PATCH 316/438] Ak/i210 master merge (#994) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * implement HighwayNetwork for Time-Space Diagrams (#979) * fixed h-baselines bug (#982) * Replicated changes in 867. Done bug (#980) * Aimsun changes minus reset * removed crash attribute * tensorflow 1.15.2 * merge custom output and failsafes to master (#981) * add write_to_csv() function to master * include pipeline README.md * add data pipeline __init__ * add experiment.py changes * add write_to_csv() function to master * change warning print to ValueError message * update to new update_accel methods * add display_warnings boolean * add get_next_speed() function to base vehicle class * revert addition of get_next_speed * merge custom output and failsafes to master * add write_to_csv() function to master * add display_warnings boolean * add get_next_speed() function to base vehicle class * revert addition of get_next_speed * revert change to get_feasible_action call signature * change print syntax to be python3.5 compliant * add tests for new failsafe features * smooth default to True * rearrange raise exception for test coverage * moved simulation logging to the simulation kernel (#991) * add 210 edgestarts for backwards compatibility (#985) * fastforward PR 989 * fix typo * Requirements update (#963) * updated requirements.txt and environment.yml * Visualizer tests fixes * remove .func * move all miles_per_* rewards to instantaneous_mpg * update reward fns to new get_accel() method * made tests faster * some fixes to utils * change the column order, modify the pipeline to use SUMO emission file * write metadata to csv * change apply_acceleration smoothness setting * make save_csv return the file paths Co-authored-by: AboudyKreidieh Co-authored-by: liljonnystyle Co-authored-by: Kathy Jang Co-authored-by: Nathan Lichtlé Co-authored-by: akashvelu Co-authored-by: Brent Zhao --- environment.yml | 2 +- examples/exp_configs/non_rl/highway_single.py | 7 +- .../exp_configs/non_rl/i210_subnetwork.py | 7 +- .../rl/multiagent/multiagent_i210.py | 3 +- examples/train.py | 29 ++- flow/controllers/base_controller.py | 137 ++++++------- flow/controllers/car_following_models.py | 52 +++-- 
flow/core/experiment.py | 150 ++++++++------ flow/core/kernel/simulation/traci.py | 190 ++++++++++++++++-- flow/core/kernel/vehicle/base.py | 64 +----- flow/core/kernel/vehicle/traci.py | 77 +++---- flow/core/rewards.py | 61 +++++- flow/data_pipeline/data_pipeline.py | 47 ++--- flow/energy_models/base_energy.py | 2 + flow/energy_models/power_demand.py | 6 +- flow/energy_models/toyota_energy.py | 2 + flow/envs/base.py | 31 ++- flow/envs/multiagent/base.py | 14 +- flow/envs/multiagent/i210.py | 31 +-- flow/utils/rllib.py | 5 +- flow/visualize/i210_replay.py | 11 +- flow/visualize/time_space_diagram.py | 35 +++- flow/visualize/visualizer_rllib.py | 8 +- requirements.txt | 2 +- .../multi_agent/checkpoint_1/checkpoint-1 | Bin 10209 -> 20358 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 180 -> 210 bytes tests/data/rllib_data/multi_agent/params.json | 40 ++-- tests/data/rllib_data/multi_agent/params.pkl | Bin 17562 -> 21381 bytes .../single_agent/checkpoint_1/checkpoint-1 | Bin 582 -> 26194 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 180 -> 210 bytes .../data/rllib_data/single_agent/params.json | 28 ++- tests/data/rllib_data/single_agent/params.pkl | Bin 6414 -> 6687 bytes tests/fast_tests/test_controllers.py | 169 ++++++++++++++++ tests/fast_tests/test_examples.py | 48 ++++- .../fast_tests/test_experiment_base_class.py | 36 +++- tests/fast_tests/test_rewards.py | 26 --- 36 files changed, 860 insertions(+), 460 deletions(-) diff --git a/environment.yml b/environment.yml index 162bed533..ecbe5785f 100644 --- a/environment.yml +++ b/environment.yml @@ -9,7 +9,7 @@ dependencies: - path.py - python-dateutil==2.7.3 - pip>=18.0 - - tensorflow==1.14.0 + - tensorflow==1.15.2 - setuptools==41.0.0 - plotly==2.4.0 - gym==0.14.0 diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py index 8ec189140..ff486b3f5 100644 --- a/examples/exp_configs/non_rl/highway_single.py +++ b/examples/exp_configs/non_rl/highway_single.py @@ -11,7 +11,7 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams -from flow.core.rewards import miles_per_gallon, miles_per_megajoule +from flow.core.rewards import instantaneous_mpg from flow.core.params import SumoCarFollowingParams from flow.networks import HighwayNetwork from flow.envs import TestEnv @@ -147,10 +147,7 @@ env.k.vehicle.get_speed(env.k.vehicle.get_ids()))), "avg_outflow": lambda env: np.nan_to_num( env.k.vehicle.get_outflow_rate(120)), - "miles_per_megajoule": lambda env: np.nan_to_num( - miles_per_megajoule(env, env.k.vehicle.get_ids(), gain=1.0) - ), "miles_per_gallon": lambda env: np.nan_to_num( - miles_per_gallon(env, env.k.vehicle.get_ids(), gain=1.0) + instantaneous_mpg(env, env.k.vehicle.get_ids(), gain=1.0) ) } diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 73e49caef..9e415fc65 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -12,8 +12,7 @@ from flow.core.params import VehicleParams from flow.core.params import InitialConfig from flow.core.params import InFlows -from flow.core.rewards import miles_per_gallon -from flow.core.rewards import miles_per_megajoule +from flow.core.rewards import instantaneous_mpg from flow.networks import I210SubNetwork from flow.networks.i210_subnetwork import EDGES_DISTRIBUTION from flow.envs import TestEnv @@ -211,8 +210,6 @@ def valid_ids(env, 
veh_ids): env.k.vehicle.get_speed(valid_ids(env, env.k.vehicle.get_ids())))), "avg_outflow": lambda env: np.nan_to_num( env.k.vehicle.get_outflow_rate(120)), - "mpg": lambda env: miles_per_gallon( + "mpg": lambda env: instantaneous_mpg( env, valid_ids(env, env.k.vehicle.get_ids()), gain=1.0), - "mpj": lambda env: miles_per_megajoule( - env, valid_ids(env, env.k.vehicle.get_ids()), gain=1.0), } diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index a1c2e4f25..3a8207eb8 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -29,7 +29,8 @@ # Specify some configurable constants. # # =========================================================================== # -# whether to include the downstream slow-down edge in the network as well as a ghost cell at the upstream edge +# whether to include the downstream slow-down edge in the network as well as a +# ghost cell at the upstream edge WANT_BOUNDARY_CONDITIONS = True # whether to include vehicles on the on-ramp ON_RAMP = False diff --git a/examples/train.py b/examples/train.py index 3bdd751e6..f889ac9b6 100644 --- a/examples/train.py +++ b/examples/train.py @@ -17,7 +17,7 @@ import pytz from flow.core.util import ensure_dir -from flow.core.rewards import miles_per_gallon, miles_per_megajoule +from flow.core.rewards import instantaneous_mpg from flow.utils.registry import env_constructor from flow.utils.rllib import FlowParamsEncoder, get_flow_params from flow.utils.registry import make_create_env @@ -42,10 +42,6 @@ def parse_args(args): help='Name of the experiment configuration file, as located in ' 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') - parser.add_argument( - 'exp_title', type=str, - help='Name of experiment that results will be stored in') - # optional input parameters parser.add_argument( '--rl_trainer', type=str, default="rllib", @@ -76,7 +72,8 @@ def parse_args(args): parser.add_argument( '--rollout_size', type=int, default=1000, help='How many steps are in a training batch.') - parser.add_argument('--use_s3', action='store_true', help='If true, upload results to s3') + parser.add_argument('--use_s3', action='store_true', default=False, + help='If true, upload results to s3') parser.add_argument('--local_mode', action='store_true', default=False, help='If true only 1 CPU will be used') parser.add_argument('--render', action='store_true', default=False, @@ -84,6 +81,9 @@ def parse_args(args): parser.add_argument( '--checkpoint_path', type=str, default=None, help='Directory with checkpoint to restore training from.') + parser.add_argument( + '--exp_title', type=str, default=None, + help='Name of experiment that results will be stored in') parser.add_argument('--multi_node', action='store_true', help='Set to true if this will be run in cluster mode.' 
                             'Relevant for rllib')
@@ -147,7 +147,7 @@ def setup_exps_rllib(flow_params,
         number of CPUs to run the experiment over
     n_rollouts : int
         number of rollouts per training iteration
-    flags:
+    flags : TODO
         custom arguments
     policy_graphs : dict, optional
         TODO
@@ -243,8 +243,7 @@ def on_episode_start(info):
         episode.user_data["avg_speed"] = []
         episode.user_data["avg_speed_avs"] = []
         episode.user_data["avg_energy"] = []
-        episode.user_data["avg_mpg"] = []
-        episode.user_data["avg_mpj"] = []
+        episode.user_data["inst_mpg"] = []
         episode.user_data["num_cars"] = []
         episode.user_data["avg_accel_human"] = []
         episode.user_data["avg_accel_avs"] = []
@@ -275,8 +274,7 @@ def on_episode_step(info):
         av_speed = np.mean([speed for speed in env.k.vehicle.get_speed(rl_ids) if speed >= 0])
         if not np.isnan(av_speed):
             episode.user_data["avg_speed_avs"].append(av_speed)
-        episode.user_data["avg_mpg"].append(miles_per_gallon(env, veh_ids, gain=1.0))
-        episode.user_data["avg_mpj"].append(miles_per_megajoule(env, veh_ids, gain=1.0))
+        episode.user_data["inst_mpg"].append(instantaneous_mpg(env, veh_ids, gain=1.0))
         episode.user_data["num_cars"].append(len(env.k.vehicle.get_ids()))
         episode.user_data["avg_accel_human"].append(np.nan_to_num(np.mean(
             [np.abs((env.k.vehicle.get_speed(veh_id) - env.k.vehicle.get_previous_speed(veh_id))/env.sim_step) for
@@ -295,8 +293,7 @@ def on_episode_end(info):
         episode.custom_metrics["avg_speed_avs"] = avg_speed_avs
         episode.custom_metrics["avg_accel_avs"] = np.mean(episode.user_data["avg_accel_avs"])
         episode.custom_metrics["avg_energy_per_veh"] = np.mean(episode.user_data["avg_energy"])
-        episode.custom_metrics["avg_mpg_per_veh"] = np.mean(episode.user_data["avg_mpg"])
-        episode.custom_metrics["avg_mpj_per_veh"] = np.mean(episode.user_data["avg_mpj"])
+        episode.custom_metrics["avg_mpg_per_veh"] = np.mean(episode.user_data["inst_mpg"])
         episode.custom_metrics["num_cars"] = np.mean(episode.user_data["num_cars"])
 
     def on_train_result(info):
@@ -361,7 +358,7 @@ def trial_str_creator(trial):
         ray.init()
     exp_dict = {
         "run_or_experiment": alg_run,
-        "name": flags.exp_title,
+        "name": flags.exp_title or flow_params['exp_tag'],
        "config": config,
         "checkpoint_freq": flags.checkpoint_freq,
         "checkpoint_at_end": True,
@@ -373,9 +370,9 @@ def trial_str_creator(trial):
     }
     date = datetime.now(tz=pytz.utc)
     date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y")
-    s3_string = "s3://i210.experiments/i210/" \
-                + date + '/' + flags.exp_title
     if flags.use_s3:
+        s3_string = "s3://i210.experiments/i210/" \
+                    + date + '/' + flags.exp_title
         exp_dict['upload_dir'] = s3_string
 
     tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False)
diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py
index 9806413e0..a657bf87c 100755
--- a/flow/controllers/base_controller.py
+++ b/flow/controllers/base_controller.py
@@ -38,6 +38,8 @@ class BaseController(metaclass=ABCMeta):
         List of failsafes which can be "instantaneous", "safe_velocity",
         "feasible_accel", or "obey_speed_limit". The order of applying the
         failsafes will be based on the order in the list.
+    display_warnings : bool
+        Flag for toggling on/off printing failsafe warnings to screen.
     noise : double
         variance of the gaussian from which to sample a noisy acceleration
     """
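The fail_safe list and the new display_warnings flag documented above reach BaseController through the controller parameters of an experiment config. A minimal sketch (the controller gains and failsafe choices here are illustrative, not values set by this patch):

    from flow.controllers.car_following_models import IDMController
    from flow.core.params import SumoCarFollowingParams, VehicleParams

    vehicles = VehicleParams()
    vehicles.add(
        "human",
        num_vehicles=10,
        acceleration_controller=(IDMController, {
            "a": 1.3, "b": 2.0, "noise": 0.3,
            # failsafes run in list order: clip to a feasible accel first,
            # then clip again so the speed limit is not exceeded
            "fail_safe": ["feasible_accel", "obey_speed_limit"],
            "display_warnings": True,  # warnings are now opt-in
        }),
        car_following_params=SumoCarFollowingParams(speed_mode="obey_safe_speed"),
    )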
@@ -47,6 +49,7 @@ def __init__(self,
                  car_following_params,
                  delay=0,
                  fail_safe=None,
+                 display_warnings=False,
                  noise=0):
         """Instantiate the base class for acceleration behavior."""
         self.veh_id = veh_id
@@ -59,17 +62,27 @@ def __init__(self,
 
         # longitudinal failsafe used by the vehicle
         if isinstance(fail_safe, str):
-            self.fail_safe = [fail_safe]
+            failsafe_list = [fail_safe]
         elif isinstance(fail_safe, list) or fail_safe is None:
-            self.fail_safe = fail_safe
+            failsafe_list = fail_safe
         else:
-            print(
-                "==========================================================\n"
-                "WARNING: fail_safe should be string or list of strings. \n"
-                "Set fal_safe to None\n"
-                "==========================================================\n"
-            )
-            self.fail_safe = None
+            raise ValueError("fail_safe should be a string or a list of strings.")
+
+        failsafe_map = {
+            'instantaneous': self.get_safe_action_instantaneous,
+            'safe_velocity': self.get_safe_velocity_action,
+            'feasible_accel': lambda _, accel: self.get_feasible_action(accel),
+            'obey_speed_limit': self.get_obey_speed_limit_action
+        }
+        self.failsafes = []
+        if failsafe_list:
+            for check in failsafe_list:
+                if check in failsafe_map:
+                    self.failsafes.append(failsafe_map.get(check))
+                else:
+                    raise ValueError('{} is not a valid failsafe.'.format(check))
+
+        self.display_warnings = display_warnings
 
         self.max_accel = car_following_params.controller_params['accel']
         # max deaccel should always be a positive
@@ -103,11 +116,11 @@ def get_action(self, env):
         float
             the modified form of the acceleration
         """
-        # clear the current stored accel_no_noise_no_failsafe of this vehicle None
-        env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, None)
-        env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, None)
-        env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, None)
-        env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, None)
+        # clear the current stored accels of this vehicle to None
+        env.k.vehicle.update_accel(self.veh_id, None, noise=False, failsafe=False)
+        env.k.vehicle.update_accel(self.veh_id, None, noise=False, failsafe=True)
+        env.k.vehicle.update_accel(self.veh_id, None, noise=True, failsafe=False)
+        env.k.vehicle.update_accel(self.veh_id, None, noise=True, failsafe=True)
 
         # this is to avoid abrupt decelerations when a vehicle has just entered
         # a network and its data is still not subscribed
@@ -128,45 +141,24 @@ def get_action(self, env):
 
         # store the acceleration without noise to each vehicle
         # run fail safe if requested
-        env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel)
+        env.k.vehicle.update_accel(self.veh_id, accel, noise=False, failsafe=False)
         accel_no_noise_with_failsafe = accel
 
-        if self.fail_safe is not None:
-            for check in self.fail_safe:
-                if check == 'instantaneous':
-                    accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(
-                        env, accel_no_noise_with_failsafe)
-                elif check == 'safe_velocity':
-                    accel_no_noise_with_failsafe = self.get_safe_velocity_action(
-                        env, accel_no_noise_with_failsafe)
-                elif check == 'feasible_accel':
-                    accel_no_noise_with_failsafe = self.get_feasible_action(
-                        accel_no_noise_with_failsafe)
-                elif check == 'obey_speed_limit':
-                    accel_no_noise_with_failsafe = self.get_obey_speed_limit_action(
-                        env, accel_no_noise_with_failsafe)
-
-        env.k.vehicle.update_accel_no_noise_with_failsafe(
-            self.veh_id, accel_no_noise_with_failsafe)
+        for failsafe 
in self.failsafes: + accel_no_noise_with_failsafe = failsafe(env, accel_no_noise_with_failsafe) + + env.k.vehicle.update_accel(self.veh_id, accel_no_noise_with_failsafe, noise=False, failsafe=True) # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) - env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, accel) + env.k.vehicle.update_accel(self.veh_id, accel, noise=True, failsafe=False) # run the fail-safes, if requested - if self.fail_safe is not None: - for check in self.fail_safe: - if check == 'instantaneous': - accel = self.get_safe_action_instantaneous(env, accel) - elif check == 'safe_velocity': - accel = self.get_safe_velocity_action(env, accel) - elif check == 'feasible_accel': - accel = self.get_feasible_action(accel) - elif check == 'obey_speed_limit': - accel = self.get_obey_speed_limit_action(env, accel) - - env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) + for failsafe in self.failsafes: + accel = failsafe(env, accel) + + env.k.vehicle.update_accel(self.veh_id, accel, noise=True, failsafe=True) return accel def get_safe_action_instantaneous(self, env, action): @@ -212,11 +204,12 @@ def get_safe_action_instantaneous(self, env, action): # if the vehicle will crash into the vehicle ahead of it in the # next time step (assuming the vehicle ahead of it is not # moving), then stop immediately - print( - "=====================================\n" - "Vehicle {} is about to crash. Instantaneous acceleration " - "clipping applied.\n" - "=====================================".format(self.veh_id)) + if self.display_warnings: + print( + "=====================================\n" + "Vehicle {} is about to crash. Instantaneous acceleration " + "clipping applied.\n" + "=====================================".format(self.veh_id)) return -this_vel / sim_step else: @@ -296,11 +289,12 @@ def safe_velocity(self, env): # edge_speed_limit = env.k.network.speed_limit(this_edge) if this_vel > v_safe: - print( - "=====================================\n" - "Speed of vehicle {} is greater than safe speed. Safe velocity " - "clipping applied.\n" - "=====================================".format(self.veh_id)) + if self.display_warnings: + print( + "=====================================\n" + "Speed of vehicle {} is greater than safe speed. Safe velocity " + "clipping applied.\n" + "=====================================".format(self.veh_id)) return v_safe @@ -333,11 +327,12 @@ def get_obey_speed_limit_action(self, env, action): if this_vel + action * sim_step > edge_speed_limit: if edge_speed_limit > 0: - print( - "=====================================\n" - "Speed of vehicle {} is greater than speed limit. Obey " - "speed limit clipping applied.\n" - "=====================================".format(self.veh_id)) + if self.display_warnings: + print( + "=====================================\n" + "Speed of vehicle {} is greater than speed limit. Obey " + "speed limit clipping applied.\n" + "=====================================".format(self.veh_id)) return (edge_speed_limit - this_vel) / sim_step else: return -this_vel / sim_step @@ -365,19 +360,21 @@ def get_feasible_action(self, action): if action > self.max_accel: action = self.max_accel - print( - "=====================================\n" - "Acceleration of vehicle {} is greater than the max " - "acceleration. 
Feasible acceleration clipping applied.\n" - "=====================================".format(self.veh_id)) + if self.display_warnings: + print( + "=====================================\n" + "Acceleration of vehicle {} is greater than the max " + "acceleration. Feasible acceleration clipping applied.\n" + "=====================================".format(self.veh_id)) if action < -self.max_deaccel: action = -self.max_deaccel - print( - "=====================================\n" - "Deceleration of vehicle {} is greater than the max " - "deceleration. Feasible acceleration clipping applied.\n" - "=====================================".format(self.veh_id)) + if self.display_warnings: + print( + "=====================================\n" + "Deceleration of vehicle {} is greater than the max " + "deceleration. Feasible acceleration clipping applied.\n" + "=====================================".format(self.veh_id)) return action diff --git a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py index 280c94d37..2840e291e 100755 --- a/flow/controllers/car_following_models.py +++ b/flow/controllers/car_following_models.py @@ -56,7 +56,8 @@ def __init__(self, v_des=8, time_delay=0.0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=False): """Instantiate a CFM controller.""" BaseController.__init__( self, @@ -64,7 +65,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id self.k_d = k_d @@ -132,7 +135,8 @@ def __init__(self, v_des=8, time_delay=0.0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=False): """Instantiate a Bilateral car-following model controller.""" BaseController.__init__( self, @@ -140,7 +144,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id self.k_d = k_d @@ -212,7 +218,8 @@ def __init__(self, a=0, time_delay=0.0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=False): """Instantiate a Linear Adaptive Cruise controller.""" BaseController.__init__( self, @@ -220,7 +227,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id self.k_1 = k_1 @@ -289,7 +298,8 @@ def __init__(self, v_max=30, time_delay=0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=False): """Instantiate an Optimal Vehicle Model controller.""" BaseController.__init__( self, @@ -297,7 +307,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id self.v_max = v_max self.alpha = alpha @@ -364,7 +376,8 @@ def __init__(self, h_st=5, time_delay=0.0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=False): """Instantiate a Linear OVM controller.""" BaseController.__init__( self, @@ -372,7 +385,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id # 4.8*1.85 for case I, 3.8*1.85 for case II, per Nakayama self.v_max = v_max @@ -445,6 +460,7 @@ def __init__(self, time_delay=0.0, noise=0, fail_safe=None, + display_warnings=False, car_following_params=None): """Instantiate an IDM controller.""" BaseController.__init__( @@ 
-453,7 +469,9 @@ def __init__(self,
             car_following_params,
             delay=time_delay,
             fail_safe=fail_safe,
-            noise=noise)
+            noise=noise,
+            display_warnings=display_warnings,
+        )
         self.v0 = v0
         self.T = T
         self.a = a
@@ -546,7 +564,8 @@ def __init__(self,
                  tau=1,
                  delay=0,
                  noise=0,
-                 fail_safe=None):
+                 fail_safe=None,
+                 display_warnings=False):
         """Instantiate a Gipps' controller."""
         BaseController.__init__(
             self,
@@ -554,8 +573,9 @@ def __init__(self,
             car_following_params,
             delay=delay,
             fail_safe=fail_safe,
-            noise=noise
-        )
+            noise=noise,
+            display_warnings=display_warnings,
+        )
 
         self.v_desired = v0
         self.acc = acc
@@ -627,7 +647,8 @@ def __init__(self,
                  want_max_accel=False,
                  time_delay=0,
                  noise=0,
-                 fail_safe=None):
+                 fail_safe=None,
+                 display_warnings=False):
         """Instantiate a Bando controller."""
         BaseController.__init__(
             self,
@@ -636,6 +657,7 @@ def __init__(self,
             delay=time_delay,
             fail_safe=fail_safe,
             noise=noise,
+            display_warnings=display_warnings,
         )
         self.veh_id = veh_id
         self.v_max = v_max
diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index f29c547ab..b9ce3ac0e 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -1,15 +1,19 @@
 """Contains an experiment class for running simulations."""
 from flow.utils.registry import make_create_env
-from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration
+from flow.data_pipeline.data_pipeline import upload_to_s3
+from flow.data_pipeline.data_pipeline import get_configuration
+from flow.data_pipeline.data_pipeline import generate_trajectory_table
+from flow.data_pipeline.data_pipeline import write_dict_to_csv
 from flow.data_pipeline.leaderboard_utils import network_name_translate
 from flow.visualize.time_space_diagram import tsd_main
 from collections import defaultdict
-from datetime import datetime, timezone
+from datetime import timezone
+from datetime import datetime
 import logging
 import time
-import os
 import numpy as np
 import uuid
+import os
 
 
 class Experiment:
@@ -21,8 +25,8 @@ class Experiment:
     the actions of RL agents in the network, type the following:
 
         >>> from flow.envs import Env
-        {'network': >>> self.env.network.__class__} = dict(...)  # see the examples in exp_config
-        {'network': >>> exp = Experiment(self.env.network.__class__})  # for some experiment configuration
+        >>> flow_params = dict(...)  # see the examples in exp_config
+        >>> exp = Experiment(flow_params)  # for some experiment configuration
        >>> exp.run(num_runs=1)
 
     If you wish to specify the actions of RL agents in the network, this may be
@@ -40,7 +44,7 @@ class can generate csv files from emission files produced by sumo. These
     ``emission_path`` attribute in ``SimParams`` to some path.
 
         >>> from flow.core.params import SimParams
-        {'network': >>> self.env.network.__class__}['sim'] = SimParams(emission_path="./data")
+        >>> flow_params['sim'] = SimParams(emission_path="./data")
 
     Once you have included this in your environment, run your Experiment object
     as follows:
@@ -89,7 +93,13 @@ def __init__(self, flow_params, custom_callables=None):
 
         logging.info("Initializing environment.")
 
-    def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only_query="", is_baseline=False):
+    def run(self,
+            num_runs,
+            rl_actions=None,
+            convert_to_csv=False,
+            to_aws=None,
+            only_query="",
+            is_baseline=False):
         """Run the given network for a set number of runs.
 
Parameters @@ -151,35 +161,45 @@ def rl_actions(*_): t = time.time() times = [] - # data pipeline - extra_info = defaultdict(lambda: []) - source_id = 'flow_{}'.format(uuid.uuid4().hex) - metadata = defaultdict(lambda: []) - # collect current time - cur_datetime = datetime.now(timezone.utc) - cur_date = cur_datetime.date().isoformat() - cur_time = cur_datetime.time().isoformat() - # collecting information for metadata table - metadata['source_id'].append(source_id) - metadata['submission_time'].append(cur_time) - metadata['network'].append(network_name_translate(self.env.network.name.split('_20')[0])) - metadata['is_baseline'].append(str(is_baseline)) - if to_aws: - name, strategy = get_configuration() - metadata['submitter_name'].append(name) - metadata['strategy'].append(strategy) - if convert_to_csv and self.env.simulator == "traci": - dir_path = self.env.sim_params.emission_path - trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) + # data pipeline + source_id = 'flow_{}'.format(uuid.uuid4().hex) + metadata = defaultdict(lambda: []) + + # collect current time + cur_datetime = datetime.now(timezone.utc) + cur_date = cur_datetime.date().isoformat() + cur_time = cur_datetime.time().isoformat() + if to_aws: + # collecting information for metadata table + metadata['source_id'].append(source_id) + metadata['submission_time'].append(cur_time) + metadata['network'].append( + network_name_translate(self.env.network.name.split('_20')[0])) + metadata['is_baseline'].append(str(is_baseline)) + name, strategy = get_configuration() + metadata['submitter_name'].append(name) + metadata['strategy'].append(strategy) + + # emission-specific parameters + dir_path = self.env.sim_params.emission_path + trajectory_table_path = os.path.join( + dir_path, '{}.csv'.format(source_id)) + metadata_table_path = os.path.join( + dir_path, '{}_METADATA.csv'.format(source_id)) + else: + source_id = None + trajectory_table_path = None + metadata_table_path = None + metadata = None + cur_date = None + + emission_files = [] for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} - run_id = "run_{}".format(i) - self.env.pipeline_params = (extra_info, source_id, run_id) state = self.env.reset() for j in range(num_steps): t0 = time.time() @@ -192,19 +212,11 @@ def rl_actions(*_): vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward - # collect additional information for the data pipeline - get_extra_info(self.env.k.vehicle, extra_info, veh_ids, source_id, run_id) - - # write to disk every 100 steps - if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0: - write_dict_to_csv(trajectory_table_path, extra_info, not j) - extra_info.clear() - # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) - if type(done) is dict and done['__all__'] or type(done) is not dict and done: + if type(done) is dict and done['__all__'] or done is True: break # Store the information from the run in info_dict. @@ -217,6 +229,11 @@ def rl_actions(*_): print("Round {0}, return: {1}".format(i, ret)) + # Save emission data at the end of every rollout. This is skipped + # by the internal method if no emission path was specified. 
+            if self.env.simulator == "traci":
+                emission_files.append(self.env.k.simulation.save_emission(run_id=i))
+
         # Print the averages/std for all variables in the info_dict.
         for key in info_dict.keys():
             print("Average, std {}: {}, {}".format(
@@ -226,29 +243,37 @@ def rl_actions(*_):
         print("steps/second:", np.mean(times))
         self.env.terminate()
 
-        if convert_to_csv and self.env.simulator == "traci":
-            # wait a short period of time to ensure the xml file is readable
-            time.sleep(0.1)
-
-            write_dict_to_csv(trajectory_table_path, extra_info)
+        if to_aws:
+            generate_trajectory_table(emission_files, trajectory_table_path, source_id)
             write_dict_to_csv(metadata_table_path, metadata, True)
-
-            if to_aws:
-                tsd_main(trajectory_table_path,
-                         {'network': self.env.network.__class__},
-                         min_speed=0,
-                         max_speed=10,
-                         start=self.env.env_params.warmup_steps)
-                upload_to_s3('circles.data.pipeline',
-                             'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date,
                                                                                                            source_id),
-                             metadata_table_path)
-                upload_to_s3('circles.data.pipeline',
-                             'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id),
-                             trajectory_table_path,
-                             {'network': metadata['network'][0], 'is_baseline': metadata['is_baseline'][0]})
-                upload_to_s3('circles.data.pipeline',
-                             'time_space_diagram/date={0}/partition_name={1}/{1}.png'.format(cur_date, source_id),
-                             trajectory_table_path.replace('csv', 'png'))
+            tsd_main(
+                trajectory_table_path,
+                {'network': self.env.network.__class__},
+                min_speed=0,
+                max_speed=10,
+                start=self.env.env_params.warmup_steps
+            )
+            upload_to_s3(
+                'circles.data.pipeline',
+                'metadata_table/date={0}/partition_name={1}_METADATA/'
+                '{1}_METADATA.csv'.format(cur_date, source_id),
+                metadata_table_path
+            )
+            upload_to_s3(
+                'circles.data.pipeline',
+                'fact_vehicle_trace/date={0}/partition_name={1}/'
+                '{1}.csv'.format(cur_date, source_id),
+                trajectory_table_path,
+                {'network': metadata['network'][0],
+                 'is_baseline': metadata['is_baseline'][0]}
+            )
+            upload_to_s3(
+                'circles.data.pipeline',
+                'time_space_diagram/date={0}/partition_name={1}/'
+                '{1}.png'.format(cur_date, source_id),
+                trajectory_table_path.replace('csv', 'png')
+            )
+            os.remove(trajectory_table_path)
+            os.remove(metadata_table_path)
 
         return info_dict
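The metadata table assembled in run() above is a dict of one-element columns that write_dict_to_csv() turns into a single-row csv. A small sketch of the expected shape (every value below is hypothetical):

    from collections import defaultdict

    from flow.data_pipeline.data_pipeline import write_dict_to_csv

    metadata = defaultdict(list)
    metadata['source_id'].append('flow_deadbeef')      # hypothetical source id
    metadata['submission_time'].append('19:43:48')
    metadata['network'].append('I-210')                # hypothetical network name
    metadata['is_baseline'].append('False')
    metadata['submitter_name'].append('jane')          # hypothetical submitter
    metadata['strategy'].append('IDM baseline')        # hypothetical strategy
    write_dict_to_csv('/tmp/flow_deadbeef_METADATA.csv', metadata, True)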
diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py
index ed21c924b..79fc12eaa 100644
--- a/flow/core/kernel/simulation/traci.py
+++ b/flow/core/kernel/simulation/traci.py
@@ -11,6 +11,7 @@
 import logging
 import subprocess
 import signal
+import csv
 
 # Number of retries on restarting SUMO before giving up
 RETRIES_ON_ERROR = 10
@@ -20,6 +21,32 @@ class TraCISimulation(KernelSimulation):
     """Sumo simulation kernel.
 
     Extends flow.core.kernel.simulation.KernelSimulation
+
+    Attributes
+    ----------
+    sumo_proc : subprocess.Popen
+        contains the subprocess.Popen instance used to start traci
+    sim_step : float
+        seconds per simulation step
+    emission_path : str or None
+        Path to the folder in which to create the emissions output. Emissions
+        output is not generated if this value is not specified
+    time : float
+        used to internally keep track of the simulation time
+    stored_data : dict < str, dict < float, dict < str, Any > > >
+        a dict object used to store additional data if an emission file is
+        provided. The first key corresponds to the name of the vehicle, the
+        second corresponds to the time the sample was issued, and the final
+        keys represent the additional data stored at every given time for every
+        vehicle, and consists of the following keys:
+
+        * acceleration (no noise): the accelerations issued to the vehicle,
+          excluding noise
+        * acceleration (requested): the requested acceleration by the vehicle,
+          including noise
+        * acceleration (actual): the actual acceleration by the vehicle,
+          collected by computing the difference between the speeds of the
+          vehicle and dividing it by the sim_step term
     """
 
     def __init__(self, master_kernel):
@@ -32,8 +59,12 @@ def __init__(self, master_kernel):
             sub-kernels)
         """
         KernelSimulation.__init__(self, master_kernel)
-        # contains the subprocess.Popen instance used to start traci
+
         self.sumo_proc = None
+        self.sim_step = None
+        self.emission_path = None
+        self.time = 0
+        self.stored_data = dict()
 
     def pass_api(self, kernel_api):
         """See parent class.
@@ -61,10 +92,61 @@ def simulation_step(self):
 
     def update(self, reset):
         """See parent class."""
-        pass
+        if reset:
+            self.time = 0
+        else:
+            self.time += self.sim_step
+
+        # Collect the additional data to store in the emission file.
+        if self.emission_path is not None:
+            kv = self.master_kernel.vehicle
+            for veh_id in self.master_kernel.vehicle.get_ids():
+                t = round(self.time, 2)
+
+                # some miscellaneous pre-processing
+                position = kv.get_2d_position(veh_id)
+
+                # Make sure dictionaries corresponding to the vehicle and
+                # time are available.
+                if veh_id not in self.stored_data.keys():
+                    self.stored_data[veh_id] = dict()
+                if t not in self.stored_data[veh_id].keys():
+                    self.stored_data[veh_id][t] = dict()
+
+                # Add the speed, position, and lane data.
+                self.stored_data[veh_id][t].update({
+                    "speed": kv.get_speed(veh_id),
+                    "lane_number": kv.get_lane(veh_id),
+                    "edge_id": kv.get_edge(veh_id),
+                    "relative_position": kv.get_position(veh_id),
+                    "x": position[0],
+                    "y": position[1],
+                    "headway": kv.get_headway(veh_id),
+                    "leader_id": kv.get_leader(veh_id),
+                    "follower_id": kv.get_follower(veh_id),
+                    "leader_rel_speed":
+                        kv.get_speed(kv.get_leader(veh_id))
+                        - kv.get_speed(veh_id),
+                    "target_accel_with_noise_with_failsafe":
+                        kv.get_accel(veh_id, noise=True, failsafe=True),
+                    "target_accel_no_noise_no_failsafe":
+                        kv.get_accel(veh_id, noise=False, failsafe=False),
+                    "target_accel_with_noise_no_failsafe":
+                        kv.get_accel(veh_id, noise=True, failsafe=False),
+                    "target_accel_no_noise_with_failsafe":
+                        kv.get_accel(veh_id, noise=False, failsafe=True),
+                    "realized_accel":
+                        kv.get_realized_accel(veh_id),
+                    "road_grade": kv.get_road_grade(veh_id),
+                    "distance": kv.get_distance(veh_id),
+                })
 
     def close(self):
         """See parent class."""
+        # Save the emission data to a csv.
+        if self.emission_path is not None:
+            self.save_emission()
+
         self.kernel_api.close()
 
     def check_collision(self):
@@ -74,10 +156,24 @@ def check_collision(self):
 
     def start_simulation(self, network, sim_params):
         """Start a sumo simulation instance.
 
-        This method uses the configuration files created by the network class
-        to initialize a sumo instance. Also initializes a traci connection to
-        interface with sumo from Python.
+        This method performs the following operations:
+
+        1. It collects the simulation step size and the emission path
+           information. If an emission path is specified, it ensures that the
+           path exists.
+        2. It also uses the configuration files created by the network class to
+           initialize a sumo instance.
+        3. Finally, it initializes a traci connection to interface with sumo
+           from Python and returns the connection.
         """
+        # Save the simulation step size (for later use).
+        self.sim_step = sim_params.sim_step
+
+        # Update the emission path term.
+        self.emission_path = sim_params.emission_path
+        if self.emission_path is not None:
+            ensure_dir(self.emission_path)
+
         error = None
         for _ in range(RETRIES_ON_ERROR):
             try:
@@ -113,17 +209,6 @@ def start_simulation(self, network, sim_params):
         sumo_call.append("--lateral-resolution")
         sumo_call.append(str(sim_params.lateral_resolution))
 
-        # add the emission path to the sumo command (if requested)
-        if sim_params.emission_path is not None:
-            ensure_dir(sim_params.emission_path)
-            emission_out = os.path.join(
-                sim_params.emission_path,
-                "{0}-emission.xml".format(network.name))
-            sumo_call.append("--emission-output")
-            sumo_call.append(emission_out)
-        else:
-            emission_out = None
-
         if sim_params.overtake_right:
             sumo_call.append("--lanechange.overtake-right")
             sumo_call.append("true")
@@ -150,7 +235,7 @@ def start_simulation(self, network, sim_params):
 
         if sim_params.num_clients > 1:
             logging.info(" Num clients are" + str(sim_params.num_clients))
-        logging.debug(" Emission file: " + str(emission_out))
+        logging.debug(" Emission file: " + str(self.emission_path))
         logging.debug(" Step length: " + str(sim_params.sim_step))
 
         # Opening the I/O thread to SUMO
@@ -184,3 +269,73 @@ def teardown_sumo(self):
             os.killpg(self.sumo_proc.pid, signal.SIGTERM)
         except Exception as e:
             print("Error during teardown: {}".format(e))
+
+    def save_emission(self, run_id=0):
+        """Save any collected emission data to a csv file.
+
+        If no data was collected, nothing happens. Moreover, any data stored
+        internally by this class is cleared once it has been written out.
+
+        Parameters
+        ----------
+        run_id : int
+            the rollout number, appended to the name of the emission file. Used
+            to store emission files from multiple rollouts run sequentially.
+
+        Returns
+        -------
+        emission_file_path : str
+            the relative path of the emission file
+        """
+        # If there is no stored data, ignore this operation. This is to ensure
+        # that data isn't deleted if the operation is called twice.
+        if len(self.stored_data) == 0:
+            return
+
+        # Get a csv name for the emission file.
+        name = "{}-{}_emission.csv".format(
+            self.master_kernel.network.network.name, run_id)
+
+        # The name of all stored data-points (excluding id and time)
+        stored_ids = [
+            "x",
+            "y",
+            "speed",
+            "headway",
+            "leader_id",
+            "follower_id",
+            "leader_rel_speed",
+            "target_accel_with_noise_with_failsafe",
+            "target_accel_no_noise_no_failsafe",
+            "target_accel_with_noise_no_failsafe",
+            "target_accel_no_noise_with_failsafe",
+            "realized_accel",
+            "road_grade",
+            "edge_id",
+            "lane_number",
+            "distance",
+            "relative_position",
+        ]
+
+        # Update the stored data to push to the csv file.
+        final_data = {"time": [], "id": []}
+        final_data.update({key: [] for key in stored_ids})
+
+        for veh_id in self.stored_data.keys():
+            for t in self.stored_data[veh_id].keys():
+                final_data['time'].append(t)
+                final_data['id'].append(veh_id)
+                for key in stored_ids:
+                    final_data[key].append(self.stored_data[veh_id][t][key])
+
+        emission_file_path = os.path.join(self.emission_path, name)
+        with open(emission_file_path, "w") as f:
+            writer = csv.writer(f, delimiter=',')
+            writer.writerow(final_data.keys())
+            writer.writerows(zip(*final_data.values()))
+
+        # Clear all memory from the stored data. This is useful if this
+        # function is called in between resets.
+        self.stored_data.clear()
+
+        return emission_file_path
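save_emission() above flattens the stored_data nesting, stored_data[veh_id][time][metric], into one csv row per (time, id) pair. A toy illustration of that transform, with hypothetical values and only two of the stored columns:

    import csv

    stored_data = {
        "human_0": {
            0.5: {"speed": 24.1, "edge_id": "119257914"},
            1.0: {"speed": 24.3, "edge_id": "119257914"},
        },
    }

    columns = ["time", "id", "speed", "edge_id"]
    with open("demo_emission.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(columns)
        for veh_id, samples in stored_data.items():
            for t in sorted(samples):
                writer.writerow([t, veh_id] + [samples[t][k] for k in columns[2:]])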
diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py
index 843ec7eb6..226528259 100644
--- a/flow/core/kernel/vehicle/base.py
+++ b/flow/core/kernel/vehicle/base.py
@@ -128,25 +128,13 @@ def remove(self, veh_id):
         pass
 
     @abstractmethod
-    def apply_acceleration(self, veh_id, acc):
+    def apply_acceleration(self, veh_id, acc, smooth_duration=0):
         """Apply the acceleration requested by a vehicle in the simulator.
 
-        In SUMO, this function applies slowDown method which applies smoothing.
-
-        Parameters
-        ----------
-        veh_id : str or list of str
-            list of vehicle identifiers
-        acc : float or array_like
-            requested accelerations from the vehicles
-        """
-        raise NotImplementedError
-
-    def apply_acceleration_not_smooth(self, veh_id, acc):
-        """Apply the acceleration requested by a vehicle in the simulator.
-
-        In SUMO, this function applies setSpeed method which doesn't apply
-        smoothing.
+        In SUMO, this function applies setSpeed when smooth_duration=0;
+        otherwise it applies the slowDown method, which smooths the
+        acceleration over smooth_duration seconds. For more information, see:
+        https://sumo.dlr.de/pydoc/traci._vehicle.html#VehicleDomain-slowDown
 
         Parameters
         ----------
@@ -154,6 +142,8 @@ def apply_acceleration(self, veh_id, acc, smooth_duration=0):
             list of vehicle identifiers
         acc : float or array_like
             requested accelerations from the vehicles
+        smooth_duration : float
+            duration in seconds over which acceleration should be smoothly applied, default: 0
         """
         pass
 
@@ -373,6 +363,7 @@ def get_energy_model(self, veh_id, error=""):
             vehicle id, or list of vehicle ids
         error : str
             value that is returned if the vehicle is not found
+
         Returns
         -------
         subclass of BaseEnergyModel
@@ -785,23 +776,8 @@ def get_accel(self, veh_id):
         pass
 
     @abstractmethod
-    def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe):
-        """Update stored acceleration without noise without failsafe of vehicle with veh_id."""
-        pass
-
-    @abstractmethod
-    def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe):
-        """Update stored acceleration without noise with failsafe of vehicle with veh_id."""
-        raise NotImplementedError
-
-    @abstractmethod
-    def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe):
-        """Update stored acceleration with noise without failsafe of vehicle with veh_id."""
-        pass
-
-    @abstractmethod
-    def update_accel_with_noise_with_failsafe(self, veh_id, accel_with_noise_with_failsafe):
-        """Update stored acceleration with noise with failsafe of vehicle with veh_id."""
+    def update_accel(self, veh_id, accel, noise=True, failsafe=True):
+        """Update stored acceleration of vehicle with veh_id."""
         pass
 
     @abstractmethod
@@ -809,26 +785,6 @@ def get_2d_position(self, veh_id, error=-1001):
         """Return (x, y) position of vehicle with veh_id."""
         pass
 
-    @abstractmethod
-    def get_accel_no_noise_no_failsafe(self, veh_id):
-        """Return the acceleration without noise without failsafe of vehicle with veh_id."""
-        pass
-
-    @abstractmethod
-    def get_accel_no_noise_with_failsafe(self, veh_id):
-        """Return the acceleration without noise with failsafe of vehicle with veh_id."""
-        pass
-
-    @abstractmethod
-    def get_accel_with_noise_no_failsafe(self, veh_id):
-        """Return the acceleration with noise without failsafe of vehicle with veh_id."""
-        pass
-
-    @abstractmethod
-    def get_accel_with_noise_with_failsafe(self, veh_id):
-        
"""Return the acceleration with noise with failsafe of vehicle with veh_id.""" - pass - @abstractmethod def get_realized_accel(self, veh_id): """Return the acceleration that the vehicle actually make.""" diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index ef401d180..8f76b40d0 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -293,10 +293,8 @@ def _add_departed(self, veh_id, veh_type): self.__vehicles[veh_id]["type"] = veh_type # specify energy model - energy_model = \ - self.type_parameters[veh_type]["energy_model"] - self.__vehicles[veh_id]["energy_model"] = \ - energy_model[0](veh_id, **energy_model[1]) + self.__vehicles[veh_id]["energy_model"] = self.type_parameters[ + veh_type]["energy_model"]() car_following_params = \ self.type_parameters[veh_type]["car_following_params"] @@ -971,7 +969,7 @@ def _prev_edge_followers(self, veh_id, edge_dict, lane, num_edges): return tailway, follower - def apply_acceleration(self, veh_ids, acc): + def apply_acceleration(self, veh_ids, acc, smooth_duration=0): """See parent class.""" # to handle the case of a single vehicle if type(veh_ids) == str: @@ -983,7 +981,10 @@ def apply_acceleration(self, veh_ids, acc): self.__vehicles[vid]["accel"] = acc[i] this_vel = self.get_speed(vid) next_vel = max([this_vel + acc[i] * self.sim_step, 0]) - self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3) + if smooth_duration: + self.kernel_api.vehicle.slowDown(vid, next_vel, smooth_duration) + else: + self.kernel_api.vehicle.setSpeed(vid, next_vel) def apply_acceleration_not_smooth(self, veh_ids, acc): """See parent class.""" @@ -1158,52 +1159,34 @@ def set_max_speed(self, veh_id, max_speed): """See parent class.""" self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) - # add for data pipeline - def get_accel(self, veh_id): + def get_accel(self, veh_id, noise=True, failsafe=True): """See parent class.""" - if "accel" not in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["accel"] = None - return self.__vehicles[veh_id]["accel"] - - def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): - """See parent class.""" - self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = accel_no_noise_no_failsafe - - def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): - """See parent class.""" - self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = accel_no_noise_with_failsafe - - def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): - """See parent class.""" - self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = accel_with_noise_no_failsafe - - def update_accel_with_noise_with_failsafe(self, veh_id, accel_with_noise_with_failsafe): - """See parent class.""" - self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = accel_with_noise_with_failsafe - - def get_accel_no_noise_no_failsafe(self, veh_id): - """See parent class.""" - if "accel_no_noise_no_failsafe" not in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None - return self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] + metric_name = 'accel' + if noise: + metric_name += '_with_noise' + else: + metric_name += '_no_noise' + if failsafe: + metric_name += '_with_failsafe' + else: + metric_name += '_no_failsafe' - def get_accel_no_noise_with_failsafe(self, veh_id): - """See parent class.""" - if "accel_no_noise_with_failsafe" not in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] 
= None
-        return self.__vehicles[veh_id]["accel_no_noise_with_failsafe"]
+        accel = self.__vehicles[veh_id].get(metric_name, None)
+        return accel if accel is not None else self.get_realized_accel(veh_id)
 
-    def get_accel_with_noise_no_failsafe(self, veh_id):
+    def update_accel(self, veh_id, accel, noise=True, failsafe=True):
         """See parent class."""
-        if "accel_with_noise_no_failsafe" not in self.__vehicles[veh_id]:
-            self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None
-        return self.__vehicles[veh_id]["accel_with_noise_no_failsafe"]
+        metric_name = 'accel'
+        if noise:
+            metric_name += '_with_noise'
+        else:
+            metric_name += '_no_noise'
+        if failsafe:
+            metric_name += '_with_failsafe'
+        else:
+            metric_name += '_no_failsafe'
 
-    def get_accel_with_noise_with_failsafe(self, veh_id):
-        """See parent class."""
-        if "accel_with_noise_with_failsafe" not in self.__vehicles[veh_id]:
-            self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = None
-        return self.__vehicles[veh_id]["accel_with_noise_with_failsafe"]
+        self.__vehicles[veh_id][metric_name] = accel
 
     def get_realized_accel(self, veh_id):
         """See parent class."""
diff --git a/flow/core/rewards.py b/flow/core/rewards.py
index b4af4c5bc..33960f8cd 100755
--- a/flow/core/rewards.py
+++ b/flow/core/rewards.py
@@ -306,6 +306,61 @@ def punish_rl_lane_changes(env, penalty=1):
     return total_lane_change_penalty
 
 
+def energy_consumption(env, gain=.001):
+    """Calculate power consumption for all vehicles.
+
+    Assumes vehicle is an average sized vehicle.
+    The power calculated here is the lower bound of the actual power consumed
+    by a vehicle.
+
+    Parameters
+    ----------
+    env : flow.envs.Env
+        the environment variable, which contains information on the current
+        state of the system.
+    gain : float
+        scaling factor for the reward
+    """
+    veh_ids = env.k.vehicle.get_ids()
+    return veh_energy_consumption(env, veh_ids, gain)
+
+
+def veh_energy_consumption(env, veh_ids=None, gain=.001):
+    """Calculate power consumption of a vehicle.
+
+    Assumes vehicle is an average sized vehicle.
+    The power calculated here is the lower bound of the actual power consumed
+    by a vehicle.
+
+    Parameters
+    ----------
+    env : flow.envs.Env
+        the environment variable, which contains information on the current
+        state of the system.
+    veh_ids : list or str
+        list of veh_ids or single veh_id to compute the reward over
+    gain : float
+        scaling factor for the reward
+    """
+    if veh_ids is None:
+        veh_ids = env.k.vehicle.get_ids()
+    elif not isinstance(veh_ids, list):
+        veh_ids = [veh_ids]
+
+    power = 0
+    for veh_id in veh_ids:
+        if veh_id not in env.k.vehicle.previous_speeds:
+            continue
+        energy_model = env.k.vehicle.get_energy_model(veh_id)
+        if energy_model != "":
+            speed = env.k.vehicle.get_speed(veh_id)
+            accel = env.k.vehicle.get_accel(veh_id, noise=False, failsafe=True)
+            grade = env.k.vehicle.get_road_grade(veh_id)
+            power += energy_model.get_instantaneous_power(accel, speed, grade)
+
+    return -gain * power
+
+
 def instantaneous_mpg(env, veh_ids=None, gain=.001):
     """Calculate the instantaneous mpg for every simulation step specific to the vehicle type.
 
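The reward functions added above read accelerations through the consolidated get_accel(veh_id, noise=..., failsafe=...) getter; its two boolean flags select among the four stored variants. The key construction is equivalent to the following small sketch:

    def accel_key(noise=True, failsafe=True):
        # mirrors the metric_name logic in update_accel()/get_accel()
        return 'accel' \
            + ('_with_noise' if noise else '_no_noise') \
            + ('_with_failsafe' if failsafe else '_no_failsafe')

    assert accel_key(noise=False, failsafe=True) == 'accel_no_noise_with_failsafe'
    assert accel_key(noise=True, failsafe=False) == 'accel_with_noise_no_failsafe'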
@@ -330,16 +385,16 @@ def instantaneous_mpg(env, veh_ids=None, gain=.001):
         energy_model = env.k.vehicle.get_energy_model(veh_id)
         if energy_model != "":
             speed = env.k.vehicle.get_speed(veh_id)
-            accel = env.k.vehicle.get_accel_no_noise_with_failsafe(veh_id)
+            accel = env.k.vehicle.get_accel(veh_id, noise=False, failsafe=True)
             grade = env.k.vehicle.get_road_grade(veh_id)
             gallons_per_hr = energy_model.get_instantaneous_fuel_consumption(accel, speed, grade)
-            if gallons_per_hr > 0 and speed >= 0.0:
+            if speed >= 0.0:
                 cumulative_gallons += gallons_per_hr
                 cumulative_distance += speed
 
     cumulative_gallons /= 3600.0
     cumulative_distance /= 1609.0
     # miles / gallon is (distance_dot * \delta t) / (gallons_dot * \delta t)
-    mpg = cumulative_distance / cumulative_gallons
+    mpg = cumulative_distance / (cumulative_gallons + 1e-6)
 
     return mpg * gain
diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py
index 626c59e39..f0e3637f6 100644
--- a/flow/data_pipeline/data_pipeline.py
+++ b/flow/data_pipeline/data_pipeline.py
@@ -10,37 +10,24 @@
 import json
 
 
-def generate_trajectory_table(data_path, extra_info, partition_name):
-    """Generate desired output for the trajectory_table based on standard SUMO emission.
+def generate_trajectory_table(emission_files, trajectory_table_path, source_id):
+    """Generate desired output for the trajectory_table based on SUMO emissions.
 
     Parameters
     ----------
-    data_path : str
-        path to the standard SUMO emission
-    extra_info : dict
-        extra information needed in the trajectory table, collected from flow
-    partition_name : str
-        the name of the partition to put this output to
-
-    Returns
-    -------
-    output_file_path : str
-        the local path of the outputted csv file
+    emission_files : list
+        paths to the SUMO emission files
+    trajectory_table_path : str
+        path to the file for S3 upload only
+    source_id : str
+        a unique id for the simulation that generated these emissions
     """
-    raw_output = pd.read_csv(data_path, index_col=["time", "id"])
-    required_cols = {"time", "id", "speed", "x", "y"}
-    raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1)
-
-    extra_info = pd.DataFrame.from_dict(extra_info)
-    extra_info.set_index(["time", "id"])
-    raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"])
-
-    # add the partition column
-    # raw_output['partition'] = partition_name
-    raw_output = raw_output.sort_values(by=["time", "id"])
-    output_file_path = data_path[:-4]+"_trajectory.csv"
-    raw_output.to_csv(output_file_path, index=False)
-    return output_file_path
+    for i in range(len(emission_files)):
+        emission_output = pd.read_csv(emission_files[i])
+        emission_output['source_id'] = source_id
+        emission_output['run_id'] = "run_{}".format(i)
+        # add header row to the file only at the first run (when i==0)
+        emission_output.to_csv(trajectory_table_path, mode='a+', index=False, header=(i == 0))
 
 
 def write_dict_to_csv(data_path, extra_info, include_header=False):
@@ -97,11 +84,11 @@ def get_extra_info(veh_kernel, extra_info, veh_ids, source_id, run_id):
             veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid))
         extra_info["target_accel_with_noise_with_failsafe"].append(veh_kernel.get_accel(vid))
         extra_info["target_accel_no_noise_no_failsafe"].append(
-            veh_kernel.get_accel_no_noise_no_failsafe(vid))
+            veh_kernel.get_accel(vid, noise=False, failsafe=False))
         extra_info["target_accel_with_noise_no_failsafe"].append(
-            veh_kernel.get_accel_with_noise_no_failsafe(vid))
+            veh_kernel.get_accel(vid, noise=True, 
failsafe=False)) extra_info["target_accel_no_noise_with_failsafe"].append( - veh_kernel.get_accel_no_noise_with_failsafe(vid)) + veh_kernel.get_accel(vid, noise=False, failsafe=True)) extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) extra_info["edge_id"].append(veh_kernel.get_edge(vid)) diff --git a/flow/energy_models/base_energy.py b/flow/energy_models/base_energy.py index fe7f463bb..ed75efd09 100644 --- a/flow/energy_models/base_energy.py +++ b/flow/energy_models/base_energy.py @@ -33,6 +33,7 @@ def get_instantaneous_power(self, accel, speed, grade): Instantaneous speed of the vehicle grade : float Instantaneous road grade of the vehicle + Returns ------- float @@ -53,6 +54,7 @@ def get_instantaneous_fuel_consumption(self, accel, speed, grade): Instantaneous speed of the vehicle grade : float Instantaneous road grade of the vehicle + Returns ------- float diff --git a/flow/energy_models/power_demand.py b/flow/energy_models/power_demand.py index 8cb5fd20c..d8cd918e0 100644 --- a/flow/energy_models/power_demand.py +++ b/flow/energy_models/power_demand.py @@ -23,6 +23,8 @@ def __init__(self, aerodynamic_drag_coeff=0.4, p1_correction=4598.7155, p3_correction=975.12719): + super(PowerDemandModel, self).__init__() + self.g = 9.807 self.rho_air = 1.225 self.gamma = 1 @@ -68,6 +70,7 @@ def get_regen_cap(self, accel, speed, grade): Instantaneous speed of the vehicle grade : float Instantaneous road grade of the vehicle + Returns ------- float @@ -85,6 +88,7 @@ def get_power_correction_factor(self, accel, speed, grade): Instantaneous speed of the vehicle grade : float Instantaneous road grade of the vehicle + Returns ------- float @@ -113,7 +117,7 @@ def __init__(self, drag_coeff=0.7041355229, p1_correction=4598.7155, p3_correction=975.12719): - super(PDMCombustionEngine, self).__init__() + super(PDMCombustionEngine, self).__init__(p1_correction=p1_correction, p3_correction=p3_correction) self.fuel_consumption_power_coeffs = np.array([idle_coeff, linear_friction_coeff, quadratic_friction_coeff, diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py index 492304b48..b65b7a0c1 100644 --- a/flow/energy_models/toyota_energy.py +++ b/flow/energy_models/toyota_energy.py @@ -11,6 +11,8 @@ class ToyotaModel(BaseEnergyModel, metaclass=ABCMeta): """Base Toyota Energy model class.""" def __init__(self, filename): + super(ToyotaModel, self).__init__() + # download file from s3 bucket s3 = boto3.client('s3') s3.download_file('toyota.restricted', filename, 'temp.pkl') diff --git a/flow/envs/base.py b/flow/envs/base.py index 8a36d6a47..c8df037b0 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -151,11 +151,12 @@ def __init__(self, self.state = None self.obs_var_labels = [] - self.num_training_iters = 0 + # number of training iterations (used by the rllib training procedure) + self._num_training_iters = 0 # track IDs that have ever been observed in the system - self.observed_ids = set() - self.observed_rl_ids = set() + self._observed_ids = set() + self._observed_rl_ids = set() # simulation step size self.sim_step = sim_params.sim_step @@ -333,8 +334,8 @@ def step(self, rl_actions): for _ in range(self.env_params.sims_per_step): # This tracks vehicles that have appeared during warmup steps if self.time_counter <= self.env_params.sims_per_step * self.env_params.warmup_steps: - self.observed_ids.update(self.k.vehicle.get_ids()) - self.observed_rl_ids.update(self.k.vehicle.get_rl_ids()) + 
self._observed_ids.update(self.k.vehicle.get_ids()) + self._observed_rl_ids.update(self.k.vehicle.get_rl_ids()) self.time_counter += 1 self.step_counter += 1 @@ -390,7 +391,7 @@ def step(self, rl_actions): # crash encodes whether the simulator experienced a collision crash = self.k.simulation.check_collision() - self.crash = crash + # stop collecting new simulation steps if there is a collision if crash: break @@ -411,16 +412,6 @@ def step(self, rl_actions): # time horizon being met done = (self.time_counter >= self.env_params.sims_per_step * (self.env_params.warmup_steps + self.env_params.horizon)) - if crash: - print( - "**********************************************************\n" - "**********************************************************\n" - "**********************************************************\n" - "WARNING: There was a crash. \n" - "**********************************************************\n" - "**********************************************************\n" - "**********************************************************" - ) # compute the info for each agent infos = {} @@ -454,8 +445,8 @@ def reset(self): self.time_counter = 0 # reset the observed ids - self.observed_ids = set() - self.observed_rl_ids = set() + self._observed_ids = set() + self._observed_rl_ids = set() # Now that we've passed the possibly fake init steps some rl libraries # do, we can feel free to actually render things @@ -836,3 +827,7 @@ def pyglet_render(self): sight = self.renderer.get_sight( orientation, id) self.sights.append(sight) + + def set_iteration_num(self): + """Increment the number of training iterations.""" + self._num_training_iters += 1 diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index 77a3d2c12..e708a6ce6 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -50,8 +50,8 @@ def step(self, rl_actions): """ for _ in range(self.env_params.sims_per_step): if self.time_counter <= self.env_params.sims_per_step * self.env_params.warmup_steps: - self.observed_ids.update(self.k.vehicle.get_ids()) - self.observed_rl_ids.update(self.k.vehicle.get_rl_ids()) + self._observed_ids.update(self.k.vehicle.get_ids()) + self._observed_rl_ids.update(self.k.vehicle.get_rl_ids()) self.time_counter += 1 self.step_counter += 1 @@ -104,7 +104,7 @@ def step(self, rl_actions): # crash encodes whether the simulator experienced a collision crash = self.k.simulation.check_collision() - self.crash = crash + # stop collecting new simulation steps if there is a collision if crash: print('A CRASH! A CRASH!!!!!! 
diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py
index 77a3d2c12..e708a6ce6 100644
--- a/flow/envs/multiagent/base.py
+++ b/flow/envs/multiagent/base.py
@@ -50,8 +50,8 @@ def step(self, rl_actions):
         """
         for _ in range(self.env_params.sims_per_step):
             if self.time_counter <= self.env_params.sims_per_step * self.env_params.warmup_steps:
-                self.observed_ids.update(self.k.vehicle.get_ids())
-                self.observed_rl_ids.update(self.k.vehicle.get_rl_ids())
+                self._observed_ids.update(self.k.vehicle.get_ids())
+                self._observed_rl_ids.update(self.k.vehicle.get_rl_ids())

             self.time_counter += 1
             self.step_counter += 1
@@ -104,7 +104,7 @@ def step(self, rl_actions):

             # crash encodes whether the simulator experienced a collision
             crash = self.k.simulation.check_collision()
-            self.crash = crash
+            # stop collecting new simulation steps if there is a collision
             if crash:
                 print('A CRASH! A CRASH!!!!!! AAAAAAAAAH!!!!!')
@@ -155,8 +155,8 @@ def reset(self, new_inflow_rate=None):
         self.time_counter = 0

         # reset the observed ids
-        self.observed_ids = set()
-        self.observed_rl_ids = set()
+        self._observed_ids = set()
+        self._observed_rl_ids = set()

         # Now that we've passed the possibly fake init steps some rl libraries
         # do, we can feel free to actually render things
@@ -322,7 +322,3 @@ def apply_rl_actions(self, rl_actions=None):
         # clip according to the action space requirements
         clipped_actions = self.clip_actions(rl_actions)
         self._apply_rl_actions(clipped_actions)
-
-    def set_iteration_num(self):
-        """Increment the number of training iterations."""
-        self.num_training_iters += 1
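The i210 hunks below fold the old mpg/mpj reward branches into a single `instantaneous_mpg` path. The look-back pattern they keep is easier to see flattened into a helper; this sketch simply mirrors the diff that follows (the env and kernel interfaces are Flow's, the helper name is illustrative):

    from flow.core.rewards import instantaneous_mpg


    def look_back_reward(env, rl_id, look_back_length):
        # reward the AV for its own MPG plus the MPG of up to
        # `look_back_length` followers; an empty follower ID ends the chain
        reward = instantaneous_mpg(env, rl_id, gain=1.0) / 100.0
        follow_id = rl_id
        for _ in range(look_back_length):
            follow_id = env.k.vehicle.get_follower(follow_id)
            if follow_id in ["", None]:
                break
            reward += instantaneous_mpg(env, follow_id, gain=1.0) / 100.0
        return reward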
diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py
index 450a0269c..004208cb4 100644
--- a/flow/envs/multiagent/i210.py
+++ b/flow/envs/multiagent/i210.py
@@ -3,7 +3,7 @@
 from gym.spaces import Box
 import numpy as np

-from flow.core.rewards import miles_per_gallon, miles_per_megajoule
+from flow.core.rewards import instantaneous_mpg
 from flow.envs.multiagent.base import MultiEnv

 # largest number of lanes on any given edge in the network
@@ -75,7 +75,6 @@ def __init__(self, env_params, sim_params, network, simulator='traci'):
         self.control_range = env_params.additional_params.get('control_range', None)
         self.no_control_edges = env_params.additional_params.get('no_control_edges', [])
         self.mpg_reward = env_params.additional_params["mpg_reward"]
-        self.mpj_reward = env_params.additional_params["mpj_reward"]
         self.look_back_length = env_params.additional_params["look_back_length"]

         # whether to add a slight reward for opening up a gap that will be annealed out N iterations in
@@ -90,7 +89,6 @@ def __init__(self, env_params, sim_params, network, simulator='traci'):
         # how many timesteps to anneal the headway curriculum over
         self.speed_curriculum_iters = env_params.additional_params["speed_curriculum_iters"]
         self.speed_reward_gain = env_params.additional_params["speed_reward_gain"]
-        self.num_training_iters = 0
         self.leader = []

         # penalize stops
@@ -197,23 +195,12 @@ def compute_reward(self, rl_actions, **kwargs):
             for rl_id in valid_ids:
                 rewards[rl_id] = 0
                 if self.mpg_reward:
-                    rewards[rl_id] = miles_per_gallon(self, rl_id, gain=1.0) / 100.0
+                    rewards[rl_id] = instantaneous_mpg(self, rl_id, gain=1.0) / 100.0
                     follow_id = rl_id
                     for i in range(self.look_back_length):
                         follow_id = self.k.vehicle.get_follower(follow_id)
                         if follow_id not in ["", None]:
-                            rewards[rl_id] += miles_per_gallon(self, follow_id, gain=1.0) / 100.0
-                        else:
-                            break
-                elif self.mpj_reward:
-                    rewards[rl_id] = miles_per_megajoule(self, rl_id, gain=1.0) / 100.0
-                    follow_id = rl_id
-                    for i in range(self.look_back_length):
-                        follow_id = self.k.vehicle.get_follower(follow_id)
-                        if follow_id not in ["", None]:
-                            # if self.time_counter > 700 and miles_per_megajoule(self, follow_id, gain=1.0) > 1.0:
-                            #     import ipdb; ipdb.set_trace()
-                            rewards[rl_id] += miles_per_megajoule(self, follow_id, gain=1.0) / 100.0
+                            rewards[rl_id] += instantaneous_mpg(self, follow_id, gain=1.0) / 100.0
                         else:
                             break
                 else:
@@ -230,7 +217,7 @@ def compute_reward(self, rl_actions, **kwargs):

         else:
             if self.mpg_reward:
-                reward = np.nan_to_num(miles_per_gallon(self, valid_human_ids, gain=1.0)) / 100.0
+                reward = np.nan_to_num(instantaneous_mpg(self, valid_human_ids, gain=1.0)) / 100.0
             else:
                 speeds = self.k.vehicle.get_speed(valid_human_ids)
                 des_speed = self.env_params.additional_params["target_velocity"]
@@ -244,7 +231,7 @@ def compute_reward(self, rl_actions, **kwargs):
             rewards = {rl_id: reward for rl_id in valid_ids}

         # curriculum over time-gaps
-        if self.headway_curriculum and self.num_training_iters <= self.headway_curriculum_iters:
+        if self.headway_curriculum and self._num_training_iters <= self.headway_curriculum_iters:
             t_min = self.min_time_headway  # smallest acceptable time headway
             for veh_id, rew in rewards.items():
                 lead_id = self.k.vehicle.get_leader(veh_id)
@@ -254,12 +241,12 @@ def compute_reward(self, rl_actions, **kwargs):
                     t_headway = max(
                         self.k.vehicle.get_headway(veh_id) /
                         self.k.vehicle.get_speed(veh_id), 0)
-                    scaling_factor = max(0, 1 - self.num_training_iters / self.headway_curriculum_iters)
+                    scaling_factor = max(0, 1 - self._num_training_iters / self.headway_curriculum_iters)
                     penalty += scaling_factor * self.headway_reward_gain * min((t_headway - t_min) / t_min, 0)

                 rewards[veh_id] += penalty

-        if self.speed_curriculum and self.num_training_iters <= self.speed_curriculum_iters:
+        if self.speed_curriculum and self._num_training_iters <= self.speed_curriculum_iters:
             des_speed = self.env_params.additional_params["target_velocity"]

             for veh_id, rew in rewards.items():
@@ -275,7 +262,7 @@ def compute_reward(self, rl_actions, **kwargs):
                         speed_reward += ((des_speed - np.abs(speed - des_speed)) ** 2) / (des_speed ** 2)
                     else:
                         break
-                scaling_factor = max(0, 1 - self.num_training_iters / self.speed_curriculum_iters)
+                scaling_factor = max(0, 1 - self._num_training_iters / self.speed_curriculum_iters)

                 rewards[veh_id] += speed_reward * scaling_factor * self.speed_reward_gain

@@ -345,7 +332,7 @@ def additional_command(self):
         departed_ids = self.k.vehicle.get_departed_ids()
         if isinstance(departed_ids, tuple) and len(departed_ids) > 0:
             for veh_id in departed_ids:
-                if veh_id not in self.observed_ids:
+                if veh_id not in self._observed_ids:
                     self.k.vehicle.remove(veh_id)

     def state_util(self, rl_id):
diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py
index 4d2d8553f..fc3229e52 100644
--- a/flow/utils/rllib.py
+++ b/flow/utils/rllib.py
@@ -95,8 +95,9 @@ def get_flow_params(config):
     if type(config) == dict:
         flow_params = json.loads(config['env_config']['flow_params'])
     else:
-        config = json.load(open(config, 'r'))
-        flow_params = json.loads(config['env_config']['flow_params'])
+        flow_params = json.load(open(config, 'r'))
+        if 'env_config' in flow_params:
+            flow_params = json.loads(flow_params['env_config']['flow_params'])

     # reinitialize the vehicles class from stored data
     veh = VehicleParams()
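The `get_flow_params` change above lets the loader accept either a live rllib config dict or a params.json file that is a bare flow-params dump with no 'env_config' wrapper. Illustrative calls, with the path and the `rllib_config` variable standing in for whatever the caller has on hand:

    from flow.utils.rllib import get_flow_params

    # from an in-memory rllib config dict
    flow_params = get_flow_params(rllib_config)

    # from a results directory; works whether or not the JSON still
    # carries rllib's 'env_config' wrapper
    flow_params = get_flow_params('/path/to/ray_results/exp/params.json')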
diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py
index c9e820b15..4c7498413 100644
--- a/flow/visualize/i210_replay.py
+++ b/flow/visualize/i210_replay.py
@@ -247,6 +247,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
         metadata['strategy'].append(strategy)

     i = 0
+    t = 0
     while i < args.num_rollouts:
         print("Rollout iter", i)
         vel = []
@@ -259,7 +260,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
             env.pipeline_params = (extra_info, source_id, run_id)
         state = env.reset()
         initial_vehicles = set(env.k.vehicle.get_ids())
-        for _ in range(env_params.horizon):
+        for t in range(env_params.horizon):
             if rllib_config:
                 if multiagent:
                     action = {}
@@ -311,13 +312,13 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=
                 else:
                     per_vehicle_energy_trace[veh_id].append(-1 * veh_energy_consumption(env, veh_id))

-            if type(done) is dict and done['__all__']:
-                break
-            elif type(done) is not dict and done:
+            if (type(done) is dict and done['__all__']) or done is True:
                 break
             elif max_completed_trips is not None and len(completed_vehicle_avg_energy) > max_completed_trips:
                 break

-        if env.crash:
+
+        if t < env_params.horizon - 1:
+            # Early terminations signify a collision.
             print("Crash on iter", i)
         else:
             # Store the information from the run in info_dict.
diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py
index a9392e21d..a9742e249 100644
--- a/flow/visualize/time_space_diagram.py
+++ b/flow/visualize/time_space_diagram.py
@@ -4,10 +4,13 @@
 file, and then uses this data to generate a time-space diagram, with the
 x-axis being the time (in seconds), the y-axis being the position of a
 vehicle, and color representing the speed of the vehicles.
+
 If the simulation data is too dense, you can plot every nth step by
 setting the input `--steps=n`.
+
 Note: This script assumes that the provided network has only one lane on
 each edge, or one lane on the main highway in the case of MergeNetwork.
+
 Usage
 -----
 ::
@@ -167,9 +170,6 @@ def _merge(data):
 def _highway(data):
     r"""Generate time and position data for the highway.

-    We generate plots for all lanes, so the segments are wrapped in
-    a dictionary.
-
     Parameters
     ----------
     data : pd.DataFrame
@@ -382,10 +382,20 @@ def _get_abs_pos(df, params):
     return ret


-def plot_tsd(ax, df, segs, cmap, min_speed=0, max_speed=10, start=0, lane=None, ghost_edges=None, ghost_bounds=None):
+def plot_tsd(ax,
+             df,
+             segs,
+             cmap,
+             min_speed=0,
+             max_speed=10,
+             start=0,
+             lane=None,
+             ghost_edges=None,
+             ghost_bounds=None):
     """Plot the time-space diagram.

-    Take the pre-processed segments and other meta-data, then plot all the line segments.
+    Take the pre-processed segments and other meta-data, then plot all the line
+    segments.

     Parameters
     ----------
@@ -394,7 +404,8 @@ def plot_tsd(ax, df, segs, cmap, min_speed=0, max_speed=10, start=0, lane=None,
     df : pd.DataFrame
         data used for axes bounds and speed coloring
     segs : list of list of lists
-        line segments to be plotted, where each segment is a list of two [x,y] pairs
+        line segments to be plotted, where each segment is a list of two [x,y]
+        pairs
     min_speed : int or float
         minimum speed in colorbar
     max_speed : int or float
@@ -407,10 +418,6 @@ def plot_tsd(ax, df, segs, cmap, min_speed=0, max_speed=10, start=0, lane=None,
         ghost edge names to be greyed out, default None
     ghost_bounds : tuple
         lower and upper bounds of domain, excluding ghost edges, default None
-
-    Returns
-    -------
-    None
     """
     norm = plt.Normalize(min_speed, max_speed)
@@ -585,4 +592,10 @@ def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0):
     module = __import__("examples.exp_configs.non_rl", fromlist=[args.flow_params])
     flow_params = getattr(module, args.flow_params).flow_params

-    tsd_main(args.trajectory_path, flow_params, min_speed=args.min_speed, max_speed=args.max_speed, start=args.start)
+    tsd_main(
+        args.trajectory_path,
+        flow_params,
+        min_speed=args.min_speed,
+        max_speed=args.max_speed,
+        start=args.start
+    )
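With the `tsd_main` call site broken out above, the module entry point reads the same as a programmatic call. A usage sketch, where the trajectory path is a placeholder and the exp config is the i210 one introduced by this patch:

    from examples.exp_configs.non_rl.i210_subnetwork import flow_params
    from flow.visualize.time_space_diagram import tsd_main

    tsd_main(
        '/path/to/trajectory.csv',  # placeholder trajectory/emission file
        flow_params,
        min_speed=0,
        max_speed=10,
        start=0
    )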
diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py
index 261dcbbc3..ec96e3306 100644
--- a/flow/visualize/visualizer_rllib.py
+++ b/flow/visualize/visualizer_rllib.py
@@ -26,7 +26,7 @@
 from ray.rllib.agents.registry import get_agent_class
 from ray.tune.registry import register_env

-from flow.core.rewards import miles_per_gallon, miles_per_megajoule
+from flow.core.rewards import instantaneous_mpg
 from flow.core.util import emission_to_csv
 from flow.utils.registry import make_create_env
 from flow.utils.rllib import get_flow_params
@@ -234,7 +234,6 @@ def visualizer_rllib(args):
     final_outflows = []
     final_inflows = []
     mpg = []
-    mpj = []
     mean_speed = []
     std_speed = []
     for i in range(args.num_rollouts):
@@ -254,8 +253,7 @@ def visualizer_rllib(args):

             if speeds:
                 vel.append(np.mean(speeds))
-                mpg.append(miles_per_gallon(env.unwrapped, vehicles.get_ids(), gain=1.0))
-                mpj.append(miles_per_megajoule(env.unwrapped, vehicles.get_ids(), gain=1.0))
+                mpg.append(instantaneous_mpg(env.unwrapped, vehicles.get_ids(), gain=1.0))

             if multiagent:
                 action = {}
@@ -329,8 +327,6 @@ def visualizer_rllib(args):

     print('Average, std miles per gallon: {}, {}'.format(np.mean(mpg), np.std(mpg)))
-    print('Average, std miles per megajoule: {}, {}'.format(np.mean(mpj), np.std(mpj)))
-
     # Compute arrival rate of vehicles in the last 500 sec of the run
     print("\nOutflows (veh/hr):")
     print(final_outflows)
diff --git a/requirements.txt b/requirements.txt
index f06c3c69f..a4f6f83f8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -25,7 +25,7 @@ redis~=2.10.6
 pandas==0.24.2
 plotly==2.4.0
 tabulate
-tensorflow==1.14.0
+tensorflow==1.15.2
 awscli==1.16.309
 torch==1.4.0
 pytz
diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1
index 0693ed4b62a9cabcdbecb267201ea862144f212c..d346e9dc58b39a5b511ced70927eac1d0d32579b 100644
GIT binary patch
literal 20358
zcmZU)c{oKn`xww<#`NGS4uu8$eLzHzgEsBdrxPk1j+#8YJS!rC1?(e=S=!wiG{{Wq^O+#b5w
zKg95VMoGB_I{Y(SiYKNQ#S;(l6d!#cBy?@4FHd4GZ;apQYscz(%Kh^eqmAo*OpHe7
z+~(`glU%=Tdq7Z_;rhT}U&Day{-K-K29M4mI#)6rF{fg%fy6
zZoKij&Z3^eqoy!CWmi|%1%F3>|NJ=%h4NI^?Y9*Y6%wkuFCvJRzCj*c`hXiJ55r%Z
zI%qt?g}8;2xLcADKIQmh@ajke^W*f(&~(AO!7})`(v_~dCNJ1UW^mVE83At?C&(F8
z6im09$gC1ortzNk&~uasB;U^xoYhnk#K+7Mxb_*~M*kn+GrkWlrJcd5**wNY0r?!RumkKwT4)Zs6eyj3(CyzB8LZKiRn2f!Qb;b+>zTn@=8An
zIAStP%Wre8)w~c~kZH~Rn$ro=s|>+QIg?#HMo7@`sfyGFXTf=sX;fdzQ392>bB?pY%amdS$Rlg#JFSjjS;K}
zsKc$%x)>rgjQ$~Y(CQHcSt*_lmu8cZY2-Tt&(Zn7Y~Rz|QQ<2xKX
zjT3;6h{7Dy@-uhj#Ro~
z$qX%lV%9(eFs_U>*A=Eu_x
zVxrVb-%ve4Lsb&7`5g%}WW(XYxIJWp-U4zxPoMcH_YMo=uS5FX&!AD90*fAKbG=+2
za?aiihi=hXNNW1Y+^3~5vt&FsjXc9r%T(B2XTasvGlJ*(v$*3|D&Y`2o@=+)o-1ED
zotsqhoa~jljyj(8Kmy`mV~d?Y(}EJ-gizkZb)GW+$79!RUK_%j
zH{O*0c;3`Fo|@nI{dSQLrL(Q|DhfFo8?rfu>IK$+U8h;QWp3yE5tHZtP4sA}SnbCD
zl$FZqXdA<+QIz3NJsZb)cQk>MXL+yjnv#pP<^HM6x!Mf=hgqtu@<171h)GWgW{8|Ml70&fKf<`xgaPxkrrQsfY43)_IQquh~1+
`fvCB|HIAHf6dqYkNH||ylMXp9NMEF@Q>>+cFrX^p}{!KS(+=ha3N{=Jc4SnayVUj
J&5-w!IW7Vg7S;0c&sLZ7I6dUl~5lz;$_F3^^4*Do#_C^slr&VAudp`=*9PkJ3%Z_
nj8IfJeNPa5?4B@;rY}futzZojlx{HadzjySTqZzaywz01{YH8ys+FZAB&Ytxj_mt
0?r0wZuIy*8Z9D;nPcx^`KJJGz&#N`L1GYh-}v1EvG{jT&@npi*+#@c5y6QywIFCU
0OblrVz=@dz_ZJcG|n7y-ZsMEfvwp3a-rZ{RyEx7oK6nh9uLLMaiBN4oK$TkpjTrt
xqo}J;MuJ(f$ioK;3kpLbWFgH(z->0+i`-?%)b=D_0_DgVcg>|`i>JO0zADn2bx{W}t
Is@{qW0K&}z%YF`=QR$`e~DTRE>xW%
VCn6TmO*O-*>2TP&6|%Bts2}LYA4XDvJdtq8*=-M4!~lcSMWXCoHcL`!s9lDIKSE#
K+{uDdRK>A?C}ok-wvZfmbSq7U;@dJaRi}l%J^Nx5qV;A)c$injNz$q=kwn}+Uv{s
)F>ABn*O53s4rM!X9_Jnc3jOJ18j$)J~wVdKbY~%u*u;Kl-QdH9t?hkw%}SElGA>+a9K|Cr<@(1@V
NF76U>7e)-@R;cf>wd`yE^#fnGrmf5`5qmRse2agIR%3D{46|oHxjq3l@qLBWVtV2
ox#`I-B>y8H09}p@}{rzRQ!K@=$`;E<6jTb{f`Ifx$*S>Hvr81Cjk7Z459Arb#z`_
4VjpCfGqu0M9ntcpxxcxBs(R8*uGDrw_OUTnQ1(|75#|$*mQ`Dxt>oPi)zW?6M1CS
h7`(|Jx$eS=g?fkG-B(0gpAWTLJYGG(FrXf)U9#!oss$Uct|CwI$uI`>obUYS{8|0
+CW0K9i%*iP~NO{|0mNh{WsGv{MT@!{}^uU#xwcH@c-ltrvJFUypYhfj7-?o-pkeu
{UFvh1@ur!IbAbokGF$s@F!hE+h+b?9TnHXxnmKyuK5j3N!t&F^D^lY6(_hk=NP-f
P6Jyl9%DSL%4hglL{=%PB69=^N=m(Xa`N;M|APL~PM{7Wxs3)tZrN`AJ!
zvvsi8wIDyY8rIo+124jR7N^gv4m znYAg7#@;T#LH9GnqV^a`xOSWNT{T5+b19koHV%?tChS|`L$qNzS+jwMhCUBaL)aWl z1{T5$Ek53;*1-*J!Z5V>4OKle2^XtH(x1tDF;7z)rDku&Ha#QUNVeed$Gg~)5Le=0 zHVFc{9hkJxnasF~)i96SMJse7VPpSfJpa3p4roTh2ZMvqykQ(=DZ0Um<7I{9r}%X0$1p zhECynr0}jU1g);-E6_Y}&=uT&*Ce5;R)H`>KKhf~B!(G7iawxeofC5;Vu&{!uv z8~crFh#mSei(8N4%-FZ|_nSq;qVOph_wX(WnjS?KPBx@3?D~n9Rz6(asRD1_1Ot?> zL^-Tv_hxyqa=l~m+mu`6^_U|xYHtX9?8qTbODyOoAqRStza68C*22C!ui2IZjx=@X zGBhM+5c%+CqBupBRa$tQRoe8G-Vc}qCfAMd>OxgCo;DW(%m&E=8w(od5)Z0cMX*9R zh7Lcif(Gdf=>@CIaM~QSoUI+$G&4uaBl2kFd3>W6VVD#JwGtW5y=B$dr z@;~BmCUGmB+r5A>mpo2eHf$y~x$^K}{Vl40-j7~Yy+k&B6=!ZcG~geX6Ev%|j;zX_ zik7FnvCqGo*(#$3UyL?mgS-wtKGjdRpWKKFO7BU%>nXhD_K;CB%fOP-2c#=v4y@05 z4`M@obkpfG5NNOt)k;lp@LMXf*|KDe0i~OtoPq7%cEG92b3t=L2Afmk1k@@TkJ7`? zea*S?$3YPibKQvvh>D{TEs`Wfe=J_|>BfjVsYKKGJ)NA@3i^*vvtvFjfYeq^sP?HL z>jmk|d&4cX_pdeH-Nu6nUZUh~tTeM|uL1E6X6awB!{aknk$GJ=X^h%icHEd~Xb1{H zmmLPMa``GUTkj~{F0=&hUvUSHw=+Ca-ax(8OyKc}XndK$BhPn8!L6h6_)+f^>II!4 zi(eN)fJ6q}n^Zt*K3dV)W6zWKU*Ayw0TIr{(-PRKW(H?|$x!1ze~D}AQVbnBgUuIR z3C$3O^{EA9!>grm^G_=k6DcEfxj3vz$)y3`4Df@}1Jc+b4)3=+;k?y#BunWO38^S1 zt&?5Rme)rVI>lj!%VF}VI|_c?eZV-)Q^K+}Q|Mo(3^*ly4@qk`)n9dhy`11e!!IMh zeWC{#R!t+VR${2+avKTi7GN{?ImuI1rzB9zCb7#xtS$ke7X(tl?ax4~(iwm0Jex zJ-&}jKGRJ6Zk~imld_S#csx7%SU6q0wUSDGi@;+4d;Dj5NieV>k?DL~MfC!mQ8?5M zBUd(2v!8dE8^dQfGa4o_v72M*1@|PNhVjT(?jU1>Md9*YMYs{T269Gp&|z>jcuajl zDkt8C!+ok?w^bL~uRmhNR8G^D;y4m9BeAjPjtP{NtD#7BEGy+*4?>;Fz`634Qf?Wo zl}{rV1d;TJa4uPBz(t!2srV{rgzD-dO+h`m z-%&|dU2Y&wAG6`TO%c+W*r>cZ1Ex&sWl{l>(stz?330OpjT<3wb;eV2(2S39 zB^&VLnqJa7IkWL;`ET0RM;V)oAz1w3G-tL&KP^4`kqEU*GR3Pd(RyJouu6=_%bP1e z*4Q2SPQ@6L<4F|NTFBx#Uo;o7Y+T^s&I6lDbDdO#Zv{d;kS+`YHyfJ&aaDssb;#UxokBi&Jls7 zwi@)+n1hUWwHJz?n~G~T&!c8h0W{^!bbRik399eKV7^`o_3IcQkl~FL>!!h4j!`x}O%>bQNciy)a(b&LgB}lQ*PSA4kMJRK^AyngX&jc-^^-rEl_;n4ijsp%BL{%mlyhqL{f@sL`R}Ap7Lf zH>NCE9t_%fIAcr>l>K7pA^*+fQd3O*ae}nLDq?(pEnN`ZPg}p6L6^vM z%F7YM70(-q@yS{|QzJqBh0j4~odNyP`kLHa&=+)!vl;%PYgbpG8Hm( zUy=zOx#-xrjcIi{P4B;wg5!Tv*pt`d>GR*eVAwYg-_CQzj%{!GB@y2k^LTL(J2xJ_ zk68;zd=)xY?+be^!wD9?6lQd#rUO%*iwUb3V7~Sc)BJO6rp6GNl^er;OS7X%0gHeQ z6~m+RW?`|;V#xR9kON03`{7S9X1p`NvTg_Z<>O=Me0ZHWKDtVsZ&uODm6|v^MjTwn z{^oz+`LX2%J884xM|#!qEYVnQ!bC?G(s7&Sk(k6Ku&RG7{(fFd*N7F-=tG-G(vc~+ zQ6K~%dR!P0pH4U1DYCi+pUCCSn(Xq~O|-*{YR zapoW{GROq(vjXz{Q66>qr~^3}34mwU5Y@Yxu-C-{s-?Bydwv24!n+ugcfz1({)6#a z&_Ui>1;DkLk0EGy0=6eQz{I_iF{y7K{#Muvcdk{l9j{(fE7XF{dtUHiyA=4(_9dG- zVsPV!SejPRhL{~k9@?zH*?xc)BMqd{EeI`}&Vhn#HhthNjs3eVVZu-y3DkVax(Qz5 z&&_f;>#iY&L|f3oT5>z(F)@(ra>6~wqex)p2m1Zn0zA0#84||_bn?+Wa23zO&8u$m>m-m&%&MdA*FxyT z{5z=i!30WoIU(ne6-GAN&^V)0s9Jso&A*7#E7MG1Fn9tgtZ=0-)4$Q|N$R-i<|(-U zeFk{em2g%mU8cWocR;IkC2P$@!_(L=thLQ+8nyi}@t3?z1;vq^4>_H5|7LRttn`IB zoI!Hxz;e=k`z#GBh=a(#{vi4)AM3j{h%Y|@JX5o1NwyuVxy>in@<#E1Z7%G&=!$Bc zxv=OO~Rsdl&qJ$?F ziC26iFf>;K4ZjP+{nA?Y-KzxbnI;a7mHp(K-FKF2zZhKW67luemqhsQdT3Ac1pReG zbVIuz>Ra-NkL)Hi)e&Y6(;&Khl=K}C4WjsjOEr>`DPwY-YO54b>GDo`SMLyAP_hK; z%wCAq&hk6r&$msocf(lxK!$j)Pe|= zDEz+j8O}5nL(2;dG|9yn?tH8v_y1VadOH=;c+3U`Qasog=ZvokeewFZ2#nLUht)cD z?9|UssB-uoCU?(Yx=w2~w0j??b$5#SlIvyh;eNGrUn@kJkB|y_+kSbo*hmR>~=-xd-#S81G_yh~^ zZ9a@c9jRE_z5t4L8>51e4E8^^h2Kx7qQu8!vT)-X=HTKfxY93$tfhnW?~!~m?0bvu zJRySy4;K-WBtEVER)BL(E1<6QHt?Ueo;18#fj{arK=agHCS_OzrDh&rilncTH-Ddy z2hVpRPeBe|zAy%#D>gXcq8Fr{c)>s3w4a8$@JYdp=Y(e#$}|7J)YH`HALwE6FFUaO z4?CFc#+&oM(8KB<=y9cGHs?b^3xB}0_$Ue&W$cjgmXUZ0{?}`FOIy36vyG?9qZj|j5tQ?G&w<= zf&Xc&E%RDIi^KN)11IkWvTIXvr79_Rn1p8rK3%)jQ7 z|CkSMJnnzdht0p}!#DaWjWF-SeQIN{=}kON?kXd$fjzLzT1Fs^sknPzIlfIOz?~J= zNSbu;zEC1;la+3;%DF*&XZqqnCm!mxjs=ZTGMLB7!u|dk@KP!sidHTlkG9`|v-OF% 
zAt#h3*W?4|wieC5qL1xgHnD9>C-RT&6~`M|E9fno@layRp<^#PLyB`gy7eS7(Va%* zw|pNAi)_J`CTn;&{w^)-lgIkb2|>#+G;%>WZNs@m0B4sjI<)-7kJ~NU=e|i zYUF4Gs2IHV$bz!FH06$_#Eax3iC<;|%_`at$(el4S0hi@Ati~|PRU@vk`Sn$A|Q7D zE|us*m2e>=AFjcmMd+87@G;5ieMB4=jhPcvwdF(fPa4{5u=2WIB?(`4h_@G7r> z=0-(ggu_L;BlbLKlyRBn?jiiMW-NA3)4=#Yx=`N!nt7%u0kaO}!;{f<=soo>cpo1k z+BOalyF~@sDom_L%K~~cVIjFQWfo0R`#=&mJfLM&b~x!}6bW#wgq(1SVWNsCEPisd zTrfk)=}M4Owh8z{i8wbM;PuzZbdhQoQ>ZY3Y}v1Z+2b~WiiHVGnW#!7eht&|h)<2a zlP&P_Vjb$A_!N&jJ1~XIGr*vCIS76Rk@Y<0cH|-sEJDg2Bx|o_AwSDtl!T9m->=@V z8&*n^{<0iWQz!xJ7mt=amc>vtn1SDC_>q!ZreLz6m+Wbh!AXWWu(GO%bs9IAQEIS^=^5e#5%C+i}Zp6T#K?+xTdF9I4e_jH`D}!A)D$;Nr{g5LvT; zmJi54gy=&YTE3K+4+$V}j5j{@i6IAeeZq&!GN@#NEq*^34Sbb)sA()jUxO~1^DP_a zr|?l^eH9VRp9bEY_vqrB6F8&xE$LSYpl05kluuS-#a|tGDwa*(jh2xv8m74AN*e6u z2~g~;2EIwv#!IQ;DA=@}=H}04PiHm~XVoLP;mT34DUBl5<&Qas_~U8G(+0*>Ab~sD zYq991<>w4FWigisQL(gEfAf>h%-9BtQge`VQx%6D z)3M$i>9n=u(B38&@^apgiRq{LuDP1PJGGTA7F>bVpJst;j3Wu;AH|YWVc=G(jDe0j z;d5jKT`TMa$G#;r1UvN7<*CYu^UKKv!xq|8eV;kiSPGlnK2ZOspUGW^1n{1^n=rY2 zM)br+P?2n>;Tqq_n~qFU{x=X#30nR)?5R-mpe(GT|>qc5a(C z?s7}O{(g1%)$^B?6z`>f7uC`%IXQ9^e9?9H9h9ErgZ2-Dpy}Zkx>95rX>u9|F5ws1 zs>58oKClzsU6)6_!&QWvz7f3Ei_vW(5gyW z!7~DReLK>-nM=17h0}(O&9SIVSOq2Q0~aF;E_c8!pw(;BIEW)``(tbuO&s*Q8_!_-@ovWI>wB*mZO z(K5^dI4@i9Q0hLMccTMZ-rU70Lpy1w^L27Cua=g^&H;Cg<G`9#!nCL zif@A21q(2Ax|Cq#m=HGkWaG}jEbxkT!R`oEe#8ARbe-P|UQ@Vez4Ii|#&Ql#-ORqx zw}9=7*5kUP97xvmrt-eEuzd0$acvnO^SDPLjJuTF-WQA;P5yvU?HMMp;S!xUAPGWj z%$%*09>JcZl_cnK7Og3Drndez_;>IEnS6~UH#a2VxcmcvFTKef^<4U>W*Ty)8o}8G zO9`#*p|?y_L4CAwcj4|+sy-(NeeNg0?_h86UYCVUuYBP*r8KYh3kY>@WDJGZ;-yJ8 zU~KuAK3{u*JtrLr>!t$;rY|RFSKNYqyX|22zCnKbUO9B*@aXao4G@%`V%EjvF(XQr z%-Rnz(A;Q86u0^jvu9eUCp`lv?K=woTa#$?!FRwt6iWjx+7kQgU86`kj$dVNfW0f- zu~LNrulL6p-Vq17GJidLL@u6jF{ZS(V>?dooe0N#5^ze@0dNbCBxRtL z7Q9cTJ{PutmA(e%ggjx#Ot8fVMZxTsX?M{FhR9B{@8m%DTHHHlKhu<%h#swB;5nuY zuY@lEzT_pc$Tyx1c^`&z)=bB){4;3z=qTKDdI;!XOY^uz_ZleC16lm=%BRuQZz>>U05bGj^C#t@Xefe^NeQi_W zVq7#XdtwfPaed?>T!iZ%J?L^9MHsWXntT#6PJ=Fk#c7Op^=$*TYj6b`N99wXhtU5LwAFl|4JC5F@_I=xEQ(YFe9eK!JYd6DHzH6btWG-oz zAu#>TVLJBPUD_Zw9}=<`gVx|!=qnnHxgGj+$FXEweYBKTq)VgONF}3FodV>?4Z7N| z0!oi;hPMa)ux;;Fu)lV{#7ASTVNbX`S}%SHGi96zCpwXCW)w!_el@w1Xvj(9zoHH^ zPk>Rg0SY^oj-rqP?2(&|kC(q;IX`7Uw{0HYxupp!qoSba_ac6{|03$4K8rnik_*8v zf>EQ!fT1Gs81^L#?$7sxynQTrAzMd(tg(Sf50(MKD6TNX7qQi$@1@^>c8@krQ(J`M4@URPsV7KZZYX$LZ^TWvUEoe@0@E#j0oT?( zVcHG$(0RsJv9doBoYT)^6)K<~HF^*PD#mb#vtH@P#VF5-dw`WoA*>NVB zWDRGLRr!6)i;x(q(&kBh@2XJl-6E1CNTUu{GKs~POwv-sBlLL%^_0vcPMbDSIk{Bo z_$!@QgC4b#PbXa`qo`iuF_Ia3i=DaDo!Ixgl5;Wz^u(tu!gCDeIsISNhuOcXj|Kl4 z?))FaUEFvJ|BF6c|3x3hQd}~r#ETTVm7v|JSfak6g6|WQ2xHBQIgf8=;)Vx2G9+&d zI!<#zVTmr=WG#%HfvMy{;tdq4Ji~rgjtBJ)D>&jL1(&Q$!M;?5y%~^8)KpB+?{EM; z_3$jY`auu6*4|?*v!@UhE&-!F9h_IEMRO*cq6;8{CdOZ&?(apwDDwt&x0Dbhdnb}N ztqydT!%=ceRSOO$C!%CQ79H_;O?tP8GLI)k61884shHg}&J)>_V6`m_3qOb8?%^7! 
za5&Abe*T^0EqKZ-_R50w1(V2YkwCgWbc7h*s-jzOGQ`O_1F}u<$I2dh>wX z(oaFL3}xtEmJXuB;`s4-GUW-aK+cQ(Bv@S-c2_Kc!s@zX_`(Ts0JI+rJc@vk1vuDY3cb?6mU%RfSveV)a6P$2<_wiQxza|M~G zBe1utlsw)fMKl+cpz59_Y)jEDtZTkcZ%24T#^8K9@62z~m{0MT0kBTy!eBneiKBkx zE)@h%L0cUSq>nFyVRJOzdn<-1%8eY~wKrL%#me|z`!PKnXOHJAPJ;R2=XB+kE10)& zDNY+r!+ux|p?G>aYKRx&tuL00R81!vxoitbQMO`VoRXuW*3XDv@8QPHnxTY!CWDXv zYJ%FUKJvp~mMCi1(0ZR;BvQWsi}PKX&LzpTP4zm}d7y@E+ONn;Rt#QWoeZaL%R;!o zgUHLiXIII)faf(IQgJndPT`Bg{aR(~+R%4wcNH#HZ4Qs0Yp_cki~7$^*89V_Hlz;^*S6=}L6*L4DA$TsP@JXg~GGw=4*<_K(bwH%HB@N@!G~j4zf&lED4rK=QE& zs$C8y*W%iU?fJ{}T6zs_UAqNx_hmAs3J&-?zJa#>sDyagpYU*3INplLq)(eFsA6dz zEibrFPTDjRqaOpLr|Ay#3ww-er)}ZalR|26wizwD7QuO~N|JN?I1yPa3xC4ppnFX$ zy0o4{UQjc?%lIB_y4%S7I2Q^r!`;+BdInBe9mH?j&t>G-O-A4Sx9FOChv>wzaq#u& zLo%Yp;Du|KsKxa#R&j7VhS{DW`@iL*`}jlXiBFgzeiaR^&_LNmk@%wKHv9bgM|!|A zkenV{#&(P6QNODk7-N$}SAEYw)$koSWv(dm(Xg8Is^2F+Rvo4ue)iC5_kc~}7r{^4 zTHKlKh4JUKU`63nTy$$XyGIa5^Et(s*49MlN8W^c8!u439jCDImN|*g8r5Q_HWGt9 zmzj@C6Y=@IA$sR_5C&`*p^D0*ofb7mjH=j1`!B!bJpZ|e?mXuX*e^|LqggsJ`xp7} zG8y`;v{^|H8EVEkLtkFrOD5E}k^829Xw{W|`dZ>DiJEt`u}f(UOehzHmf3}v=zfft z8YRPpdB;iL@f|c=Esk|i*vd&5O{wy#*N`@|_xu=NOSBhH0X3x^^t9R`*!HOc*43Wn zteG4|ZXW+mI$lQ8Ydz(#Nca^!nO;uB1Ow!f{UuV^a0h%2?Waq!F4L>&=Qsh2{xTDi z50EP5T&z6lPR71?LEnv?jc=7pu(;74dskeb*<<5TktpK2oJ6>hR7tHz#jtZ;6STV+ z!X|5dvcA_m1I`s&(I*E&Km=1cnPOYW$i+5V*nN*)xcY`O=vYEzhNn{JIn$wZ{~NY^ z?P#f{=!*k=I>cxZ2M(`H!uvi`@t3Cv;rcB{gLt{oZrNh$eZT~=obIt=7gUBU40BXnQ-{*lEo+eNMuI zs_W@yv$yPAbvIlpHpE(uKShF%IDnZ2-s7X!zqvKIE^;Kn$B>@_V^vwc))(>8@X z%gSNp4*qO3?_3FYhHg@~q^WpnMHUtR`KeK%NexzeR+93w`=RpP37jxFoR(Id;ZGV> zvn(v}#4i&yp=FmQeo=oyhlg8nugYv-cz*N>6@jm_Iw31B0gb0_C5^gLf^Q-7FfTbA za^}1wOAmad4F?6pR%9!j+_njIA&nUAip9^yhpBH^6k9ubuA`kGm zG+O8}q#uZ;DShkFIMtfgxoeS`CkBYfQWw0JWrjm`9J-||VH8<5(0c<4P_=6d`c-Qa z+ZhBmR4bxYwIrii^v0i6L9NsrW);f^hw$f;ga#diPB=>CiXPIl z!Bg?EXa`lECJA9`CqaJ90epBj3{Kj`Nwo9}s6gXNcKw9jI6yfTZR!m@j>iocAY7XvsO6Y#M6XeS>8=NOA1KYj@gSS#5xLUMPk7_j@ zPj->7H^xziH|D?}?W~rstHVE{ABnKc1FDmz2Qz8|SQi6ZcI%g1vZU1r#~Q3Bwhq#8 zG_8hw)yzj(_cV~+w*Vsgj4}OE28o@m1Ye%0z*<#(DnBt6XDBSj8OEl#fnS1JU+UqS zSPcEM%n%Q3`%OMcB-7XhPsm%BsZ_W*(B;`@IR zn2$`)(J9~)y^;^@E_C4jJgS)-3R?S+h8@c#rU9bdzrxxiWaKB^^v)kQn@_X9jzk^x^oH4e)i!BIG31GdilJ z%xMirx?(h+%pr^5UXcr255Gy$OP$FxnNo7*+CfH4v6MJ2^Tn@U*3)&)`>@mS5SgpG z0FpCuuxR02vO!S{T#V+>*4RUYtd7O*TjiLz-4Nk;B4rx9!RXo}PGhw)(H}fS1-bc9 zrA?q2PLP2YKWW6=)41yE4=NIqL9N~^fjH+a6aD%z^hC-M8Gap`k{QW}Z*_s+l}E|5 z)MB*!(TV7w55s29*+12W%-?N0>4e+|^o2t$eNm$gvJQTjvSmE=IN(RMsu;$;BU)KAp=aevd>3y{4YL{YLi{+Hc({s6c20wU zfDyV{?-G?A=8T1VU#gz;4`7~$sLKTMMS02EEk!XhhDja=X~k)0!V+2YU@;Hos5>)mUn z9j0w`Ugu@7h|Hr}1J+RH>4Wb!3g|Ch2D8MW*;;FP1awSH1fR5A##MMb*{1c8-rM-nba>*)i}sAVP;QkIF>fLZdx7G{Y_*w;yvs)7_>JvY%luJ8Z$f!uxSV z{Q|9hcNaEna)WdC)8X10VPa25mD(jn)a%(#c6ZS_n7}SYb9Pjv>=^}p>kKg}qmk9| zo=#=^#)ALP-(VGeSIgQ_LgCur%;@i((sfJ@vV_*1q@HP2H3prUd?ZkG_E_=hO;bMIF zqYs+j?8isdC1mTC@5FWT15!756sJypOosm+N7wfWL^|d*`@T+vTsqTB`kM!d!;;JF zS1&`zsMl%?P_hMuu1`z>nS$S|mg9+N6H-ugj{f}S2G1`oz!AMvd{`(YkP4_lLCP`a zNT&f^`nQleJ=P*0^-9@A-6=%ec>-CM^Brtg&n?!=I0f*2oT;YEO2GkWG*0qzoOsQw{?d90ui(l}{q8eB{MIUp=+H+^D*n)4n zy(nvO1L?=r=;!4D8E-C=hmGHG($O)3lOJy32zP`^s-;0@=OL8!i=f;Sd-3Mi`SgwN z7fxZl7?@ca!1&4-RGk|Ni@h#nPMm%+xU56r}M0^(jD58Nru zIITAxPIe`d0znMen-zhK&;!oMJs1^H7e0Ig-U|A+9m==j1@8y^qzQdgR=7s3CjtiUC7>ynbTMn699dylP{TL=@1Z_sn29ZY9`18QHr zp7PeJ(ZgN+IHT1Yo)>1I4lGQsM28q?GA0#0sdrD_HdWZ>OqvZWi@SF@(WIrVAy z_xvBcd{>5fKX)<4CC1WhT7o~q`ia3cHQaLa0En-fIC@qv9ewSKkynyOS2swa;8_^8 zrS(LfH;D0Xlc_?}CgjiL!3FMJS~7P6-U`#;D)lO0MMNRo^|P?bTxLsrp6^3T=6GQ|UPATpiO<5d|$llQ6=EU&B=-I!SC)+5@H1C&;tF-?VFN44UpdiWl<7a7UIK;tlkIPc_Zh>241xTjxVn 
zN)vhCwS`7*m!N{b#f+g^2Bgio#@Tw+8moNXkmA}b47<7rJu{Rb+h&NW-ucbhJ9=0n z>~t;WMQj1n6Irx&Sd+Xg98adbKZ`B)mLR@26=u2hP;cH&3|dl4E-8x%nBzC8qq~gY zZ?GnKyf!7-v#$`nBXLk|ybogv4*@6q819l60jV-W+WlcHtR6E=D}J`Ko$Jd9H^7-J znQ;_XSM0!QLuouT?;fr-4Fa)pFFeS!l8&_R^zQr>z>OSY_DQ;9tP>aaoyvmd+(RVJ z{4zNr{E|-Tvc$px9jq&uirp2G+}H_1^w+%?^!ev^U_6_UpeX`2cQv5#4sXm4DyKsq zpF+vXdb%WH2K0O3z5T@qP#%5r^`9 zC+UK6We7A}20b<$X!9%r&T|8@Fue@t?l}g_o_?pp8+Az!jH<(iH5tthQxmqCp9j4#!kC(@7Br`MvM6QxO?D$yAgEqC)j+5EYdnzeJ;lfNqgY4LFiWC}gS(_l;|-aA?ti1qldS~N^Dn)9$uWITu^&G;WS4?AC7)&{}7yn^#l^!RV zVoIJ9soplGRf*y#6V)SDQUj=9alFwK-p18u!&kh zGu@=v7V62*9^F)URU|xoC+Jb?r*UkESsgdXzXbzd)nb-x7<6$nsNVq@jc_tzFUzvX zqI4R~cHT{rJEtHz?<9^Nc%2fwi&?5m4|YOh9Pkr=rAE6XY+Lh$sccsQi{*nT;<~t+ zfAautaEZ`yj3fIX;TG!d-Q@j z5;IxmelfPd!3qq{JF*G8524PsgQ)wu7cDqMq)$hzeAzmaWJTS#%huk#55R}^p)P0a;Pdx+aVFJ7@Swmx{C9!2!Pq2o~)$HR#c`x@ z*^o3$Zi+8iW}xVsfDfu$uzmTj^5zE{C~4OWN=)4f>hUtnpm=z0m`b` zQERyc_}DF?(qe18?$v>JdTLS66*rlm^&)ifc}&k8C($I?bm(y5=#^p_ED1Lvi*h5d z?e7B5bW?fTmks>FGyBnwKPI`&Z=xBijPRYuH6d)24BqA|)3n8DR1tHWep@V{-D6Ff zwRItG-K#=AHudu4z(9FJK?5FLU=5i^>fvRO20c0cPM$lsjLhd%$u)WwFoQD}S^Kwm z9J3bL?by+Ld;dJV^;8go?SpBKNOzf-f03uPJcdr|*X--NX!4S!5a->*S5+M0FZO$n z4=q3P!*1lVE6q3A>(h;xJ2DPVspl}&RtX%j_#5?I=fkqMd+CGgB6*}=6uUGx4__K@ z5^)Gacv;4Q`y)$Ao3fT-mpqbJ*Hy{2Bk%Dt*#I~hwnCoaFc#0RiG`(;{K?SANA3{B z@qVHe)aU6^sL7rMdG~*zu=NvYr{=(-e-23`ov=?-)oDq=hG}e-Lw3>f^V(o}N}!HX z6_KEc0E-Q(a5^~;)t98>=m8I@(R?Kw)9YqIzZ%fc+7x)|c^eZiC|u|}q7cGI58>m6DZ=^aHuU@|Wj&sDqG^6LiyS+Iwytr5 zqQ}J&cMWsMc%}rAQByc?jD*nP6?jTNpUoYkePQOzQcQQ%!2^0OWYH@TntQ~+ijZ~U zS&V`a2d?0YhBDH!?Z5&}Ll!skDAREa;k~dN#Cq0wsv7Bu8J{%+NyGPNAeJB zdaH}DY&{Nrz7&=8>zMwjC{Cqf5gXf;0!EX|nY}y$?7WxI0H<+K@k<`ASs98e`Ww(w zw>0Vu?tu8+9rAP1Ft%z`J}ql=$1`(ZVe8_zOtbbWd#{&-Zs#LV_nZognUlcuujbQC ztI&!QXy1=FD@ zQjmX7G_8!C4%H*L}_qs=EnWuBFJe&jrFjEqBIl8qmhz-K@Io znJ~x41rk;+q-foK;tTv9O3ygL`ivUFUcR?tlY7}fS9vUj4)LXe)WML{^osrIqX_x6v6q_OC_&8f zP?Dz{V)swDg2@+=lUR^~P9%t6(c(T60FCY@Cb zJx_dsUE*y!V}vHN)YuDIu@T&^k7X#`84iJ~6S#q0n?a(R4O45Muv?e<x$}!c3qDD|)oSgFjpFoA`fnnEe)5$DU3#B9+L=IKT1>I^1k!Ugi7c zN(aiJy)=c6{xuZV`;COT7kN7Pu9$BT@49vJN@k#6z@nB$qPG}65Td>VHjj#tpJ>|# zdb?LZtq85E`roF0&SfyB#0XkKdr^4A5`L+-BR5UBL|SD!biC`Cd|Ofy)*FSO$BjL7 zX{!b(^4Vl#JrBJWYhiCsRZuypPV=mtu)W?MhsuM)@hCGCK%tFB!Qbh|>St|-% z8(UeBaW*|vSVSvIqT!Ea|DbUwfzfluqH@szkVJJb?V?T`n(P41`Rj0*hZ$U)HypmL zF2OsVRal5b8nN@B&(3tn6LL6{lb31F^mH27JA^G(%wdN^vI`}{GAMj-xO|YOC)*g3 zjyWNrq}Xde1x$^Ho>tdzz}H#4fk72Ek~j6WuVhk3f9cBqr#}8;fH?hh5S+Um1Q%PW z>wgT8Ro@Me;F@$+I!MN1m&+Jm6u^9!YNL9-C3~~b9Cgm?F_q>Z?3|g4)eST7O_m!L z@`-G6as*q{)gte&JQyR|x1r3|8F%Nzv*L?7xWPM|Jt=g@h$nm4t3Cm&;oCmEY8=ky zeLN|TIUB}oyyIB2uN|ApB5=Zo2qsz zP26eiT<&F}nWTm(^Y`zH8=1yl!a+}-TNb{LTd$=c1WR{w1&a?CUGdn)jkU@WT+8=y zi{r)#s*A1*$sMs=$(uvM^jjN1F(JpH1XehE3vpt$vl9ZdT); zxv!Lj1ZWFBHEI$c%Q|6XSSDw6>ks~EnXb_FFiQB;->UGzWhQJ|c}XHJ=O1lTIeCW{)&TMN8C-wE0 z`nlR^{uscIsHOj!f&b4c0o_$L+DZd|%%Eq4bkp|(YQ)Fl&%al)oquGrE1}P%H#>FllfSQvhnJhL(~nTR-dm)70*w9*V#6`m literal 10209 zcmXYX2{cvT`@RSv$&g4&6b*)=TK$u^&epqmDkiULJcu??8{r^Kr zJ8if5A6zHi})gh%fPln#~qUq3f44xMZ-Vm~e|RDM;c!d!bX&v9d&VL}z1oSf|b zjg|j3_9Ee-O8yxZ1==DaeSW`D&|)fV4|xp33)XX$o^KU)HV5K9!5r?@#EkBW{ zt;51e?%WmwH{qbqAVG&{VbP0e+{U7Q407?|(vFufct}zBZ^L|;ck~Z#KxyuqIqUJG zOdOoH@(}v*`FN>=5e}@&!^Zv7&8(;B3UjPfg>#zag<#SL>Z*~jxcLe=L|>w@Rt+>f z*_*3?I{ zxqEV=xaVhGCz?zNCdWqz*Iv;PuADSZ=rX#T8}~$!D?8tWdn7!Nd*i@+h#U|H#fO_w zs@hy=+odIJ$aChVl#HNURx6IKc!6EAzd=Vo3qN+0lJ{5rxl&OsIHN5M-Sn4n?Yd*g z4~u@dbI?qQouf$Ng2*hN)xi3zy5WWfI;Ik-W?yhyV&=)g~+p_yCgsN!@ zYYGP-;iDq=m6M(Dskt3jdVeV=wf8VDS@{@e@tI=IjlvSn$fi8r*YaZCiql0L^-t!U zKG8Z(=dN;&s(&TVqNJ49o_?5j@M}HqYep$&^~ExtMOZ0MC$E~9J?$Xp^M_o{zOX!w 
znq~>-idzDY8dY&(FPHMpR+REKIv(ac-B`veytJOfP0TbozcZ7!Kg> z!CED7kU9>dZiiW~ICs3_xD$SSn~jppCR7=>kjT5#;pp*h{8q1uZ~9hY$1@L*^m;^Q ze$rv(zMW&c%Z<@Q`8>`ZT|)gM7vO_Ksjx@S2nIS^h_X=yJ+vW~OzAiZ4lfl*b(Aq| z@=8Tr=MpyS#SgkhgwpdjQb?SNH<)ZN0mnl(snX8CU#G?y6~2?68x@nn~*E9Nd;hv^@e;OvE`A#P1FtsVa!{goU++AEv% z$f!e>!hFzd3}$~T%!Xxk*O)2xv z<8vu=W!RDBrN&5SEu9b=a#rP{sC2bTSC8>&BNa&;?%R;N$@u`jpmz3WB2T5q{`Nr>>a*M zj`Nm6=m%dMihf7VTQ4KVHH`Mthk4 zk69$w>mGetD~@X)QZ!q$6p}Q{@k-DnFy4L?(^a!^8Ak~gzBos!Zi|6L^mA%`!2*l3 zwIKbaDDZAokP?k#dfHP7KW|ATF7YBz=wnXMrklO>Zwl&3CgI+;0$O@s1-?b`!75%7 zE8PEMX3D0J5WgzC>^wqb$NLh_)KYjUohZ0FO$n=y6_P_ex~Qfuju+mS!=g78aIbzl zU8l@MvT|9`@2g@;Q_S z&zcQ}Z6Z+Sq)Yx?%B0gnSFznwe-Mi~9?n?`v zRhy{Xn{KkeGZ3_!*W&Qgc_1Zn4d(@1zz-uUY1>FC`Mmuc-tJ!nB~zEcl%)W=b~=zz z^N374H5I?ioQiq!hIrFu5e9h=(z8#wRe)tY%-itE! z^jtYsJZvkoRWc9VzIM=P+i!IFHF@m4kjg}tPQ(6$QS#X08}nOK1RXuOuycnBof)En zFFdTlA|Qtr{!GFpjmg;EX3Lr$V~KtZLq*G{0vjxb6+OZ1?o&agBU?UV?&l`@`G*{8 zq_i^Z%em~!G6NjJFjAux5c{nxKW3IF$sub4j@6L?-!fjS^Zi$?W_q=7NW|6lVn@z1*)}lki@(A(4&K!!LIxg8zZ?2hwpZh2TP|@)$)82y;qe~ zemP6!t_6^Ou_~N!C=JF%?ZullE6CC`6~-l`3QcvNQa{yx;v!m#WlOwZ{?0sXjo}b$ z;(%#uW$C+JEZR&nWxBR5!gi_Sg8S35$){J_aN{)_YF;&q4BKXzWoY&uryC zd)0|o&nQ1mT_nD7AI1b{k=R%{a&Jx@E*ouS-F$P%cTN=PvD`;H zl(k7tWCL^`SO$_UGO$?lIrIFR5o%?(lXESP$RFcbpsbP%lC`>!-1?bxt<}I^4O-As z^PPHg_)sw36Sh3`#pj)IV0(5hY&Y?Q+8|5p+PfVaKP`dpKev*eNHb={IYTqCpr#Yfz3gSb*~HV!?w#a6xyi77;R&=qXoAAV zGV<#)#nlrUVeLj+^jejU;bt#b_nAnN{QX!5Ra@#+u#ZaXj;&Rzby#ONg_>R8z)W;H z0(NQzn7j8nbK<=*O5_HU?5GYB+oTKqpUyE}m-UH(+*-^?XTkZXD!q2B1bzM)z_4f! zy_N2Pg8Y04&{;&ZziPocjyC=}xCTpR8PGi|CqwMlPUymV4EXjA7gjBXx^-u%Xth6j zZ%IUcjUA4XENszG0?pq>n0&OA8tBM^ET)<92Jca|$qCejxk)svbkTulk0P3mpcQ); zQ%ddd;ss?G*P#iv=9H+}mlJn)W%3}r#q^A0li>Em2zZ{_MMd5|r|jQQ^xT{XVJ3#K zdgTQ0IPO48= z)cHT9ZvzgH#dT?%$`8rZPHZx;oHEub(FgMGH!-_5tpoFO8t}5Rz-&qQRt!J20*fj> zbC|^vwEIa4&W($~)}}jTn~N#7x#qE(d)4uCmk%8Kk^;RYE(Et{!iSh45-jk**^_Ru zp8xJro-G5eJ;P?3zb8XjTmn6IVIp#ze~^v>NxI8H7qmk2apg%3QX=Jre_W=Z{G0}i z+q#q*-ndSEzeHf;ZwqRq-AUG|)G+TXkC4sbktDq4GaZ*P8IKtmQ5)%bAZ+CkY%PTT zoEdOCX%_|(1u{uT0Tg~7!IjDa@}wl1HQKq4xqNpeabH_Ovt{H#WobRFNQ^;u6<@Y* zUL9&nDkDz#Kmx9d;O6VM=+%c7WXH(K)}_aclR1*#$ee9`cv={`18-A|13# z?*ty#CrFIlw9s4>f!I+qyP z%8)>P26m@kVg5dIrLraoRKHPpCUTX3kn|R-!;c{W3F+gIL%pW`C&Nw6FFadjH(#Te}A%f*n?4BBF*3Av+l z;klavgt;hSZ0|LCZ^t_}#kGvdcpymydNLR}s|jWMt*GAV8K9v%7b`_;NR?(VtG=m; zEIt{Iu8~)0rjj3=2$4r~;e6bud=H}NPqV1zjWGCikS^S`37R&Fv;4>@Xr;Ih-3^?O zDG-HEuQ!leKA}+1R13!wPBYfOl(Bc@BqUF`O4`RakchUKkeqCeCTr&~i`gT1iob`h zJ6ejT9Q{~hjkoN|HTAr-h=1hu$%$l9mJe-pK1R+in~dNb0e^mqVEgA0vh>zNG8&o) zDfefA#@CzlbKN&JwVpb97L2fbU&(u;5bEA?IAy>qkI#|ldeYnfiTQkG7 zQ{2ULma382sPr#>qjQ+4-`RbFQ7OhOK>dWNCog`{KV!a9i7t=ayE_ ze{$)i>C(Mog8cO&rj71SW|HzneDzBKrYAI|1rpV#`ALWVaJtv&nf+I#ZT5+C%Jh7L zoFF6e5x=_kCa*-iiJ$a#h1o(w`{n>K%8wBL%(HrH#`~{iaJt75NT(yiJRDHkdYL|K%TyIml0qlN3aIzcsDb zkTpGZ_ybQt!19e=z2SsJOc1y^Z!%Rd@a272=wKT8uGn;`hpFjX{Q&+h%W_ULM%Q`JNeEsh=j?e!D8NF#=#!U$n4%f*Lo4w}pHA)l6>IzR!5& z@Co3_Z$rf!j4;d73c4C&xH~5~awmUZhkHJS3p3`+2yI$!!=(-hs8Vqj-Zg9k_xg9V zYlZ~(rukIv6~pzyRQo{g!ryV!Li;(WW$AL={(5ky<_Iy^lPi2tsK-^?q|L4K+%3e{ zKIp1yEBx~J7;>Y>3Fmg~#bjO$)TUSovn>}0%VtFj8x<|FUBwt@$?xEbpUl9|Z4SaB z%lX1RKmTG=iX`{XZB_2KW!5e+5vJd~3V)Wr0>4^u zu6glJjD0MDTRqIV$_{^-&h@~2+c6GDZGADyOojWs-$VHJus_$MQ$^U_be5*|iNYQ3 zL0aBZ#_QZy$}9G&;B}AYbDU}lIllA|C*tr?jyPM+@sX+Gz5I~L5wEG_B>y_ZvllPs zlzk}Y`N@}YN<6AKX%>0B2ajENnro_f@6<|pG0_z~Wz{N9Xw%!~rt-1%L(6#U50>%P zDJF5$>Z&+@eD-nl=T~xK6-s$CjVd`&$J=N_-U~tNuq9pg-+snM(*j-7Z*f%3WoUF2 zLmXGw!C;Ub%4yd@-n|TgOw}=PU6F>|Gq$YNrTwU{`+?eo>?e|;j#NqeG}FO3$`r>M z;==R=G;qRZ#$QStb06E|z$FDx)LIL6nwI2(n#mVi^FpK`d=j`C;CE0Lm+Z`x1xAvJ<(A9|hJ9p(0ho*L_b9foE`r#G4 
zw=j^IwX2qX9Mk+hE?5lq-i~BOff?#_Wuu^aJ9sgZX_re98kgvSs8s>|xu=sV=xM?^ zvvfH2^cxWy+sjiO{78I=A7o7*`#xHCj~;nBM6J#&L&MRd{LrF6h&(WlEPA|;COI6y z^z~J+5fu?3xB4e>YFLBuRsm% z>Pn;Stt~i9*PYxj8`H=72s7c9Jn|rvi5kBeIy1MBkOPNl@Uwj+&1(m-t@=$*bo8)w z?th4n@)G#FkBb9)$05jNv*E7K$;F)WwEV(Tl72-FvPB;=lCNDDJ)d-BWX0jKWhy*Q zSAl`qh7i5Pn610YMRS`qQ52ogp6H>10rMkQxVS&{xaSalwn- z)PI#bsynqIZ&?s&zLP{IZ2d_MpKIU}DRo@E8KaraWN!Om;#0+EIOPk;os1cTDTspUHa}Sp zyF#LokWP*y9>kTo!$j=%ezWhl%m}~uBo$sa20>3S>=RqgPCPgOh08>#-)b9B4thj0 zpUJXQXH%HBVT8W?84nJX8{vxDT8unhOCs-2B_XQ1aO(I}`oU~7$|tyzoH2b?EOQ$y zNONQYgSsH$?MAXSCy~B;f0iv!eM0=a=i-t<9XxH8M`sj2qZ$V1iEXzrr6 z@EZdxVD!K{Z!@))%47BRF>rih1OEJY5N_Q+2t~%vSj(rkDChfq`oJNSO11=(m7_{D z_0w!vIO7mwq}GPyLx*5Ae+t|>8c(G+J*UQdIc$%fB3z`2RA?1U>(^J3xQL06*=CQK z4S6JA_B(%G`!G4;t4MWkzM!Q$jL4#q7|fG%fj`CPsdfbmi|xy>>7X?+h&}+?^M=W} zQA6nF4Zyt^eR!HAiun~DG}h0A%9t-8K3|=oqB5WT_a%c&YST6Q^yG}-y81=pk(?_K z(|u1wy6vf;znKY?I>ZiZ??cz!vrvI+0Rdkd$>2%_nEL294c?N1^PRnE&z{RP*D@cR zCYF;XpGRbQ>UsLIH^eOJ+A0E4b{MtuIPG1`AoH%5F0W94t>MKWFIS5PE9>d5%GIR# ziUAm3wx(@?3t-ZM3~YMcL9B|C+2jX~Oq{3({0UiWX6IQ+5A`&W4yDa>!`>}q>dJWX zeS6(lZe58nqMM0P*E72LS~03VRKb^bZ_}->7trSxxwPi+M5H7W!y@`=ZvSjp)T9B^ zuU%nZoaiRG4{yTMVOgA8_<-)3yB{xfCW23WIb6A{hYO8%jIbk((J68oPCSWQo% z-N6J+la~|aE=d?{K0*?&w+kL^J4DwlSB9F&8}U-@GsdJx3{zKRgZth>rX(T5w7u3D ze7`l(<2TZv|Lr7HoS%SqKi0tUj)`cx;|A0E^%MC$Qbn#jdW^jTj>yh*f}5|$oo`P_IL= zKQil4a+rZ?(NA#@V20b!90QvhHu|oDaRE+n<*}1;Brpq3eB8ziRPNjjz z$MW*13VfHEfvk!s_qby=)*lvynvA)yu3;INzOG?iTZic3sXrj+#Sst&HjxR{18kqo z7e*!X6qGEkg<$Z-JKg(LeV2gY%y}GcAj=o(D0ly9L{ApuMK6ei8*|pHWOT=itQx_3$vtsOCWnfsOEKW7L zBEYxf@kv2C8g1=|Z&OY}OGrLj`1J%{{CdN5Z>9)7d}0avYZSO63Q1(|7*{qHXo7{D z6#n)bhyFq0f{Vjtg1qww=HD#k=r7)ab55$G#cmTQoqhyk6ypWg+NRP@ zjwVEU$iUa0CgwqGAFXh1z(=Ac?1_bU;bwrrm^K*-Tl3_omMX!^(h_KS*_ZBXjDxuj zJ4v{GDt62|2#UVHsdSMMEU1%Ya;uGTO~eK~<|bf5KbEsiI-#bqK50 z2KGk~$$v2e71o`mrV){tE|zcBAC-w07Hy;#^V}d)ejSdRT|&-mQ^8@a9n>kV38pz| zU{kyq)c$)wQw;~{lSL}fV%N#Ud$zIXU-#1a-rf8!OJ&&4zieQ-`W$?xv7dCgx(k~3 zY=n-kC(J5vg=St3qxxPRwmZyX7mw>>VkT&jymDXGbj2gS=ujJxx%rL^Jn+XGzk2B= z5odaGx;^Fv9b%I@>&e|TK12=c!(DfGqViK61|p?k>gF`OA(KdJglf#xd5?(+2jF9s zJN(>x2&PE}vrz|c(&G~=Nr(O>JaTP>k>YA##79rGvO7dRU(yEpqyols`=+tS|)j*bc-~O z4F9I-XGLM@EH!A%&Z4Q2LV8^+5a#p)evD26`^jBIw!j-T?)Ss}i`8Vmv=^*Um4lWL0{5V?f0E}IT$eq>BqSW7>K3P&(`)#& zxMe;`2~WnH_MgPEHk<5xQx3ePd#T=ue?+7C3(f!Aj+67J5Uns@6*WXNr5p76g>$5@{|J45(VoZzY{9SH2^e6~i$bx@@Ig(9!u?1XI2;b0 zYfWi|$9IxaI}1l9%fpj2E&8E;2Xxm|!rCQYnV`#*e*6&+&Nli#tG;QZ4D|B z;DgUcTG*fiYvIKERRFbnLFE24FgSC8lpe_--@`R%X4EvS_*#t#i|-KK;-{>k&s`KzY(HgD(Sz!Had03yZ9HLBqkP+J}g5UGx zvHRV6`p~@>-%DI1&Fyb_9_iv;ik!Titub%qnD#PcBPxMZuGqdh#A$+&G zNW)un@Z>gEIC;MjzHVL(!VAjW$Es=6J@yxK&1;ww!i@PC%Fjd8v=?M$!?-a`{~FaW zlVc(V`Jf&AnAYvcr0YKg(qBdSu%dY<2D(iE{*G^KSr}mR!vip*Zoo`pLn8k3t0&(w zHc}uxozv~FUrB$bj^@05tn+SQPGazANMjK$QB@ANG#X1|Y) z!@MXD)c%x4e{Vg=>UbrBLCsh8@C_AU`C%wObO;1RpRmhv5}USJftj&O9*3&a;L6yZ zv!<%lOnO=t%0IAzSie{}_9`EG43ofhOC<{9lbN1d4-#<|Q6?KRNo zvw~^3918XJtLcMnRqU&h7iK;76}(MP4)AUcRdLkJOL<;trM%ihMLd7$N}hRh2`_PJ zDQ|W`C8yrHg5&?Aio^UZ<$WfVoQ=AroVT47oHuv#dDq7LJ;}zUysxe$JoC;X-o~?a zye$30oR}?zyv>=1c};2sycWqy&c(87o>$Kij)iS0Peb7lFX>-3&(JbSu=|}7uX|d& z>FTgI{F&|(1iJSY^Cv%>CeXfgo_~G83x0jlC(|mLX7=mH9@ExYjNsb^8MD*fPfT%@ zky+V?eZ1yl0#obrgZ!$?C44O};IC+!#BW!*YN{Ifm^VJ^6#r*V9?yJ0&1{d-27cay z`KH#zL;SLte|avM?tDGla?XveE;43U6A7IX9;)oWGeUpNeJ3?`ynRP_(6*qMF{hrG kcj)-Jj_ZfUYM`g=*s1U^zo3x7FyH_4+llW9m59*)f66*xxBvhE diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata index 
7eef2ef15bba26f49eb7e79079714b5c7015bddd..febe7b205e46a15ce78f3248344fddfc47a3eb3e 100644
GIT binary patch
delta 147
zcmdnOc!_a>yh3Vfs%f%?foYO~Ns5V?WolxwrMYoZvN4dKY+`6)IMH4^!=N;Ri-Ccm
zBr`WPz9hdSF{hB#-68FY_Claw1Rsz{Eyyg+Pf0C?%CdQjM(_fKixbOIQ{oeg<5Mz|
mOA6ULBe;QrMTwR1WvNBQnfZBz91*NQ5d%F7J%d8dQau2Xqbj8U

delta 117
zcmcb_xP@_oyh4hlr9q;lk*PtFQId&iN}`2{shOprp-HNxMUqi!qUA(;ZLfq^5nK!m
z3?-SlsqrQGC5btOtnLm)-yTdlz`zi}2b4)I$SlrJNiBxTvU!U{@B)R46U$Ol;uDMG
NQ!
diff --git a/tests/data/rllib_data/multi_agent/params.json b/tests/data/rllib_data/multi_agent/params.json
--- a/tests/data/rllib_data/multi_agent/params.json
+++ b/tests/data/rllib_data/multi_agent/params.json
",
-        "Box(3,)",
+        "Box(6,)",
         "Box(1,)",
         {}
       ]
     },
-    "policies_to_train": [
-      "av"
-    ],
-    "policy_mapping_fn": "tune.function(.policy_mapping_fn at 0x7fda132e6c80>)"
+    "policies_to_train": null,
+    "policy_mapping_fn": ""
   },
+  "no_done_at_end": false,
+  "no_eager_on_workers": false,
   "num_cpus_for_driver": 1,
   "num_cpus_per_worker": 1,
   "num_envs_per_worker": 1,
   "num_gpus": 0,
   "num_gpus_per_worker": 0,
-  "num_sgd_iter": 30,
+  "num_sgd_iter": 10,
   "num_workers": 2,
+  "object_store_memory": 0,
+  "object_store_memory_per_worker": 0,
   "observation_filter": "NoFilter",
   "optimizer": {},
   "output": null,
@@ -110,7 +118,7 @@
   "sgd_minibatch_size": 128,
   "shuffle_buffer_size": 0,
   "shuffle_sequences": true,
-  "simple_optimizer": true,
+  "simple_optimizer": false,
   "soft_horizon": false,
   "synchronize_filters": true,
   "tf_session_args": {
@@ -126,7 +134,7 @@
     "log_device_placement": false
   },
   "timesteps_per_iteration": 0,
-  "train_batch_size": 60000,
+  "train_batch_size": 30000,
   "use_gae": true,
   "vf_clip_param": 10.0,
   "vf_loss_coeff": 1.0,
diff --git a/tests/data/rllib_data/multi_agent/params.pkl b/tests/data/rllib_data/multi_agent/params.pkl
index cd832aa1c3eb1713e608fef452dbe168746e4cfa..192cf7558830fe2e280e383cf7777e9ee669a7f0 100644
GIT binary patch
delta 10700
zcmb7K2|SeF`kxv5n(X^dMD|^jEoDMQq%h5DW{eqTk$ptAOrdwQXr-jIC|a~AC0Z0M
zT8Jdls#R2||9NL5{ciVv@14(Q&YW}J=RD_ppY1&7ol{nT%&9`^W{Q@kgdhZ;biNr+
zhR%t@tYI)?iA)wBje@$Btsw&y14Bu;KPH|7IuSetHWN#sV{lkDo{V8pQi%LO8&1h%
zBIJpDucV0Qu-J?^3>MF(Fz6Tpg~f(uqvjiSi}7Sx3=$hdW-uu!3_8DnCr@W!2n;$A
zgJolgbOMxuGC=Lnf_9=7dD&Y^>reX_#$l5%1R|bDGj!Ac=Q9?A$;KoS$rL<|$O6wv
zSQ?86tq?H~v9pne%H@=xjUv7%TSurv#08QU)wCwj7;CJE^hB0b9EZlHU}K1MwiS~?
zkFmny!4y`3aIY8K>jk%=xuWW7zbP>!N(_fd#1JVlWHv+;T?8E$Juc3nQ%DRZ6eC7c
zU}6ar4hsV_gjZ_=jfveL(TQwWtf0eCF#TkkWi_3f)dB%)KbUjShKDSZ6I!iPZlsT0;(t2-HZU
z0VPrJQ&b(2#Ym)WUGWTCB+6DADpAy`>s3%jA)V@O$R#3?_E7u`HON}pQCO?XkXD~Y
zSA~MA<)k5s{Ml>&d=rNF0;wyr-LCbBwI&kCuc{3D<12B
b#QjIb+&b}cfb+sNp=nnu1-SMR*I4)Q_ybXzfb8xi9;cT33arQ(PJkiz0*%^vEf#>U|Y*eU&)k+Y!)&0^$r=(Q$KFV=cF3j|tgY%G>VP9)Me){JroB`8N3u`7ZexxgUD0r8SeyCwK59
zF&GM+!UovUG$=yH7TE_C>S%Kxk{^)=c#1JZHi(}jEQo9%GSETL;gfrL5;O)-B8ko?
zKP5jWKLLC?bNH~f1zQ0I6OYBm0^b2Vc_6AKTQNbTz*)r$jSrrYj*Ww#k}tyz5``vc zbZ~-#%*fk$(!jD8$AU;iq|tzZ1i|)89-sUnZv@)`kYBgaUN+760VYVn)6W4e2h^i}{bDw1x8YOw5&$J_x?K`6Nj4C{Ot}Fu_K^vWkyq z@X6nRrxGUy1OfmOH~|2qh#_(4!mVwZ@l*8yRXQv-@;9nFsPkkom;?@%HYp%s z-wX@}OQ$n{!+@QU1>^>NL>_~QT{Ec}xGM-A`Kvt#?jY}`>H?0yB6(`S?HC+FJOv*M zM9t#h;ux@dv-nY9VlEV~t1CvG18AWVUA4*DAziJ>+I3w$E|DjL!NkxQAm$6_r_KcA zf-yW1AP9k|SsZ~2|0(E=JVhWEFej7AhyfmtflX&hH65Hpq2V|PE=DtC)Eu61X0c5GAI|04wr!hsIx&kicfVD z(2)7$(#gd_Mf#ee0%NGrpN;HyxLfaQEjV_o$s8AfJQ%yj@bAvGY#!!jA8ux!#rcj(@Lo&QRGc*#70ACTb zl8!)cmcbW$Br@>4Wz=Vp7%(+vX_<45{EdC^15dc>NEehZoPQgW{p|V8;Z~^qR$<{B()M6wTfdGIz$%Z^p zftlqA(EnzQuABl-O^5X&JN4`z(5czru|V(gcp4=hGetM4^69dun39F$H|pkJ!m#)K zA4MP!Z{Y+CHa?z~jA6tJ=ce~)MG*z<;E^H|04`-sMkcz2W0ply!?yhDzm3-l>{u7irNTlR$#9$ zsh7c&xLZcP%@fGqx*TH&2A*GRg0hpZMHYz|d-2vOCp|~nUp>OUC4Vg$e2$}tF zc%U&;-RS8K2wS8nut;?_f{mmgC`b|#cExNY3K~5yk~ed)A4n+umSI9dy+T7U zXuk+P^#NGfFQ{M&ofz~PA8Fv10Hh>R8YP6uQza&Wq!U9UVwrRdi6~hA-wjou(do(@ zoF;~Dffzu+^AyCP1`~N`zM0(LV$olHl-dc8l@lxjq^iQOJQ+ZFvTG=8GDdh_VDWj9 zlZhy(%EE*cd>oNYW)OhiOj-j%5XZ#O;J{{X!F`zva#cnmk;$YGfW-*n)t@ZUO+F|D zh*mSe`7ee-$I`&Yhh@gVw9_ILoOy^(?)>nsR!H7JE+%gx@zPf~yZh|-cz z4TQnTPmk^(3Q{R>@1Kz|2vX27;O6tBLE;V4cm{z;1KaLYE=2Vc44H})u%f6t1(JjN zgkpm`gZ`RU2~q)`CY*2y56A>cB85f4QD_u)GKNV^0ONo?aQ|u$V2c-;%H-*JnypZ! z;Uy^Gh~Ri9+_sdN7!zm!2ih{cog%Riv96>y3EQt+tPw|EcgLpG1Ct_ja z;%T5ChQ)#Ris`PXftuQ;sowx=;a+G0Y*54P53BiaU6T3*Z~+;zDOXtIPfIy?l314T zP{yZz0%X&y4c@kGhs-36Z-3L^{y1saHvx&l;$Y>{YqzG%=EGMc{#aGL+muYi*rj=`Z5hy)Cd z2m|{MEEl05hUILoI8yNWlK~b7;?jR&5HDEM^zl;kcnlNdu#7l9Pgy8cVbcmDOu+fz zFfd60haqs#;er;F~KOoSxY3A znEd~&Ffbl9r6$h*qb4q_ETNvf8>Iv<^)gqlE_@MX$+Wp{a4{|WAWPWf`*>mCdh^{01w7+gArpGBboO|c%w0_kwXKk508hb=CXp`cMFQ3Iv&)w^3RnQcY zN@=g~&1oR!pSE1nG3+7Yzo^w@=CzLd180xjj%(;Nc+Y*ImaG^NcYL|%(&FQEu2ez9 zrI&cC{PDXjD2hiM`ynQmwpdSMpVf?_r447lh?h!dX#EhWO<1>YLsmjx0J;7rQ+~G2 zu!`OMVfo@?>yJ_`&+i=ab=N7sRI_1?RUTxpS98Vkgxu<#?-(g*v^R-E9wAi>bd`_t zc}xBNOYilTb4w-H=83+ve7v}NSwQ(<)11}apCf2m3&i?oJ#5dZ3}@H|wUt%SwL@_O zwjR;_tL65(NcWfKCmxl@J^e7d)k7=%ldh$_RJ~^B;>g(Jda9Z~)*tjV&o1N65KU-e zH`^Y29GqxgswfjVix^LPI9O8<8D&5qmzWf-OFxyjP~$MSrEl??jL?*D?EZI%kLmj9 zhr*ABF_Fz;7xu>goHuIgbvG$^RU4IcOTJm3q*QQ{Us++^p^c4n_ZwNqZfHA{cXEfK z*-I-@_$xR6p5qqCk|pcq?PbUh@Ly^qwqwF3bQV8;>gKG$S#aIlbl^;Q(R&hG52Mmv zIZl(*H6G6!Gj-tMfOpzwxBI0%({mgsuG?=KQWtqt+`)G=4|am-^nP~8pVjK z95XJdthq?;*w%%ndw63zZg$>1b0}x=NUUk86=9&^qkq%E^AS7Z(wKw0%jdA~pQI9c zhrf*8-cw%bxp&6Nt*`t~ndlM3>uDG@>;B}n#_F>Yha)t@xDGp6XV&(8I9AdAv%Wg< zy(HbJJ4N5t!}v`Bd;TKwh^}kt$9tNJq?R3YInB3oMiH^iEa@jvtMcBfWk2|tE;fWb z>@xG?pwFD2d4oebeVI{KnwLKfC8HQ*Zm7B@x?-)zHD#;NoPdkB?k?6@PLR9Qo_fvr zv4ttK?*>-&DXom#RQO{S!cs|tF(ZGG)4&eM$U0ora`5Enn-50s9c~qWgZhH{CT2D! 
z+rDA+?%LMi^)1yyGi`NxC?Q9?GDEr;Vf|f!DE(Pgo*?x$=4XMe?nFXlB4_E)6R*`K z`e7*u8of{az{w)jLTguNB*V4Pag~In$@z-@nT4MVn_vFe&#eo1ayxwa)^%rO4Ktr@ zNFIB?`Nht`l0Zv2zm)a`?WlG4g3%{qcbv#w>3h>FeBs;cv{Z+y_0a+46Db61ufeeu z3w`Gfr+&_$UcT$LZ~1je3(v;h{`IeCWm;?L%(>bkH}XOI&C>espAgmP^EBDV+m0O6 zm){U5(pB70W*2RopT!-q4NJOHr@FF#6J77Fxv#XP=+=1|PJ2!%D1?)e>!fx4qPiBD zWNk*w-BwZ8SnjuA@4RZ34pyqS$m9b3KwjrTHBLW!(_sV0(X|RkkEh%>epgk|c<}RB zY;m6YP^vZYltHkgss*k2X&L3^(e!8aBtwm==cwcKBhMx(tIQINxO)xo$9fXG4=s&< zcIO+X-FSVY;cCShae?HVW3R(7+btJcDKF4$@LIS-J!@2^W-Xs_qQ9PL^5Ae>Us8h8 zqCoi=9zLa;f{dC~`&IPjipZ{l5k<$#p1dCq-R6iz&Sgnv*L@l;@rqE4FurNc-xC(E zqBPrn&FvnCk*h4OB|(Kd5t1%$bBBB9b(wj~^Wm&RG{&q!R%cUd1-ia-*Q1uS$Mdm- zo_pIJE0+h%Ihc>dB53&&YL)MUTDV;ok~bbX@Nj!z zxut>|ap7%IMnB?I%>MV0UTYhNK6KtYbboY9+`THZhn397erBURJ{4#B`1YeV419z<&XWEufi`#egn|(^mTF%>DP|WX||J{Km{{2GRzzxpLZPXUo z=sN*tb1gfg4-l|zAI4KY7jp~I6O`k<^_BE{KK3u9!nDg}j6_J$whuRmx~rTrPmz&a z;jG=1*j0Y*gUAR?_E~oA*2XIXJ&}%-+8arVklxCI@i_NbhxpH;?c%34Jwo6X54X5j zjr?qWhj*YtEXDz|X85sU}mtbJLt)r}B?Y1X~{omr!Uai6?t*dmCAm=`b!m}hR*7xM z>*aofuM1@pyFzhoWmg^)Zu0EM8Xqe@DbxFIVEDEEt0GL1(SoSsN0m>R7h9VuoWH7_ zcZ$45cC&^|+$X{QRt7V>b;Ig=4b(x0vOs>VbrqLfp=HFQsKdVQN%2fr~t@-kK*?2qRoV6dD zvpRwB{eps@?KmdCcoTh|xpLNX#`kp<$r@haA-alcfpc}`=j0@R| zUwSjUyNBG?_Uyz7xv+%7j}Ezq-}Jl~xfUVS{M6vMYlDmS(iiT|f%wI%F>j5^chAav zxX%CBTNi7Or5Qf>3;iFA;}^}aJ<`-vtR-4;m=ajtvC3I#@z@e{>rx%g%Ti1j%ez8Q zY&KXkL{GhPpTyp^)V|2m`#WLlId}D&`fac74Rw<^U1aeL^W|H{DvJ4sToMbm7y0et zK6BM``BwWa$h*2i98y@Y)z|5%W(cM`qR6{s?(z8U<*D|=-rzw`lOn+`Nr(H)Da8qWo7uQhqb9q9Sc6JI1{;FQ;^z< zfKx6XB!wKF6d4>pVz3;R;E!3Zn6TH<)uE{2O>P@s_@ zt6U1V{(Zgdj$5Mc_AL<;CR|0O;3 z03d`Hl?HyfR4yq0CHrN+&r zH^T98QJS&hFESWkmmWllQ*o4ReA4#*XzEH_E8SoO1n zdp)jp>#D3ubrC-?RL^di&F-xdr077E{VB3;`=4T-YHK#2{oe7KOFcW59kFh&THorO zA-kx4=g$(YgQiy)n5wECP0Eken%>I|QQcCttu1?Wy?nR+SX@?mRP2#^N8-5cs$H|t zp)a0X2}xR}P5)R>-M`GN*4Mb@5FyvD+4&_`y<=aR$Fq{R3NxTo$g!d5I>*2C zb&WqYKjV<1_Q>kAMp#THtMpN>cDl^F!n>Dlw`JDMYCT|?nkv)&e4gE%w(<2E>JPu< z8B`NS=JviwSY;D_RHk3F*KhN#Z#jiQ#s*0~p%)$2ZtpYRC_38N8@QLs{I=VDe`dg{ zivy9)3%QyXY62ew(8ywSkBG&y<1Wr;Cxo#rvIo7vrg{McrdbUEl6LDjACpSgo>`k)6Lf zIcAyOk`a7`9%<~H?H7m|U@x-5%~hss^L_A-;-l_cT$FhRk-1=NqoR>v&Xu=&0?kFw z;^!axaN6EPtlnSI0 zynUgJ06nf-=ejq8L07Xrw_Wl#x>wV1vFP@tfqVPiCysIoR*QruE$};C6`+tOYPuo1 zqNMn(-8qAH6hiAuj*a~7v$NaV4tBSUSjH~a{n3DKIOCJiQA7>J>>oW;OIU4yy@v3f zqx~{lvv1LsoTd=juU}O~Bh-~S_Y(uMvt_e2;{1AKg1J%OZ#O)9H72TtlCUj0GyB>7 zwPL4Ec$D_7tl3g*#nqx}6WLiF5}fr9(yq*lW_Xv-*V=v1KBD|0o`%P zdXt2^o{JPDR;&CmDr=d1Sy{AOWuT;MVt-a7Dfraj+)VEH^DnGX`{;z-cMf}u@L!yb zb6m9ar0CmE_18zYy$#msDk*+Ydi1m(%693xV@D4x^dA^PDSTQ=Iey~ozJ2=ooTu5C z&8>qv*8=68JC2i$5a&H*2cN}qH*e`jYhT)R^V4F_BSQoYccm4bquu?VX0`V46Nr>^ z?P=C3L7P3?aUZxRHODU*6!KT^ktFjlp4ZQ$K~8i_|=8Iffq(J>5mBUb%Bq-Rn7ptcKq3J&d6vpAzqyOmJgT zAx!y4#mb|fnW=k-FP>-xqGT_3CLWg$r<*-?a?dAN$uo`EOJv5<8o{g9wOXIq=7DYp z=laHxcbvPm>p}EO+#Av0)gn04vO1+kV{!L${_~ELYohy~$#FAnzlKXhq4)QRy}Va( zReNO18d}E%&kSGnYDu4k^ZmYP%zl6Twy~o0%$2b=yI8Ya<3d$>9Omp&D9j58PFZmB zL>IZ>)n`N1u+<@XCOgXA943gos|WKfBB{zxI27)d<^Nvb}iBzR#+%Sq5cVj+}yxO?j$>Y1$ zf}ffu?fNE{y)Q&Yr00z2&U1@zmGrPlo_o>j(*D@={=v;oD$bTQXIiTa@z{mlU1NK; z+w~r3u$`agk$)R)yw>jp_rl879+NQpt#zXEuUud2$%&#q`_EO(pD7x_DmbzJ#dw9@ z;I4^ijHG!@wdO_#nH)L8sGHBZR*r?vTCyaQYcXDn)H)1)8YpnS+@3A{Bxm_NsaLB` z;`T`fOAWtybkp-}u>E4>rM6BxUOjFgDzbO{bEK2h+OmzK+eEYu{1nJp>_>F@cdrO(_@MG8XvvyO7x{VX+Tzg|***e&Eq0TRXiIc^xAC;xyBuYo!HdNKvJy%)0nY`f$qq<|!*Ea|E zeW+>8C@_mivic#1H}K&`x!VI{@le?vArMSL6^YVI5M+ahf?ZI+f^uRjo@XJRb5F3v1#pCcKh0PoO{)|Ehg82mb(*W>- zjHra3zRvHXVM0(NxSdZ16lD`e8LRSHY*g=8p~#aGByP7ut}ObhLC6kcWfwr zJB@}93c=C{ol@-k~;&5GypHGSzlTuAb~{b8J2bn>Y$K%AW|d_ 
zJjg!}Zd$YZA06(aKw()K8R`EBRWnovw3Xd}Oa=j2CGM>gOXW%9z$^&L*-P_6STSK_ zN&uchWjdf3+;H7LI0UL7z*c#}2{|oMK^g^12*UVaX*d8wB}C$xjtAeK*riZ9EcXEk z9#{Bey&gls0};LyEDevL2H;6}8aW7qBVnmjCI~qYw@(NGvO*9QavEAhFdk}T@H#o| zcC~RLlL)v7CJ?R)90vP0?%;-RfFnROD5ogNxmp1&6b*i~$AX6yT?M!>7+*3DgJGhn ziA-lEsI8>nOmj|P9$^A$t4+||2_TdDe`$>WZMZ6M0(4i_vj}DIWBdpt62>ni2nYKP z3s0FS%1cDFMbdjy# z7d2^m9Z(OnGlc!|G$@Gtu#lHyAcB4be(mq)-{$e-ffH(dYyw&~=2#;KxCfLMV7lD3%n0r!u{ms%SJ2 z%ivR_;%OnlKUhNGp|;hgKtecdX}Ad-+q8ib4HQBO1G_b3Wpsckh9CtSp+zB)2tHa^ ze|!*)%8ZBPfu4YZno?}=RYy~fLkB1ZNs4Of-Z0PzaXv zS91u>83rsI98AVQnxK&VVJC<63qeFDX+zJ10!SEakT0tdL#1IMOdu0a6oiNZ1wqs}?0e>-$7Zd%P_<+g}*`fXn zO>kCIal=pgOb7~t$De}rC9oXnhdTN7Wau;m5eY#-MxkQa{sO?_N8Aq4g8aJyP+_pM z3<&|LWwkow|AONu;eW;P7Yw2~MDH*EVwpx43LHW(<|m1Nm<5*MAJD*h-8E7$D1LYv z@x@?+gGmt>axn83WDfrg+1|g9@x#2SEWZrJLUTI{BmCUq`cI7Dtg)!=KP>cLko|5P zB24ZF2(Vqxgyq-_eL4C++{?vs9qp$>`9YQ;vK@yU3wA11plj7E{tz{we08pZ>;YFD z%3xVh$Ti4cL;Sa>SRMPj3FOefg#b+HhnUbUV;k&7ps;}q2dryZ@VLG>9q)_rCEy_D zt0DO#Fi=onNJ4?Z2L}cm3ai@xH7HgE2!RO0J_Q9yEfKCb)U2wR8w!ba_;6?m#b8+r z2*wW&G5cF+Y+Id^e}=|CeCltShdKNTX~&S7+5fdkVFteOu<+0T+m;0c2ljn)xwR2pSnlb|o+wJmgS3g+lO!db519 z5XR915{+c(e@)7(A%7Ek1T9$b<@Y%m&K5ZTT-Tut05}T70P#Q~esyh&0-A~9u<-J5 zp$Qj;P=N+Il7NS3;0aL_ZnA?57pMSQ(7*t34HB^v0koruX>eEKIksu2AMgg%WWn{y`?O4Wi88f^svs7u4W@h-X?N zeOP%DJTE|$i5+ksl%E0g&;bdr(W(w?#BNwvG@6x$&{?Ge6dgXvmV5}r(Ul)RC=c^t zumc2tfJuA`gLnoNFc0E$=w_AHDwCH`eRVJ$8qE5!2Cs!u$RJY-s;);%kvr>FMjd=! zUyOQWSkW&LaWvm7;q4~zt)i zF+)A`jK{GzO=Ol$FFwIHW}rE$7<)diL~VQPcmi z+E~dzPR6^JSB}imLyz#8ZhI^xvZYYSA|q2q>RWpS81ztnd$82T9wnKb7mEaAj%lo{ zL*6<$*VQs9T)On~WpmV6{eb^IM6Rxl;BcwKHl|ElP@~;lH-$F zp7^w#YjcyWlKv(u$I!dHQP{Or=P624{XHt2=4Uot+^IA-q*y%nNvD2DVu@)ju{&RQ z>16K2z5!o(AjNnQd>oOR#iN?i+Zt%j>CNlSBhBcV573& z7mgKkxI6pW*(CLf&%w`{DIE#hko&`=IMIpETU^&4I*EN?-+!@Jz&)=iUvPxNSC?zO zAyVeBUQb)`VC8t=G4EqonLjbQyCyWhG%7p0=rr1YLWJZjL_3Sd3&t1+2NWH!J;p5@~T}59^ zt|LD7jPyF}Tl5i(FXb@_7d%q%$Zi=NNgwx=c<`eB8j39_`j23bQ0@zSrq8C z+ZMCNHF5j}%yZr6r09wYF__xIFVhIZ)X)x{r|hJBE)Pw+uQ!~J$)9~`Myt?%r9(fBX?n0e)~Wx6(a}YEV*vlvlR`}yMY>T1 zE?Ng?BVWExQ#fI(#h7bA`bS!he|F$Fi?JHS9Vj5}QXJzAeIjr*0C(6d&xZ&O+Pqq6 z1;@f&Yjb4cW0fq^y1`pjjb3B^>7>^=A+{CSkJDNet=@M&zb<2a>al0UCUcxRzUxax zPG+SK7<(h}UDxGy`n;vw!Ct(!CnJh1X0ld|{ej`~qEs3Od}*yXR+b<2X! 
zrV+kaIA2jmmLvN-H|jq3OmU6%Qm{Lx z=^AM!80y82IdEdF46}6Wi22(CW8dnLqgyyCT*kxVZudqE@Y78O?~>lAJL_1ncjU1L z3x5`8oKf4dhwgo5A<)q*tD)hsKyJ&&&Y4+Jrp69Q&T~HWZ~H>hzD#!LPUxA9FY79Z zR~|hu{zmGCx36T{mvH}eTN{PDQomCk9#J#eR_#G)V&bOPub^+rzNmPT&n9+bIB(ab zkrh#QFMN_uh{l_}^uBz{_#=#Yg%_6%b5QQgg>_~42KoiZ2{tXO)P4)1nlpCS+q9lx z8$EgO5^hHFT5eTWsw~&T;doDT%j_6SQy}pMSDw|V)W+&SNm1V7ZQ1H`omV7dE|iJA zX;6^!velnoIJ_@J$ibJ+XR-Oe0e$ro*pyV-8CojgyIdP3K2bFTl|y?L1^I_%7> zKg4BnQ=kvOt)Wu$exeX}o|TLGMr49fc@|BUJMPBHi-L%$TQ_bVsm)%wYJQg8%E4)> z$!9i~c(U1A{3Di>GEW@o$@6xK$Z+Z2UZR z&B?TfnK)+DzCK)@{<5s@;EC^5Z^a(4yATl1cNV-Kce=marJ6RFX(y!CVz;80&NcdV zQr9Tq)2l)f$qd0i z)#r;))He2g3aDK<##KczcFX6l-dspqEcP;9?^&HI=#_H$*a7p6C(}tsF~Dxp>-3 zM7Cr5p`f`7D~PSh&*?eL^gcF;SBb>PsYmZ$1RcNlsS7Mk)wcX_8L@Nf-5zo2(G9%K zA%_+ozwsI6SB&jFNgW8?ZPD>+EJGt|UCra~(Unt5y>a(5NFS0eIwsw`QyH>oE7)u3 zQLQ{}YZJZCcY%O9+C1%${c3tdbv{A2&ra}-=0~rHHOSVKlnZX<^tU(G6>miH72U{A z|9Z}!j?sAW6erN)Xm`}Uyu#(c^=|dq0y|25R6Hic!=iPc*hI4p*UFBh z4`0ehJg$#AvoonEcOkyGt5uyt6g<@-th8tvH`Ee4QYi7>ljIE zZVuIae`nFMF6O$L-8UwkNV}D7h1RY;b1xZ=_ zi`RW~b)Pn$U*R^?OZL8*^m70aN>I3+6dU&BEyL{fHp#E)%<%gX8gTjvrFjkr z+ttkl_~p?j#D~}d<(O*6;6EjakDyy(7hLor5p*cIhH}DwxcZ;7RJhA(s>}|h$#pOj z;vfH)Ar`D5AO1On{%b}Kvsq1skx15n&;K0oulBs60=g2}paRZ`i5zh4KcZq~ z!*n7a)G2vKJr7G?M;QhjIuv-ePqTj>Js>SdyM6MxfOt+Z-J ztU`*T#EzR6nu&A>MCimQQp>*rMC{y z)I2K2WO}VVZ=9}UKV_3e(oB>Wh?=0Enc(_(ry=JX$iJf3I<&*Y-FDgDUE9ze?ZLfp z8HJ8!+T#UR-j^z%Z;X^=uNR5CaNV8bYO8Dgqnc|58ZU+z;@#D!?dJ~_YKh1ed3z?M z;wlPTwpWdBG+T#Ja(FLp@8_2NHsW>R5zoc2R8EQfK0;Yq=`_tm_R=xla)*!%dTKX+ zv~yXaU|pq>+tV{)V(D5B@9b$x!qsJWE{YEaf=kK{{8^^|rfYV2 z9cwSBgg>H~|fc_y4~xO06z zUqxS`OqqZG(5c6EDfb_n&$nfCzq@^`4}0yeqa>k}Bvw5$8YSnM*T~2bJ4@e=c_pcKJhvRrWrRfb4 zyo8(V9~1~v3K3on?tBG_JtgH@Bj>rz7_od4r%kWx9R`KxU3G-a;@fy_b&PViHX%Kd z+Y81^a80T?)$wZM&vRU#Vm-E7RLv{Y?D}pgt5GAWgN}Rqg8xc;qW9E27X=Ud4MpF1 z{BY)qz!8C)iUJc(HG^q|Rc2+Ko!jX<9GTIKv^VX`{YgSfOZ!J{_A8V4s;fsr;sgT> zJ#6NUXQ*k?rEL!Fh{(Z)?;|iW)lm1k9MvH9HHUX6p(0vjZoPJqvmEouBjawUt3|G-gx;{ZexYLZ(ml>?ZD$zuP^)Zb=5cyJq{i3qknK;Y2i@mBc)Fr8D4}@t56isJwUrij>}S_h2FH=i{6jm#i=|z5 z%Nv|7CyV+a1wQ?8?XpL4hLU7(^h0qun=MNL($N!vDYs|08c^lHnz_t&x7p~1+D+az zr#8x$SQD+iUoBgNj`W=TzR{I5xkQ`^N&&mib=0$U{ZGOwi?48JaTiVQhsRI$w)BUF5z*-xGteP@-ATr_qHZT~|f zuHNw0#ISd?z+G=sni0=7;T79Ebn+p!N6H2KenN#xN|92Go^HGC1=YSSt*12y)+DlB zeK=r+6lgb3sh30Psow)iCT`E?OK3wG7=_L8?$?$N$$Pi%mMJKbGmk^jP7zf3g?E(R zD{vf6B0PR_-Y#BqJ>A>K6%j+Vx{#rjz^x+iV14fnpQOv`ZJyK5gMp|#PI~9J8#QO| zjrL{AFS92%Cw)4h9eO?@HE2VBTeD>Gh%Xt}y{=5?5D_<*{9kO>^rO92 zuH`RVtKogocwkAdcuBWRyz85Id7H-Gjf=f+Kg@lYn^#{^k>#5`ii;CCmbBokA>p`x zwg544t8Z~Z-5~4KNI|-})@;dfy1g&jcdos8=)!#;8#9eyfn@J)f8Ud*tN9-ZByy@m zS>R@lt4re#qNa}Ra%^ba_o;Ct`WTm8h3Up}M<0G$33IpBi=HCiy$4+0FDqYE9JiddbdWb$+Ty0ly-v5?jy;9$Iog}) zW64{N`r@CU+{ov8t*)%3A31w$u(e~^)4*_a{3}~7K#?viZRvYZW~67TuertQRG7dz zuI0l)6F2l)!nWsp(?i9Y?pjM$pwo>DT`^bcBZoSU*2mVon7gjc5t(tQ<<;U>gG^6B zq3(m%D^AA0HhbFDEo<3p3>n;}^at87@Dmy|?Gl zvqaoeojd$*N)~D~y1U-Z&+K|BIwCg>ylC0BZ%;ycoXs%;-m&R{6Q}Hb3x`5F=L7bE zizWcJB=zdn&%73T4sQT@n9mlk+@d;xPDBRYoKf#+*UTsFwRYu^O01Eu(dERCS#t8* z_MCQT4nCRs&Hg&_8P|Btf$a_txaA^7bvvwt&0F@{C(tg+d6HE3zB515QTe^w(d7=VD!YM`cLkqb*ewYPW~8zGhN!SCN46Y)P&NpGlA zIi~{(_Vl>~h>d8yvl&R8G{793!z3iVEYv;R@bYvSW2B&93w3|wKKT$2!#b0wh0nD^ zR?@*aQxCZ^ZFZ|m7rKXbkTlMI){M3hO)!YS$)6Ftx3m9=ZX9Yrb>ezXo$yv3Qs)`5 zMB#JD*j>Hmdr$6@FK+I}PE2od&r^wZKGm+YRnXBm%f zvdw%FU+GWOQm!&N#q&<1!+71=aP?YpsEf_HG~M_uQj(Iz?BmaC-4;hD4_>c&qBL`+ zZ8}WhRL?WSkY#>r&x(*{HtKUKuX@$a_PWr|9BFw0VUEV~oT`iI=?YH!;(Hr8uF&dK)b3xCfc>g;eH(5VBjFw7gQ(zc-O+8qHIv6Y$=ER+0ef5Q8xbYM2 zLgCEzR~6f*+AY(v!gDpuS8%#v?vBzPgQ}wwqm269A9wOwg=06N)jzL!{8A`Uuc)WE zWPC2N$c3_JDT_BW@ZNC6({5{~ZiJyeUAR&y@u0+xyRc)SYm-qhVyR 
zJN>}GG|yw_ph3rgFS3&>_n$qGOqlFrYa3=V`_)>8q4y__n2h$j%iO7(&R-~REWx!Q z($8d7s_TAz7Q(;uB)WGO$A3tNk3j8IhEOn>M2F5sK<5mCgW=OK(2fy90Ny;Og#_WX zsMx?@XbYRc{mZ!oRtn8vhg*JZ1N_*>LOQruviq`j+<=e-rWyl@fwlt}3afvI!~5{? z=8F%3M2LiW(EvOKI@3b;*9wkc5)Gjw)Zv093h~=6-0vIhKX=zzJ8ukb3?w&nW(31z z?Gdm@{P?#XN}Cxn|Z8u`((; zJS18u=_C^>A(8lB$7RLGLzgUz4T+8sO0E-1`KgExED8x-xMEps!1AbNOM>Ht(!tRS z<1M0>En5;~u_AWKvKWj1x6)L((qzaWLNd^g((}gNCgabypNxJE{8KenS|NGQMvGL186ohJi zQvcLwB^>A`;U<|bRG%RnG}2AlPf~o86KZ&Pc)0!*zy5u5lZX`#4$5?r5Z{F*GX%WE zk}0C+t3vQjO9gJrxjT|d*_gV17zQ@ zq0ijtu+(=J*;hi#!e`_B-!il(@;q<8rw-C&%~9CbL#rLLDERYZ3aCkhzV14hCv>99 ztS`_xc_+?3`VKa2DOf0j^rV#59(3~@g3UYL)Y@r>_^RHk}>bBBAyE@3rK1o$SX3*qFT~hHc=FA^D z;g{il;P*NVhZ*if{bf%eeeOJy6fkYfFF-f8|L8Jdk-MvK{5Ae_A(ljA5NctJO$;8 z_Dmz|BPYFMG=yI(r=N)#v~&JVe%r_*F0Xk7&MMZYEiQ|2UX3w-Y^^>`@6|;kzLY=w zWHyGclEnwH%jk60Y1%wo6~8AappEHtuK95i4w~E~`sE^pw623?4?E1vPmO^Vqi&FK zNP^Oc#V~5S7JjbjVyfz=;oDOM^e+2rU#IwzMU3A<$+lae%kvXkQ#Ov>;|iF2&3x9g z|1da?(MOv@IruYaHgciA;CWLpm60@MRL-ZOu?5g0H50#j_2XY?AFy}vj3I%g4^yo$ zL$lz6Flurn9eFi@0;d~c>}8y8C6K_fwjsP>==1tVFls#<#v4B3!DOW_J~A2u71j?~t78xUsdNS$jJIUB zm%SGSwokJ29QTVkSx=)`-z{)~gCW^p_NBSHbMbeuE2m+W%CtTuW1`Fs3XL5^Pe=KY z_TgzT^xfGxNG>huT9$Fj#LheP6W^ zaw-n8XO|Da3yUGRTfv*e>TSe`Kgr7Q}`vGgwgNozNrk8ZHPtGtSFZ=5M_ zbQ#JvyHc%XI(a+l;FIky*-xV@@M(Y-I9>e(? zC%3Xki6;ajQnWECv7ELh*`e9yQ~cy%n`z3Je5`jJ#gEQWV^J!P1%EsG*xtkaG0n^d zN6TulH*KJ23D%SNE?(R7Ln_(`XZDdMHG9F2Hk z3R|rVaG6p)>{@z@^L<)^N*fZHUEfSdsWyjW=KHCB;Y>*VvV&I1Y2dtoQ~pZK`(Do z$dO|789x7k2dH-^(67F!f*?bq=7dX1B=pz};S(Rg$i#b-&YB+{Hsbb(i z5H^1M0h_A)Y0}r7=&|37_y=FXJ);FGTlpq~D?)g6>@^Ds+)S#YhEmIr5dK^J5AbKl zNWXtEysch<617G+!&jA^@=So`9xqvDZ#ubr8%=rErc7;LF7I`CI}R7EqRS(4!O~_k zW&BP+*AIrcDU4%PSIuDD%|@mum_Tm>-PpaT#kg{S_`hv(4OI<#0j@{A*yy`O_#<~b zP7ONF>hnclcqxj$gnYrrg^Or}cwRr(D50k8PC-U+EoQxrpwE5wd`;XNoILHS;ESva z=QdNBv=41(gCk^-WVTXk`ylMLk;nA3gJ8^;;+gh`czv%IT}Vj6g4fn?)vFBEr+wx{ zW932StS)XJoy9(V=px?>sSw%T4oU%Oq`7wvxAbobQ{53m{K6#qF29UZ^s0e0oiy6_ zTN<`ml%uBKM6xJLqe*FLP$Fw#w3R9e1{H$}WDDb{n((HIPOr5I3ppH2Y(=lH6xx&_=U* zShj7K=)&_^I5O-U>-`W#McwsG&1yW|5!K?FZEx|L%MSYEyo1a%H85S>0M-^?XU$Q0 zRP?@|!*;{1wD@2kMaC_po6)8CcBcdl(JH{WTb@*=GKA}LiiDwiwn5K@&Fq&$4-2Yt zX9K=`#5JwA*t`MV%sA2n_v)-<`y^xtQkT=>Yuky7{t02Dm1y*>i)`sKEoNEK%XN)7 z$bM?o;)06t$Q?OD=D*FE`BE7QDA|i)Foh4jb{Wp(6wpJ(R4|z1!k%BqWU}6#?3n)^ zIOJYR?US4^=GSKGoLWXhRJ`DVg&BJoX~t%Z*~Q%VX~3u}tJt69G}vrcjlautzzJe0 zFzz5r3VYAe=k0)ScbQWE)GQ*rlYv_~YbGR+9 zKo&2(@ODidlg#grC1wrO)O?VBN9nP&$*FL_`~({>k%T`+s?&Xr!1K5=U23#JCHRc$ z-x}Gq-Z^|s?M$rMaupLxqqs9gL(pz^1Yb36H@l@&fP*B)LG|=J_U=h8Tfe*-_j*p` zi>&X%^!bn2p>G>N?bZ!g*QU!}-0BC@;>TmUrzV%lUxLaZt-OLy26^Vb2Q0mduf_@4 zo1X#f>yay9srCikmA2rt{%crtO(kYoyn_B?{ZKUN2U{%{$lm`3w79m2j?^B38TU7! 
diff --git a/tests/data/rllib_data/single_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/single_agent/checkpoint_1/checkpoint-1.tune_metadata
index e83b72aeaf2291e2f177e78504c94fde7e5a3392..55b72be28978f4b959b001b57aad8683ce0f562a 100644
Binary files a/tests/data/rllib_data/single_agent/checkpoint_1/checkpoint-1.tune_metadata and b/tests/data/rllib_data/single_agent/checkpoint_1/checkpoint-1.tune_metadata differ
diff --git a/tests/fast_tests/test_controllers.py b/tests/fast_tests/test_controllers.py
index 58967cef8..bef765396 100644
--- a/tests/fast_tests/test_controllers.py
+++ b/tests/fast_tests/test_controllers.py
@@ -405,6 +405,175 @@ def test_no_crash_LinearOVM(self):
         self.tearDown_failsafe()
 
 
+class TestFeasibleAccelFailsafe(TestInstantaneousFailsafe):
+    """
+    Tests that the feasible accel failsafe of the base acceleration controller
+    does not fail under extreme conditions.
+    """
+
+    def test_no_crash_OVM(self):
+        vehicles = VehicleParams()
+        vehicles.add(
+            veh_id="test",
+            acceleration_controller=(OVMController, {
+                "fail_safe": "feasible_accel"
+            }),
+            routing_controller=(ContinuousRouter, {}),
+            num_vehicles=10,
+        )
+
+        self.setUp_failsafe(vehicles=vehicles)
+
+        # run the experiment, see if it fails
+        self.exp.run(1)
+
+        self.tearDown_failsafe()
+
+    def test_no_crash_LinearOVM(self):
+        vehicles = VehicleParams()
+        vehicles.add(
+            veh_id="test",
+            acceleration_controller=(LinearOVM, {
+                "fail_safe": "feasible_accel"
+            }),
+            routing_controller=(ContinuousRouter, {}),
+            num_vehicles=10,
+        )
+
+        self.setUp_failsafe(vehicles=vehicles)
+
+        # run the experiment, see if it fails
+        self.exp.run(1)
+
+        self.tearDown_failsafe()
+
+
+class TestObeySpeedLimitFailsafe(TestInstantaneousFailsafe):
+    """
+    Tests that the obey speed limit failsafe of the base acceleration controller
+    does not fail under extreme conditions.
+ """ + + def test_no_crash_OVM(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(OVMController, { + "fail_safe": "obey_speed_limit" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + self.setUp_failsafe(vehicles=vehicles) + + # run the experiment, see if it fails + self.exp.run(1) + + self.tearDown_failsafe() + + def test_no_crash_LinearOVM(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(LinearOVM, { + "fail_safe": "obey_speed_limit" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + self.setUp_failsafe(vehicles=vehicles) + + # run the experiment, see if it fails + self.exp.run(1) + + self.tearDown_failsafe() + + +class TestBrokenFailsafe(TestInstantaneousFailsafe): + """ + Tests that the failsafe logic triggers exceptions when instantiated + incorrectly. + """ + + def test_invalid_failsafe_string(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(OVMController, { + "fail_safe": "default" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + additional_env_params = { + "target_velocity": 8, + "max_accel": 3, + "max_decel": 3, + "sort_vehicles": False + } + env_params = EnvParams(additional_params=additional_env_params) + + additional_net_params = { + "length": 100, + "lanes": 1, + "speed_limit": 30, + "resolution": 40 + } + net_params = NetParams(additional_params=additional_net_params) + + initial_config = InitialConfig(bunching=10) + + # create the environment and network classes, see that it raises ValueError + with self.assertRaises(ValueError): + ring_road_exp_setup(vehicles=vehicles, + env_params=env_params, + net_params=net_params, + initial_config=initial_config) + + self.tearDown_failsafe() + + def test_invalid_failsafe_type(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(LinearOVM, { + "fail_safe": True + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + additional_env_params = { + "target_velocity": 8, + "max_accel": 3, + "max_decel": 3, + "sort_vehicles": False + } + env_params = EnvParams(additional_params=additional_env_params) + + additional_net_params = { + "length": 100, + "lanes": 1, + "speed_limit": 30, + "resolution": 40 + } + net_params = NetParams(additional_params=additional_net_params) + + initial_config = InitialConfig(bunching=10) + + # create the environment and network classes, see that it raises ValueError + with self.assertRaises(ValueError): + ring_road_exp_setup(vehicles=vehicles, + env_params=env_params, + net_params=net_params, + initial_config=initial_config) + + self.tearDown_failsafe() + + class TestStaticLaneChanger(unittest.TestCase): """ Makes sure that vehicles with a static lane-changing controller do not diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index 8e871afb4..fbd78294d 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -69,8 +69,11 @@ def test_parse_args(self): 'aimsun': False, 'exp_config': 'exp_config', 'gen_emission': False, + 'is_baseline': False, 'no_render': False, - 'num_runs': 1 + 'num_runs': 1, + 'only_query': "['all']", + 'to_aws': None, }) # test the case when optional args are specified @@ -86,8 +89,11 @@ def test_parse_args(self): 'aimsun': True, 'exp_config': 'exp_config', 'gen_emission': True, + 'is_baseline': False, 'no_render': True, - 'num_runs': 2 + 'num_runs': 
2,
+            'only_query': "['all']",
+            'to_aws': None,
        })

    def test_bottleneck(self):
@@ -148,9 +154,12 @@ def test_highway_single(self):

     @staticmethod
     def run_simulation(flow_params):
+        flow_params = deepcopy(flow_params)
+
         # make the horizon small and set render to False
         flow_params['sim'].render = False
         flow_params['env'].horizon = 5
+        flow_params['env'].warmup_steps = 0

         # create an experiment object
         exp = Experiment(flow_params)
@@ -167,13 +176,22 @@ def test_parse_args(self):
         args = parse_train_args(["exp_config"])

         self.assertDictEqual(vars(args), {
+            'algorithm': 'PPO',
+            'checkpoint_freq': 20,
             'exp_config': 'exp_config',
+            'exp_title': None,
+            'grid_search': False,
             'local_mode': False,
             'rl_trainer': 'rllib',
             'num_cpus': 1,
+            'num_iterations': 200,
+            'num_rollouts': 1,
             'num_steps': 5000,
+            'render': False,
             'rollout_size': 1000,
-            'checkpoint_path': None
+            'checkpoint_path': None,
+            'use_s3': False,
+            'multi_node': False,
         })

         # test the case when optional args are specified
@@ -187,13 +205,22 @@ def test_parse_args(self):
         ])

         self.assertDictEqual(vars(args), {
+            'algorithm': 'PPO',
+            'checkpoint_freq': 20,
             'checkpoint_path': '5',
             'exp_config': 'exp_config',
+            'exp_title': None,
+            'grid_search': False,
             'local_mode': False,
             'num_cpus': 1,
+            'num_iterations': 200,
+            'num_rollouts': 1,
             'num_steps': 3,
+            'render': False,
             'rl_trainer': 'h-baselines',
-            'rollout_size': 4
+            'rollout_size': 4,
+            'use_s3': False,
+            'multi_node': False,
         })


@@ -205,6 +232,11 @@ class TestStableBaselineExamples(unittest.TestCase):
     """
     @staticmethod
     def run_exp(flow_params):
+        # Reduce the number of warmup steps to speed up tests.
+        flow_params = deepcopy(flow_params)
+        flow_params['env'].warmup_steps = 0
+
+        # Run the example.
         train_model = run_stable_baselines_model(flow_params, 1, 4, 4)
         train_model.env.close()

@@ -408,7 +440,13 @@ def test_multiagent_i210(self):

     @staticmethod
     def run_exp(flow_params, **kwargs):
-        alg_run, env_name, config = setup_rllib_exps(flow_params, 1, 1, **kwargs)
+        # Reduce the number of warmup steps to speed up tests.
+        flow_params = deepcopy(flow_params)
+        flow_params['env'].warmup_steps = 0
+
+        # Run the example.
+        alg_run, env_name, config = setup_rllib_exps(
+            flow_params, 1, 1, parse_train_args([""]), **kwargs)

         try:
             ray.init(num_cpus=1, local_mode=True)
diff --git a/tests/fast_tests/test_experiment_base_class.py b/tests/fast_tests/test_experiment_base_class.py
index b3863a77c..8a7a9500c 100644
--- a/tests/fast_tests/test_experiment_base_class.py
+++ b/tests/fast_tests/test_experiment_base_class.py
@@ -1,6 +1,7 @@
 import unittest
 import os
 import time
+import csv

 from flow.core.experiment import Experiment
 from flow.core.params import VehicleParams
@@ -168,15 +169,44 @@ def test_convert_to_csv(self):
         time.sleep(1.0)

         # check that both the csv file exists and the xml file doesn't.
- self.assertFalse(os.path.isfile(dir_path + "/{}-emission.xml".format( + self.assertFalse(os.path.isfile(dir_path + "/{}-0_emission.xml".format( exp.env.network.name))) - self.assertTrue(os.path.isfile(dir_path + "/{}-emission.csv".format( + self.assertTrue(os.path.isfile(dir_path + "/{}-0_emission.csv".format( exp.env.network.name))) + # check that the keys within the emission file matches its expected + # values + with open(dir_path + "/{}-0_emission.csv".format( + exp.env.network.name), "r") as f: + reader = csv.reader(f) + header = next(reader) + + self.assertListEqual(header, [ + "time", + "id", + "x", + "y", + "speed", + "headway", + "leader_id", + "follower_id", + "leader_rel_speed", + "target_accel_with_noise_with_failsafe", + "target_accel_no_noise_no_failsafe", + "target_accel_with_noise_no_failsafe", + "target_accel_no_noise_with_failsafe", + "realized_accel", + "road_grade", + "edge_id", + "lane_number", + "distance", + "relative_position", + ]) + time.sleep(0.1) # delete the files - os.remove(os.path.expanduser(dir_path + "/{}-emission.csv".format( + os.remove(os.path.expanduser(dir_path + "/{}-0_emission.csv".format( exp.env.network.name))) diff --git a/tests/fast_tests/test_rewards.py b/tests/fast_tests/test_rewards.py index 3f2e08cde..ac406b545 100644 --- a/tests/fast_tests/test_rewards.py +++ b/tests/fast_tests/test_rewards.py @@ -7,7 +7,6 @@ from flow.core.rewards import average_velocity, min_delay from flow.core.rewards import desired_velocity, boolean_action_penalty from flow.core.rewards import penalize_near_standstill, penalize_standstill -from flow.core.rewards import energy_consumption os.environ["TEST_FLAG"] = "True" @@ -152,31 +151,6 @@ def test_penalize_near_standstill(self): self.assertEqual(penalize_near_standstill(env, thresh=2), -10) self.assertEqual(penalize_near_standstill(env, thresh=0.5), -9) - def test_energy_consumption(self): - """Test the energy consumption method.""" - vehicles = VehicleParams() - vehicles.add("test", num_vehicles=10) - - env_params = EnvParams(additional_params={ - "target_velocity": 10, "max_accel": 1, "max_decel": 1, - "sort_vehicles": False}) - - env, _, _ = ring_road_exp_setup(vehicles=vehicles, - env_params=env_params) - - # check the penalty is zero at speed zero - self.assertEqual(energy_consumption(env, gain=1), 0) - - # change the speed of one vehicle - env.k.vehicle.test_set_speed("test_0", 1) - self.assertEqual(energy_consumption(env), -12.059337750000001) - - # check that stepping change the previous speeds and increases the energy consumption - env.step(rl_actions=None) - env.step(rl_actions=None) - self.assertGreater(env.k.vehicle.get_previous_speed("test_0"), 0.0) - self.assertLess(energy_consumption(env), -12.059337750000001) - def test_boolean_action_penalty(self): """Test the boolean_action_penalty method.""" actions = [False, False, False, False, False] From bb94c27518182a4dd8e069d0af17bc2ee7496ce5 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 10 Jul 2020 22:19:21 -0700 Subject: [PATCH 317/438] remove line from testing --- flow/core/experiment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index b9ce3ac0e..ca1b54409 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -253,7 +253,6 @@ def rl_actions(*_): max_speed=10, start=self.env.env_params.warmup_steps ) - exit() upload_to_s3( 'circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' From d373965b7325b338dfaa7456b05d14cc8dbcfbe9 Mon Sep 17 00:00:00 2001 
From: liljonnystyle Date: Sat, 11 Jul 2020 00:18:08 -0700 Subject: [PATCH 318/438] fix toyota temp file removal --- flow/energy_models/toyota_energy.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py index b65b7a0c1..b40146d80 100644 --- a/flow/energy_models/toyota_energy.py +++ b/flow/energy_models/toyota_energy.py @@ -20,15 +20,14 @@ def __init__(self, filename): with open('temp.pkl', 'rb') as file: try: self.toyota_energy = pickle.load(file) + # delete pickle file + os.remove('temp.pkl') except TypeError: print('Must use Python version 3.6.8 to unpickle') # delete pickle file - os.remove(file) + os.remove('temp.pkl') raise - # delete pickle file - os.remove(file) - @abstractmethod def get_instantaneous_power(self, accel, speed, grade): """See parent class.""" From ab6732e1164da2fcf700d119728cad3a8dfa97e2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 11 Jul 2020 01:10:47 -0700 Subject: [PATCH 319/438] fix fc <> power unit conversion --- flow/energy_models/base_energy.py | 2 +- flow/energy_models/toyota_energy.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/energy_models/base_energy.py b/flow/energy_models/base_energy.py index ed75efd09..ba5da5080 100644 --- a/flow/energy_models/base_energy.py +++ b/flow/energy_models/base_energy.py @@ -59,4 +59,4 @@ def get_instantaneous_fuel_consumption(self, accel, speed, grade): ------- float """ - return self.get_instantaneous_power(accel, speed, grade) * self.conversion + return self.get_instantaneous_power(accel, speed, grade) / self.conversion diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py index b40146d80..397610089 100644 --- a/flow/energy_models/toyota_energy.py +++ b/flow/energy_models/toyota_energy.py @@ -58,7 +58,7 @@ def __init__(self): def get_instantaneous_power(self, accel, speed, grade): """See parent class.""" - return self.get_instantaneous_fuel_consumption(accel, speed, grade) / self.conversion + return self.get_instantaneous_fuel_consumption(accel, speed, grade) * self.conversion def get_instantaneous_fuel_consumption(self, accel, speed, grade): """See parent class.""" From c0de59b994c84c56ebb2f7c9e6d09b8303b983f6 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 11 Jul 2020 01:11:46 -0700 Subject: [PATCH 320/438] make default highway single penetration rate 0 --- examples/exp_configs/non_rl/highway_single.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py index ff486b3f5..fcd2f2da4 100644 --- a/examples/exp_configs/non_rl/highway_single.py +++ b/examples/exp_configs/non_rl/highway_single.py @@ -28,7 +28,7 @@ # whether to include noise in the car-following models INCLUDE_NOISE = True # penetration rate of the follower-stopper vehicles -PENETRATION_RATE = 10.0 +PENETRATION_RATE = 0.0 additional_net_params = ADDITIONAL_NET_PARAMS.copy() additional_net_params.update({ From 5f6acc2045a7ea0ceaff16f2dce7975ae43014f4 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 11 Jul 2020 01:24:09 -0700 Subject: [PATCH 321/438] use 1609.34 meters per mile --- flow/core/rewards.py | 2 +- flow/data_pipeline/query.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/core/rewards.py b/flow/core/rewards.py index 33960f8cd..20ed1c6a7 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -393,7 +393,7 @@ def 
instantaneous_mpg(env, veh_ids=None, gain=.001): cumulative_distance += speed cumulative_gallons /= 3600.0 - cumulative_distance /= 1609.0 + cumulative_distance /= 1609.34 # miles / gallon is (distance_dot * \delta t) / (gallons_dot * \delta t) mpg = cumulative_distance / (cumulative_gallons + 1e-6) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 558488d8e..f68dfa321 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -360,7 +360,7 @@ class QueryStrings(Enum): distance_meters, power_watts * time_step_size_seconds AS energy_joules, distance_meters / (power_watts * time_step_size_seconds) AS efficiency_meters_per_joules, - 33561 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon + 33554.13 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon FROM sub_fact_vehicle_trace WHERE 1 = 1 AND power_watts * time_step_size_seconds != 0 @@ -403,7 +403,7 @@ class QueryStrings(Enum): SUM(distance_meters) AS distance_meters, SUM(energy_joules) AS energy_joules, SUM(distance_meters) / SUM(energy_joules) AS efficiency_meters_per_joules, - 33561 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon + 33554.13 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 AND date = \'{date}\' @@ -419,7 +419,7 @@ class QueryStrings(Enum): t.source_id, e.energy_model_id, e.efficiency_meters_per_joules, - 33561 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, + 33554.13 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, t.throughput_per_hour, s.safety_rate, s.safety_value_max From 7a773e343f6ddbd5e1836be689e873469c836731 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 11 Jul 2020 13:18:23 -0700 Subject: [PATCH 322/438] fix av routing controller if no on-ramp --- examples/exp_configs/non_rl/i210_subnetwork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 9e415fc65..399ef7f9f 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -95,7 +95,7 @@ "v_des": V_DES, "no_control_edges": ["ghost0", "119257908#3"] }), - routing_controller=(I210Router, {}) + routing_controller=(I210Router, {}) if ON_RAMP else None, ) inflow = InFlows() From 0e8be957940039d4b199a7dfdf5670cd011c0136 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 11 Jul 2020 22:15:21 -0700 Subject: [PATCH 323/438] Time-Space Diagram offset axes (#999) * refactor tsd to allow for axes offsets * update time-space plotter unit tests --- .../exp_configs/non_rl/i210_subnetwork.py | 14 +- flow/core/experiment.py | 9 +- flow/visualize/time_space_diagram.py | 233 ++++++++-------- .../test_files/ring_230_emission.csv | 142 ++-------- tests/fast_tests/test_visualizers.py | 256 ++++++------------ 5 files changed, 237 insertions(+), 417 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 399ef7f9f..0c66f42e7 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -108,16 +108,16 @@ veh_type="human", edge=highway_start_edge, vehs_per_hour=INFLOW_RATE * (1 - PENETRATION_RATE), - departLane=lane, - departSpeed=INFLOW_SPEED) + depart_lane=lane, + depart_speed=INFLOW_SPEED) if PENETRATION_RATE > 0.0: 
inflow.add( veh_type="av", edge=highway_start_edge, vehs_per_hour=INFLOW_RATE * PENETRATION_RATE, - departLane=lane, - departSpeed=INFLOW_SPEED) + depart_lane=lane, + depart_speed=INFLOW_SPEED) # on ramp if ON_RAMP: @@ -125,7 +125,7 @@ veh_type="human", edge="27414345", vehs_per_hour=int(ON_RAMP_INFLOW_RATE * (1 - PENETRATION_RATE)), - departSpeed=10, + depart_speed=10, ) if PENETRATION_RATE > 0.0: @@ -133,8 +133,8 @@ veh_type="av", edge="27414345", vehs_per_hour=int(ON_RAMP_INFLOW_RATE * PENETRATION_RATE), - departLane="random", - departSpeed=10) + depart_lane="random", + depart_speed=10) # =========================================================================== # # Generate the flow_params dict with all relevant simulation information. # diff --git a/flow/core/experiment.py b/flow/core/experiment.py index ca1b54409..38599b002 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -248,10 +248,13 @@ def rl_actions(*_): write_dict_to_csv(metadata_table_path, metadata, True) tsd_main( trajectory_table_path, - {'network': self.env.network.__class__}, + { + 'network': self.env.network.__class__, + 'env': self.env.env_params, + 'sim': self.env.sim_params + }, min_speed=0, - max_speed=10, - start=self.env.env_params.warmup_steps + max_speed=10 ) upload_to_s3( 'circles.data.pipeline', diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index a9742e249..955043691 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -43,6 +43,17 @@ HighwayNetwork ] +# networks that use edgestarts +USE_EDGESTARTS = set([ + RingNetwork, + FigureEightNetwork, + MergeNetwork +]) + +GHOST_DICT = defaultdict(dict) +GHOST_DICT[I210SubNetwork] = {'ghost_edges': {'ghost0', '119257908#3'}} +GHOST_DICT[HighwayNetwork] = {'ghost_bounds': (500, 2300)} + def import_data_from_trajectory(fp, params=dict()): r"""Import and preprocess data from the Flow trajectory (.csv) file. 
@@ -62,8 +73,10 @@
 
     Returns
     -------
-    pd.DataFrame
+    pd.DataFrame, float, float, float
     """
+    network = params['network']
+
     # Read trajectory csv into pandas dataframe
     df = pd.read_csv(fp)
 
@@ -73,33 +86,47 @@
         'lane_number': 'lane_id',
     }
     df = df.rename(columns=column_conversions)
-    if 'distance' not in df.columns:
+    if network in USE_EDGESTARTS:
         df['distance'] = _get_abs_pos(df, params)
 
+    start = params['env'].warmup_steps * params['env'].sims_per_step * params['sim'].sim_step
+    # produce upper and lower bounds for the non-greyed-out domain
+    ghost_edges = GHOST_DICT[network].get('ghost_edges')
+    ghost_bounds = GHOST_DICT[network].get('ghost_bounds')
+    if ghost_edges:
+        domain_lb = df[~df['edge_id'].isin(ghost_edges)]['distance'].min()
+        domain_ub = df[~df['edge_id'].isin(ghost_edges)]['distance'].max()
+    elif ghost_bounds:
+        domain_lb = ghost_bounds[0]
+        domain_ub = ghost_bounds[1]
+    else:
+        domain_lb = df['distance'].min()
+        domain_ub = df['distance'].max()
+
+    df.loc[:, 'time_step'] = df['time_step'].apply(lambda x: x - start)
+    df.loc[:, 'distance'] = df['distance'].apply(lambda x: x - domain_lb)
+    domain_ub -= domain_lb
+
     # Compute line segment ends by shifting dataframe by 1 row
     df[['next_pos', 'next_time']] = df.groupby('id')[['distance', 'time_step']].shift(-1)
 
     # Remove nans from data
     df = df[df['next_time'].notna()]
 
-    return df
+    return df, domain_lb, domain_ub, start
 
 
-def get_time_space_data(data, params):
+def get_time_space_data(data, network):
     r"""Compute the unique inflows and subsequent outflow statistics.
 
     Parameters
     ----------
     data : pd.DataFrame
         cleaned dataframe of the trajectory data
-    params : dict
-        flow-specific parameters, including:
-        * "network" (str): name of the network that was used when generating
-          the emission file. Must be one of the network names mentioned in
-          ACCEPTABLE_NETWORKS,
-        * "net_params" (flow.core.params.NetParams): network-specific
-          parameters. This is used to collect the lengths of various network
-          links.
+    network : child class of Network()
+        network that was used when generating the emission file.
+ Must be one of the network names mentioned in + ACCEPTABLE_NETWORKS Returns ------- @@ -117,8 +144,8 @@ def get_time_space_data(data, params): if the specified network is not supported by this method """ # check that the network is appropriate - assert params['network'] in ACCEPTABLE_NETWORKS, \ - 'Network must be one of: ' + ', '.join([network.__name__ for network in ACCEPTABLE_NETWORKS]) + assert network in ACCEPTABLE_NETWORKS, \ + 'Network must be one of: ' + ', '.join([network_.__name__ for network_ in ACCEPTABLE_NETWORKS]) # switcher used to compute the positions based on the type of network switcher = { @@ -130,7 +157,7 @@ def get_time_space_data(data, params): } # Get the function from switcher dictionary - func = switcher[params['network']] + func = switcher[network] # Execute the function segs, data = func(data) @@ -238,7 +265,7 @@ def _i210_subnetwork(data): """ # Reset lane numbers that are offset by ramp lanes offset_edges = set(data[data['lane_id'] == 5]['edge_id'].unique()) - data.loc[data['edge_id'].isin(offset_edges), 'lane_id'] -= 1 + data.loc[data['edge_id'].isin(offset_edges), 'lane_id'] = data[data['edge_id'].isin(offset_edges)]['lane_id'] - 1 segs = dict() for lane, df in data.groupby('lane_id'): @@ -382,16 +409,7 @@ def _get_abs_pos(df, params): return ret -def plot_tsd(ax, - df, - segs, - cmap, - min_speed=0, - max_speed=10, - start=0, - lane=None, - ghost_edges=None, - ghost_bounds=None): +def plot_tsd(df, network, cmap, min_speed=0, max_speed=10, start=0, domain_bounds=None): """Plot the time-space diagram. Take the pre-processed segments and other meta-data, then plot all the line @@ -399,24 +417,21 @@ def plot_tsd(ax, Parameters ---------- - ax : matplotlib.axes.Axes - figure axes that will be plotted on df : pd.DataFrame data used for axes bounds and speed coloring - segs : list of list of lists - line segments to be plotted, where each segment is a list of two [x,y] - pairs + network : child class of Network() + network that was used when generating the emission file. 
+ Must be one of the network names mentioned in + ACCEPTABLE_NETWORKS + cmap : colors.LinearSegmentedColormap + colormap for plotting speed min_speed : int or float minimum speed in colorbar max_speed : int or float maximum speed in colorbar start : int or float starting time_step not greyed out - lane : int, optional - lane number to be shown in plot title - ghost_edges : list or set of str - ghost edge names to be greyed out, default None - ghost_bounds : tuple + domain_bounds : tuple lower and upper bounds of domain, excluding ghost edges, default None """ norm = plt.Normalize(min_speed, max_speed) @@ -426,49 +441,57 @@ def plot_tsd(ax, ymin, ymax = df['distance'].min(), df['distance'].max() ybuffer = (ymax - ymin) * 0.025 # 2.5% of range - ax.set_xlim(xmin - xbuffer, xmax + xbuffer) - ax.set_ylim(ymin - ybuffer, ymax + ybuffer) + # Convert df data into segments for plotting + segs, df = get_time_space_data(df, network) - lc = LineCollection(segs, cmap=cmap, norm=norm) - lc.set_array(df['speed'].values) - lc.set_linewidth(1) - ax.add_collection(lc) - ax.autoscale() + nlanes = df['lane_id'].nunique() + plt.figure(figsize=(16, 9*nlanes)) + if nlanes == 1: + segs = [segs] - rects = [] - if ghost_edges: - y_domain_min = df[~df['edge_id'].isin(ghost_edges)]['distance'].min() - y_domain_max = df[~df['edge_id'].isin(ghost_edges)]['distance'].max() - rects.append(Rectangle((xmin, y_domain_min), start - xmin, y_domain_max - y_domain_min)) - rects.append(Rectangle((xmin, ymin), xmax - xmin, y_domain_min - ymin)) - rects.append(Rectangle((xmin, y_domain_max), xmax - xmin, ymax - y_domain_max)) - elif ghost_bounds: - rects.append(Rectangle((xmin, ghost_bounds[0]), start - xmin, ghost_bounds[1] - ghost_bounds[0])) - rects.append(Rectangle((xmin, ymin), xmax - xmin, ghost_bounds[0] - ymin)) - rects.append(Rectangle((xmin, ghost_bounds[1]), xmax - xmin, ymax - ghost_bounds[1])) - else: - rects.append(Rectangle((xmin, ymin), start - xmin, ymax - ymin)) + for lane, lane_df in df.groupby('lane_id'): + ax = plt.subplot(nlanes, 1, lane+1) + + ax.set_xlim(xmin - xbuffer, xmax + xbuffer) + ax.set_ylim(ymin - ybuffer, ymax + ybuffer) + + lc = LineCollection(segs[lane], cmap=cmap, norm=norm) + lc.set_array(lane_df['speed'].values) + lc.set_linewidth(1) + ax.add_collection(lc) + ax.autoscale() + + rects = [] + # rectangle for warmup period, but not ghost edges + rects.append(Rectangle((xmin, 0), start, domain_bounds[1])) + # rectangle for lower ghost edge (including warmup period) + rects.append(Rectangle((xmin, ymin), xmax - xmin, domain_bounds[0])) + # rectangle for upper ghost edge (including warmup period) + rects.append(Rectangle((xmin, domain_bounds[1]), xmax - xmin, ymax - domain_bounds[1])) - if rects: pc = PatchCollection(rects, facecolor='grey', alpha=0.5, edgecolor=None) pc.set_zorder(20) ax.add_collection(pc) - if lane: - ax.set_title('Time-Space Diagram: Lane {}'.format(lane), fontsize=25) - else: - ax.set_title('Time-Space Diagram', fontsize=25) - ax.set_ylabel('Position (m)', fontsize=20) - ax.set_xlabel('Time (s)', fontsize=20) - plt.xticks(fontsize=18) - plt.yticks(fontsize=18) + if nlanes > 1: + ax.set_title('Time-Space Diagram: Lane {}'.format(lane), fontsize=25) + else: + ax.set_title('Time-Space Diagram', fontsize=25) - cbar = plt.colorbar(lc, ax=ax, norm=norm) - cbar.set_label('Velocity (m/s)', fontsize=20) - cbar.ax.tick_params(labelsize=18) + ax.set_ylabel('Position (m)', fontsize=20) + if lane == nlanes - 1: + ax.set_xlabel('Time (s)', fontsize=20) + plt.xticks(fontsize=18) + 
plt.yticks(fontsize=18) + cbar = plt.colorbar(lc, ax=ax, norm=norm) + cbar.set_label('Velocity (m/s)', fontsize=20) + cbar.ax.tick_params(labelsize=18) -def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0): + plt.tight_layout() + + +def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10): """Prepare and plot the time-space diagram. Parameters @@ -487,9 +510,9 @@ def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0): minimum speed in colorbar max_speed : int or float maximum speed in colorbar - start : int or float - starting time_step not greyed out """ + network = flow_params['network'] + # some plotting parameters cdict = { 'red': ((0, 0, 0), (0.2, 1, 1), (0.6, 1, 1), (1, 0, 0)), @@ -499,58 +522,23 @@ def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0): my_cmap = colors.LinearSegmentedColormap('my_colormap', cdict, 1024) # Read trajectory csv into pandas dataframe - traj_df = import_data_from_trajectory(trajectory_path, flow_params) + traj_df, domain_lb, domain_ub, start = import_data_from_trajectory(trajectory_path, flow_params) - # Convert df data into segments for plotting - segs, traj_df = get_time_space_data(traj_df, flow_params) - - if flow_params['network'] == I210SubNetwork: - nlanes = traj_df['lane_id'].nunique() - plt.figure(figsize=(16, 9*nlanes)) - - for lane, df in traj_df.groupby('lane_id'): - ax = plt.subplot(nlanes, 1, lane+1) - - plot_tsd(ax=ax, - df=df, - segs=segs[lane], - cmap=my_cmap, - min_speed=min_speed, - max_speed=max_speed, - start=start, - lane=int(lane+1), - ghost_edges={'ghost0', '119257908#3'}) - plt.tight_layout() - else: - # perform plotting operation - plt.figure(figsize=(16, 9)) - ax = plt.axes() - - if flow_params['network'] == HighwayNetwork: - plot_tsd(ax=ax, - df=traj_df, - segs=segs, - cmap=my_cmap, - min_speed=min_speed, - max_speed=max_speed, - start=start, - ghost_bounds=(500, 2300)) - else: - plot_tsd(ax=ax, - df=traj_df, - segs=segs, - cmap=my_cmap, - min_speed=min_speed, - max_speed=max_speed, - start=start) + plot_tsd(df=traj_df, + network=network, + cmap=my_cmap, + min_speed=min_speed, + max_speed=max_speed, + start=start, + domain_bounds=(domain_lb, domain_ub)) ########################################################################### # Note: For MergeNetwork only # - if flow_params['network'] == 'MergeNetwork': # - plt.plot([df['time_step'].min(), df['time_step'].max()], - [0, 0], linewidth=3, color="white") # - plt.plot([df['time_step'].min(), df['time_step'].max()], - [-0.1, -0.1], linewidth=3, color="white") # + if network == MergeNetwork: # + plt.plot([traj_df['time_step'].min(), traj_df['time_step'].max()], + [0, 0], linewidth=3, color="white") # + plt.plot([traj_df['time_step'].min(), traj_df['time_step'].max()], + [-0.1, -0.1], linewidth=3, color="white") # ########################################################################### outfile = trajectory_path.replace('csv', 'png') @@ -575,13 +563,11 @@ def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0): parser.add_argument('--steps', type=int, default=1, help='rate at which steps are plotted.') parser.add_argument('--title', type=str, default='Time Space Diagram', - help='rate at which steps are plotted.') + help='Title for the time-space diagrams.') parser.add_argument('--max_speed', type=int, default=8, help='The maximum speed in the color range.') parser.add_argument('--min_speed', type=int, default=0, help='The minimum speed in the color range.') - 
parser.add_argument('--start', type=float, default=0, - help='initial time (in sec) in the plot.') args = parser.parse_args() @@ -596,6 +582,5 @@ def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0): args.trajectory_path, flow_params, min_speed=args.min_speed, - max_speed=args.max_speed, - start=args.start + max_speed=args.max_speed ) diff --git a/tests/fast_tests/test_files/ring_230_emission.csv b/tests/fast_tests/test_files/ring_230_emission.csv index 9051074c8..342c5c7f3 100644 --- a/tests/fast_tests/test_files/ring_230_emission.csv +++ b/tests/fast_tests/test_files/ring_230_emission.csv @@ -1,117 +1,25 @@ -speed,CO,electricity,x,NOx,id,fuel,angle,time,edge_id,eclass,route,waiting,CO2,lane_number,PMx,type,noise,relative_position,HC,y -0.0,164.78,0.0,36.64,1.2,idm_0,1.13,94.02,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,0.0,0.81,-1.65 -0.08,163.5,0.0,36.65,1.21,idm_0,1.13,94.01,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,0.01,0.81,-1.65 -0.16,162.24,0.0,36.66,1.21,idm_0,1.13,93.98,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,0.02,0.8,-1.65 -0.23,161.0,0.0,36.69,1.21,idm_0,1.14,93.94,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,0.05,0.79,-1.65 -0.31,159.78,0.0,36.72,1.21,idm_0,1.14,93.88,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,0.08,0.79,-1.65 -0.41,158.73,0.0,36.76,1.22,idm_0,1.15,93.8,0.6,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2679.14,0,0.07,idm,60.47,0.12,0.79,-1.65 -0.0,164.78,0.0,46.49,1.2,idm_1,1.13,78.81,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,9.55,0.81,-0.34 -0.08,163.5,0.0,46.5,1.21,idm_1,1.13,78.8,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,9.55,0.81,-0.33 -0.16,162.24,0.0,46.51,1.21,idm_1,1.13,78.78,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,9.57,0.8,-0.33 -0.23,161.0,0.0,46.54,1.21,idm_1,1.14,78.74,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,9.59,0.79,-0.32 -0.31,159.78,0.0,46.57,1.21,idm_1,1.14,78.7,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,9.62,0.79,-0.31 -0.41,158.73,0.0,46.61,1.22,idm_1,1.15,78.64,0.6,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2679.14,0,0.07,idm,60.47,9.66,0.79,-0.3 -0.0,164.78,0.0,56.08,1.2,idm_10,1.13,304.55,0.1,right,HBEFA3/PC_G_EU4,routeright,0.0,2624.72,0,0.07,idm,55.94,37.95,0.81,69.53 -0.08,163.5,0.0,56.08,1.21,idm_10,1.13,304.54,0.2,right,HBEFA3/PC_G_EU4,routeright,0.1,2631.03,0,0.07,idm,59.48,37.96,0.81,69.53 -0.16,162.24,0.0,56.06,1.21,idm_10,1.13,304.52,0.3,right,HBEFA3/PC_G_EU4,routeright,0.0,2636.67,0,0.07,idm,59.44,37.98,0.8,69.54 -0.23,161.0,0.0,56.04,1.21,idm_10,1.14,304.48,0.4,right,HBEFA3/PC_G_EU4,routeright,0.0,2641.63,0,0.07,idm,59.4,38.0,0.79,69.55 -0.31,159.78,0.0,56.01,1.21,idm_10,1.14,304.44,0.5,right,HBEFA3/PC_G_EU4,routeright,0.0,2645.91,0,0.06,idm,59.36,38.03,0.79,69.57 -0.41,158.73,0.0,55.98,1.22,idm_10,1.15,304.38,0.6,right,HBEFA3/PC_G_EU4,routeright,0.0,2679.14,0,0.07,idm,60.47,38.07,0.79,69.59 -0.0,164.78,0.0,46.95,1.2,idm_11,1.13,289.47,0.1,right,HBEFA3/PC_G_EU4,routeright,0.0,2624.72,0,0.07,idm,55.94,47.5,0.81,73.43 -0.08,163.5,0.0,46.94,1.21,idm_11,1.13,289.45,0.2,right,HBEFA3/PC_G_EU4,routeright,0.1,2631.03,0,0.07,idm,59.48,47.51,0.81,73.43 -0.16,162.24,0.0,46.92,1.21,idm_11,1.13,289.42,0.3,right,HBEFA3/PC_G_EU4,routeright,0.0,2636.67,0,0.07,idm,59.44,47.52,0.8,73.44 
-0.23,161.0,0.0,46.9,1.21,idm_11,1.14,289.38,0.4,right,HBEFA3/PC_G_EU4,routeright,0.0,2641.63,0,0.07,idm,59.4,47.55,0.79,73.44 -0.31,159.78,0.0,46.87,1.21,idm_11,1.14,289.32,0.5,right,HBEFA3/PC_G_EU4,routeright,0.0,2645.91,0,0.06,idm,59.36,47.58,0.79,73.45 -0.41,158.73,0.0,46.83,1.22,idm_11,1.15,289.24,0.6,right,HBEFA3/PC_G_EU4,routeright,0.0,2679.14,0,0.07,idm,60.47,47.62,0.79,73.46 -0.0,164.78,0.0,37.11,1.2,idm_12,1.13,274.71,0.1,right,HBEFA3/PC_G_EU4,routeright,0.0,2624.72,0,0.07,idm,55.94,57.05,0.81,74.86 -0.08,163.5,0.0,37.11,1.21,idm_12,1.13,274.7,0.2,right,HBEFA3/PC_G_EU4,routeright,0.1,2631.03,0,0.07,idm,59.48,57.05,0.81,74.86 -0.16,162.24,0.0,37.09,1.21,idm_12,1.13,274.68,0.3,right,HBEFA3/PC_G_EU4,routeright,0.0,2636.67,0,0.07,idm,59.44,57.07,0.8,74.86 -0.23,161.0,0.0,37.07,1.21,idm_12,1.14,274.65,0.4,right,HBEFA3/PC_G_EU4,routeright,0.0,2641.63,0,0.07,idm,59.4,57.09,0.79,74.86 -0.31,159.78,0.0,37.03,1.21,idm_12,1.14,274.6,0.5,right,HBEFA3/PC_G_EU4,routeright,0.0,2645.91,0,0.06,idm,59.36,57.12,0.79,74.86 -0.41,158.73,0.0,36.99,1.22,idm_12,1.15,274.55,0.6,right,HBEFA3/PC_G_EU4,routeright,0.0,2679.14,0,0.07,idm,60.47,57.16,0.79,74.86 -0.0,164.78,0.0,27.19,1.2,idm_13,1.13,259.6,0.1,top,HBEFA3/PC_G_EU4,routetop,0.0,2624.72,0,0.07,idm,55.94,9.09,0.81,73.68 -0.08,163.5,0.0,27.18,1.21,idm_13,1.13,259.58,0.2,top,HBEFA3/PC_G_EU4,routetop,0.1,2631.03,0,0.07,idm,59.48,9.1,0.81,73.68 -0.16,162.24,0.0,27.17,1.21,idm_13,1.13,259.55,0.3,top,HBEFA3/PC_G_EU4,routetop,0.0,2636.67,0,0.07,idm,59.44,9.11,0.8,73.67 -0.23,161.0,0.0,27.14,1.21,idm_13,1.14,259.51,0.4,top,HBEFA3/PC_G_EU4,routetop,0.0,2641.63,0,0.07,idm,59.4,9.14,0.79,73.67 -0.31,159.78,0.0,27.11,1.21,idm_13,1.14,259.45,0.5,top,HBEFA3/PC_G_EU4,routetop,0.0,2645.91,0,0.06,idm,59.36,9.17,0.79,73.66 -0.41,158.73,0.0,27.07,1.22,idm_13,1.15,259.37,0.6,top,HBEFA3/PC_G_EU4,routetop,0.0,2679.14,0,0.07,idm,60.47,9.21,0.79,73.65 -0.0,164.78,0.0,17.96,1.2,idm_14,1.13,244.67,0.1,top,HBEFA3/PC_G_EU4,routetop,0.0,2624.72,0,0.07,idm,55.94,18.64,0.81,70.0 -0.08,163.5,0.0,17.95,1.21,idm_14,1.13,244.66,0.2,top,HBEFA3/PC_G_EU4,routetop,0.1,2631.03,0,0.07,idm,59.48,18.64,0.81,70.0 -0.16,162.24,0.0,17.94,1.21,idm_14,1.13,244.63,0.3,top,HBEFA3/PC_G_EU4,routetop,0.0,2636.67,0,0.07,idm,59.44,18.66,0.8,69.99 -0.23,161.0,0.0,17.92,1.21,idm_14,1.14,244.6,0.4,top,HBEFA3/PC_G_EU4,routetop,0.0,2641.63,0,0.07,idm,59.4,18.68,0.79,69.98 -0.31,159.78,0.0,17.89,1.21,idm_14,1.14,244.55,0.5,top,HBEFA3/PC_G_EU4,routetop,0.0,2645.91,0,0.06,idm,59.36,18.71,0.79,69.96 -0.0,164.78,0.0,9.98,1.2,idm_15,1.13,229.84,0.1,top,HBEFA3/PC_G_EU4,routetop,0.0,2624.72,0,0.07,idm,55.94,28.18,0.81,64.07 -0.08,163.5,0.0,9.98,1.21,idm_15,1.13,229.83,0.2,top,HBEFA3/PC_G_EU4,routetop,0.1,2631.03,0,0.07,idm,59.48,28.19,0.81,64.07 -0.16,162.24,0.0,9.97,1.21,idm_15,1.13,229.8,0.3,top,HBEFA3/PC_G_EU4,routetop,0.0,2636.67,0,0.07,idm,59.44,28.21,0.8,64.06 -0.23,161.0,0.0,9.95,1.21,idm_15,1.14,229.76,0.4,top,HBEFA3/PC_G_EU4,routetop,0.0,2641.63,0,0.07,idm,59.4,28.23,0.79,64.04 -0.31,159.78,0.0,9.93,1.21,idm_15,1.14,229.7,0.5,top,HBEFA3/PC_G_EU4,routetop,0.0,2645.91,0,0.06,idm,59.36,28.26,0.79,64.02 -0.0,164.78,0.0,3.81,1.2,idm_16,1.13,214.88,0.1,top,HBEFA3/PC_G_EU4,routetop,0.0,2624.72,0,0.07,idm,55.94,37.73,0.81,56.29 -0.08,163.5,0.0,3.81,1.21,idm_16,1.13,214.87,0.2,top,HBEFA3/PC_G_EU4,routetop,0.1,2631.03,0,0.07,idm,59.48,37.74,0.81,56.28 -0.16,162.24,0.0,3.8,1.21,idm_16,1.13,214.85,0.3,top,HBEFA3/PC_G_EU4,routetop,0.0,2636.67,0,0.07,idm,59.44,37.75,0.8,56.27 
-0.23,161.0,0.0,3.79,1.21,idm_16,1.14,214.81,0.4,top,HBEFA3/PC_G_EU4,routetop,0.0,2641.63,0,0.07,idm,59.4,37.77,0.79,56.24 -0.31,159.78,0.0,3.77,1.21,idm_16,1.14,214.77,0.5,top,HBEFA3/PC_G_EU4,routetop,0.0,2645.91,0,0.06,idm,59.36,37.81,0.79,56.22 -0.0,164.78,0.0,-0.15,1.2,idm_17,1.13,199.9,0.1,top,HBEFA3/PC_G_EU4,routetop,0.0,2624.72,0,0.07,idm,55.94,47.27,0.81,47.18 -0.08,163.5,0.0,-0.15,1.21,idm_17,1.13,199.88,0.2,top,HBEFA3/PC_G_EU4,routetop,0.1,2631.03,0,0.07,idm,59.48,47.28,0.81,47.17 -0.16,162.24,0.0,-0.16,1.21,idm_17,1.13,199.85,0.3,top,HBEFA3/PC_G_EU4,routetop,0.0,2636.67,0,0.07,idm,59.44,47.3,0.8,47.15 -0.23,161.0,0.0,-0.16,1.21,idm_17,1.14,199.81,0.4,top,HBEFA3/PC_G_EU4,routetop,0.0,2641.63,0,0.07,idm,59.4,47.32,0.79,47.13 -0.31,159.78,0.0,-0.17,1.21,idm_17,1.14,199.75,0.5,top,HBEFA3/PC_G_EU4,routetop,0.0,2645.91,0,0.06,idm,59.36,47.35,0.79,47.1 -0.0,164.78,0.0,-1.64,1.2,idm_18,1.13,185.04,0.1,top,HBEFA3/PC_G_EU4,routetop,0.0,2624.72,0,0.07,idm,55.94,56.82,0.81,37.35 -0.08,163.5,0.0,-1.64,1.21,idm_18,1.13,185.03,0.2,top,HBEFA3/PC_G_EU4,routetop,0.1,2631.03,0,0.07,idm,59.48,56.83,0.81,37.34 -0.16,162.24,0.0,-1.64,1.21,idm_18,1.13,185.0,0.3,top,HBEFA3/PC_G_EU4,routetop,0.0,2636.67,0,0.07,idm,59.44,56.84,0.8,37.33 -0.23,161.0,0.0,-1.64,1.21,idm_18,1.14,184.97,0.4,top,HBEFA3/PC_G_EU4,routetop,0.0,2641.63,0,0.07,idm,59.4,56.87,0.79,37.3 -0.31,159.78,0.0,-1.64,1.21,idm_18,1.14,184.93,0.5,top,HBEFA3/PC_G_EU4,routetop,0.0,2645.91,0,0.06,idm,59.36,56.9,0.79,37.27 -0.0,164.78,0.0,-0.52,1.2,idm_19,1.13,170.03,0.1,left,HBEFA3/PC_G_EU4,routeleft,0.0,2624.72,0,0.07,idm,55.94,8.86,0.81,27.42 -0.08,163.5,0.0,-0.52,1.21,idm_19,1.13,170.01,0.2,left,HBEFA3/PC_G_EU4,routeleft,0.1,2631.03,0,0.07,idm,59.48,8.87,0.81,27.41 -0.16,162.24,0.0,-0.51,1.21,idm_19,1.13,169.98,0.3,left,HBEFA3/PC_G_EU4,routeleft,0.0,2636.67,0,0.07,idm,59.44,8.89,0.8,27.39 -0.23,161.0,0.0,-0.51,1.21,idm_19,1.14,169.94,0.4,left,HBEFA3/PC_G_EU4,routeleft,0.0,2641.63,0,0.07,idm,59.4,8.91,0.79,27.37 -0.31,159.78,0.0,-0.5,1.21,idm_19,1.14,169.88,0.5,left,HBEFA3/PC_G_EU4,routeleft,0.0,2645.91,0,0.06,idm,59.36,8.94,0.79,27.34 -0.0,164.78,0.0,55.68,1.2,idm_2,1.13,64.0,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,19.09,0.81,3.45 -0.08,163.5,0.0,55.68,1.21,idm_2,1.13,63.99,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,19.1,0.81,3.45 -0.16,162.24,0.0,55.7,1.21,idm_2,1.13,63.97,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,19.11,0.8,3.46 -0.23,161.0,0.0,55.72,1.21,idm_2,1.14,63.93,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,19.14,0.79,3.47 -0.31,159.78,0.0,55.75,1.21,idm_2,1.14,63.88,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,19.17,0.79,3.49 -0.0,164.78,0.0,3.11,1.2,idm_20,1.13,155.0,0.1,left,HBEFA3/PC_G_EU4,routeleft,0.0,2624.72,0,0.07,idm,55.94,18.41,0.81,18.17 -0.08,163.5,0.0,3.11,1.21,idm_20,1.13,154.99,0.2,left,HBEFA3/PC_G_EU4,routeleft,0.1,2631.03,0,0.07,idm,59.48,18.42,0.81,18.16 -0.16,162.24,0.0,3.12,1.21,idm_20,1.13,154.96,0.3,left,HBEFA3/PC_G_EU4,routeleft,0.0,2636.68,0,0.07,idm,59.44,18.43,0.8,18.15 -0.23,161.0,0.0,3.13,1.21,idm_20,1.14,154.93,0.4,left,HBEFA3/PC_G_EU4,routeleft,0.0,2641.7,0,0.07,idm,59.41,18.46,0.79,18.12 -0.31,159.77,0.0,3.15,1.21,idm_20,1.14,154.89,0.5,left,HBEFA3/PC_G_EU4,routeleft,0.0,2646.14,0,0.06,idm,59.37,18.49,0.79,18.1 -0.0,164.78,0.0,8.98,1.2,idm_21,1.13,140.22,0.1,left,HBEFA3/PC_G_EU4,routeleft,0.0,2624.72,0,0.07,idm,55.94,27.95,0.81,10.15 
-0.1,163.3,0.0,8.99,1.21,idm_21,1.13,140.21,0.2,left,HBEFA3/PC_G_EU4,routeleft,0.1,2637.25,0,0.07,idm,60.3,27.96,0.81,10.15 -0.2,161.84,0.0,9.0,1.21,idm_21,1.14,140.18,0.3,left,HBEFA3/PC_G_EU4,routeleft,0.0,2649.89,0,0.07,idm,60.34,27.98,0.8,10.13 -0.29,160.38,0.0,9.02,1.21,idm_21,1.14,140.14,0.4,left,HBEFA3/PC_G_EU4,routeleft,0.0,2662.63,0,0.07,idm,60.37,28.01,0.79,10.11 -0.39,158.94,0.0,9.05,1.22,idm_21,1.15,140.07,0.5,left,HBEFA3/PC_G_EU4,routeleft,0.0,2675.48,0,0.07,idm,60.41,28.05,0.79,10.08 -0.0,164.78,0.0,63.57,1.2,idm_3,1.13,49.05,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,28.64,0.81,9.48 -0.08,163.5,0.0,63.58,1.21,idm_3,1.13,49.04,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,28.64,0.81,9.49 -0.16,162.24,0.0,63.59,1.21,idm_3,1.13,49.02,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,28.66,0.8,9.5 -0.23,161.0,0.0,63.61,1.21,idm_3,1.14,48.99,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,28.68,0.79,9.52 -0.31,159.78,0.0,63.63,1.21,idm_3,1.14,48.94,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,28.71,0.79,9.54 -0.0,164.78,0.0,69.65,1.2,idm_4,1.13,34.22,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,38.18,0.81,17.34 -0.08,163.5,0.0,69.65,1.21,idm_4,1.13,34.21,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,38.19,0.81,17.35 -0.16,162.24,0.0,69.66,1.21,idm_4,1.13,34.19,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,38.21,0.8,17.36 -0.23,161.0,0.0,69.68,1.21,idm_4,1.14,34.15,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,38.23,0.79,17.38 -0.31,159.78,0.0,69.69,1.21,idm_4,1.14,34.11,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,38.26,0.79,17.41 -0.0,164.78,0.0,73.49,1.2,idm_5,1.13,19.04,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,47.73,0.81,26.5 -0.08,163.5,0.0,73.5,1.21,idm_5,1.13,19.02,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,47.74,0.81,26.51 -0.16,162.24,0.0,73.5,1.21,idm_5,1.13,18.99,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,47.75,0.8,26.53 -0.23,161.0,0.0,73.51,1.21,idm_5,1.14,18.95,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,47.77,0.79,26.55 -0.31,159.78,0.0,73.52,1.21,idm_5,1.14,18.91,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,47.81,0.79,26.58 -0.0,164.78,0.0,74.87,1.2,idm_6,1.13,4.39,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,57.27,0.81,36.34 -0.08,163.5,0.0,74.87,1.21,idm_6,1.13,4.38,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,57.28,0.81,36.35 -0.16,162.24,0.0,74.87,1.21,idm_6,1.13,4.36,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,57.3,0.8,36.37 -0.23,161.0,0.0,74.87,1.21,idm_6,1.14,4.32,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,57.32,0.79,36.39 -0.31,159.78,0.0,74.87,1.21,idm_6,1.14,4.28,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,57.35,0.79,36.42 -0.0,164.78,0.0,73.62,1.2,idm_7,1.13,349.16,0.1,right,HBEFA3/PC_G_EU4,routeright,0.0,2624.72,0,0.07,idm,55.94,9.32,0.81,46.26 -0.08,163.5,0.0,73.62,1.21,idm_7,1.13,349.15,0.2,right,HBEFA3/PC_G_EU4,routeright,0.1,2631.03,0,0.07,idm,59.48,9.33,0.81,46.27 -0.16,162.24,0.0,73.61,1.21,idm_7,1.13,349.12,0.3,right,HBEFA3/PC_G_EU4,routeright,0.0,2636.67,0,0.07,idm,59.44,9.34,0.8,46.28 
-0.23,161.0,0.0,73.6,1.21,idm_7,1.14,349.07,0.4,right,HBEFA3/PC_G_EU4,routeright,0.0,2641.63,0,0.07,idm,59.4,9.37,0.79,46.31 -0.31,159.78,0.0,73.6,1.21,idm_7,1.14,349.01,0.5,right,HBEFA3/PC_G_EU4,routeright,0.0,2645.91,0,0.06,idm,59.36,9.4,0.79,46.34 -0.0,164.78,0.0,69.89,1.2,idm_8,1.13,334.33,0.1,right,HBEFA3/PC_G_EU4,routeright,0.0,2624.72,0,0.07,idm,55.94,18.86,0.81,55.47 -0.08,163.5,0.0,69.88,1.21,idm_8,1.13,334.32,0.2,right,HBEFA3/PC_G_EU4,routeright,0.1,2631.03,0,0.07,idm,59.48,18.87,0.81,55.47 -0.16,162.24,0.0,69.87,1.21,idm_8,1.13,334.3,0.3,right,HBEFA3/PC_G_EU4,routeright,0.0,2636.67,0,0.07,idm,59.44,18.89,0.8,55.49 -0.23,161.0,0.0,69.86,1.21,idm_8,1.14,334.27,0.4,right,HBEFA3/PC_G_EU4,routeright,0.0,2641.63,0,0.07,idm,59.4,18.91,0.79,55.51 -0.31,159.78,0.0,69.85,1.21,idm_8,1.14,334.22,0.5,right,HBEFA3/PC_G_EU4,routeright,0.0,2645.91,0,0.06,idm,59.36,18.94,0.79,55.54 -0.0,164.78,0.0,63.91,1.2,idm_9,1.13,319.44,0.1,right,HBEFA3/PC_G_EU4,routeright,0.0,2624.72,0,0.07,idm,55.94,28.41,0.81,63.4 -0.08,163.5,0.0,63.9,1.21,idm_9,1.13,319.42,0.2,right,HBEFA3/PC_G_EU4,routeright,0.1,2631.03,0,0.07,idm,59.48,28.42,0.81,63.41 -0.16,162.24,0.0,63.89,1.21,idm_9,1.13,319.39,0.3,right,HBEFA3/PC_G_EU4,routeright,0.0,2636.67,0,0.07,idm,59.44,28.43,0.8,63.42 -0.23,161.0,0.0,63.87,1.21,idm_9,1.14,319.35,0.4,right,HBEFA3/PC_G_EU4,routeright,0.0,2641.63,0,0.07,idm,59.4,28.46,0.79,63.44 -0.31,159.78,0.0,63.85,1.21,idm_9,1.14,319.3,0.5,right,HBEFA3/PC_G_EU4,routeright,0.0,2645.91,0,0.06,idm,59.36,28.49,0.79,63.46 +time,id,x,y,speed,headway,leader_id,follower_id,leader_rel_speed,target_accel_with_noise_with_failsafe,target_accel_no_noise_no_failsafe,target_accel_with_noise_no_failsafe,target_accel_no_noise_with_failsafe,realized_accel,road_grade,edge_id,lane_number,distance,relative_position +0.0,idm_0,36.64,-1.6,0.0,4.545454545454547,idm_1,idm_21,0.0,0.0,0.0,0.0,0.0,0.0,0,bottom,0,0.0,0.0 +0.1,idm_0,36.648322761506634,-1.599834647122385,0.07984158415841586,4.545454545454546,idm_1,idm_21,0.0,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.7984158415841586,0,bottom,0,0.007984158415841587,0.007984158415841587 +0.2,idm_0,36.66480556684144,-1.599507174168713,0.15812219156578355,4.545454545454545,idm_1,idm_21,0.0,0.7906341348144134,0.7906341348144134,0.7906341348144134,0.7906341348144134,0.7828060740736771,0,bottom,0,0.023796377572419945,0.023796377572419945 +0.3,idm_0,36.68928269645688,-1.599020873580327,0.23481302481051264,4.545454545454546,idm_1,idm_21,5.551115123125783e-17,0.7745774157717638,0.7745774157717638,0.7745774157717638,0.7745774157717638,0.7669083324472908,0,bottom,0,0.04727768005347121,0.04727768005347121 +0.0,idm_1,46.477059895666216,-0.2910450274933619,0.0,4.545454545454547,idm_2,idm_0,0.0,0.0,0.0,0.0,0.0,0.0,0,bottom,0,0.0,9.545454545454547 +0.1,idm_1,46.48510950976829,-0.2889238453988948,0.07984158415841586,4.545454545454547,idm_2,idm_0,0.0,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.7984158415841586,0,bottom,0,0.007984158415840879,9.553438703870388 +0.2,idm_1,46.5010513605782,-0.2847229522800698,0.15812219156578355,4.5454545454545485,idm_2,idm_0,0.0,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7828060740736771,0,bottom,0,0.023796377572418592,9.569250923026964 
+0.3,idm_1,46.524725167351825,-0.2784845842789108,0.2348130248105127,4.5454545454545485,idm_2,idm_0,-5.551115123125783e-17,0.7745774157717642,0.7745774157717642,0.7745774157717642,0.7745774157717642,0.7669083324472914,0,bottom,0,0.04727768005347066,9.592732225508016 +0.0,idm_2,55.65270828548022,3.488595652781747,0.0,4.545454545454547,idm_3,idm_1,0.0,0.0,0.0,0.0,0.0,0.0,0,bottom,0,0.0,19.090909090909093 +0.1,idm_2,55.66000796138611,3.4925969566116453,0.07984158415841586,4.545454545454547,idm_3,idm_1,0.0,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.7984158415841586,0,bottom,0,0.007984158415840879,19.098893249324934 +0.2,idm_2,55.67446459778839,3.5005213350840068,0.15812219156578355,4.545454545454547,idm_3,idm_1,0.0,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7828060740736771,0,bottom,0,0.02379637757242037,19.114705468481514 +0.3,idm_2,55.69593284641682,3.5122891158136613,0.23481302481051264,4.545454545454547,idm_3,idm_1,5.551115123125783e-17,0.7745774157717641,0.7745774157717641,0.7745774157717641,0.7745774157717641,0.7669083324472908,0,bottom,0,0.04727768005347244,19.138186770962566 +0.0,idm_3,63.54122270574333,9.511222705743334,0.0,4.545454545454547,idm_4,idm_2,0.0,0.0,0.0,0.0,0.0,0.0,0,bottom,0,0.0,28.63636363636364 +0.1,idm_3,63.54710894820649,9.517108948206497,0.07984158415841586,4.545454545454549,idm_4,idm_2,0.0,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.7984158415841586,0,bottom,0,0.007984158415840879,28.64434779477948 +0.2,idm_3,63.558766351653254,9.528766351653257,0.15812219156578355,4.545454545454549,idm_4,idm_2,0.0,0.7906341348144138,0.7906341348144138,0.7906341348144138,0.7906341348144138,0.7828060740736771,0,bottom,0,0.02379637757242037,28.660160013936057 +0.3,idm_3,63.57607771154312,9.546077711543122,0.2348130248105127,4.545454545454549,idm_4,idm_2,-5.551115123125783e-17,0.7745774157717643,0.7745774157717643,0.7745774157717643,0.7745774157717643,0.7669083324472914,0,bottom,0,0.04727768005347244,28.683641316417113 +0.0,idm_4,69.61055207686752,17.363529025870548,0.0,4.545454545454547,idm_5,idm_3,0.0,0.0,0.0,0.0,0.0,0.0,0,bottom,0,0.0,38.18181818181819 +0.1,idm_4,69.61489064350748,17.370633428743492,0.07984158415841586,4.545454545454547,idm_5,idm_3,0.0,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.7984158415841586,0,bottom,0,0.007984158415844433,38.18980234023403 +0.2,idm_4,69.62348295380036,17.384703336848084,0.15812219156578355,4.545454545454547,idm_5,idm_3,0.0,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7828060740736771,0,bottom,0,0.023796377572423918,38.20561455939061 +0.3,idm_4,69.63624261991681,17.40559729011379,0.23481302481051264,4.545454545454547,idm_5,idm_3,0.0,0.7745774157717641,0.7745774157717641,0.7745774157717641,0.7745774157717641,0.7669083324472908,0,bottom,0,0.04727768005347599,38.229095861871656 +0.0,idm_5,73.45066460734851,26.51380415096358,0.0,4.545454545454547,idm_6,idm_4,0.0,0.0,0.0,0.0,0.0,0.0,0,bottom,0,0.0,47.72727272727274 +0.1,idm_5,73.45278578944298,26.521853765065657,0.07984158415841586,4.545454545454547,idm_6,idm_4,-1.3877787807814454e-17,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.7984158415841586,0,bottom,0,0.007984158415844433,47.73525688568858 
+0.2,idm_5,73.4569866825618,26.53779561587557,0.15812219156578355,4.545454545454547,idm_6,idm_4,-5.551115123125783e-17,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7828060740736771,0,bottom,0,0.023796377572423918,47.75106910484515 +0.3,idm_5,73.46322505056297,26.561469422649196,0.23481302481051264,4.545454545454547,idm_6,idm_4,-1.1102230246251563e-16,0.7745774157717641,0.7745774157717641,0.7745774157717641,0.7745774157717641,0.7669083324472908,0,bottom,0,0.04727768005347599,47.77455040732621 diff --git a/tests/fast_tests/test_visualizers.py b/tests/fast_tests/test_visualizers.py index d2f4a20a4..47aa9d968 100644 --- a/tests/fast_tests/test_visualizers.py +++ b/tests/fast_tests/test_visualizers.py @@ -94,54 +94,54 @@ def test_time_space_diagram_figure_eight(self): dir_path = os.path.dirname(os.path.realpath(__file__)) flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/fig8.json')) - emission_data = tsd.import_data_from_trajectory( + emission_data, _, _, _ = tsd.import_data_from_trajectory( os.path.join(dir_path, 'test_files/fig8_emission.csv'), flow_params) - segs, _ = tsd.get_time_space_data(emission_data, flow_params) + segs, _ = tsd.get_time_space_data(emission_data, flow_params['network']) expected_segs = np.array([ - [[1., 60.], [2., 59.]], - [[2., 59.], [3., 57.02]], - [[3., 57.02], [4., 54.05]], - [[1., 23.8], [2., 22.81]], - [[2., 22.81], [3., 20.83]], - [[3., 20.83], [4., 17.89]], - [[1., 182.84166941], [2., 181.85166941]], - [[2., 181.85166941], [3., 179.87166941]], - [[3., 179.87166941], [4., 176.92166941]], - [[1., 154.07166941], [2., 153.08166941]], - [[2., 153.08166941], [3., 151.10166941]], - [[3., 151.10166941], [4., 148.16166941]], - [[1., 125.30166941], [2., 124.31166941]], - [[2., 124.31166941], [3., 122.34166941]], - [[3., 122.34166941], [4., 119.39166941]], - [[1., 96.54166941], [2., 95.54166941]], - [[2., 95.54166941], [3., 93.56166941]], - [[3., 93.56166941], [4., 90.59166941]], - [[1., -203.16166941], [2., -202.17166941]], - [[2., -202.17166941], [3., -200.02166941]], - [[3., -200.02166941], [4., -197.07166941]], - [[1., -174.40166941], [2., -173.40166941]], - [[2., -173.40166941], [3., -171.43166941]], - [[3., -171.43166941], [4., -168.48166941]], - [[1., -145.63166941], [2., -144.64166941]], - [[2., -144.64166941], [3., -142.66166941]], - [[3., -142.66166941], [4., -139.72166941]], - [[1., -116.86166941], [2., -115.87166941]], - [[2., -115.87166941], [3., -113.89166941]], - [[3., -113.89166941], [4., -110.95166941]], - [[1., -88.09166941], [2., -87.10166941]], - [[2., -87.10166941], [3., -85.13166941]], - [[3., -85.13166941], [4., -82.18166941]], - [[1., -59.33], [2., -58.34]], - [[2., -58.34], [3., -56.36]], - [[3., -56.36], [4., -53.42]], - [[1., -30.56], [2., -29.72]], - [[2., -29.72], [3., -27.97]], - [[3., -27.97], [4., -25.22]], - [[1., -1.79], [2., -0.8]], - [[2., -0.8], [3., 208.64166941]], - [[3., 208.64166941], [4., 205.69166941]]] + [[1., 263.16166941], [2., 262.16166941]], + [[2., 262.16166941], [3., 260.18166941]], + [[3., 260.18166941], [4., 257.21166941]], + [[1., 226.96166941], [2., 225.97166941]], + [[2., 225.97166941], [3., 223.99166941]], + [[3., 223.99166941], [4., 221.05166941]], + [[1., 386.00333882], [2., 385.01333882]], + [[2., 385.01333882], [3., 383.03333882]], + [[3., 383.03333882], [4., 380.08333882]], + [[1., 357.23333882], [2., 356.24333882]], + [[2., 356.24333882], [3., 354.26333882]], + [[3., 354.26333882], [4., 351.32333882]], + [[1., 328.46333882], [2., 327.47333882]], + [[2., 
327.47333882], [3., 325.50333882]], + [[3., 325.50333882], [4., 322.55333882]], + [[1., 299.70333882], [2., 298.70333882]], + [[2., 298.70333882], [3., 296.72333882]], + [[3., 296.72333882], [4., 293.75333882]], + [[1., 0.], [2., 0.99]], + [[2., 0.99], [3., 3.14]], + [[3., 3.14], [4., 6.09]], + [[1., 28.76], [2., 29.76]], + [[2., 29.76], [3., 31.73]], + [[3., 31.73], [4., 34.68]], + [[1., 57.53], [2., 58.52]], + [[2., 58.52], [3., 60.5]], + [[3., 60.5], [4., 63.44]], + [[1., 86.3], [2., 87.29]], + [[2., 87.29], [3., 89.27]], + [[3., 89.27], [4., 92.21]], + [[1., 115.07], [2., 116.06]], + [[2., 116.06], [3., 118.03]], + [[3., 118.03], [4., 120.98]], + [[1., 143.83166941], [2., 144.82166941]], + [[2., 144.82166941], [3., 146.80166941]], + [[3., 146.80166941], [4., 149.74166941]], + [[1., 172.60166941], [2., 173.44166941]], + [[2., 173.44166941], [3., 175.19166941]], + [[3., 175.19166941], [4., 177.94166941]], + [[1., 201.37166941], [2., 202.36166941]], + [[2., 202.36166941], [3., 411.80333882]], + [[3., 411.80333882], [4., 408.85333882]]] ) np.testing.assert_array_almost_equal(segs, expected_segs) @@ -150,16 +150,16 @@ def test_time_space_diagram_merge(self): dir_path = os.path.dirname(os.path.realpath(__file__)) flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/merge.json')) - emission_data = tsd.import_data_from_trajectory( + emission_data, _, _, _ = tsd.import_data_from_trajectory( os.path.join(dir_path, 'test_files/merge_emission.csv'), flow_params) - segs, _ = tsd.get_time_space_data(emission_data, flow_params) + segs, _ = tsd.get_time_space_data(emission_data, flow_params['network']) expected_segs = np.array([ - [[2.0000e-01, 7.2949e+02], [4.0000e-01, 7.2953e+02]], - [[4.0000e-01, 7.2953e+02], [6.0000e-01, 7.2961e+02]], - [[6.0000e-01, 7.2961e+02], [8.0000e-01, 7.2973e+02]], - [[8.0000e-01, 7.2973e+02], [1.0000e+00, 7.2988e+02]]] + [[2.0000e-01, 7.2463e+02], [4.0000e-01, 7.2467e+02]], + [[4.0000e-01, 7.2467e+02], [6.0000e-01, 7.2475e+02]], + [[6.0000e-01, 7.2475e+02], [8.0000e-01, 7.2487e+02]], + [[8.0000e-01, 7.2487e+02], [1.0000e+00, 7.2502e+02]]] ) np.testing.assert_array_almost_equal(segs, expected_segs) @@ -168,37 +168,37 @@ def test_time_space_diagram_I210(self): dir_path = os.path.dirname(os.path.realpath(__file__)) module = __import__("examples.exp_configs.non_rl", fromlist=["i210_subnetwork"]) flow_params = getattr(module, "i210_subnetwork").flow_params - emission_data = tsd.import_data_from_trajectory( + emission_data, _, _, _ = tsd.import_data_from_trajectory( os.path.join(dir_path, 'test_files/i210_emission.csv'), flow_params) - segs, _ = tsd.get_time_space_data(emission_data, flow_params) + segs, _ = tsd.get_time_space_data(emission_data, flow_params['network']) expected_segs = { 1: np.array([ - [[0.8, 5.1], [1.6, 23.37]], - [[1.6, 23.37], [2.4, 42.02]], - [[2.4, 42.02], [3.2, 61.21]], - [[3.2, 61.21], [4., 18.87]], - [[4., 18.87], [4.8, 39.93]], - [[2.4, 5.1], [3.2, 22.97]], - [[3.2, 22.97], [4., 40.73]]] + [[-719.2, 3.77], [-718.4, 22.04]], + [[-718.4, 22.04], [-717.6, 40.69]], + [[-717.6, 40.69], [-716.8, 59.88]], + [[-716.8, 59.88], [-716., 17.54]], + [[-716., 17.54], [-715.2, 38.6]], + [[-717.6, 3.77], [-716.8, 21.64]], + [[-716.8, 21.64], [-716., 39.4]]] ), 2: np.array([ - [[2.4, 5.1], [3.2, 23.98]], - [[3.2, 23.98], [4., 43.18]]] + [[-717.6, 3.77], [-716.8, 22.65]], + [[-716.8, 22.65], [-716., 41.85]]] ), 3: np.array([ - [[0.8, 5.1], [1.6, 23.72]], - [[1.6, 23.72], [2.4, 43.06]], - [[2.4, 43.06], [3.2, 1.33]], - [[3.2, 1.33], [4., 21.65]], - 
[[4., 21.65], [4.8, 43.46]], - [[2.4, 5.1], [3.2, 23.74]], - [[3.2, 23.74], [4., 42.38]]] + [[-719.2, 3.77], [-718.4, 22.39]], + [[-718.4, 22.39], [-717.6, 41.73]], + [[-717.6, 41.73], [-716.8, 0.]], + [[-716.8, 0.], [-716., 20.32]], + [[-716., 20.32], [-715.2, 42.13]], + [[-717.6, 3.77], [-716.8, 22.41]], + [[-716.8, 22.41], [-716., 41.05]]] ), 4: np.array([ - [[2.4, 5.1], [3.2, 23.6]], - [[3.2, 23.6], [4., 42.46]]] + [[-717.6, 3.77], [-716.8, 22.27]], + [[-716.8, 22.27], [-716., 41.13]]] )} for lane, expected_seg in expected_segs.items(): @@ -208,106 +208,30 @@ def test_time_space_diagram_ring_road(self): dir_path = os.path.dirname(os.path.realpath(__file__)) flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/ring_230.json')) - emission_data = tsd.import_data_from_trajectory( + emission_data, _, _, _ = tsd.import_data_from_trajectory( os.path.join(dir_path, 'test_files/ring_230_emission.csv'), flow_params) - segs, _ = tsd.get_time_space_data(emission_data, flow_params) + segs, _ = tsd.get_time_space_data(emission_data, flow_params['network']) expected_segs = np.array([ - [[1.0000e-01, 0.0000e+00], [2.0000e-01, 1.0000e-02]], - [[2.0000e-01, 1.0000e-02], [3.0000e-01, 2.0000e-02]], - [[3.0000e-01, 2.0000e-02], [4.0000e-01, 5.0000e-02]], - [[4.0000e-01, 5.0000e-02], [5.0000e-01, 8.0000e-02]], - [[5.0000e-01, 8.0000e-02], [6.0000e-01, 1.2000e-01]], - [[1.0000e-01, 9.5500e+00], [2.0000e-01, 9.5500e+00]], - [[2.0000e-01, 9.5500e+00], [3.0000e-01, 9.5700e+00]], - [[3.0000e-01, 9.5700e+00], [4.0000e-01, 9.5900e+00]], - [[4.0000e-01, 9.5900e+00], [5.0000e-01, 9.6200e+00]], - [[5.0000e-01, 9.6200e+00], [6.0000e-01, 9.6600e+00]], - [[1.0000e-01, 9.5550e+01], [2.0000e-01, 9.5560e+01]], - [[2.0000e-01, 9.5560e+01], [3.0000e-01, 9.5580e+01]], - [[3.0000e-01, 9.5580e+01], [4.0000e-01, 9.5600e+01]], - [[4.0000e-01, 9.5600e+01], [5.0000e-01, 9.5630e+01]], - [[5.0000e-01, 9.5630e+01], [6.0000e-01, 9.5670e+01]], - [[1.0000e-01, 1.0510e+02], [2.0000e-01, 1.0511e+02]], - [[2.0000e-01, 1.0511e+02], [3.0000e-01, 1.0512e+02]], - [[3.0000e-01, 1.0512e+02], [4.0000e-01, 1.0515e+02]], - [[4.0000e-01, 1.0515e+02], [5.0000e-01, 1.0518e+02]], - [[5.0000e-01, 1.0518e+02], [6.0000e-01, 1.0522e+02]], - [[1.0000e-01, 1.1465e+02], [2.0000e-01, 1.1465e+02]], - [[2.0000e-01, 1.1465e+02], [3.0000e-01, 1.1467e+02]], - [[3.0000e-01, 1.1467e+02], [4.0000e-01, 1.1469e+02]], - [[4.0000e-01, 1.1469e+02], [5.0000e-01, 1.1472e+02]], - [[5.0000e-01, 1.1472e+02], [6.0000e-01, 1.1476e+02]], - [[1.0000e-01, 1.2429e+02], [2.0000e-01, 1.2430e+02]], - [[2.0000e-01, 1.2430e+02], [3.0000e-01, 1.2431e+02]], - [[3.0000e-01, 1.2431e+02], [4.0000e-01, 1.2434e+02]], - [[4.0000e-01, 1.2434e+02], [5.0000e-01, 1.2437e+02]], - [[5.0000e-01, 1.2437e+02], [6.0000e-01, 1.2441e+02]], - [[1.0000e-01, 1.3384e+02], [2.0000e-01, 1.3384e+02]], - [[2.0000e-01, 1.3384e+02], [3.0000e-01, 1.3386e+02]], - [[3.0000e-01, 1.3386e+02], [4.0000e-01, 1.3388e+02]], - [[4.0000e-01, 1.3388e+02], [5.0000e-01, 1.3391e+02]], - [[1.0000e-01, 1.4338e+02], [2.0000e-01, 1.4339e+02]], - [[2.0000e-01, 1.4339e+02], [3.0000e-01, 1.4341e+02]], - [[3.0000e-01, 1.4341e+02], [4.0000e-01, 1.4343e+02]], - [[4.0000e-01, 1.4343e+02], [5.0000e-01, 1.4346e+02]], - [[1.0000e-01, 1.5293e+02], [2.0000e-01, 1.5294e+02]], - [[2.0000e-01, 1.5294e+02], [3.0000e-01, 1.5295e+02]], - [[3.0000e-01, 1.5295e+02], [4.0000e-01, 1.5297e+02]], - [[4.0000e-01, 1.5297e+02], [5.0000e-01, 1.5301e+02]], - [[1.0000e-01, 1.6247e+02], [2.0000e-01, 1.6248e+02]], - [[2.0000e-01, 1.6248e+02], 
[3.0000e-01, 1.6250e+02]], - [[3.0000e-01, 1.6250e+02], [4.0000e-01, 1.6252e+02]], - [[4.0000e-01, 1.6252e+02], [5.0000e-01, 1.6255e+02]], - [[1.0000e-01, 1.7202e+02], [2.0000e-01, 1.7203e+02]], - [[2.0000e-01, 1.7203e+02], [3.0000e-01, 1.7204e+02]], - [[3.0000e-01, 1.7204e+02], [4.0000e-01, 1.7207e+02]], - [[4.0000e-01, 1.7207e+02], [5.0000e-01, 1.7210e+02]], - [[1.0000e-01, 1.8166e+02], [2.0000e-01, 1.8167e+02]], - [[2.0000e-01, 1.8167e+02], [3.0000e-01, 1.8169e+02]], - [[3.0000e-01, 1.8169e+02], [4.0000e-01, 1.8171e+02]], - [[4.0000e-01, 1.8171e+02], [5.0000e-01, 1.8174e+02]], - [[1.0000e-01, 1.9090e+01], [2.0000e-01, 1.9100e+01]], - [[2.0000e-01, 1.9100e+01], [3.0000e-01, 1.9110e+01]], - [[3.0000e-01, 1.9110e+01], [4.0000e-01, 1.9140e+01]], - [[4.0000e-01, 1.9140e+01], [5.0000e-01, 1.9170e+01]], - [[1.0000e-01, 1.9121e+02], [2.0000e-01, 1.9122e+02]], - [[2.0000e-01, 1.9122e+02], [3.0000e-01, 1.9123e+02]], - [[3.0000e-01, 1.9123e+02], [4.0000e-01, 1.9126e+02]], - [[4.0000e-01, 1.9126e+02], [5.0000e-01, 1.9129e+02]], - [[1.0000e-01, 2.0075e+02], [2.0000e-01, 2.0076e+02]], - [[2.0000e-01, 2.0076e+02], [3.0000e-01, 2.0078e+02]], - [[3.0000e-01, 2.0078e+02], [4.0000e-01, 2.0081e+02]], - [[4.0000e-01, 2.0081e+02], [5.0000e-01, 2.0085e+02]], - [[1.0000e-01, 2.8640e+01], [2.0000e-01, 2.8640e+01]], - [[2.0000e-01, 2.8640e+01], [3.0000e-01, 2.8660e+01]], - [[3.0000e-01, 2.8660e+01], [4.0000e-01, 2.8680e+01]], - [[4.0000e-01, 2.8680e+01], [5.0000e-01, 2.8710e+01]], - [[1.0000e-01, 3.8180e+01], [2.0000e-01, 3.8190e+01]], - [[2.0000e-01, 3.8190e+01], [3.0000e-01, 3.8210e+01]], - [[3.0000e-01, 3.8210e+01], [4.0000e-01, 3.8230e+01]], - [[4.0000e-01, 3.8230e+01], [5.0000e-01, 3.8260e+01]], - [[1.0000e-01, 4.7730e+01], [2.0000e-01, 4.7740e+01]], - [[2.0000e-01, 4.7740e+01], [3.0000e-01, 4.7750e+01]], - [[3.0000e-01, 4.7750e+01], [4.0000e-01, 4.7770e+01]], - [[4.0000e-01, 4.7770e+01], [5.0000e-01, 4.7810e+01]], - [[1.0000e-01, 5.7270e+01], [2.0000e-01, 5.7280e+01]], - [[2.0000e-01, 5.7280e+01], [3.0000e-01, 5.7300e+01]], - [[3.0000e-01, 5.7300e+01], [4.0000e-01, 5.7320e+01]], - [[4.0000e-01, 5.7320e+01], [5.0000e-01, 5.7350e+01]], - [[1.0000e-01, 6.6920e+01], [2.0000e-01, 6.6930e+01]], - [[2.0000e-01, 6.6930e+01], [3.0000e-01, 6.6940e+01]], - [[3.0000e-01, 6.6940e+01], [4.0000e-01, 6.6970e+01]], - [[4.0000e-01, 6.6970e+01], [5.0000e-01, 6.7000e+01]], - [[1.0000e-01, 7.6460e+01], [2.0000e-01, 7.6470e+01]], - [[2.0000e-01, 7.6470e+01], [3.0000e-01, 7.6490e+01]], - [[3.0000e-01, 7.6490e+01], [4.0000e-01, 7.6510e+01]], - [[4.0000e-01, 7.6510e+01], [5.0000e-01, 7.6540e+01]], - [[1.0000e-01, 8.6010e+01], [2.0000e-01, 8.6020e+01]], - [[2.0000e-01, 8.6020e+01], [3.0000e-01, 8.6030e+01]], - [[3.0000e-01, 8.6030e+01], [4.0000e-01, 8.6060e+01]], - [[4.0000e-01, 8.6060e+01], [5.0000e-01, 8.6090e+01]]] + [[-7.50000000e+01, 0.00000000e+00], [-7.49000000e+01, 7.98415842e-03]], + [[-7.49000000e+01, 7.98415842e-03], [-7.48000000e+01, 2.37963776e-02]], + [[-7.48000000e+01, 2.37963776e-02], [-7.47000000e+01, 4.72776801e-02]], + [[-7.50000000e+01, 9.54545455e+00], [-7.49000000e+01, 9.55343870e+00]], + [[-7.49000000e+01, 9.55343870e+00], [-7.48000000e+01, 9.56925092e+00]], + [[-7.48000000e+01, 9.56925092e+00], [-7.47000000e+01, 9.59273223e+00]], + [[-7.50000000e+01, 1.90909091e+01], [-7.49000000e+01, 1.90988932e+01]], + [[-7.49000000e+01, 1.90988932e+01], [-7.48000000e+01, 1.91147055e+01]], + [[-7.48000000e+01, 1.91147055e+01], [-7.47000000e+01, 1.91381868e+01]], + [[-7.50000000e+01, 2.86363636e+01], [-7.49000000e+01, 
2.86443478e+01]], + [[-7.49000000e+01, 2.86443478e+01], [-7.48000000e+01, 2.86601600e+01]], + [[-7.48000000e+01, 2.86601600e+01], [-7.47000000e+01, 2.86836413e+01]], + [[-7.50000000e+01, 3.81818182e+01], [-7.49000000e+01, 3.81898023e+01]], + [[-7.49000000e+01, 3.81898023e+01], [-7.48000000e+01, 3.82056146e+01]], + [[-7.48000000e+01, 3.82056146e+01], [-7.47000000e+01, 3.82290959e+01]], + [[-7.50000000e+01, 4.77272727e+01], [-7.49000000e+01, 4.77352569e+01]], + [[-7.49000000e+01, 4.77352569e+01], [-7.48000000e+01, 4.77510691e+01]], + [[-7.48000000e+01, 4.77510691e+01], [-7.47000000e+01, 4.77745504e+01]]] ) np.testing.assert_array_almost_equal(segs, expected_segs) From fa1561a963f6daee5bb32c40bd6c5b8d688d0de6 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 11 Jul 2020 23:02:10 -0700 Subject: [PATCH 324/438] Add kjang96 as codeowner --- CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index 175783157..1a0ba0d97 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,7 +2,7 @@ # Each line is a file pattern followed by one or more owners. # These owners will be the default owners for everything in the repo. -* @cathywu @eugenevinitsky @AboudyKreidieh @kanaadp +* @eugenevinitsky @AboudyKreidieh @kjang96 # Order is important. The last matching pattern has the most precedence. # So if a pull request only touches javascript files, only these owners From 99db8d259c46ab21165920ceaaeec73c4487555d Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 12 Jul 2020 15:56:44 -0700 Subject: [PATCH 325/438] rename env for CIRCLES --- environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yml b/environment.yml index 97d9ad6f8..9dbbefb25 100644 --- a/environment.yml +++ b/environment.yml @@ -1,4 +1,4 @@ -name: flow +name: circles dependencies: - python==3.7.3 From 08262012227ce7ff599945dc0db08516d2466533 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 13 Jul 2020 04:40:25 -0700 Subject: [PATCH 326/438] make experiment.py only plot the first run out --- flow/core/experiment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index ed9451a70..5950c0ec0 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -309,7 +309,7 @@ def rl_actions(*_): generate_trajectory_table(emission_files, trajectory_table_path, source_id) write_dict_to_csv(metadata_table_path, metadata, True) tsd_main( - trajectory_table_path, + emission_files[0], { 'network': self.env.network.__class__, 'env': self.env.env_params, From 7fb5c53fb7095e4aaaa88f4570ecba64551a423e Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 13 Jul 2020 06:40:41 -0700 Subject: [PATCH 327/438] remove redudent code between i210_replay and visualize_rllib --- flow/visualize/i210_replay.py | 165 ++++-------------------- flow/visualize/visualizer_rllib.py | 200 +++++++++++++++++------------ 2 files changed, 139 insertions(+), 226 deletions(-) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index d5a0b90dc..9b0c4cf3b 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -32,11 +32,8 @@ from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables -from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration -from flow.data_pipeline.leaderboard_utils import 
network_name_translate -import uuid - from flow.core.experiment import Experiment +from flow.visualize.visualizer_rllib import read_result_dir, set_sim_params, set_env_params, set_agents, get_rl_action EXAMPLE_USAGE = """ example usage: @@ -97,44 +94,12 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= if veh_param['veh_id'] == 'av': veh_param['acceleration_controller'] = (controller, test_params) - sim_params = flow_params['sim'] - sim_params.num_clients = 1 - - sim_params.restart_instance = True - dir_path = os.path.dirname(os.path.realpath(__file__)) - emission_path = '{0}/test_time_rollout/'.format(dir_path) - sim_params.emission_path = emission_path if args.gen_emission else None - - # pick your rendering mode - if args.render_mode == 'sumo_web3d': - sim_params.num_clients = 2 - sim_params.render = False - elif args.render_mode == 'drgb': - sim_params.render = 'drgb' - sim_params.pxpm = 4 - elif args.render_mode == 'sumo_gui': - sim_params.render = False # will be set to True below - elif args.render_mode == 'no_render': - sim_params.render = False - if args.save_render: - if args.render_mode != 'sumo_gui': - sim_params.render = 'drgb' - sim_params.pxpm = 4 - sim_params.save_render = True - - # Start the environment with the gui turned on and a path for the - # emission file - env_params = flow_params['env'] - env_params.restart_instance = False - if args.evaluate: - env_params.evaluate = True - - # lower the horizon if testing - if args.horizon: - env_params.horizon = args.horizon + sim_params = set_sim_params(flow_params['sim'], args.render_mode, args.save_render) + + set_env_params(flow_params['env'], args.evaluate, args.horizon) # Create and register a gym+rllib env - exp = Experiment(flow_params) + exp = Experiment(flow_params, custom_callables=custom_callables) if args.render_mode == 'sumo_gui': exp.env.sim_params.render = True # set to True after initializing agent and env @@ -144,87 +109,25 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= exp.env.restart_simulation(sim_params=sim_params, render=sim_params.render) # reroute on exit is a training hack, it should be turned off at test time. - if hasattr(env, "reroute_on_exit"): - env.reroute_on_exit = False + if hasattr(exp.env, "reroute_on_exit"): + exp.env.reroute_on_exit = False + policy_map_fn, rets = None, None if rllib_config: - # check if we have a multiagent environment but in a - # backwards compatible way - if rllib_config.get('multiagent', {}).get('policies', None): - multiagent = True - pkl = get_rllib_pkl(result_dir) - rllib_config['multiagent'] = pkl['multiagent'] - else: - multiagent = False - raise NotImplementedError - - # Run on only one cpu for rendering purposes - rllib_config['num_workers'] = 0 + result_dir, rllib_config, multiagent, rllib_flow_params = read_result_dir(rllib_config, True) # lower the horizon if testing if args.horizon: rllib_config['horizon'] = args.horizon - assert 'run' in rllib_config['env_config'], "Was this trained with the latest version of Flow?" 
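For context, the `rl_actions` argument that `Experiment.run` receives further down is just a callable from the latest observation to the action(s) to apply; in the multiagent case both sides are dictionaries keyed by agent id. A minimal sketch of that contract, with a made-up zero action standing in for a real policy call:

    import numpy as np

    def rl_actions(state):
        # state: {agent_id: observation array}. Return one action per agent;
        # a trained policy would call agent.compute_action(...) here instead.
        return {agent_id: np.zeros(1) for agent_id in state}
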
- # Determine agent and checkpoint - config_run = rllib_config['env_config']['run'] - - rllib_flow_params = get_flow_params(rllib_config) agent_create_env, agent_env_name = make_create_env(params=rllib_flow_params, version=0) register_env(agent_env_name, agent_create_env) - if rllib_config['env_config']['run'] == "": - from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel - from ray.rllib.models import ModelCatalog - agent_cls = CCTrainer - ModelCatalog.register_custom_model("cc_model", CentralizedCriticModel) - elif rllib_config['env_config']['run'] == "": - from flow.algorithms.custom_ppo import CustomPPOTrainer - agent_cls = CustomPPOTrainer - elif config_run: - agent_cls = get_agent_class(config_run) - else: - raise Exception('You forgot to store the algorithm type') - - # create the agent that will be used to compute the actions - agent = agent_cls(env=agent_env_name, config=rllib_config) - checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num - checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num - agent.restore(checkpoint) - - if multiagent: - # map the agent id to its policy - policy_map_fn = rllib_config['multiagent']['policy_mapping_fn'] - - if rllib_config['model']['use_lstm']: - use_lstm = True - if multiagent: - # map the agent id to its policy - size = rllib_config['model']['lstm_cell_size'] - lstm_state = defaultdict(lambda: [np.zeros(size, np.float32), - np.zeros(size, np.float32)]) - else: - lstm_state = [ - np.zeros(rllib_config['model']['lstm_cell_size'], np.float32), - np.zeros(rllib_config['model']['lstm_cell_size'], np.float32) - ] - else: - use_lstm = False - - # used to store - info_dict = { - "velocities": [], - "outflows": [], - "avg_trip_energy": [], - "avg_trip_time": [], - "total_completed_trips": [] - } - all_trip_energy_distribution = defaultdict(lambda: []) - all_trip_time_distribution = defaultdict(lambda: []) - - info_dict.update({ - key: [] for key in custom_callables.keys() - }) + assert 'run' in rllib_config['env_config'], "Was this trained with the latest version of Flow?" + # Determine agent and checkpoint + agent = set_agents(rllib_config, result_dir, agent_env_name) + + rllib_rl_action, policy_map_fn, rets = get_rl_action(rllib_config, agent, multiagent) # reroute on exit is a training hack, it should be turned off at test time. 
if hasattr(exp.env, "reroute_on_exit"): @@ -232,27 +135,14 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= def rl_action(state): if rllib_config: - if multiagent: - action = {} - for agent_id in state.keys(): - if use_lstm: - action[agent_id], lstm_state[agent_id], _ = \ - agent.compute_action( - state[agent_id], state=lstm_state[agent_id], - policy_id=policy_map_fn(agent_id)) - else: - action[agent_id] = agent.compute_action( - state[agent_id], policy_id=policy_map_fn(agent_id)) - else: - if use_lstm: - raise NotImplementedError - else: - action = agent.compute_action(state) + action = rllib_rl_action(state) else: action = None + return action info_dict = exp.run(num_runs=args.num_rollouts, convert_to_csv=args.gen_emission, to_aws=args.use_s3, - rl_actions=rl_action, multiagent=rllib_config and multiagent) + rl_actions=rl_action, multiagent=rllib_config and multiagent, rets=rets, + policy_map_fn=policy_map_fn) # i = 0 # while i < args.num_rollouts: @@ -503,14 +393,6 @@ def create_parser(): parser = create_parser() args = parser.parse_args() - rllib_config = None - rllib_result_dir = None - if args.rllib_result_dir is not None: - rllib_result_dir = args.rllib_result_dir if args.rllib_result_dir[-1] != '/' \ - else args.rllib_result_dir[:-1] - - rllib_config = get_rllib_config(rllib_result_dir) - if args.exp_config: module = __import__("../../examples/exp_configs.non_rl", fromlist=[args.exp_config]) flow_params = getattr(module, args.exp_config).flow_params @@ -533,7 +415,7 @@ def create_parser(): s = [ray.cloudpickle.dumps(transfer_test) for transfer_test in inflows_range(penetration_rates=[0.0, 0.1, 0.2, 0.3])] ray_output = [replay.remote(args, flow_params, output_dir=output_dir, transfer_test=transfer_test, - rllib_config=rllib_config, result_dir=rllib_result_dir, + rllib_config=args.rllib_result_dir, result_dir=args.rllib_result_dir, max_completed_trips=args.max_completed_trips) for transfer_test in s] ray.get(ray_output) @@ -542,8 +424,9 @@ def create_parser(): assert args.controller == 'follower_stopper' ray_output = [ - replay.remote(args, flow_params, output_dir="{}/{}".format(output_dir, v_des), rllib_config=rllib_config, - result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips, v_des=v_des) + replay.remote(args, flow_params, output_dir="{}/{}".format(output_dir, v_des), + rllib_config=args.rllib_result_dir, result_dir=args.rllib_result_dir, + max_completed_trips=args.max_completed_trips, v_des=v_des) for v_des in range(8, 17, 2)] ray.get(ray_output) @@ -552,11 +435,11 @@ def create_parser(): pr = args.penetration_rate if args.penetration_rate is not None else 0 single_transfer = next(inflows_range(penetration_rates=pr)) ray.get(replay.remote(args, flow_params, output_dir=output_dir, transfer_test=single_transfer, - rllib_config=rllib_config, result_dir=rllib_result_dir, + rllib_config=args.rllib_result_dir, result_dir=args.rllib_result_dir, max_completed_trips=args.max_completed_trips)) else: ray.get(replay.remote(args, flow_params, output_dir=output_dir, - rllib_config=rllib_config, result_dir=rllib_result_dir, + rllib_config=args.rllib_result_dir, result_dir=args.rllib_result_dir, max_completed_trips=args.max_completed_trips)) if args.use_s3: diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 2ee8f624d..537bc93ee 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -15,6 +15,7 @@ import argparse import gym import numpy as np +from collections 
import defaultdict import os import sys import time @@ -45,15 +46,10 @@ """ -def visualizer_rllib(args): - """Visualizer for RLlib experiments. - - This function takes args (see function create_parser below for - more detailed information on what information can be fed to this - visualizer), and renders the experiment associated with it. - """ - result_dir = args.result_dir if args.result_dir[-1] != '/' \ - else args.result_dir[:-1] +def read_result_dir(result_dir_path, multi_only=False): + """Read the provided result_dir and get config and flow_params.""" + result_dir = result_dir_path if result_dir_path[-1] != '/' \ + else result_dir_path[:-1] config = get_rllib_config(result_dir) @@ -65,118 +61,109 @@ def visualizer_rllib(args): config['multiagent'] = pkl['multiagent'] else: multiagent = False + if multi_only: + raise NotImplementedError # Run on only one cpu for rendering purposes config['num_workers'] = 0 flow_params = get_flow_params(config) + return result_dir, config, multiagent, flow_params + +def set_sim_params(sim_params, render_mode, save_render): + """Set up sim_params according to render mode.""" # hack for old pkl files # TODO(ev) remove eventually - sim_params = flow_params['sim'] setattr(sim_params, 'num_clients', 1) # for hacks for old pkl files TODO: remove eventually if not hasattr(sim_params, 'use_ballistic'): sim_params.use_ballistic = False - # Determine agent and checkpoint - config_run = config['env_config']['run'] if 'run' in config['env_config'] \ - else None - if args.run and config_run: - if args.run != config_run: - print('visualizer_rllib.py: error: run argument ' - + '\'{}\' passed in '.format(args.run) - + 'differs from the one stored in params.json ' - + '\'{}\''.format(config_run)) - sys.exit(1) - if args.run: - agent_cls = get_agent_class(args.run) - elif config['env_config']['run'] == "": - from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel - from ray.rllib.models import ModelCatalog - agent_cls = CCTrainer - ModelCatalog.register_custom_model("cc_model", CentralizedCriticModel) - elif config['env_config']['run'] == "": - from flow.algorithms.custom_ppo import CustomPPOTrainer - agent_cls = CustomPPOTrainer - elif config_run: - agent_cls = get_agent_class(config_run) - else: - print('visualizer_rllib.py: error: could not find flow parameter ' - '\'run\' in params.json, ' - 'add argument --run to provide the algorithm or model used ' - 'to train the results\n e.g. 
'
-              'python ./visualizer_rllib.py /tmp/ray/result_dir 1 --run PPO')
-        sys.exit(1)
-
     sim_params.restart_instance = True
     dir_path = os.path.dirname(os.path.realpath(__file__))
     emission_path = '{0}/test_time_rollout/'.format(dir_path)
     sim_params.emission_path = emission_path if args.gen_emission else None
 
     # pick your rendering mode
-    if args.render_mode == 'sumo_web3d':
+    if render_mode == 'sumo_web3d':
         sim_params.num_clients = 2
         sim_params.render = False
-    elif args.render_mode == 'drgb':
+    elif render_mode == 'drgb':
         sim_params.render = 'drgb'
         sim_params.pxpm = 4
-    elif args.render_mode == 'sumo_gui':
+    elif render_mode == 'sumo_gui':
         sim_params.render = False  # will be set to True below
-    elif args.render_mode == 'no_render':
+    elif render_mode == 'no_render':
         sim_params.render = False
-    if args.save_render:
-        if args.render_mode != 'sumo_gui':
+    if save_render:
+        if render_mode != 'sumo_gui':
             sim_params.render = 'drgb'
             sim_params.pxpm = 4
             sim_params.save_render = True
+    return sim_params
 
-    # Create and register a gym+rllib env
-    exp = Experiment(flow_params, register_with_ray=True)
-    register_env(exp.env_name, exp.create_env)
-
-    # check if the environment is a single or multiagent environment, and
-    # get the right address accordingly
-    # single_agent_envs = [env for env in dir(flow.envs)
-    #                      if not env.startswith('__')]
 
-    # if flow_params['env_name'] in single_agent_envs:
-    #     env_loc = 'flow.envs'
-    # else:
-    #     env_loc = 'flow.envs.multiagent'
 
+def set_env_params(env_params, evaluate, horizon, config=None):
+    """Set up env_params according to command-line arguments."""
     # Start the environment with the gui turned on and a path for the
     # emission file
-    env_params = flow_params['env']
     env_params.restart_instance = False
-    if args.evaluate:
+    if evaluate:
         env_params.evaluate = True
 
     # lower the horizon if testing
-    if args.horizon:
-        config['horizon'] = args.horizon
-        env_params.horizon = args.horizon
+    if horizon:
+        if config:
+            config['horizon'] = horizon
+        env_params.horizon = horizon
+
+
+def set_agents(config, result_dir, env_name, run=None):
+    """Determine and create agents that will be used to compute actions."""
+    # Determine agent and checkpoint
+    config_run = config['env_config']['run'] if 'run' in config['env_config'] \
+        else None
+    if run and config_run:
+        if run != config_run:
+            print('visualizer_rllib.py: error: run argument '
+                  + '\'{}\' passed in '.format(run)
+                  + 'differs from the one stored in params.json '
+                  + '\'{}\''.format(config_run))
+            sys.exit(1)
+    if run:
+        agent_cls = get_agent_class(run)
+    elif config['env_config']['run'] == "":
+        from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel
+        from ray.rllib.models import ModelCatalog
+        agent_cls = CCTrainer
+        ModelCatalog.register_custom_model("cc_model", CentralizedCriticModel)
+    elif config['env_config']['run'] == "":
+        from flow.algorithms.custom_ppo import CustomPPOTrainer
+        agent_cls = CustomPPOTrainer
+    elif config_run:
+        agent_cls = get_agent_class(config_run)
+    else:
+        print('visualizer_rllib.py: error: could not find flow parameter '
+              '\'run\' in params.json, '
+              'add argument --run to provide the algorithm or model used '
+              'to train the results\n e.g. '
+              'python ./visualizer_rllib.py /tmp/ray/result_dir 1 --run PPO')
+        sys.exit(1)
+
     # create the agent that will be used to compute the actions
-    agent = agent_cls(env=exp.env_name, config=config)
+    agent = agent_cls(env=env_name, config=config)
     checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num
     checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num
     agent.restore(checkpoint)
 
-    if hasattr(agent, "local_evaluator") and \
-            os.environ.get("TEST_FLAG") != 'True':
-        exp.env = agent.local_evaluator.env
-    else:
-        exp.env = gym.make(exp.env_name)
-
-    # reroute on exit is a training hack, it should be turned off at test time.
-    if hasattr(exp.env, "reroute_on_exit"):
-        exp.env.reroute_on_exit = False
+    return agent
 
-    if args.render_mode == 'sumo_gui':
-        exp.env.sim_params.render = True  # set to True after initializing agent and env
+
+def get_rl_action(config, agent, multiagent, multi_only=False):
+    """Return a function that computes actions for a given state."""
+    policy_map_fn = None
 
     if multiagent:
         rets = {}
         # map the agent id to its policy
@@ -186,17 +173,13 @@
     else:
         rets = []
 
-    policy_map_fn = None
     if config['model']['use_lstm']:
         use_lstm = True
         if multiagent:
             state_init = {}
-            # map the agent id to its policy
-            policy_map_fn = config['multiagent']['policy_mapping_fn']
             size = config['model']['lstm_cell_size']
-            for key in config['multiagent']['policies'].keys():
-                state_init[key] = [np.zeros(size, np.float32),
-                                   np.zeros(size, np.float32)]
+            state_init = defaultdict(lambda: [np.zeros(size, np.float32),
+                                              np.zeros(size, np.float32)])
         else:
             state_init = [
                 np.zeros(config['model']['lstm_cell_size'], np.float32),
                 np.zeros(config['model']['lstm_cell_size'], np.float32)
@@ -205,10 +188,6 @@
     else:
         use_lstm = False
 
-    # if restart_instance, don't restart here because env.reset will restart later
-    if not sim_params.restart_instance:
-        exp.env.restart_simulation(sim_params=sim_params, render=sim_params.render)
-
     def rl_action(state):
         if multiagent:
             action = {}
@@ -222,8 +201,59 @@ def rl_action(state):
                     action[agent_id] = agent.compute_action(
                         state[agent_id], policy_id=policy_map_fn(agent_id))
         else:
+            if use_lstm and multi_only:
+                raise NotImplementedError
             action = agent.compute_action(state)
         return action
+    return rl_action, policy_map_fn, rets
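The `defaultdict` used above lazily allocates a zeroed hidden/cell pair the first time a given agent id is seen, which matters on open networks where vehicles (and hence agent ids) enter mid-rollout. A self-contained sketch of the pattern, assuming a cell size of 64:

    from collections import defaultdict

    import numpy as np

    size = 64  # assumed lstm_cell_size
    lstm_state = defaultdict(lambda: [np.zeros(size, np.float32),
                                      np.zeros(size, np.float32)])

    # A previously unseen agent id gets fresh zeroed states on first access.
    hidden, cell = lstm_state['av_12']
    assert hidden.shape == (size,) and not hidden.any()
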
+
+
+def visualizer_rllib(args):
+    """Visualizer for RLlib experiments.
+
+    This function takes args (see function create_parser below for
+    more detailed information on what information can be fed to this
+    visualizer), and renders the experiment associated with it.
+    """
+    result_dir, config, multiagent, flow_params = read_result_dir(args.result_dir)
+
+    sim_params = set_sim_params(flow_params['sim'], args.render_mode, args.save_render)
+
+    # Create and register a gym+rllib env
+    exp = Experiment(flow_params, register_with_ray=True)
+    register_env(exp.env_name, exp.create_env)
+
+    # check if the environment is a single or multiagent environment, and
+    # get the right address accordingly
+    # single_agent_envs = [env for env in dir(flow.envs)
+    #                      if not env.startswith('__')]
+
+    # if flow_params['env_name'] in single_agent_envs:
+    #     env_loc = 'flow.envs'
+    # else:
+    #     env_loc = 'flow.envs.multiagent'
+    set_env_params(flow_params['env'], args.evaluate, args.horizon, config)
+
+    agent = set_agents(config, result_dir, exp.env_name, run=args.run)
+
+    if hasattr(agent, "local_evaluator") and \
+            os.environ.get("TEST_FLAG") != 'True':
+        exp.env = agent.local_evaluator.env
+    else:
+        exp.env = gym.make(exp.env_name)
+
+    # reroute on exit is a training hack, it should be turned off at test time.
+    if hasattr(exp.env, "reroute_on_exit"):
+        exp.env.reroute_on_exit = False
+
+    if args.render_mode == 'sumo_gui':
+        exp.env.sim_params.render = True  # set to True after initializing agent and env
+
+    rl_action, policy_map_fn, rets = get_rl_action(config, agent, multiagent)
+
+    # if restart_instance, don't restart here because env.reset will restart later
+    if not sim_params.restart_instance:
+        exp.env.restart_simulation(sim_params=sim_params, render=sim_params.render)
 
     exp.run(num_runs=args.num_rollouts, convert_to_csv=args.gen_emission, to_aws=args.to_aws,
             rl_actions=rl_action, multiagent=multiagent, rets=rets, policy_map_fn=policy_map_fn)

From c1670a23f4ba21d53e3adb4eb920c575d7b8ea01 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Mon, 13 Jul 2020 06:43:14 -0700
Subject: [PATCH 328/438] remove commented code in i210_replay

---
 flow/visualize/i210_replay.py | 134 ----------------------------------
 1 file changed, 134 deletions(-)

diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py
index 9b0c4cf3b..f5238bf2c 100644
--- a/flow/visualize/i210_replay.py
+++ b/flow/visualize/i210_replay.py
@@ -144,140 +144,6 @@ def rl_action(state):
         rl_actions=rl_action, multiagent=rllib_config and multiagent, rets=rets,
         policy_map_fn=policy_map_fn)
 
-    # i = 0
-    # while i < args.num_rollouts:
-    #     print("Rollout iter", i)
-    #     vel = []
-    #     per_vehicle_energy_trace = defaultdict(lambda: [])
-    #     completed_veh_types = {}
-    #     completed_vehicle_avg_energy = {}
-    #     completed_vehicle_travel_time = {}
-    #     custom_vals = {key: [] for key in custom_callables.keys()}
-    #     run_id = "run_{}".format(i)
-    #     env.pipeline_params = (extra_info, source_id, run_id)
-    #     state = env.reset()
-    #     initial_vehicles = set(env.k.vehicle.get_ids())
-    #     for _ in range(env_params.horizon):
-    #
-    #
-    #         state, reward, done, _ = env.step(action)
-    #
-    #         # Compute the velocity speeds and cumulative returns.
-    #         veh_ids = env.k.vehicle.get_ids()
-    #         vel.append(np.mean(env.k.vehicle.get_speed(veh_ids)))
-    #
-    #         # collect additional information for the data pipeline
-    #         get_extra_info(env.k.vehicle, extra_info, veh_ids, source_id, run_id)
-    #
-    #         # Compute the results for the custom callables.
- # for (key, lambda_func) in custom_callables.items(): - # custom_vals[key].append(lambda_func(env)) - # - # for past_veh_id in per_vehicle_energy_trace.keys(): - # if past_veh_id not in veh_ids and past_veh_id not in completed_vehicle_avg_energy: - # all_trip_energy_distribution[completed_veh_types[past_veh_id]].append( - # np.sum(per_vehicle_energy_trace[past_veh_id])) - # all_trip_time_distribution[completed_veh_types[past_veh_id]].append( - # len(per_vehicle_energy_trace[past_veh_id])) - # completed_vehicle_avg_energy[past_veh_id] = np.sum(per_vehicle_energy_trace[past_veh_id]) - # completed_vehicle_travel_time[past_veh_id] = len(per_vehicle_energy_trace[past_veh_id]) - # - # for veh_id in veh_ids: - # if veh_id not in initial_vehicles: - # if veh_id not in per_vehicle_energy_trace: - # # we have to skip the first step's energy calculation - # per_vehicle_energy_trace[veh_id].append(0) - # completed_veh_types[veh_id] = env.k.vehicle.get_type(veh_id) - # else: - # per_vehicle_energy_trace[veh_id].append(-1 * vehicle_energy_consumption(env, veh_id)) - # - # if type(done) is dict and done['__all__']: - # break - # elif type(done) is not dict and done: - # break - # elif max_completed_trips is not None and len(completed_vehicle_avg_energy) > max_completed_trips: - # break - # if env.crash: - # print("Crash on iter", i) - # else: - # # Store the information from the run in info_dict. - # outflow = env.k.vehicle.get_outflow_rate(int(500)) - # info_dict["velocities"].append(np.mean(vel)) - # info_dict["outflows"].append(outflow) - # info_dict["avg_trip_energy"].append(np.mean(list(completed_vehicle_avg_energy.values()))) - # info_dict["avg_trip_time"].append(np.mean(list(completed_vehicle_travel_time.values()))) - # info_dict["total_completed_trips"].append(len(list(completed_vehicle_avg_energy.values()))) - # for key in custom_vals.keys(): - # info_dict[key].append(np.mean(custom_vals[key])) - # i += 1 - # - # print('======== Summary of results ========') - # if args.run_transfer: - # print("Transfer test: {}".format(transfer_test.transfer_str)) - # print("====================================") - # - # # Print the averages/std for all variables in the info_dict. 
- # for key in info_dict.keys(): - # print("Average, std {}: {}, {}".format( - # key, np.mean(info_dict[key]), np.std(info_dict[key]))) - # - # # terminate the environment - # env.unwrapped.terminate() - # - # if output_dir: - # ensure_dir(output_dir) - # if args.run_transfer: - # exp_name = "{}-replay".format(transfer_test.transfer_str) - # else: - # exp_name = "i210_replay" - # replay_out = os.path.join(output_dir, '{}-info.npy'.format(exp_name)) - # np.save(replay_out, info_dict) - # # if prompted, convert the emission file into a csv file - # if args.gen_emission: - # emission_filename = '{0}-emission.xml'.format(env.network.name) - # time.sleep(0.1) - # - # emission_path = \ - # '{0}/test_time_rollout/{1}'.format(dir_path, emission_filename) - # - # output_path = os.path.join(output_dir, '{}-emission.csv'.format(exp_name)) - # # convert the emission file into a csv file - # emission_to_csv(emission_path, output_path=output_path) - # - # # generate the trajectory output file - # trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - # write_dict_to_csv(trajectory_table_path, extra_info, True) - # metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) - # write_dict_to_csv(metadata_table_path, metadata, True) - # - # # upload to s3 if asked - # if args.use_s3: - # upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' - # '{1}_METADATA.csv'.format(cur_date, source_id), - # metadata_table_path) - # upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( - # cur_date, source_id), - # trajectory_table_path, {'network': metadata['network'][0]}) - # - # # print the location of the emission csv file - # print("\nGenerated emission file at " + output_path) - # - # # delete the .xml version of the emission file - # os.remove(emission_path) - # - # all_trip_energies = os.path.join(output_dir, '{}-all_trip_energies.npy'.format(exp_name)) - # np.save(all_trip_energies, dict(all_trip_energy_distribution)) - # fig_names, figs = plot_trip_distribution(all_trip_energy_distribution) - # - # for fig_name, fig in zip(fig_names, figs): - # edist_out = os.path.join(output_dir, '{}_energy_distribution.png'.format(fig_name)) - # fig.savefig(edist_out) - # - # # Create the flow_params object - # with open(os.path.join(output_dir, exp_name) + '.json', 'w') as outfile: - # json.dump(flow_params, outfile, - # cls=FlowParamsEncoder, sort_keys=True, indent=4) - return info_dict From b7bbf99c0a6611f93e3bdb10ed534ecedebfebab Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 13 Jul 2020 16:24:01 -0700 Subject: [PATCH 329/438] Straight Road warmup steps to 500 Co-authored-by: Aboudy Kreidieh --- .../exp_configs/rl/singleagent/singleagent_straight_road.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/exp_configs/rl/singleagent/singleagent_straight_road.py b/examples/exp_configs/rl/singleagent/singleagent_straight_road.py index 265d34d42..efd56214a 100644 --- a/examples/exp_configs/rl/singleagent/singleagent_straight_road.py +++ b/examples/exp_configs/rl/singleagent/singleagent_straight_road.py @@ -120,7 +120,7 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, - warmup_steps=0, + warmup_steps=500, sims_per_step=1, # do not put more than one done_at_exit=done_at_exit, additional_params=additional_env_params, From 94e4b4414eb1859e719cd724dbc54b6f0c665eaf Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: 
Mon, 13 Jul 2020 17:01:28 -0700
Subject: [PATCH 330/438] reformat leaderboard output

---
 flow/data_pipeline/query.py | 58 +++++++++++++++++++++++++------------
 1 file changed, 39 insertions(+), 19 deletions(-)

diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index f68dfa321..02290a4b2 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -733,27 +733,47 @@ class QueryStrings(Enum):
         AND (m.is_baseline='False'
              OR (m.is_baseline='True'
                  AND m.source_id = b.source_id))
+    ), joined_cols AS (
+        SELECT
+            agg.submission_date,
+            agg.source_id,
+            agg.submitter_name,
+            agg.strategy,
+            agg.network,
+            agg.is_baseline,
+            agg.energy_model_id,
+            agg.efficiency_meters_per_joules,
+            agg.efficiency_miles_per_gallon,
+            100 * (1 - baseline.efficiency_miles_per_gallon / agg.efficiency_miles_per_gallon)
+                AS fuel_improvement,
+            agg.throughput_per_hour,
+            100 * (agg.throughput_per_hour - baseline.throughput_per_hour) / baseline.throughput_per_hour
+                AS throughput_improvement,
+            agg.safety_rate,
+            agg.safety_value_max
+        FROM agg
+        JOIN agg AS baseline ON 1 = 1
+            AND agg.network = baseline.network
+            AND baseline.is_baseline = 'True'
+            AND agg.baseline_source_id = baseline.source_id
     )
     SELECT
-        agg.submission_date,
-        agg.source_id,
-        agg.submitter_name,
-        agg.strategy,
-        agg.network,
-        agg.is_baseline,
-        agg.energy_model_id,
-        agg.efficiency_meters_per_joules,
-        agg.efficiency_miles_per_gallon,
-        100 * (1 - baseline.efficiency_miles_per_gallon / agg.efficiency_miles_per_gallon) AS percent_improvement,
-        agg.throughput_per_hour,
-        agg.safety_rate,
-        agg.safety_value_max
-    FROM agg
-    JOIN agg AS baseline ON 1 = 1
-        AND agg.network = baseline.network
-        AND baseline.is_baseline = 'True'
-        AND agg.baseline_source_id = baseline.source_id
-    ORDER BY agg.submission_date, agg.submission_time ASC
+        submission_date,
+        source_id,
+        submitter_name,
+        strategy,
+        network,
+        is_baseline,
+        energy_model_id,
+        ROUND(efficiency_miles_per_gallon, 1) ||
+        ' (' || CASE WHEN SIGN(fuel_improvement) = 1 THEN '+' ELSE '' END ||
+        ROUND(fuel_improvement, 1) || ')' AS efficiency,
+        ROUND(throughput_per_hour, 1) ||
+        ' (' || CASE WHEN SIGN(throughput_improvement) = 1 THEN '+' ELSE '' END ||
+        ROUND(throughput_improvement, 1) || ')' AS inflow,
+        ROUND(safety_rate, 1) AS safety_rate,
+        ROUND(safety_value_max, 1) AS safety_value_max
+    FROM joined_cols
     ;"""

     FACT_TOP_SCORES = """

From 337189f8b755e8e9a8988065701647157d20842b Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Mon, 13 Jul 2020 19:05:48 -0700
Subject: [PATCH 331/438] update fact_max_score to use mpg

---
 flow/data_pipeline/query.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index 02290a4b2..c51543b6d 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -765,6 +765,7 @@ class QueryStrings(Enum):
         network,
         is_baseline,
         energy_model_id,
+        efficiency_miles_per_gallon,
         ROUND(efficiency_miles_per_gallon, 1) ||
         ' (' || CASE WHEN SIGN(fuel_improvement) = 1 THEN '+' ELSE '' END ||
         ROUND(fuel_improvement, 1) || ')' AS efficiency,
@@ -781,7 +782,7 @@ class QueryStrings(Enum):
         SELECT
             network,
             submission_date,
-            1000 * MAX(efficiency_meters_per_joules)
+            1000 * MAX(efficiency_miles_per_gallon)
                 OVER (PARTITION BY network ORDER BY submission_date ASC
                       ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS max_score
         FROM leaderboard_chart_agg
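Concretely, the string concatenation in these queries renders each leaderboard cell as the rounded metric followed by its signed delta against the baseline, e.g. '42.3 (+5.1)'. A small Python sketch of the same formatting rule, for illustration only (it mirrors the SQL above rather than any shipped helper):

    def format_cell(value, improvement):
        # Round to one decimal and append a signed delta, e.g. '42.3 (+5.1)'.
        sign = '+' if improvement > 0 else ''
        return '{} ({}{})'.format(round(value, 1), sign, round(improvement, 1))

    assert format_cell(42.34, 5.06) == '42.3 (+5.1)'
    assert format_cell(42.34, -2.44) == '42.3 (-2.4)'
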
Subject: [PATCH 332/438] update repo path --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 297281bc7..f92decccc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -41,13 +41,13 @@ before_install: - source activate flow # [sumo] dependencies and binaries - - pushd $HOME/build/flow-project + - pushd $HOME/build/CIRCLES-consortium - ./flow/scripts/setup_sumo_ubuntu1604.sh - popd - source ~/.bashrc # [aimsun] install the conda env and update the path to the env - - pushd $HOME/build/flow-project + - pushd $HOME/build/CIRCLES-consortium - ./flow/scripts/setup_aimsun.sh - popd - source ~/.bashrc From aadd3a2ecbd60debda4ca612e5371bc7b7f2288c Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 13 Jul 2020 22:44:19 -0700 Subject: [PATCH 333/438] update conda env name --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index f92decccc..a802f5d23 100644 --- a/.travis.yml +++ b/.travis.yml @@ -38,7 +38,7 @@ before_install: # Set up requirements for flow - conda env create -f environment.yml - - source activate flow + - source activate circles # [sumo] dependencies and binaries - pushd $HOME/build/CIRCLES-consortium From c0c20d3c8097f471ada16baf59673bcda02bca4e Mon Sep 17 00:00:00 2001 From: Rchide Date: Tue, 14 Jul 2020 15:37:42 +0200 Subject: [PATCH 334/438] (re)Add libsumo build script --- scripts/setup_libsumo_ubuntu.sh | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 scripts/setup_libsumo_ubuntu.sh diff --git a/scripts/setup_libsumo_ubuntu.sh b/scripts/setup_libsumo_ubuntu.sh new file mode 100644 index 000000000..09cee8c08 --- /dev/null +++ b/scripts/setup_libsumo_ubuntu.sh @@ -0,0 +1,19 @@ +#!/bin/bash +echo "Installing system dependencies for SUMO" +sudo apt-get update +sudo apt-get install cmake python g++ libxerces-c-dev libfox-1.6-dev +sudo apt-get install libgdal-dev libproj-dev libgl2ps-dev swig + +echo "Installing sumo binaries and python tools" +mkdir -p $HOME/sumo_binaries +pushd $HOME/sumo_binaries +git clone https://github.com/eclipse/sumo.git +cd sumo +git checkout 2147d155b1 +cmake . 
+make -j$(nproc)
+popd
+
+echo 'export PATH="$PATH:$HOME/sumo_binaries/sumo/bin"' >> ~/.bashrc
+echo 'export SUMO_HOME="$HOME/sumo_binaries/sumo"' >> ~/.bashrc
+echo 'export PYTHONPATH="$PYTHONPATH:$HOME/sumo_binaries/sumo/tools"' >> ~/.bashrc

From 53dceb5fb856968004f2661c7815521809a22dc5 Mon Sep 17 00:00:00 2001
From: Rchide
Date: Tue, 14 Jul 2020 15:59:27 +0200
Subject: [PATCH 335/438] Update .travis.yml + script correction

---
 .travis.yml | 2 +-
 scripts/setup_sumo_osx.sh | 4 ++--
 scripts/setup_sumo_ubuntu1404.sh | 2 +-
 scripts/setup_sumo_ubuntu1604.sh | 6 +++---
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index b2cb1a897..30f3174a4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -42,7 +42,7 @@ before_install:

   # [sumo] dependencies and binaries
   - pushd $HOME/build/flow-project
-  - ./flow/scripts/setup_libsumo_ubuntu.sh
+  - ./flow/scripts/setup_sumo_ubuntu1604.sh
   - popd
   - source ~/.bashrc

diff --git a/scripts/setup_sumo_osx.sh b/scripts/setup_sumo_osx.sh
index 6f036678f..6f9a946c0 100755
--- a/scripts/setup_sumo_osx.sh
+++ b/scripts/setup_sumo_osx.sh
@@ -13,8 +13,8 @@ echo "Installing sumo binaries"
 mkdir -p $HOME/sumo_binaries/bin
 pushd $HOME/sumo_binaries/bin
 wget https://flow-sumo.s3-us-west-1.amazonaws.com/libsumo/sumo_binaries_macos.zip
-tar -xf binaries-mac.tar.xz
-rm binaries-mac.tar.xz
+unzip sumo_binaries_macos.zip
+rm sumo_binaries_macos.zip
 chmod +x *
 popd
 export SUMO_HOME="$HOME/sumo_binaries/bin"

diff --git a/scripts/setup_sumo_ubuntu1404.sh b/scripts/setup_sumo_ubuntu1404.sh
index ea6487183..20c49f517 100755
--- a/scripts/setup_sumo_ubuntu1404.sh
+++ b/scripts/setup_sumo_ubuntu1404.sh
@@ -12,7 +12,7 @@ echo "Installing sumo binaries"
 mkdir -p $HOME/sumo_binaries/bin
 pushd $HOME/sumo_binaries/bin
 wget https://flow-sumo.s3-us-west-1.amazonaws.com/libsumo/sumo_binaries_ubuntu1404.tar.gz
-tar -xf sumo_binaries_ubuntu1404.tar.gz
+tar -zxvf sumo_binaries_ubuntu1404.tar.gz
 rm sumo_binaries_ubuntu1404.tar.gz
 chmod +x *
 popd

diff --git a/scripts/setup_sumo_ubuntu1604.sh b/scripts/setup_sumo_ubuntu1604.sh
index 7fe4b2629..e78f69533 100755
--- a/scripts/setup_sumo_ubuntu1604.sh
+++ b/scripts/setup_sumo_ubuntu1604.sh
@@ -11,8 +11,8 @@ sudo pip3 install cmake cython

 echo "Installing sumo binaries"
 cd $HOME
-wget https://flow-sumo.s3-us-west-1.amazonaws.com/libsumo/sumo_binaries_ubuntu1804.tar.gz
-tar -zxvf sumo_binaries_ubuntu1804.tar.gz
-rm sumo_binaries_ubuntu1804.tar.gz
+wget https://flow-sumo.s3-us-west-1.amazonaws.com/libsumo/sumo_binaries_ubuntu1604.tar.gz
+tar -zxvf sumo_binaries_ubuntu1604.tar.gz
+rm sumo_binaries_ubuntu1604.tar.gz
 cd sumo_binaries
 chmod +x *

From f12add4989fb3487481ddecae9aff19d47061829 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Tue, 14 Jul 2020 21:34:05 -0700
Subject: [PATCH 336/438] set warmup_steps = 500 for straight road; set display_warnings = True

---
 .../rl/multiagent/multiagent_straight_road.py | 9 +++------
 flow/controllers/base_controller.py | 2 +-
 flow/controllers/car_following_models.py | 16 ++++++++--------
 3 files changed, 12 insertions(+), 15 deletions(-)

diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py
index 73460d656..e0726b059 100644
--- a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py
+++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py
@@ -28,6 +28,8 @@

 PENETRATION_RATE = 10.0

+WARMUP_STEPS = 500
+
 additional_net_params = ADDITIONAL_NET_PARAMS.copy()
 additional_net_params.update({
     # length of the highway
@@
-140,11 +142,6 @@ depart_speed=TRAFFIC_SPEED, name="rl_highway_inflow") -# SET UP FLOW PARAMETERS -warmup_steps = 0 -if additional_env_params['reroute_on_exit']: - warmup_steps = 500 - flow_params = dict( # name of the experiment exp_tag='multiagent_highway', @@ -161,7 +158,7 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, - warmup_steps=warmup_steps, + warmup_steps=WARMUP_STEPS, sims_per_step=3, additional_params=additional_env_params ), diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index e732415f6..3c9985360 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -49,7 +49,7 @@ def __init__(self, car_following_params, delay=0, fail_safe=None, - display_warnings=False, + display_warnings=True, noise=0): """Instantiate the base class for acceleration behavior.""" self.veh_id = veh_id diff --git a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py index 2840e291e..a26b52cb1 100755 --- a/flow/controllers/car_following_models.py +++ b/flow/controllers/car_following_models.py @@ -57,7 +57,7 @@ def __init__(self, time_delay=0.0, noise=0, fail_safe=None, - display_warnings=False): + display_warnings=True): """Instantiate a CFM controller.""" BaseController.__init__( self, @@ -136,7 +136,7 @@ def __init__(self, time_delay=0.0, noise=0, fail_safe=None, - display_warnings=False): + display_warnings=True): """Instantiate a Bilateral car-following model controller.""" BaseController.__init__( self, @@ -219,7 +219,7 @@ def __init__(self, time_delay=0.0, noise=0, fail_safe=None, - display_warnings=False): + display_warnings=True): """Instantiate a Linear Adaptive Cruise controller.""" BaseController.__init__( self, @@ -299,7 +299,7 @@ def __init__(self, time_delay=0, noise=0, fail_safe=None, - display_warnings=False): + display_warnings=True): """Instantiate an Optimal Vehicle Model controller.""" BaseController.__init__( self, @@ -377,7 +377,7 @@ def __init__(self, time_delay=0.0, noise=0, fail_safe=None, - display_warnings=False): + display_warnings=True): """Instantiate a Linear OVM controller.""" BaseController.__init__( self, @@ -460,7 +460,7 @@ def __init__(self, time_delay=0.0, noise=0, fail_safe=None, - display_warnings=False, + display_warnings=True, car_following_params=None): """Instantiate an IDM controller.""" BaseController.__init__( @@ -565,7 +565,7 @@ def __init__(self, delay=0, noise=0, fail_safe=None, - display_warnings=False): + display_warnings=True): """Instantiate a Gipps' controller.""" BaseController.__init__( self, @@ -648,7 +648,7 @@ def __init__(self, time_delay=0, noise=0, fail_safe=None, - display_warnings=False): + display_warnings=True): """Instantiate an Bando controller.""" BaseController.__init__( self, From fb7447250db0a1e44cd67ef79b1cf93ea49ac9d3 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 14 Jul 2020 21:48:45 -0700 Subject: [PATCH 337/438] fix pydocstyle and flake8 --- flow/core/experiment.py | 1 - flow/visualize/i210_replay.py | 30 +++++------------------------- flow/visualize/visualizer_rllib.py | 6 +----- 3 files changed, 6 insertions(+), 31 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 5950c0ec0..eeab10863 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -99,7 +99,6 @@ def __init__(self, flow_params, custom_callables=None, register_with_ray=False): logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, 
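With display_warnings now defaulting to True in the controllers patched above, an experiment config that wants the old quiet behavior has to opt out per vehicle type. A minimal sketch, assuming the usual VehicleParams API:

```python
# a minimal sketch, not part of the patch itself: opting a vehicle type back
# out of the new display_warnings=True default
from flow.controllers.car_following_models import IDMController
from flow.core.params import VehicleParams

vehicles = VehicleParams()
vehicles.add(
    "human",
    num_vehicles=0,
    acceleration_controller=(IDMController, {
        "a": 0.3, "b": 2.0, "noise": 0.5,
        "display_warnings": False,  # restore the pre-patch default
    }),
)
```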
diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index f5238bf2c..3cab93cd6 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -1,39 +1,19 @@ """Transfer and replay for i210 environment.""" import argparse -from datetime import datetime, timezone -from collections import defaultdict +from datetime import datetime from copy import deepcopy -import numpy as np -import json import os import pytz import subprocess -import time - -import ray - -try: - from ray.rllib.agents.agent import get_agent_class -except ImportError: - from ray.rllib.agents.registry import get_agent_class -from ray.tune.registry import register_env - -from flow.core.util import emission_to_csv, ensure_dir -from flow.core.rewards import veh_energy_consumption -from flow.utils.registry import make_create_env -from flow.utils.rllib import get_flow_params -from flow.utils.rllib import get_rllib_config -from flow.utils.rllib import get_rllib_pkl -from flow.utils.rllib import FlowParamsEncoder - -from flow.visualize.transfer.util import inflows_range -from flow.visualize.plot_custom_callables import plot_trip_distribution from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables - from flow.core.experiment import Experiment +from flow.utils.registry import make_create_env +from flow.visualize.transfer.util import inflows_range from flow.visualize.visualizer_rllib import read_result_dir, set_sim_params, set_env_params, set_agents, get_rl_action +import ray +from ray.tune.registry import register_env EXAMPLE_USAGE = """ example usage: diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 537bc93ee..1e3dc56fd 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -18,7 +18,6 @@ from collections import defaultdict import os import sys -import time import ray try: @@ -27,9 +26,6 @@ from ray.rllib.agents.registry import get_agent_class from ray.tune.registry import register_env -from flow.core.rewards import instantaneous_mpg -from flow.core.util import emission_to_csv -from flow.utils.registry import make_create_env from flow.utils.rllib import get_flow_params from flow.utils.rllib import get_rllib_config from flow.utils.rllib import get_rllib_pkl @@ -106,7 +102,7 @@ def set_sim_params(sim_params, render_mode, save_render): def set_env_params(env_params, evaluate, horizon, config=None): - """Set up env_params according to commandline arguments""" + """Set up env_params according to commandline arguments.""" # Start the environment with the gui turned on and a path for the # emission file env_params.restart_instance = False From 9987bd56eddd264353ec6a04fa5cb106a0832ce2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 14 Jul 2020 22:37:54 -0700 Subject: [PATCH 338/438] fix flake8 --- examples/train.py | 8 ++------ flow/core/kernel/simulation/traci.py | 9 +++++---- flow/core/kernel/vehicle/base.py | 1 - 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/examples/train.py b/examples/train.py index 21511bc9f..2d6b67990 100644 --- a/examples/train.py +++ b/examples/train.py @@ -533,6 +533,7 @@ def main(args): flow_params['sim'].use_libsumo = flags.libsumo if flags.rl_trainer.lower() == "rllib": + import ray n_cpus = submodule.N_CPUS n_rollouts = submodule.N_ROLLOUTS policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) @@ -560,12 +561,7 @@ def main(args): if 
flags.checkpoint_path is not None: exp_config['restore'] = flags.checkpoint_path - trials = run_experiments({flow_params["exp_tag"]: exp_config}) - - elif flags.rl_trainer == "Stable-Baselines": - # Path to the saved files - exp_tag = flow_params['exp_tag'] - result_name = '{}/{}'.format(exp_tag, strftime("%Y-%m-%d-%H:%M:%S")) + # trials = run_experiments({flow_params["exp_tag"]: exp_config}) # Perform the training operation. if flags.rl_trainer.lower() == "rllib": diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py index 21f4f4494..23f579d0e 100644 --- a/flow/core/kernel/simulation/traci.py +++ b/flow/core/kernel/simulation/traci.py @@ -1,9 +1,5 @@ """Script containing the TraCI simulation kernel class.""" -from flow.core.kernel.simulation import KernelSimulation -import flow.config as config -import traci.constants as tc -import traci import traceback import os import time @@ -12,6 +8,11 @@ import signal import csv +import flow.config as config +from flow.core.kernel.simulation import KernelSimulation +from flow.core.util import ensure_dir +import traci.constants as tc +import traci # Number of retries on restarting SUMO before giving up RETRIES_ON_ERROR = 10 diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 893bb4ae5..5b4ba0622 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -405,7 +405,6 @@ def get_default_speed(self, veh_id, error=-1001): pass @abstractmethod - def get_position(self, veh_id, error=-1001): """Return the position of the vehicle relative to its current edge. From 16e0b0d11207a3a727c3fb80951f1b189ba5977d Mon Sep 17 00:00:00 2001 From: Rchide Date: Wed, 15 Jul 2020 09:36:42 +0200 Subject: [PATCH 339/438] Add get_default_speed --- flow/core/kernel/vehicle/traci.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index af9631166..9be0bd9a0 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -580,6 +580,13 @@ def get_speed(self, veh_id, error=-1001): return [self.get_speed(vehID, error) for vehID in veh_id] return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_SPEED, error) + def get_default_speed(self, veh_id, error=-1001): + """See parent class.""" + if isinstance(veh_id, (list, np.ndarray)): + return [self.get_default_speed(vehID, error) for vehID in veh_id] + return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_SPEED_WITHOUT_TRACI, + error) + def get_position(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): From cb2c4047e9749247111fb76f0a3a4c36b373bbfd Mon Sep 17 00:00:00 2001 From: Rchide Date: Wed, 15 Jul 2020 10:49:21 +0200 Subject: [PATCH 340/438] Add subscription_results VARs --- flow/core/kernel/vehicle/traci.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 9be0bd9a0..0a457530e 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1162,7 +1162,17 @@ def _get_libsumo_subscription_results(self, veh_id): tc.VAR_SPEED: self.kernel_api.vehicle.getSpeed(veh_id), tc.VAR_EDGES: self.kernel_api.vehicle.getRoute(veh_id), tc.VAR_LEADER: - self.kernel_api.vehicle.getLeader(veh_id, dist=2000) + self.kernel_api.vehicle.getLeader(veh_id, dist=2000), + tc.VAR_ANGLE: + self.kernel_api.vehicle.getAngle(veh_id), + tc.VAR_DISTANCE: + self.kernel_api.vehicle.getDistance(veh_id), 
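+                # the entries below are fetched straight from the kernel API so
+                # that libsumo callers receive the same keys a regular TraCI
+                # subscription would provide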
+ tc.VAR_FUELCONSUMPTION: + self.kernel_api.vehicle.getFuelConsumption(veh_id), + tc.VAR_POSITION: + self.kernel_api.vehicle.getPosition(veh_id), + tc.VAR_SPEED_WITHOUT_TRACI: + self.kernel_api.vehicle.getSpeedWithoutTraCI(veh_id), } except (TraCIException, FatalTraCIError, libsumo.TraCIException): # This is in case a vehicle exited the network and has not been From 20f349b5e2d69cfae22cb270e075785bcc4500fc Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 15 Jul 2020 01:58:13 -0700 Subject: [PATCH 341/438] fix some bugs --- flow/core/experiment.py | 6 +++--- flow/visualize/visualizer_rllib.py | 7 ++++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index eeab10863..dc462b85b 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -160,6 +160,8 @@ def run(self, "avg_trip_time": [], "total_completed_trips": [] } + if not multiagent: + info_dict["returns"] = [] all_trip_energy_distribution = defaultdict(lambda: []) all_trip_time_distribution = defaultdict(lambda: []) @@ -268,13 +270,11 @@ def rl_actions(*_): if rets and multiagent: for key in rets.keys(): rets[key].append(ret[key]) - elif not multiagent: - rets.append(ret) # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) if not multiagent: - info_dict["returns"] = rets + info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) info_dict["avg_trip_energy"].append(np.mean(list(completed_vehicle_avg_energy.values()))) diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 1e3dc56fd..8b444c5e0 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -67,7 +67,7 @@ def read_result_dir(result_dir_path, multi_only=False): return result_dir, config, multiagent, flow_params -def set_sim_params(sim_params, render_mode, save_render): +def set_sim_params(sim_params, render_mode, save_render, gen_emission): """Set up sim_params according to render mode.""" # hack for old pkl files # TODO(ev) remove eventually @@ -80,7 +80,7 @@ def set_sim_params(sim_params, render_mode, save_render): sim_params.restart_instance = True dir_path = os.path.dirname(os.path.realpath(__file__)) emission_path = '{0}/test_time_rollout/'.format(dir_path) - sim_params.emission_path = emission_path if args.gen_emission else None + sim_params.emission_path = emission_path if gen_emission else None # pick your rendering mode if render_mode == 'sumo_web3d': @@ -213,7 +213,8 @@ def visualizer_rllib(args): """ result_dir, config, multiagent, flow_params = read_result_dir(args.result_dir) - sim_params = set_sim_params(flow_params['sim'], args.render_mode, args.save_render) + sim_params = set_sim_params(flow_params['sim'], args.render_mode, + args.save_render, args.gen_emission) # Create and register a gym+rllib env exp = Experiment(flow_params, register_with_ray=True) From e248c1a9a932c1c45dd26dfebc978e7e6f30db84 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 15 Jul 2020 12:03:10 -0700 Subject: [PATCH 342/438] cleanup references to args --- flow/visualize/visualizer_rllib.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 8b444c5e0..1f0b3c42c 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -112,11 +112,11 @@ def set_env_params(env_params, evaluate, horizon, 
config=None): # lower the horizon if testing if horizon: if config: - config['horizon'] = args.horizon - env_params.horizon = args.horizon + config['horizon'] = horizon + env_params.horizon = horizon -def set_agents(config, result_dir, env_name, run=None): +def set_agents(config, result_dir, env_name, run=None, checkpoint_num=None): """Determine and create agents that will be used to compute actions.""" # Determine agent and checkpoint config_run = config['env_config']['run'] if 'run' in config['env_config'] \ @@ -150,8 +150,8 @@ def set_agents(config, result_dir, env_name, run=None): # create the agent that will be used to compute the actions agent = agent_cls(env=env_name, config=config) - checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num - checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num + checkpoint = result_dir + '/checkpoint_' + checkpoint_num + checkpoint = checkpoint + '/checkpoint-' + checkpoint_num agent.restore(checkpoint) return agent @@ -231,7 +231,7 @@ def visualizer_rllib(args): # env_loc = 'flow.envs.multiagent' set_env_params(flow_params['env'], args.evaluate, args.horizon, config) - agent = set_agents(config, result_dir, exp.env_name, run=args.run) + agent = set_agents(config, result_dir, exp.env_name, run=args.run, checkpoint_num=args.checkpoint_num) if hasattr(agent, "local_evaluator") and \ os.environ.get("TEST_FLAG") != 'True': From ffde8ba4327a108f1484dcba3045e4b24cdefcfe Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 16 Jul 2020 04:39:02 -0700 Subject: [PATCH 343/438] add support for the sqs --- flow/data_pipeline/data_pipeline.py | 133 +++++++++++++++++++++++++- flow/data_pipeline/lambda_function.py | 9 ++ flow/data_pipeline/query.py | 14 ++- 3 files changed, 149 insertions(+), 7 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index f0e3637f6..bb63df641 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -2,12 +2,14 @@ import pandas as pd import boto3 from botocore.exceptions import ClientError -from flow.data_pipeline.query import QueryStrings, prerequisites +from flow.data_pipeline.query import QueryStrings, prerequisites, tables from time import time from datetime import date import csv from io import StringIO import json +import collections +from collections import defaultdict def generate_trajectory_table(emission_files, trajectory_table_path, source_id): @@ -124,8 +126,7 @@ def get_configuration(): def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): """Delete the obsolete data on S3.""" - response = s3.list_objects_v2(Bucket=bucket) - keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table) == 0 and e["Key"][-4:] == ".csv"] + keys = list_object_keys(s3, bucket=bucket, prefixes=table, suffix='.csv') keys.remove(latest_key) for key in keys: s3.delete_object(Bucket=bucket, Key=key) @@ -183,6 +184,132 @@ def get_ready_queries(completed_queries, new_query): return readied_queries +def list_object_keys(s3, bucket='circles.data.pipeline', prefixes='', suffix=''): + """Return all keys in the given bucket that start with prefix and end with suffix. 
The result is paginated internally, so it is not capped at 1000 keys."""
+    contents = []
+    if not isinstance(prefixes, collections.Iterable) or type(prefixes) is str:
+        prefixes = [prefixes]
+    for prefix in prefixes:
+        response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)
+        if 'Contents' in response:
+            contents.extend(response['Contents'])
+        while response['IsTruncated']:
+            response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix,
+                                          ContinuationToken=response['NextContinuationToken'])
+            contents.extend(response['Contents'])
+    keys = [content['Key'] for content in contents if content['Key'].endswith(suffix)]
+    return keys
+
+
+def delete_table(s3, bucket='circles.data.pipeline', only_query_result=True, table='', source_id=''):
+    """Delete the specified table's files from S3."""
+    queries = ["lambda_temp"]
+    if table:
+        queries.append(table)
+    else:
+        queries = tables
+        if only_query_result:
+            queries.remove('fact_vehicle_trace')
+            queries.remove('metadata_table')
+        if source_id:
+            queries.remove('leaderboard_chart_agg')
+            queries.remove('fact_top_scores')
+    keys = list_object_keys(s3, bucket=bucket, prefixes=queries)
+    if source_id:
+        keys = [key for key in keys if source_id in key]
+    for key in keys:
+        s3.delete_object(Bucket=bucket, Key=key)
+
+
+def rerun_query(s3, bucket='circles.data.pipeline', source_id=''):
+    """Re-run queries for simulation data that has been uploaded to S3, deleting the old results first."""
+    vehicle_trace_keys = list_object_keys(s3, bucket=bucket, prefixes="fact_vehicle_trace", suffix='.csv')
+    delete_table(s3, bucket=bucket, source_id=source_id)
+    if source_id:
+        vehicle_trace_keys = [key for key in vehicle_trace_keys if source_id in key]
+    sqs_client = boto3.client('sqs')
+    event_template = """
+    {{
+        "Records": [
+            {{
+                "eventVersion": "2.0",
+                "eventSource": "aws:s3",
+                "awsRegion": "us-west-2",
+                "eventTime": "1970-01-01T00:00:00.000Z",
+                "eventName": "ObjectCreated:Put",
+                "userIdentity": {{
+                    "principalId": "EXAMPLE"
+                }},
+                "requestParameters": {{
+                    "sourceIPAddress": "127.0.0.1"
+                }},
+                "responseElements": {{
+                    "x-amz-request-id": "EXAMPLE123456789",
+                    "x-amz-id-2": "EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH"
+                }},
+                "s3": {{
+                    "s3SchemaVersion": "1.0",
+                    "configurationId": "testConfigRule",
+                    "bucket": {{
+                        "name": "{bucket}",
+                        "ownerIdentity": {{
+                            "principalId": "EXAMPLE"
+                        }},
+                        "arn": "arn:aws:s3:::{bucket}"
+                    }},
+                    "object": {{
+                        "key": "{key}",
+                        "size": 1024,
+                        "eTag": "0123456789abcdef0123456789abcdef",
+                        "sequencer": "0A1B2C3D4E5F678901"
+                    }}
+                }}
+            }}
+        ]
+    }}"""
+    for key in vehicle_trace_keys:
+        response = sqs_client.send_message(QueueUrl="https://sqs.us-west-2.amazonaws.com/409746595792/S3CreateEvents",
+                                           MessageBody=event_template.format(bucket=bucket, key=key))
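+
+# Example usage (a hypothetical source_id; assumes AWS credentials with access
+# to the circles.data.pipeline bucket are configured):
+#
+#     s3 = boto3.client('s3')
+#     rerun_query(s3, source_id='flow_12345678')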
+
+
+def list_source_ids(s3, bucket='circles.data.pipeline'):
+    """Return a list of the source_ids of all simulations that have been uploaded to S3."""
+    vehicle_trace_keys = list_object_keys(s3, bucket=bucket, prefixes="fact_vehicle_trace", suffix='csv')
+    source_ids = ['flow_{}'.format(key.split('/')[2].split('=')[1].split('_')[1]) for key in vehicle_trace_keys]
+    return source_ids
+
+
+def sanity_check(s3, bucket='circles.data.pipeline'):
+    """Check that all the expected queries ran without error.
+
+    Note that this does not check the correctness of the query results,
+    only that each query finished without error."""
+    queries = tables
+    queries.append('lambda_temp')
+    queries.remove('leaderboard_chart_agg')
+    queries.remove('fact_top_scores')
+    expected_count = len(queries)
+    keys = list_object_keys(s3, bucket=bucket, prefixes=queries, suffix='.csv')
+    source_ids = list_source_ids(s3, bucket=bucket)
+    counts = defaultdict(lambda: [])
+    for key in keys:
+        source_id = 'flow_{}'.format(key.split('/')[2].split('=')[1].split('_')[1])
+        table = key.split('/')[0]
+        counts[source_id].append(table)
+    for sid in source_ids:
+        count = len(counts[sid])
+        if count < expected_count:
+            missing = []
+            for q in queries:
+                if q not in counts[sid]:
+                    missing.append(q)
+            print("Simulation {} is missing the following queries: \n {}".format(sid, str(missing)))
+        elif count > expected_count:
+            extra = counts[sid].copy()
+            for q in queries:
+                if q not in counts[sid]:
+                    extra.remove(q)
+            print("Simulation {} has extra results for the following queries: \n {}".format(sid, str(extra)))
+
+
 class AthenaQuery:
     """Class used to run queries.

diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py
index 1d813f98b..b87941c91 100644
--- a/flow/data_pipeline/lambda_function.py
+++ b/flow/data_pipeline/lambda_function.py
@@ -1,5 +1,6 @@
 """lambda function on AWS Lambda."""
 import boto3
+import json
 from urllib.parse import unquote_plus
 from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data, update_baseline, \
     get_ready_queries, get_completed_queries, put_completed_queries
@@ -14,6 +15,14 @@ def lambda_handler(event, context):
     # stores all lists of completed query for each source_id
     completed = {}
     records = []
+    event_records = []
+    # do a pre-sweep to put all s3 records in one list
+    for event_record in event['Records']:
+        if event_record["eventSource"] == "aws:s3":
+            event_records.append(event_record)
+        elif event_record['eventSource'] == "aws:sqs":
+            s3_event = json.loads(event_record['body'])
+            event_records.extend(s3_event['Records'])
     # do a pre-sweep to handle tasks other than initalizing a query
     for record in event['Records']:
         bucket = record['s3']['bucket']['name']
diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index f68dfa321..796da431a 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -119,7 +119,7 @@
             83.12392997 * speed +
             6.7650718327 * POW(speed,2) +
             0.7041355229 * POW(speed,3)
-        ) + GREATEST(0, 4598.7155 * accel + 975.12719 * accel * speed) AS power,
+        ) + GREATEST(0, 4598.7155 * acceleration + 975.12719 * acceleration * speed) AS power,
         \'{1}\' AS energy_model_id,
         source_id
     FROM {2}
@@ -276,6 +276,9 @@ class QueryStrings(Enum):
             ROW_NUMBER() OVER() - 51 AS lb,
             ROW_NUMBER() OVER() - 50 AS ub
         FROM fact_safety_metrics
+        WHERE 1 = 1
+            AND date = \'{date}\'
+            AND partition_name = \'{partition}_FACT_SAFETY_METRICS\'
     ), bins AS (
         SELECT
             lb,
@@ -372,8 +375,11 @@ class QueryStrings(Enum):
         SELECT
             ROW_NUMBER() OVER() - 1 AS lb,
             ROW_NUMBER() OVER() AS ub
-        FROM fact_safety_metrics
-    ) bins AS (
+        FROM fact_vehicle_fuel_efficiency_agg
+        WHERE 1=1
+            AND date = \'{date}\'
+            AND partition_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\'
+    ), bins AS (
         SELECT
             lb,
             ub
@@ -771,7 +777,7 @@ class QueryStrings(Enum):
         SELECT
             network,
             submission_date,
-            LAG(max_score IGNORE NULLS, 1) OVER (PARTITION BY network ORDER BY submission_date ASC) AS max_score
+            LAG(max_score, 1) OVER (PARTITION BY network ORDER BY
submission_date ASC) AS max_score FROM curr_max ), unioned AS ( SELECT * FROM curr_max From f093205aba049e01f57fe0dea34d8c777ebc926c Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 16 Jul 2020 09:20:06 -0700 Subject: [PATCH 344/438] fix small bugs --- flow/core/experiment.py | 8 ++++++-- flow/visualize/i210_replay.py | 33 ++++++++++++++++++--------------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index dc462b85b..3b3a1ce5b 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -108,7 +108,8 @@ def run(self, is_baseline=False, multiagent=False, rets=None, - policy_map_fn=None): + policy_map_fn=None, + supplied_metadata=None): """Run the given network for a set number of runs. Parameters @@ -194,7 +195,10 @@ def rl_actions(*_): metadata['network'].append( network_name_translate(self.env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(is_baseline)) - name, strategy = get_configuration() + if supplied_metadata: + name, strategy = supplied_metadata + else: + name, strategy = get_configuration() metadata['submitter_name'].append(name) metadata['strategy'].append(strategy) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 3cab93cd6..3053118e1 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -45,6 +45,9 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= """ assert bool(args.controller) ^ bool(rllib_config), \ "Need to specify either controller or rllib_config, but not both" + + args.gen_emission = args.gen_emission or args.use_s3 + if transfer_test is not None: if type(transfer_test) == bytes: transfer_test = ray.cloudpickle.loads(transfer_test) @@ -74,7 +77,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= if veh_param['veh_id'] == 'av': veh_param['acceleration_controller'] = (controller, test_params) - sim_params = set_sim_params(flow_params['sim'], args.render_mode, args.save_render) + sim_params = set_sim_params(flow_params['sim'], args.render_mode, args.save_render, args.gen_emission) set_env_params(flow_params['env'], args.evaluate, args.horizon) @@ -121,8 +124,8 @@ def rl_action(state): return action info_dict = exp.run(num_runs=args.num_rollouts, convert_to_csv=args.gen_emission, to_aws=args.use_s3, - rl_actions=rl_action, multiagent=rllib_config and multiagent, rets=rets, - policy_map_fn=policy_map_fn) + rl_actions=rl_action, multiagent=True, rets=rets, + policy_map_fn=policy_map_fn, supplied_metadata=("Brent", "FS;5%;v_des:{}".format(v_des))) return info_dict @@ -273,7 +276,7 @@ def create_parser(): replay.remote(args, flow_params, output_dir="{}/{}".format(output_dir, v_des), rllib_config=args.rllib_result_dir, result_dir=args.rllib_result_dir, max_completed_trips=args.max_completed_trips, v_des=v_des) - for v_des in range(8, 17, 2)] + for v_des in range(8, 13, 1)] ray.get(ray_output) else: @@ -288,14 +291,14 @@ def create_parser(): rllib_config=args.rllib_result_dir, result_dir=args.rllib_result_dir, max_completed_trips=args.max_completed_trips)) - if args.use_s3: - s3_string = 's3://kanaad.experiments/i210_replay/' + date - if args.exp_title: - s3_string += '/' + args.exp_title - - for i in range(4): - try: - p1 = subprocess.Popen("aws s3 sync {} {}".format(output_dir, s3_string).split(' ')) - p1.wait(50) - except Exception as e: - print('This is the error ', e) + # if args.use_s3: + # s3_string = 's3://kanaad.experiments/i210_replay/' + date + # 
if args.exp_title:
+    #         s3_string += '/' + args.exp_title
+    #
+    #     for i in range(4):
+    #         try:
+    #             p1 = subprocess.Popen("aws s3 sync {} {}".format(output_dir, s3_string).split(' '))
+    #             p1.wait(50)
+    #         except Exception as e:
+    #             print('This is the error ', e)

From 68f9c55c24348bb7dc5dabd269a7fd8e4439d20c Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Thu, 16 Jul 2020 09:24:28 -0700
Subject: [PATCH 345/438] moving the place where the completed_query list is
 updated

---
 flow/data_pipeline/data_pipeline.py   | 13 ++++++-------
 flow/data_pipeline/lambda_function.py |  4 ++--
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py
index bb63df641..71f72890f 100644
--- a/flow/data_pipeline/data_pipeline.py
+++ b/flow/data_pipeline/data_pipeline.py
@@ -162,13 +162,12 @@ def get_completed_queries(s3, source_id):
     return set(completed_queries)

-def put_completed_queries(s3, completed_queries):
-    """Put all the completed queries lists into S3 as in a serialized json format."""
-    for source_id, completed_queries_set in completed_queries.items():
-        completed_queries_list = list(completed_queries_set)
-        completed_queries_json = json.dumps(completed_queries_list)
-        s3.put_object(Bucket='circles.data.pipeline', Key='lambda_temp/{}'.format(source_id),
-                      Body=completed_queries_json.encode('utf-8'))
+def put_completed_queries(s3, source_id, completed_queries_set):
+    """Put the completed queries list into S3 in serialized JSON format."""
+    completed_queries_list = list(completed_queries_set)
+    completed_queries_json = json.dumps(completed_queries_list)
+    s3.put_object(Bucket='circles.data.pipeline', Key='lambda_temp/{}'.format(source_id),
+                  Body=completed_queries_json.encode('utf-8'))

 def get_ready_queries(completed_queries, new_query):
diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py
index b87941c91..0fe01d583 100644
--- a/flow/data_pipeline/lambda_function.py
+++ b/flow/data_pipeline/lambda_function.py
@@ -72,6 +72,8 @@ def lambda_handler(event, context):
                 readied_queries = get_ready_queries(completed[source_id], query_name)
             completed[source_id].add(query_name)
+            # stores the updated list of completed queries back to S3
+            put_completed_queries(s3, source_id, completed[source_id])
             # initialize queries and store them at appropriate locations
             for readied_query_name, table_name in readied_queries:
                 result_location = 's3://circles.data.pipeline/{}/date={}/partition_name={}_{}'.format(table_name,
@@ -80,5 +82,3 @@ def lambda_handler(event, context):
                                                                                                       readied_query_name)
                 queryEngine.run_query(readied_query_name, result_location, query_date, partition, loc_filter=loc_filter,
                                       start_filter=start_filter, stop_filter=stop_filter)
-    # stores all the updated lists of completed queries back to S3
-    put_completed_queries(s3, completed)

From 3e730293ae0f08fa9367194d0dc2d5391086d768 Mon Sep 17 00:00:00 2001
From: Rchide
Date: Thu, 16 Jul 2020 19:55:54 +0200
Subject: [PATCH 346/438] Add restart_instance = True

---
 flow/core/params.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/flow/core/params.py b/flow/core/params.py
index cfdc216bd..8ae7340cf 100755
--- a/flow/core/params.py
+++ b/flow/core/params.py
@@ -527,6 +527,7 @@ def __init__(self,
                  centroid_config_name=None,
                  subnetwork_name=None):
         """Instantiate AimsunParams."""
+        restart_instance = True
         super(AimsunParams, self).__init__(
             sim_step, render, restart_instance, emission_path, save_render,
             sight_radius, show_radius, pxpm)
@@ -635,6 +636,7 @@
def __init__(self, use_ballistic=False, disable_collisions=False): """Instantiate SumoParams.""" + restart_instance = True super(SumoParams, self).__init__( sim_step, render, restart_instance, emission_path, save_render, sight_radius, show_radius, pxpm, force_color_update) From 6c387e64cd3d546101175a158c00d3e4d4b7af9c Mon Sep 17 00:00:00 2001 From: Akash Velu <31679538+akashvelu@users.noreply.github.com> Date: Thu, 16 Jul 2020 11:13:04 -0700 Subject: [PATCH 347/438] Update i210.py --- flow/envs/multiagent/i210.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index 004208cb4..65bad3e37 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -380,7 +380,7 @@ def step(self, rl_actions): state, reward, done, info = super().step(rl_actions) # handle the edge case where a vehicle hasn't been put back when the rollout terminates if self.reroute_on_exit and done['__all__']: - for rl_id in self.observed_rl_ids: + for rl_id in self._observed_rl_ids: if rl_id not in state.keys(): done[rl_id] = True reward[rl_id] = 0 From 6e02a208345b8f72018169f663aa8b452d13a8d9 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 16 Jul 2020 17:16:54 -0700 Subject: [PATCH 348/438] remove unneeded import --- flow/visualize/i210_replay.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 3053118e1..333d48b1b 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -4,7 +4,6 @@ from copy import deepcopy import os import pytz -import subprocess from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables From d766f0d4dd9613aea32fc2b2b596791117448aab Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 16 Jul 2020 17:35:38 -0700 Subject: [PATCH 349/438] move and rename replay scripts --- flow/{visualize/visualizer_rllib.py => replay/rl_replay.py} | 0 flow/{visualize/i210_replay.py => replay/transfer_tests.py} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename flow/{visualize/visualizer_rllib.py => replay/rl_replay.py} (100%) rename flow/{visualize/i210_replay.py => replay/transfer_tests.py} (100%) diff --git a/flow/visualize/visualizer_rllib.py b/flow/replay/rl_replay.py similarity index 100% rename from flow/visualize/visualizer_rllib.py rename to flow/replay/rl_replay.py diff --git a/flow/visualize/i210_replay.py b/flow/replay/transfer_tests.py similarity index 100% rename from flow/visualize/i210_replay.py rename to flow/replay/transfer_tests.py From 793eecf14b30554a5eab72a0fcec5d271e60c5ca Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 16 Jul 2020 17:40:25 -0700 Subject: [PATCH 350/438] move replay tests to new file --- tests/fast_tests/test_replays.py | 47 ++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 tests/fast_tests/test_replays.py diff --git a/tests/fast_tests/test_replays.py b/tests/fast_tests/test_replays.py new file mode 100644 index 000000000..87b323fae --- /dev/null +++ b/tests/fast_tests/test_replays.py @@ -0,0 +1,47 @@ +from flow.replay import visualizer_rllib as vs_rllib +from flow.replay.visualizer_rllib import visualizer_rllib + +import os +import unittest +import ray + +os.environ['TEST_FLAG'] = 'True' + + +class TestVisualizerRLlib(unittest.TestCase): + """Tests visualizer_rllib""" + + def test_visualizer_single(self): + 
"""Test for single agent""" + try: + ray.init(num_cpus=1) + except Exception: + pass + # current path + current_path = os.path.realpath(__file__).rsplit('/', 1)[0] + + # run the experiment and check it doesn't crash + arg_str = '{}/../data/rllib_data/single_agent 1 --num_rollouts 1 ' \ + '--render_mode no_render ' \ + '--horizon 10'.format(current_path).split() + parser = vs_rllib.create_parser() + pass_args = parser.parse_args(arg_str) + visualizer_rllib(pass_args) + + # FIXME(ev) set the horizon so that this runs faster + def test_visualizer_multi(self): + """Test for multi-agent visualization""" + try: + ray.init(num_cpus=1) + except Exception: + pass + # current path + current_path = os.path.realpath(__file__).rsplit('/', 1)[0] + + # run the experiment and check it doesn't crash + arg_str = '{}/../data/rllib_data/multi_agent 1 --num_rollouts 1 ' \ + '--render_mode no_render ' \ + '--horizon 10'.format(current_path).split() + parser = vs_rllib.create_parser() + pass_args = parser.parse_args(arg_str) + visualizer_rllib(pass_args) \ No newline at end of file From fb121f2bbd1ffeb5f71daa4b865a9b3c7438271c Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 16 Jul 2020 17:41:04 -0700 Subject: [PATCH 351/438] newline at eof --- tests/fast_tests/test_replays.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fast_tests/test_replays.py b/tests/fast_tests/test_replays.py index 87b323fae..54b6b5ac8 100644 --- a/tests/fast_tests/test_replays.py +++ b/tests/fast_tests/test_replays.py @@ -44,4 +44,4 @@ def test_visualizer_multi(self): '--horizon 10'.format(current_path).split() parser = vs_rllib.create_parser() pass_args = parser.parse_args(arg_str) - visualizer_rllib(pass_args) \ No newline at end of file + visualizer_rllib(pass_args) From de81a63e315940ae436cd9fa3f002d1f4d4f9566 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 16 Jul 2020 18:14:53 -0700 Subject: [PATCH 352/438] missed visualizer -> replay move --- flow/replay/rl_replay.py | 22 +++++++-------- tests/fast_tests/test_replays.py | 30 ++++++++++++-------- tests/fast_tests/test_visualizers.py | 41 ---------------------------- 3 files changed, 29 insertions(+), 64 deletions(-) diff --git a/flow/replay/rl_replay.py b/flow/replay/rl_replay.py index 1f0b3c42c..08648ae5f 100644 --- a/flow/replay/rl_replay.py +++ b/flow/replay/rl_replay.py @@ -1,4 +1,4 @@ -"""Visualizer for rllib experiments. +"""Replay script for rllib experiments. 
Attributes ---------- @@ -6,7 +6,7 @@ Example call to the function, which is :: - python ./visualizer_rllib.py /tmp/ray/result_dir 1 + python ./rl_replay.py /tmp/ray/result_dir 1 parser : ArgumentParser Command-line argument parser @@ -34,7 +34,7 @@ EXAMPLE_USAGE = """ example usage: - python ./visualizer_rllib.py /ray_results/experiment_dir/result_dir 1 + python ./rl_replay.py /ray_results/experiment_dir/result_dir 1 Here the arguments are: 1 - the path to the simulation results @@ -123,7 +123,7 @@ def set_agents(config, result_dir, env_name, run=None, checkpoint_num=None): else None if run and config_run: if run != config_run: - print('visualizer_rllib.py: error: run argument ' + print('rl_replay.py: error: run argument ' + '\'{}\' passed in '.format(run) + 'differs from the one stored in params.json ' + '\'{}\''.format(config_run)) @@ -141,11 +141,11 @@ def set_agents(config, result_dir, env_name, run=None, checkpoint_num=None): elif config_run: agent_cls = get_agent_class(config_run) else: - print('visualizer_rllib.py: error: could not find flow parameter ' + print('rl_replay.py: error: could not find flow parameter ' '\'run\' in params.json, ' 'add argument --run to provide the algorithm or model used ' 'to train the results\n e.g. ' - 'python ./visualizer_rllib.py /tmp/ray/result_dir 1 --run PPO') + 'python ./rl_replay.py /tmp/ray/result_dir 1 --run PPO') sys.exit(1) # create the agent that will be used to compute the actions @@ -204,12 +204,12 @@ def rl_action(state): return policy_map_fn, rl_action, rets -def visualizer_rllib(args): - """Visualizer for RLlib experiments. +def replay_rllib(args): + """Replay for RLlib experiments. This function takes args (see function create_parser below for more detailed information on what information can be fed to this - visualizer), and renders the experiment associated with it. + replay script), and renders the experiment associated with it. 
""" result_dir, config, multiagent, flow_params = read_result_dir(args.result_dir) @@ -282,7 +282,7 @@ def create_parser(): '--num_rollouts', type=int, default=1, - help='The number of rollouts to visualize.') + help='The number of rollouts to replay.') parser.add_argument( '--gen_emission', action='store_true', @@ -327,4 +327,4 @@ def create_parser(): parser = create_parser() args = parser.parse_args() ray.init(num_cpus=1) - visualizer_rllib(args) + rl_replay(args) diff --git a/tests/fast_tests/test_replays.py b/tests/fast_tests/test_replays.py index 54b6b5ac8..b762b1e2b 100644 --- a/tests/fast_tests/test_replays.py +++ b/tests/fast_tests/test_replays.py @@ -1,5 +1,5 @@ -from flow.replay import visualizer_rllib as vs_rllib -from flow.replay.visualizer_rllib import visualizer_rllib +from flow.replay import rl_replay as rl_replay +from flow.replay.rl_replay import replay_rllib import os import unittest @@ -8,11 +8,11 @@ os.environ['TEST_FLAG'] = 'True' -class TestVisualizerRLlib(unittest.TestCase): - """Tests visualizer_rllib""" +class TestRLReplay(unittest.TestCase): + """Tests rl_replay""" - def test_visualizer_single(self): - """Test for single agent""" + def test_rllib_replay_single(self): + """Test for single agent replay""" try: ray.init(num_cpus=1) except Exception: @@ -24,13 +24,13 @@ def test_visualizer_single(self): arg_str = '{}/../data/rllib_data/single_agent 1 --num_rollouts 1 ' \ '--render_mode no_render ' \ '--horizon 10'.format(current_path).split() - parser = vs_rllib.create_parser() + parser = rl_replay.create_parser() pass_args = parser.parse_args(arg_str) - visualizer_rllib(pass_args) + replay_rllib(pass_args) # FIXME(ev) set the horizon so that this runs faster - def test_visualizer_multi(self): - """Test for multi-agent visualization""" + def test_rllib_replay_multi(self): + """Test for multi-agent replay""" try: ray.init(num_cpus=1) except Exception: @@ -42,6 +42,12 @@ def test_visualizer_multi(self): arg_str = '{}/../data/rllib_data/multi_agent 1 --num_rollouts 1 ' \ '--render_mode no_render ' \ '--horizon 10'.format(current_path).split() - parser = vs_rllib.create_parser() + parser = rl_replay.create_parser() pass_args = parser.parse_args(arg_str) - visualizer_rllib(pass_args) + replay_rllib(pass_args) + + +if __name__ == '__main__': + ray.init(num_cpus=1) + unittest.main() + ray.shutdown() diff --git a/tests/fast_tests/test_visualizers.py b/tests/fast_tests/test_visualizers.py index 47aa9d968..5fa6d649a 100644 --- a/tests/fast_tests/test_visualizers.py +++ b/tests/fast_tests/test_visualizers.py @@ -1,5 +1,3 @@ -from flow.visualize import visualizer_rllib as vs_rllib -from flow.visualize.visualizer_rllib import visualizer_rllib import flow.visualize.capacity_diagram_generator as cdg import flow.visualize.time_space_diagram as tsd import flow.visualize.plot_ray_results as prr @@ -14,45 +12,6 @@ os.environ['TEST_FLAG'] = 'True' -class TestVisualizerRLlib(unittest.TestCase): - """Tests visualizer_rllib""" - - def test_visualizer_single(self): - """Test for single agent""" - try: - ray.init(num_cpus=1) - except Exception: - pass - # current path - current_path = os.path.realpath(__file__).rsplit('/', 1)[0] - - # run the experiment and check it doesn't crash - arg_str = '{}/../data/rllib_data/single_agent 1 --num_rollouts 1 ' \ - '--render_mode no_render ' \ - '--horizon 10'.format(current_path).split() - parser = vs_rllib.create_parser() - pass_args = parser.parse_args(arg_str) - visualizer_rllib(pass_args) - - # FIXME(ev) set the horizon so that this runs faster - def 
test_visualizer_multi(self): - """Test for multi-agent visualization""" - try: - ray.init(num_cpus=1) - except Exception: - pass - # current path - current_path = os.path.realpath(__file__).rsplit('/', 1)[0] - - # run the experiment and check it doesn't crash - arg_str = '{}/../data/rllib_data/multi_agent 1 --num_rollouts 1 ' \ - '--render_mode no_render ' \ - '--horizon 10'.format(current_path).split() - parser = vs_rllib.create_parser() - pass_args = parser.parse_args(arg_str) - visualizer_rllib(pass_args) - - class TestPlotters(unittest.TestCase): def test_capacity_diagram_generator(self): From 15753a58a748cc8ededc9397ec5c2d5341d81c22 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 16 Jul 2020 18:15:57 -0700 Subject: [PATCH 353/438] typo --- flow/replay/rl_replay.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/replay/rl_replay.py b/flow/replay/rl_replay.py index 08648ae5f..0df267a6b 100644 --- a/flow/replay/rl_replay.py +++ b/flow/replay/rl_replay.py @@ -327,4 +327,4 @@ def create_parser(): parser = create_parser() args = parser.parse_args() ray.init(num_cpus=1) - rl_replay(args) + replay_rllib(args) From 8fcb6975c7ab0df69a311126808c30d23591f726 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 16 Jul 2020 21:32:12 -0700 Subject: [PATCH 354/438] fix binned queries; fix top scores query --- flow/data_pipeline/query.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index c51543b6d..f444c1154 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -288,8 +288,8 @@ class QueryStrings(Enum): SELECT CONCAT('[', CAST(bins.lb AS VARCHAR), ', ', CAST(bins.ub AS VARCHAR), ')') AS safety_value_bin, COUNT() AS count - FROM bins, fact_safety_metrics fsm - WHERE 1 = 1 + FROM bins + LEFT JOIN fact_safety_metrics fsm ON 1 = 1 AND fsm.date = \'{date}\' AND fsm.partition_name = \'{partition}_FACT_SAFETY_METRICS\' AND fsm.safety_value >= bins.lb @@ -380,18 +380,18 @@ class QueryStrings(Enum): FROM unfilter_bins WHERE 1=1 AND lb >= 0 - AND ub <= 20 + AND ub <= 60 ) SELECT CONCAT('[', CAST(bins.lb AS VARCHAR), ', ', CAST(bins.ub AS VARCHAR), ')') AS fuel_efficiency_bin, COUNT() AS count - FROM bins, fact_vehicle_fuel_efficiency_agg agg - WHERE 1 = 1 + FROM bins + LEFT JOIN fact_vehicle_fuel_efficiency_agg agg ON 1 = 1 AND agg.date = \'{date}\' AND agg.partition_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND agg.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND 1000 * agg.efficiency_meters_per_joules >= bins.lb - AND 1000 * agg.efficiency_meters_per_joules < bins.ub + AND agg.efficiency_miles_per_gallon >= bins.lb + AND agg.efficiency_miles_per_gallon < bins.ub GROUP BY 1 ; """ @@ -782,7 +782,7 @@ class QueryStrings(Enum): SELECT network, submission_date, - 1000 * MAX(efficiency_miles_per_gallon) + MAX(efficiency_miles_per_gallon) OVER (PARTITION BY network ORDER BY submission_date ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS max_score FROM leaderboard_chart_agg @@ -792,7 +792,7 @@ class QueryStrings(Enum): SELECT network, submission_date, - LAG(max_score IGNORE NULLS, 1) OVER (PARTITION BY network ORDER BY submission_date ASC) AS max_score + LAG(max_score, 1) OVER (PARTITION BY network ORDER BY submission_date ASC) AS max_score FROM curr_max ), unioned AS ( SELECT * FROM curr_max @@ -801,5 +801,7 @@ class QueryStrings(Enum): ) SELECT DISTINCT * FROM unioned + WHERE 1 = 1 + AND max_score IS NOT NULL ORDER BY 1, 2, 3 
;""" From 4ee4d53755e6d4090c1cfee931fc39ca730bca41 Mon Sep 17 00:00:00 2001 From: AboudyKreidieh Date: Fri, 17 Jul 2020 11:24:38 -0700 Subject: [PATCH 355/438] try/except on register with warning --- flow/utils/registry.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/flow/utils/registry.py b/flow/utils/registry.py index 3f6c9dad5..a1163849f 100644 --- a/flow/utils/registry.py +++ b/flow/utils/registry.py @@ -117,15 +117,19 @@ def create_env(*_): entry_point = params["env_name"].__module__ + ':' + params["env_name"].__name__ # register the environment with OpenAI gym - register( - id=env_name, - entry_point=entry_point, - kwargs={ - "env_params": env_params, - "sim_params": sim_params, - "network": network, - "simulator": params['simulator'] - }) + try: + register( + id=env_name, + entry_point=entry_point, + kwargs={ + "env_params": env_params, + "sim_params": sim_params, + "network": network, + "simulator": params['simulator'] + }) + except gym.error.Error: + print("WARNING: Environment {} already registered, ignoring." + .format(env_name)) return gym.envs.make(env_name) From c62a031e0149727d991ad5e65e80e099ecf837ee Mon Sep 17 00:00:00 2001 From: Akash Velu <31679538+akashvelu@users.noreply.github.com> Date: Fri, 17 Jul 2020 11:29:30 -0700 Subject: [PATCH 356/438] Update ray version to 0.8.4 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a4f6f83f8..2d3c61093 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ pyglet==1.3.2 matplotlib==3.1.0 imutils==0.5.1 numpydoc -ray==0.8.0 +ray==0.8.4 opencv-python dill lz4 From 6e6ced4c24990bb96644cf38df433cf282ca06ba Mon Sep 17 00:00:00 2001 From: Kathy Jang Date: Fri, 17 Jul 2020 11:48:22 -0700 Subject: [PATCH 357/438] New AMI --- scripts/ray_autoscale.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml index 18e25154d..ebaebb7eb 100644 --- a/scripts/ray_autoscale.yaml +++ b/scripts/ray_autoscale.yaml @@ -40,7 +40,7 @@ auth: # http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances head_node: InstanceType: c4.8xlarge - ImageId: ami-0c047f3ddd3939b30 # Flow AMI (Ubuntu) + ImageId: ami-0f657e381384fb3c1 # circles_0 AMI (Ubuntu) InstanceMarketOptions: MarketType: spot #Additional options can be found in the boto docs, e.g. @@ -55,7 +55,7 @@ head_node: # http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances worker_nodes: InstanceType: c4.8xlarge - ImageId: ami-0c047f3ddd3939b30 # Flow AMI (Ubuntu) + ImageId: ami-0f657e381384fb3c1 # circles_0 AMI (Ubuntu) #Run workers on spot by default. Comment this out to use on-demand. InstanceMarketOptions: From b01de82d251b45ceaede91816e0158abf1ba449c Mon Sep 17 00:00:00 2001 From: Kathy Jang Date: Fri, 17 Jul 2020 13:12:27 -0700 Subject: [PATCH 358/438] Added doc for libsumo installation on Mac --- docs/libsumo_mac.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 docs/libsumo_mac.md diff --git a/docs/libsumo_mac.md b/docs/libsumo_mac.md new file mode 100644 index 000000000..4360856d3 --- /dev/null +++ b/docs/libsumo_mac.md @@ -0,0 +1,23 @@ +# How to install Libsumo for Mac OS + +This is adapted from an email exchange with the SUMO staff. + + + +To install libsumo requires re-building and installing SUMO from source. 
+
+## Steps
+
+- **Install swig:** brew install swig
+- **Clone the repo:** git clone https://github.com/eclipse/sumo.git
+- **Create a “cmake-build” directory inside sumo/build/ and navigate to it:** mkdir build/cmake-build && cd build/cmake-build
+
+**The next 3 steps are run inside that directory**
+
+- cmake ../..
+- make
+- make install
+
+## Additional Notes
+- You can test whether libsumo has been built by running ./testlibsumo inside the sumo/bin/ directory.
+- Bear in mind that libsumo must be used with the same Python version that CMake used to build SUMO.

From e698db776d5f65e454b1c61ee8b3366e0082502c Mon Sep 17 00:00:00 2001
From: AboudyKreidieh
Date: Fri, 17 Jul 2020 13:23:57 -0700
Subject: [PATCH 359/438] create emission path if not existing

---
 flow/core/experiment.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index 38599b002..46d77edd1 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -1,5 +1,6 @@
 """Contains an experiment class for running simulations."""
 from flow.utils.registry import make_create_env
+from flow.core.util import ensure_dir
 from flow.data_pipeline.data_pipeline import upload_to_s3
 from flow.data_pipeline.data_pipeline import get_configuration
 from flow.data_pipeline.data_pipeline import generate_trajectory_table
@@ -143,6 +144,10 @@ def run(self,
                 'output should be generated. If you do not wish to generate '
                 'emissions, set the convert_to_csv parameter to False.')

+        # Make sure the emission path directory exists, and if not, create it.
+        if self.env.sim_params.emission_path is not None:
+            ensure_dir(self.env.sim_params.emission_path)
+
         # used to store
         info_dict = {
             "returns": [],

From 0d4a90e9fc92b3d8ee2e34f822846fe8fd213002 Mon Sep 17 00:00:00 2001
From: Kathy Jang
Date: Fri, 17 Jul 2020 13:54:17 -0700
Subject: [PATCH 360/438] Added code shell

---
 docs/libsumo_mac.md | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/docs/libsumo_mac.md b/docs/libsumo_mac.md
index 4360856d3..7a02d3823 100644
--- a/docs/libsumo_mac.md
+++ b/docs/libsumo_mac.md
@@ -20,16 +20,16 @@ Installing libsumo requires re-building and installing SUMO from source.

 ## Steps

-- **Install swig:** brew install swig
-- **Clone the repo:** git clone https://github.com/eclipse/sumo.git
-- **Create a “cmake-build” directory inside sumo/build/ and navigate to it:** mkdir build/cmake-build && cd build/cmake-build
+- **Install swig:** ```brew install swig```
+- **Clone the repo:** ```git clone https://github.com/eclipse/sumo.git```
+- **Create a “cmake-build” directory inside sumo/build/ and navigate to it:** ```mkdir build/cmake-build && cd build/cmake-build```

 **The next 3 steps are run inside that directory**

-- cmake ../..
-- make
-- make install
+- ```cmake ../..```
+- ```make```
+- ```make install```

 ## Additional Notes
 - You can test whether libsumo has been built by running ./testlibsumo inside the sumo/bin/ directory.
-- Bear in mind that libsumo must be used with the same Python version that CMake used to build SUMO.
+- Bear in mind that libsumo must be used with the same Python version that CMake used to build SUMO.
\ No newline at end of file From 0c8add7793dc60385b19d9010a6df00f7c471844 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nathan=20Lichtl=C3=A9?= Date: Sat, 18 Jul 2020 16:05:43 +0200 Subject: [PATCH 361/438] automatic leaderboard upload --- examples/train.py | 49 +++++++++++++++++++++++++++++ flow/data_pipeline/data_pipeline.py | 18 +++++++---- flow/visualize/i210_replay.py | 33 ++++++++++++------- 3 files changed, 83 insertions(+), 17 deletions(-) diff --git a/examples/train.py b/examples/train.py index f889ac9b6..959596697 100644 --- a/examples/train.py +++ b/examples/train.py @@ -21,6 +21,7 @@ from flow.utils.registry import env_constructor from flow.utils.rllib import FlowParamsEncoder, get_flow_params from flow.utils.registry import make_create_env +from flow.visualize.i210_replay import create_parser, generate_graphs def parse_args(args): @@ -87,6 +88,11 @@ def parse_args(args): parser.add_argument('--multi_node', action='store_true', help='Set to true if this will be run in cluster mode.' 'Relevant for rllib') + parser.add_argument( + '--upload_graphs', type=str, nargs=2, + help='Whether to generate and upload graphs to leaderboard at the end of training.' + 'Arguments are name of the submitter and name of the strategy.' + 'Only relevant for i210 training on rllib') return parser.parse_known_args(args)[0] @@ -376,6 +382,49 @@ def trial_str_creator(trial): exp_dict['upload_dir'] = s3_string tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) + if flags.upload_graphs: + print('Generating experiment graphs and uploading them to leaderboard') + submitter_name, strategy_name = flags.upload_graphs + + # reset ray + ray.shutdown() + if flags.local_mode: + ray.init(local_mode=True) + else: + ray.init() + + # grab checkpoint path + for (dirpath, _, _) in os.walk(os.path.expanduser("~/ray_results")): + if "checkpoint_{}".format(flags.checkpoint_freq) in dirpath \ + and dirpath.split('/')[-3] == flags.exp_title: + checkpoint_path = os.path.dirname(dirpath) + checkpoint_number = -1 + for name in os.listdir(checkpoint_path): + if name.startswith('checkpoint'): + cp = int(name.split('_')[1]) + checkpoint_number = max(checkpoint_number, cp) + + # create dir for graphs output + output_dir = os.path.join(checkpoint_path, 'output_graphs') + if not os.path.exists(output_dir): + os.mkdir(output_dir) + + # run graph generation script + parser = create_parser() + + strategy_name_full = str(strategy_name) + if flags.grid_search: + strategy_name_full += '__' + dirpath.split('/')[-2] + + args = parser.parse_args([ + '-r', checkpoint_path, '-c', str(checkpoint_number), + '--gen_emission', '--use_s3', '--num_cpus', str(flags.num_cpus), + '--output_dir', output_dir, + '--submitter_name', submitter_name, + '--strategy_name', strategy_name_full.replace(',', '_').replace(';', '_') + ]) + generate_graphs(args) + def train_h_baselines(env_name, args, multiagent): """Train policies using SAC and TD3 with h-baselines.""" diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index f0e3637f6..8f73e7e5b 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -99,7 +99,7 @@ def get_extra_info(veh_kernel, extra_info, veh_ids, source_id, run_id): extra_info["run_id"].append(run_id) -def get_configuration(): +def get_configuration(submitter_name=None, strategy_name=None): """Get configuration for the metadata table.""" try: config_df = pd.read_csv('./data_pipeline_config') @@ -107,13 +107,19 @@ def get_configuration(): config_df = 
pd.DataFrame(data={"submitter_name": [""], "strategy": [""]}) if not config_df['submitter_name'][0]: - name = input("Please enter your name:").strip() - while not name: - name = input("Please enter a non-empty name:").strip() + if submitter_name: + name = submitter_name + else: + name = input("Please enter your name:").strip() + while not name: + name = input("Please enter a non-empty name:").strip() config_df['submitter_name'] = [name] - strategy = input( - "Please enter strategy name (current: \"{}\"):".format(config_df["strategy"][0])).strip() + if strategy_name: + strategy = strategy_name + else: + strategy = input( + "Please enter strategy name (current: \"{}\"):".format(config_df["strategy"][0])).strip() if strategy: config_df['strategy'] = [strategy] diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 4c7498413..60f95f6d8 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -242,7 +242,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(args.is_baseline)) if args.to_aws: - name, strategy = get_configuration() + name, strategy = get_configuration(args.submitter_name, args.strategy_name) metadata['submitter_name'].append(name) metadata['strategy'].append(strategy) @@ -500,16 +500,19 @@ def create_parser(): action='store_true', help='specifies whether this is a baseline run' ) + parser.add_argument('--submitter_name', type=str, required=False, default=None, + help='Name of the submitter (replaces the prompt asking for ' + 'the name) (stored locally so only necessary once)') + parser.add_argument('--strategy_name', type=str, required=False, default=None, + help='Name of the training strategy (replaces the prompt ' + 'asking for the strategy)') return parser -if __name__ == '__main__': +def generate_graphs(args): date = datetime.now(tz=pytz.utc) date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") - parser = create_parser() - args = parser.parse_args() - rllib_config = None rllib_result_dir = None if args.rllib_result_dir is not None: @@ -520,12 +523,13 @@ def create_parser(): flow_params = deepcopy(I210_MA_DEFAULT_FLOW_PARAMS) - if args.multi_node: - ray.init(redis_address='localhost:6379') - elif args.local: - ray.init(local_mode=True, object_store_memory=200 * 1024 * 1024) - else: - ray.init(num_cpus=args.num_cpus + 1, object_store_memory=200 * 1024 * 1024) + if not ray.is_initialized(): + if args.multi_node: + ray.init(redis_address='localhost:6379') + elif args.local: + ray.init(local_mode=True, object_store_memory=200 * 1024 * 1024) + else: + ray.init(num_cpus=args.num_cpus + 1, object_store_memory=200 * 1024 * 1024) if args.exp_title: output_dir = os.path.join(args.output_dir, args.exp_title) @@ -573,3 +577,10 @@ def create_parser(): p1.wait(50) except Exception as e: print('This is the error ', e) + + +if __name__ == '__main__': + parser = create_parser() + args = parser.parse_args() + + generate_graphs(args) From 8ec8728e7aaeaff4aeb644d7cc2fce97d5119f3a Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 21 Jul 2020 23:30:45 -0700 Subject: [PATCH 362/438] Import fixes --- examples/exp_configs/rl/multiagent/adversarial_figure_eight.py | 2 +- flow/utils/rllib.py | 1 + tests/slow_tests/test_benchmarks.py | 3 +-- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/exp_configs/rl/multiagent/adversarial_figure_eight.py 
b/examples/exp_configs/rl/multiagent/adversarial_figure_eight.py index 4fb81ce97..a7834c6cd 100644 --- a/examples/exp_configs/rl/multiagent/adversarial_figure_eight.py +++ b/examples/exp_configs/rl/multiagent/adversarial_figure_eight.py @@ -8,7 +8,7 @@ # the negative of the AV reward from copy import deepcopy -from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy +from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy from flow.controllers import ContinuousRouter from flow.controllers import IDMController from flow.controllers import RLController diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py index fc3229e52..9be55bbc0 100644 --- a/flow/utils/rllib.py +++ b/flow/utils/rllib.py @@ -228,5 +228,6 @@ def get_rllib_pkl(path): "Could not find params.pkl in either the checkpoint dir or " "its parent directory.") with open(config_path, 'rb') as f: + print(f) config = cloudpickle.load(f) return config diff --git a/tests/slow_tests/test_benchmarks.py b/tests/slow_tests/test_benchmarks.py index c119d4bd8..4a50e84cb 100644 --- a/tests/slow_tests/test_benchmarks.py +++ b/tests/slow_tests/test_benchmarks.py @@ -84,8 +84,7 @@ def ray_runner(self, num_runs, flow_params, version): # Register as rllib env register_env(env_name, create_env) - - alg = ppo.PPOAgent( + alg = ppo.ppo.PPOTrainer( env=env_name, config=config) for i in range(num_runs): From 54ac7f3ae27c5c9c4963275071a24ca1d6f0660a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nathan=20Lichtl=C3=A9?= Date: Wed, 22 Jul 2020 14:38:04 +0200 Subject: [PATCH 363/438] fix auto upload --- flow/utils/rllib.py | 4 ++-- flow/visualize/i210_replay.py | 15 +++++++++++---- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py index fc3229e52..db0e811b8 100644 --- a/flow/utils/rllib.py +++ b/flow/utils/rllib.py @@ -8,6 +8,7 @@ import os import sys +import flow.config import flow.envs from flow.core.params import SumoLaneChangeParams, SumoCarFollowingParams, \ SumoParams, InitialConfig, EnvParams, NetParams, InFlows @@ -149,8 +150,7 @@ def get_flow_params(config): net.inflows.__dict__ = flow_params["net"]["inflows"].copy() if net.template is not None and len(net.template) > 0: - dirname = os.getcwd() - filename = os.path.join(dirname, '../../examples') + filename = os.path.join(flow.config.PROJECT_PATH, 'examples') split = net.template.split('examples')[1][1:] path = os.path.abspath(os.path.join(filename, split)) net.template = path diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 60f95f6d8..23ef5fdd4 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -6,6 +6,7 @@ import numpy as np import json import os +import os.path import pytz import subprocess import time @@ -241,7 +242,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(args.is_baseline)) - if args.to_aws: + if args.use_s3: name, strategy = get_configuration(args.submitter_name, args.strategy_name) metadata['submitter_name'].append(name) metadata['strategy'].append(strategy) @@ -362,8 +363,12 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= '{0}/test_time_rollout/{1}'.format(dir_path, emission_filename) output_path = os.path.join(output_dir, '{}-emission.csv'.format(exp_name)) - # convert the emission file into a csv file - emission_to_csv(emission_path, 
output_path=output_path) + if os.path.exists(emission_path.replace('emission.xml', '0_emission.csv')): + # csv already exists + os.rename(emission_path.replace('emission.xml', '0_emission.csv'), output_path) + else: + # convert the emission file into a csv file + emission_to_csv(emission_path, output_path=output_path) # generate the trajectory output file trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) @@ -384,7 +389,8 @@ print("\nGenerated emission file at " + output_path) # delete the .xml version of the emission file - os.remove(emission_path) + if os.path.exists(emission_path): + os.remove(emission_path) all_trip_energies = os.path.join(output_dir, '{}-all_trip_energies.npy'.format(exp_name)) np.save(all_trip_energies, dict(all_trip_energy_distribution)) @@ -510,6 +516,7 @@ def create_parser(): def generate_graphs(args): + """Generate the graphs.""" date = datetime.now(tz=pytz.utc) date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") From 4bf9c88278fbc8ac454b3f9474b1f58a260693fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nathan=20Lichtl=C3=A9?= Date: Wed, 22 Jul 2020 15:24:50 +0200 Subject: [PATCH 364/438] fix tests --- tests/fast_tests/test_examples.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index fbd78294d..07ee95f79 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -192,6 +192,7 @@ def test_parse_args(self): 'checkpoint_path': None, 'use_s3': False, 'multi_node': False, + 'upload_graphs': None }) # test the case when optional args are specified @@ -202,6 +203,7 @@ "--num_steps", "3", "--rollout_size", "4", "--checkpoint_path", "5", + "--upload_graphs", "name", "strategy" ]) self.assertDictEqual(vars(args), { @@ -221,6 +223,7 @@ 'rollout_size': 4, 'use_s3': False, 'multi_node': False, + 'upload_graphs': ['name', 'strategy'] }) From 43e12c9631868496d45c1a9ff8b159f79b447139 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 22 Jul 2020 10:14:56 -0700 Subject: [PATCH 365/438] New data files --- .../rl/multiagent/multiagent_figure_eight.py | 2 +- .../multi_agent/checkpoint_1/checkpoint-1 | Bin 20358 -> 0 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 210 -> 0 bytes tests/data/rllib_data/multi_agent/params.json | 142 ------------------ tests/data/rllib_data/multi_agent/params.pkl | Bin 21381 -> 0 bytes 5 files changed, 1 insertion(+), 143 deletions(-) delete mode 100644 tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 delete mode 100644 tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata delete mode 100644 tests/data/rllib_data/multi_agent/params.json delete mode 100644 tests/data/rllib_data/multi_agent/params.pkl diff --git a/examples/exp_configs/rl/multiagent/multiagent_figure_eight.py b/examples/exp_configs/rl/multiagent/multiagent_figure_eight.py index 0579bb978..2b18bf61b 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_figure_eight.py +++ b/examples/exp_configs/rl/multiagent/multiagent_figure_eight.py @@ -1,5 +1,5 @@ """Figure eight example.""" -from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy +from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy from ray.tune.registry import register_env from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams
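The rename in the hunk above recurs throughout this series (patch 362 earlier and patch 367 below carry the same fix for other files): newer Ray releases moved PPOTFPolicy from ray.rllib.agents.ppo.ppo_policy to ray.rllib.agents.ppo.ppo_tf_policy. A minimal version-tolerant import, sketched only from the two module paths these diffs show and not part of the patch itself:

    # Sketch: fall back to the pre-rename module path so the import
    # works on both the old and the new RLlib layout.
    try:
        from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy
    except ImportError:
        from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy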
diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 deleted file mode 100644 index d346e9dc58b39a5b511ced70927eac1d0d32579b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 20358 [base85-encoded binary patch data omitted] diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata deleted file mode 100644 index febe7b205e46a15ce78f3248344fddfc47a3eb3e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 210 [base85-encoded binary patch data omitted, together with the head of the deleted params.json diff; only the tail of that file survives:] - "Box(6,)", - "Box(1,)", - {} - ] - }, - "policies_to_train": null, - "policy_mapping_fn": "" - }, - "no_done_at_end": false, - "no_eager_on_workers": false, - "num_cpus_for_driver": 1, - "num_cpus_per_worker": 1, - "num_envs_per_worker": 1, - "num_gpus": 0, - "num_gpus_per_worker": 0, - "num_sgd_iter": 10, - "num_workers": 2, - "object_store_memory": 0, - "object_store_memory_per_worker": 0, - "observation_filter": "NoFilter", - "optimizer": {}, - "output": null, - "output_compress_columns": [ - "obs", - "new_obs" - ], - "output_max_file_size": 67108864, - "postprocess_inputs": false, - "preprocessor_pref": "deepmind", - "remote_env_batch_wait_ms": 0, - "remote_worker_envs": false, - "sample_async": false, - "sample_batch_size": 200, - "seed": null, - "sgd_minibatch_size": 128, - "shuffle_buffer_size": 0, - "shuffle_sequences": true, - "simple_optimizer": false, - "soft_horizon": false, - "synchronize_filters": true, - "tf_session_args": { - "allow_soft_placement": true, - "device_count": { - "CPU": 1 - }, - "gpu_options": { - "allow_growth": true - }, - "inter_op_parallelism_threads": 2, - "intra_op_parallelism_threads": 2, - "log_device_placement": false - }, - "timesteps_per_iteration": 0, - "train_batch_size": 30000, - "use_gae": true, - "vf_clip_param": 10.0, - "vf_loss_coeff": 1.0, - "vf_share_layers": false -} \ No newline at end of file diff --git a/tests/data/rllib_data/multi_agent/params.pkl b/tests/data/rllib_data/multi_agent/params.pkl deleted file mode 100644 index 192cf7558830fe2e280e383cf7777e9ee669a7f0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 21381 [base85-encoded binary patch data omitted]
zAHY?EDCLL=3{h*D1pby&!0k%OWk!XkCb zPv5uH8>ix^0S@%n6C@gfO>{I791=Ypq{HXMlVsnhyv9{5SgiF}@=` zh;F;^|9kZ5_iyz1Z}h2+6aU?!*I(@Q7W%(ibaY(!e{zfd|48(yZz0K@dZGp7{|q64 z3-1F4^oP`-A>@-ZMDxZ{N1amqG>~*exiBiTiITO{HI;HinbRivNlKKl6hJzd52r0~ zM#t9>1`;JC8ZCkzJa>ugM3_qDkkWxClA@%g z2Vn$q( zZGIJXi#2g-!cmkYlS4*~%9XPP9FYVPWH=CPP3Dx-aJCwU#2d`q7=uqV6>IVf0)Y}r z(9jf81ymbg(PiDCS(`XSXUuT8k0~LK2&6REYbMk3y0kjWf@q@G2gf^9#uZYuRFU+BTkC4cACfBL@tHssy-$0zqb`>+UE}6U`u#3u&81 zCL-+5G7}D9nZX#ITnc)L<7_=PA&EAJq>u#TsUYpHQi^yunRI}YF%4@w;iPNBrE<9n zB{p@s2u$D;_DW1DhYY_~O;Ao20JhM!!lkzSVJt@k;K{NGTa>*NA=aVX2Agbaj=qw4 zU$PDZ^^!tFWfVpz>4G$sOf5leMT1fevK`f2)N7{^&f5Gx%vi35)K|TJU}_ya$c(Z1 zN;o}LqS6l&DNIS3hEO+^bH#`+P=o+Wp{~6N^|b*)*G5J+7-B4vDS)qpDW$kdDD5Cy z*mdfSYB&f35fqacj)ixOu-9Lng=rrLxqn+the;3ZX}LL*E-ZU_Y|C+ZuVhH9k_ zQdUc;A`!w6Q~=13VPlap02T-BXjEDND+lC)YF_$5;fxVsQI{wu4U(BcE)nrWU=4tc z2AmWi*cnPVm^_JG%~yzc$r4QeyM}OJOHtwvPll|P3Kkhp!ohJ(vBpCKh=Afl3#YI2;5A)Ps3KjIV|&lVS>rus}GOaEL;w0!3P+91{qB96`9%fhWrh zaHc{bNB^}rtXObV3pdA#QbjU7*wct6@lYRuY4f_eZh}zk<&i*Q0}v1^EXA0w;U}LIU}I04#tT(1jDg z1r7!KV*$00c55LCx70$&Bl*e!c6Q;b9@Z!NELkEri1) z1foqHV;ZU*TUQnUzK0DC4lEyABg`c%VmMMRpHr*Y9?)hM3AwMj7M2N%#xiBuv6@CI z3QU#uDq4YQE=@qsG@M4yGAuAuwklC4KfXLjp(#|qkQ9hn!u1x=Nn8k%Q8{X z#Rj_m0`mf^0<%n$0*eBArctJGfk}ZS-*Bo8omOC=loZsCLvZ-C!8DCWoba><%Lphi zVi^Mkz6CU;JwVgLS3u{}lFgJ15x`*6WB|FqAQ%4G6oj}u4bhB^r6R|vEyfaHP(TNk zA`gs&iB7xC!RA0B4?G_7id5B>_R>V_-9bTr*@Uq%898+(-1AV z82ABHF$WHk6xvuNM*(>?4J$R$f@WYux1pQS%?!X~HyjCJV`Fn>3s40gj!6WXe2pi$ zEaQ-^8W>?a0T|+BCE_qa@hL-$MGy~F^!o?MJ zT%3n+KA2ZhD5#7QoCU0yK^kGT_Cq)gLRw4(HE>#Lib)SBtgh>jA}@n{Cv_uqI71aD zgmrwJ&5=Phlsrj< ziEcggr0vC zyThm~qzA&!;ui3ouwt{vs5ug?#Hlu0Hb_DyhjoY|e^3N)sy1wP@>s2^p(fZrf-9p` zkGvKx7P|sK@JmEvA4lR%!h2kL40>Zp@(xgU+J0Ys{)KV)v zq%Blm<6NriZ&3@7S;s{b@R0B^0v14$L=NRg)Xc<903ijLR2u{cSCb(ErJ*{_n+Aa_ z$P!m8mGUGAH%LQUtH;_=1=KyD)lHHmT73p{OxiT;WHfVo&rQHwi^5M?nzcj&Pl*5r>jvz<$lrPKV zRp&w-3uw4mCVenX-;Gks%o9vu?FN>#udpS79Tr$DlJAtwMMdSN+RZ;q9E{@6aI-X# zW;A0Xx)FFO&^LVw!Ib4u4oLVhE{?P&P4)YcT#_Jkq>Ke+>nH}x$f;BM&V}z3k3;_n zkM$AgvFJbGIHg5GhL)>zFj{n$Ap|C_1$3ny1T_$vh-tY7h`VCA$^|}?fea`UI2Z;9 z5)3!6Ot0#jwJ6dA`Eo1p<3`j5!m73=9J$0v^%%8VPlqSzDPDmdUovX>(&?Y@r50Ob z%GapoQG6iFBig_XUFoF)pY{-+b+HqPn(r4E)7^*Xyv*A0XM*oOi zbzmFOoL{lah7tyZaFR4Qg=vguG|(bFE)~$ldRMhPTMGnhd1iyCq5NzL2cDN8xz`X8 zYke&X3402C?IaKnNxs%oGt&5x{M2XKUOlF5u4S74Khd!UQ~wV+=RyUu=-dFRqZ$VC zKdeRv*raIJA8b@;VuNY04pAT>d@4Q)bPLY`o2Z5OYsjo^h;asV3p&LkJ)1=`N8d1& z=`rVIEpvMO6XyJKcK;!(oazHZa;^=%>M;r}()O@OTc~I%x`-yC3+Tvz<GGHxBr~o^*HoTnJvBvDB;{05sr?!&<;lzWDr(kMJl1TpdFm0 z8KMbxH1O354HWU<08=REgH_TB1O>lz*x))x!uDDK;wV5HY}zzZ6i9Cg+XnLjO`Zgi z7Ajy@iqLajq*tkrm~|}DfCf&*7M>tVA|JC#hMILaOV-n3VKsPCIA5ef_(X^z3x!jK zMZ%dFVT2;c21i|bdpmI~;y^>EE*7EI`u;2KZ}(&c^|pm^b=w_r6bY`rv06y7o)lZ{ z&Jv!uYGmg`2FJhLF2?;-1<~PuBc}$WxwJqO{J7^9o1pf3+d8oUYHG~p!lP$;o58ro z_QG*$fqOiP10uk{%qQ(miG1LdgOrPou$f2`Xd5Y7MSG)atx7V8urQA?O{a4Nx~ z5x<2_Jq<^ZLDNVpw7nC6fzjDy!d9E3sXJnh9S&))VZUV{2!zZ4i=GdFJy?bWy{iUxjs3pE zi+p3zR2nW}Og-_)G68F12q#oi$o^tLmZyoNb0OA12-&_VcTsS9@OCdQxiG;djvZ=&{2Dbj%5y;-bvjiwj3xYR| z9pV=f5)cd~f;tJZ=EA`)JY$GbRWL-H4ah+7=q6H!43n|6Ul16pFsPNJx}Hw`Op-kh zn?pK=M#~;Oy6{P&x6=1?>qD8jYjB5l#n!hx5;m*nFT-zbSmE+%K9(i5 z3*7ftub?R-HV3;{=1Wu`&EjU-+kSXwva9RzZp_KY24{%QFYA?e04vz#J@(4`aKqkx z&$MfD>dMuJdpBQ@9=O^1)syCF_QR!Hh8hhl+9DfoK6&_&r#xms^`#RAqHw9|I(w2N z#?5pE({}p61A9N1%(58g{LOIdn29T<=8UQ6C*1vAY16UQ`)2+<-rE#yp0ZKwy?^1$ zUSX}49@#W?EHl5j^>U|SL&r>7yYMf0dZy%g%FFP^mk%_JO!Wr$RPt4kx->2pJ6BRLI#|=qO;H>(K_9n|cYhA*|cqRR~@xkSa z@7=5XA}*!&A5kX8&)FPz7c`x`L$h+3=M@*u$grr76IBPw*5&V5VDIsiDM)x0+Pi#< z7k$QnDK-I?!fU(_n@s1k<7-;Q+`byt1j~-mNtiI}$@S*$+Rd_82cHIX>Nb 
z^V~k|mk?hUoV|Hz&$_&rkIC(4G5HScWzx zE4wYX-7)uB@15=3_$Iq0?B+g|X=R7k?loOM+$lbAA--oq#p}(>&VS#%HszI>tj(Qt zcfatq&nK&T^c8+|4Vm@kvXi~w!~&VM(~C}3wB+Ns#ofda`LCMiUi+S9{F1)DOOrQ` zBRhT1fBdplMRp?7>FC>+X$ErP_#rKvSj#4apK@Rh$?JFM+@+XSL;2Q6&S#uzd)uqM zvf>Yp<9$iVvBGa1Xx>d*%54k!20dI*+{P!t(fjd^s^_oUyb3&5^trequA-*N;WWSJ z@_UQs9fZI%^0&&t>H}iz1rO7dy=2ouduk{6*6D$$qXiV@9T^2fn-;F}j_5 zd^$}ct1wx;W4dFZPe?Fb9#YtOgsFGC{mUww6uv7w{`A|bZT;?ENEkYI;vOqT_JgTu zUti6BvheYYSa0j7^z+f@4JKah&)Si^VEd%sdYz3(==0*VBqQ+T?os`g)}-@&A|8Jo z)~8q3_ZjcTiH}|iT`}~unOC>N_bR9S*&*A=rOumOe7rBHQqwW@V|zm^RB>^EQ{K z+*vnJ@!;ZT_4&3_4l_pE+e%}Fd7J-?XV3SJVLC)R9f;_&phZrV<)#T5`S!})%68Y* zODj^x1oe%zNg{aZcSQ8W4qHDNogFsv*5r@&osV`SzFiOPWIVDfZkD_4?fV%K!|jK+ zJ?o=c60d01v}3^73*~_yPvYaf`ORtuXPNk39Dni863-Kl-sh~7$U8j7Zyr0djJ5mb zq8leNZ};Hv%hx4r??`apboCmy_Q-+b(@~@GqI0)jxI6nr9Zz zj!sYceXU!-{wa4(nzOE+-{Dg&d>D0mXq%;vk4IY7#LZ@Qxu)#Iep6(~vf18z<*T?8 zw+^OF+pzlj{Me=5cA;3G3r6xv+Rmg^uSP~pIQ;VU&CBbqR?U%KF7>#+Qh7VdqpCb| z*`5kbK$UOx;Zp}@TxW3>K6u;ph|#WVVa}U^C3A*&UAW|aJaUG|iT(Zu?F(-8_!1~F z`Es!A;UDU=^Ta2tMqTW;carzbQLFi!ve(t=?}`dpHKHx|cCVCOjtqEW9`CZ$vW=l& zl;8EKMq$l%dZt^N4GVTTmU3(9sn>=dB~}k|x6VEM`@`~)okd&!NVP9^`)zWyG%Pt# z@y_VH$V zrH#|yWoPf)5Il6xXGZL-8NN^at3g$TQk1>HKujjc~?r7O_+C=w(7Gq^VtY? z(}^oXO@)*0Ch~BzWfOb1)ZDF1d(!?~bFOj4ZSUB@`(sXY=<~+MWsBEp`%^)AsomyV zSKgeyY+V0diSOGNBpus&bYR;2_xBB1<0~JYa$+sbsZc56_iVp-E<1b7H1&b;)of)5 zv(=OqE24yq@?Og?Z%muh{N(LhPgk}6)W<62)(~!4$?w++XLhUPwB1~^!}8u=58wak z{%ksXdYkCPEgKzndKUS#x7&ZxC4Z-Itkvw6meQ-1Q?qyMtr6!QE!kG`WYONi^3TrS zgaMOwU%mNi{5XwLkvO}PQ$S=px$V)9FWshe?66QIdo=v3s3<*|kS$)*Rer-uhlO|sKj#YhY zm`o6YmqjhPV|JV=j234P@?emFJsYj3De>rFukjo5yx_wds_XI&t9b^7Br)qbIGN*oO7-?TnnR*WbuTJ^B4hxaFv3{q2J0Z4;%8 z|6n6>{6n%bHQ|!Xbvb?F;HpuX3D9c^4IE}U6P0%KK zGiRxV@suueO<%0|wJ^x&4``}`dHF<dkTBr z%Qd>YRxeUM_=5cqiAph>u-~em!i@$W=~c6l(~F;~EG6;Ri{Cu>VQosBy9k z3Vx6Ted|qzdGMy@s<~p=cG0M9i8va*ieu1Qe|TfjA%Gl+u6`c9qajrEZbYN?IE=81 zx&O&6J;+-`e}4-VYt!lEc=bOVFJ27;5lxxshVk(BL!2$1h`KhRw>)@*6BKbiy7=jw zsla*~jG0^;F{fo-bnKZUY;)xB>{)*#@T7@O$tF+6$v+KTL(81$J?i8O){&ZcnT+US zWxck%^l4FZi)--_)2i%S{N{b1g`F&OVZ5hpe-}Et=lL1$nZs8_EhtZIGqA|cJFmU+ z?YW)9J+7_KoHnz^`JScROB-A`?bm)y>U%d#eCgaNBT2%iBg=+&t!Ur2*lR^Wc#Zfz(?R4^*G*{XD_ z&{g-@_g$P0u%iAVj?e0LWzYtn^QBYH^c-i^clW~YGo077|6R^5EiHEveH-o6b11{$ zj``LzCziNI^qTuErexMe;~QaDQmXw%Eb71-^5pLCgHs2&$lgp|TRF&MYp=E^*6}C# z9}j-o;>wE5@CP$q*x43mm}RW4=sddfYpWBlwvOFUD5YDKyTA2fhRw2Eccr)W-ckK3 zGDo>4JWe;HkKEcU?^g4G_4f`ppT9QByoBZE*rG)<%eN1&ol&Mt4tbYu@tkQ@+-7Lg z(Jyz4-TmWM4+*VkUg>(s%t+X-mt;!}(}TET=dhr4Si;I1`~+I%p7-UBX_eDrlg0c& zgWL-k(f&~p-=FQ9Kl=#kCc zX4UL!PQSfg^FC}l8{a?NrN=8#PTQ%Mtkb3~Ze=vf?$%4ANAuecJJ2-nPuGVn&tHH4 zdSwg8g|rIE0@t*y+;2JWA4Ygw+~67etYq!!(#Nf~e5klpecy9WV0!Z#%v~+xld|zy zHzv7cS^iac>Bxn$>`fictoF{xusr{$yZ^M1Q-T>p^YdM*EB*LzRKjPOm^Xjy4= zFKYIp&v}J$ZCj^C4mlJ!VSYv1X+~8y@5L?`D?cv|Tb12!#G!{HgZnrg+!TAQpG0W9 z?FLrVQF^F{DlJ~w-o$rB=*(-;5f>yq_TiuB74FJ-yUk_Hz%g@=pKk9aSm{wHFTLC3 z&Ky^K?i@zi@OHGw3{6c@I&=GrJBvPFx-sLcqTlFd%jLNRqtlWGxefTpTjnPCy3g-J zvAAD=;jqvU%aYkwq2%GlopT1B<1l6P+{5;57Bec&LZR2Ep03KSi)9P=C;GfN zSW_tp70yY#bH-8PIy6w#lSwljz9ap^&FZj<09u#jd(Wy?97qi6*{gV5Key1E6Q4hh zJDKyY>`3o6mp2_aH2uPnhnH7{)ofHx9&MPA8XdK(w4YtRQTwT*mdz-7;lHo-c>|jB zhdf`K3wt}BFI#ix#7FPs7}swHSO@k*j=M5lJcPZfYTZ`;=+>N5w4R+@p5{7L^qrG; zY_Qd*PmV^zTR5n%ru56rwaRTNjVib7pZMj%fd|jN8Z|dC^_#w@p|M_Z?L=v&!^D?^C;H?F*=G zwmj+3ot(n*O3RW+FYeq+dz&ZAdZ*5^>~Y`!pyb3c+N@I%f2f1~zHU4AxOcZNeQgUtAylQS)T4v~%Bq zJB(hu-F>=h-i!XNZp|pVHf!UqCjx0L+Hm2K#m;9DtO^4mAx>NbK!o$l7}Xq3ggXV}|`N2Z!C?A-+K zwtG^}v~z!U%UJrg(>?h{`ps(l)2&{P=iZly42I_L_GGzsy&Lm}-|UlN>6*$j!t?iA zOuYPRYfo%n_O_Xt8!so^8*ek%F2XT=64Q;bUXyjr()gG%v1hn{|KcyNC)FsrZ1ovl 
z^;gH+j?*23n;gkGX=C%Sa?=KbfrZhT{Or>a^rZ5kL zle})+Sb3+WvWMn$yo2r9gS4vbp0p?L<9hzpOLOp0hexT-7NrhGtAh$At{)$IHfm&$ zTUp@k*gH+TJytJT{kjZ_k~7{8@ig4@;KXKL^^je+1Z#g=ICI;doeJ>-_Y#)KUv7At za;aU-_@suGVW-P&;+;xsMusV7(Ws6OM%;R=YSOK4DqimxZVEHxkFr4;I zaDJ!qj167WwU<=5VBfh#*G4_%J~!$=+K}76WLwk2ZB4@V_3pk!xM@`71M6(RPYI@p ztW_1pPcJV!>GE;TSjm-x-NyB5vDPfIPmib%Ejzy2a-prgMU&r>eHY;!LZm~Ql?QfO zWLKEquYY>cKNke9PEf<~)1HuJEHZbEemvwQlvwx#gRID|aU+7>*CjiVyAO8dg~N zuEYI`ET*EC;-g;Zag zd)j&btoc3Q|1zP4=GZN1}8qe%p80>5%Xm=_pqHpsn zt-rUnQ-8S7(c*62(C+5XM(p6OFzavr{`rlw-S+kmh@l@TyXjBt<~~dud9V82$RP6x zCDW?r89J}|&J5qY^4pTs$;W@+wLtXtt^KrRFQ;rOKfH}mNxRki&alA;UN?IlH(>0M zLz?`FWeOJCs)8?@z?s>~Z2$IV+b%lxxsbeBOdC1uZDg;$BY87(2LySY`?hjcf!{H$uvPv9E>=X)A)d=Z!!Vmr88&yw-L!r3-PsY{Nwb%OrIE{EefQK_*3LF%= zvsH5H!5Q>uj4As4B~&j{K$uX~Xy_*dEy5McuUimiP`AWG<>1hEdLDX}5cYTypAAnv zh^R73s#HH4eq)mQO*Zr!i)!?9KMGacgiYZw8z}fA zOB8i@UQ_!8c>ac}W!Dj)+eSdzO}|4A9AQD$ZjjH#0l}98$QOx0zoSQ6Ee5hu1j^Gy l=z$}1m@4&4f^e%Qy`uL Date: Wed, 22 Jul 2020 10:34:53 -0700 Subject: [PATCH 366/438] new data files --- .../multi_agent/checkpoint_1/.is_checkpoint | 0 .../multi_agent/checkpoint_1/checkpoint-1 | Bin 0 -> 10894 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 0 -> 210 bytes tests/data/rllib_data/multi_agent/params.json | 151 ++++++++++++++++++ tests/data/rllib_data/multi_agent/params.pkl | Bin 0 -> 32787 bytes 5 files changed, 151 insertions(+) create mode 100644 tests/data/rllib_data/multi_agent/checkpoint_1/.is_checkpoint create mode 100644 tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 create mode 100644 tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata create mode 100644 tests/data/rllib_data/multi_agent/params.json create mode 100644 tests/data/rllib_data/multi_agent/params.pkl diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/.is_checkpoint b/tests/data/rllib_data/multi_agent/checkpoint_1/.is_checkpoint new file mode 100644 index 000000000..e69de29bb diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 new file mode 100644 index 0000000000000000000000000000000000000000..127111e6750d744d4cb10f31508ed1cf1385f4a8 GIT binary patch literal 10894 zcmZX)c{tTy_daf($xxycN+l#J4bE$?BSVu=BpS#NAw$Pd8kA%#naP|Y36UnldF^#n zN*WZYs7Q0spjo4@dOz>y_gufv`Qu#I*?XV$y3bnowa(u6z9U5=ctX0)5<)^kTY`c& zdIj@@&CT?Lgd+bLmmD7V_Vo|-3J&23NAN_PMTQ65H}gb2gWb0p2K)Q_dKiX<`uc|$ z{VHX9jxt z^Q1ik!vZ#KHS`P$_A(3z^AGiP4<3Go%(`GN&!E7N(BLr7P#K=gKdWQ{*Z(uileHF# z;mKJDd+_ArcnbEs5xUmG)}q#0u{_29EL|VEb(5D2Pw6jo?^!(Mg!nj~ial?nuC=JM z@GuaDH_F!5cFEB2>+hSjP$*B;Bf&g!c=A=C7`N@=G@_lN3pGFHah1xP1ov)&RGQWR`fpDWnC*bg+BUtl+p2&z|~#kCD`7#1o8O z;Eo9q^cimiRo!-oNn6O>AiGu&{$>zcT}N?kv%cV)7!$5xsFJ{0e>7M1$yC90>tZym z(&Gjdbz^v~828yDakw-j#!VKTz_nc$44a?jVX=_`w}>mumE3t1nnwJ_=H`#2Cu6PP z)|3ylB1e;}bmAz^6k5tnZohz47E{pDC_->qViT7)I+Um8;Vkz*eDUyg58X!@q3gsrzxr9SfxUp9`M>GxzU{ME?0q487@IEpUO7)kHCNfX}qP@blT^T_|g-t6ul z=Jo%PUF%=?+JE3ru;)$uFKu;(f52br?GN?lR$3fqDzr!8`tA?V#odn!L4cjvZ!nIv!?nMEIHm~uxpM|1IDH~-D`Ofe>;gcO5qF%P4Oq!u!_%s{m>)ZW+cROeZlNt?@^bh92$ z*@>f@S2TC^79ByCToyjd`H1O}UVJb@^TjZ=8qA~T7L`Z9Us&&PK7i*b*_i&OIW3tz?JA6$+cJ%$sNs+;4a)5&V7E+ zK`@X∧t2u+wKa9;KVl3s{$B`4j70lOdpZ&E04vWK(s|Eb|$0#5n2g1UbyIMtq~ z_a6f4|0STyg$Cjyb)J+x;M4GiCYtY$v~O$^rC0bg>Qo~Yu{clq+*_%8T_c@0x0TH0 zo}<2L7wNYz0;0XSfy|%PL|f)uB;oFjMAC?o`oI%p`sPN`=^!As4)tW^$#mMC+f4LC z&(SZ9*GNzhOHuza$?U&C5)R*_JcCf)G>`x3vorqfvxffypZ*89kv-4&FYrGdbH*R) z2`LH*eUH$CbI)xcbX^gL-*T6%GFGIHk-DVDF&SIZxEMN14njgy(awG|;}E-u-0qbH zTfcH>^m`808B@?IT^jqdelh2tDWja~R`!~>HqF>Dj(Rh@0oR+*Dc)|-88?x9b>0r1 
z9d~KNng%+mJqV7v2Ek%^DSi@BA&;Yxi zD^a9(ED^MNEUM~a1(ivdm6^nDJ^z7P zuh&MAz)EKMl?39X$AIfMJIqr)gGKK0L~XV(E^%7{BW^v12e~<>t$aBg-_-+mZO!5B z3VB%5Tm%-v@96tq6M*ht&UtS7oysiHhMHS_bYqnU30~Pru2|=?XT??r}Y|XJaI|2JbG%;zV1oh+*qEc@}IwZ?zeswRM0|(*rx5+TmvKFUZ3u9OG8iIbv z0_<93h3y-zF*W5@Xi_u-d0T=ZJNPa26YeLimkXHpYoxHTKZkMo+DxBxXJFmZI$G>n zPgMq+2r|pSYKse{C9pp8nn%m-cLEhxJ864omv5{r;0qL}Jjp`>P60@5kq_A)m2=gT9 zZ9iq)|JjO3PpgLRz9v|DzzN=+xkks$IY2zWPD6t)2Vu!v-Zbo4FeY&=L;36B7Er5TX#W-fXEA}YlT(V=cT?TNlqfDJ~fsfxql{Xy<1BLzuCh2OKUOXw-ies#bZ@@FVntykla-^M7~J~ zwNh6l$HeZ#V&5v_`A`Pm6x9&Nlpo}fwLfhOmt?=JG$CPd2>Sz#Nx|bRJm6RfHs@{N z&E!>pD_k)=*?_JMi=(NR%i)AS7f0OB!orW=ncsu!AuvXV$cjW_b#OY`_asAZq9Xkm z69j?1k@Q-53RKTb$C!u{&tx+*BWl|qG(7Oqrb>!postmY8Zn8gjl(Idq zw4rwK1-f8U1udE6h`%p=B{QCSLYqJcue{ntJ#>9B%~Fq?wHis6SEs<`sy2+a%!U~c zd_Y!L7!IG5LFp%a`fXt{@XB|Pe2#=~FQlr?B~0y49kRIG3(NM3LfgrB9QQM!=Ll@rJKQHTu%qw3xzUYgG4OhxBlzt)NCUoUf}6%z@@#e*OmIx1^08v*|NR?XAlJdPMlJ!` zB#x7hjKZR)A@TKMn@3rmf^qyg3thKM;MbyNIlfI4;{? z4T4R4`b%gNO>3S7`5C7`VnjAsz5>cwbAtWd#6{u3Xc)3QNFp|!WcKZz0>`2r(dUtB zU>MN_u1a@^cH%QKSz$3s-<ghwT`B-Z283lJTC3FzEY95xZ-5$T!DDu$VWEKDzRg z1f-Nf$PRgQ2|r7cPC3Hm3D$UjMj|u^=h2{7Da21a8?Hx8BL{7!!nDnbv_+wY^Ynow zjPs8`+o5pu_1#NW>q~(&jbRQ+N8?PFKpcHKjocSbpo{P3!omSH*1osDl-+2<#5rZBNS@2N5GMi-;}qj8P+@>pgU&9Q=LWIhxwREe&yv7lZWq^ zDJI9*%(WiaY+gbq<#o{~UV!_Pmt*bn%^a80cIJ4x(+O$8$gHI=h0Nsy>ACw<)3If(fkkTx8!o_QM62 z)2Xh33IwmVgF9}M@#X74$og)DvzynE#$);9?t(NdJ)TKaQgx_ht|86P801^d-3o6C zuhMl@hjVt1OCj)sojqm$0I+fQl#W zgGEy(!q1(r>CWNZC|S7@t{HkU?&c0?y68O_G-)Tt%#aH28Vfz=v(R(iJeqgZ2=f^& zjQDhfq-O7=%A(t0*S&0F_b44Mt#bpVmhm`AY&Y&5vyHv-a4%Y?Xc5)UPW~jzjZ`7@ z1&0xjqG6qK5P9HjLz_(yye-}h@kgDoBXKFQSMQ`{pAsR=sgCN`anM&<2KO7SgMj)P zmScSe44o{XzzgY>z|GWd29IN&RYITK=;nVbEWi)>zWnWAM*OB0!lL#ja`O|1JQj|k z?%Y&jQm+KXI|4}XXC-dhf`fQ{Y!$YyPJ_bT=3KxBjX9?4WF9LK9EyK^x@(9G@26_f#L2$G*;;)tkXFL>A_AIeft=+vFCA))MaMo z#F=1msD%Ca`w)G|9*5dpr5JQc1!Yg{#g12NU~K0pc>BqMoEPe%+)N#))=r?_gFk5= zNg+EYi~xasGqvhjNjYh*u)?Jf-8Y({@VXtt`PauN^9*cVyM$;g{L17mFDA5cD`a`M zlhuybS>v)9w0W!vTF71|uU*SQF1(ivHA<64+gqlNtULzy2Qe8U=4foWiYojt18+@D z`uI&W9eX^8UcK5$bNh?w65|2tGsyuwe|kZ;UN?!mnhoY<*$lsKF%~5~qZ72%$l8Uo zQJ`~;_-8BQ%58bn<;D@{7|_EHr>(JhTLONTYtbt)3X{*T#Hb&ta9%eo+cMdTK$ z_e2hITO#P_1<5o`=q-Kx+nKCWcO}(_c90^jCo<}FDC(kv3oMq?PLBpyl$=PTW{jrs zMX>~RGRfS`8#L2J2YME&(LLgda~TCnifef^bwnJP-uXph4z|;0GaZ=;%1ZcRj0}D? z`$orUE+ALiWyqS@eN?vm4te>pfmHVGfOUq_%#gMM)%tM_2l8}qz_6bja?3!MuaimN zF+1qeXe2+r=VRTQ1PIZSMXfneL~J@lJk@3GtV~qfRT-t|r zhx*xNA!)?@b{D&bY=o>tf2htX!PlAZn0UDq9C}nnWFLsarl@%+by0_^Y>vheQt2pr zt&B!n#Gt4`If?61z=&;q{M08FxK=Bk{@_?Z(}Grt!X=#N-B;P2VYkVZ1Q&3>|Crsl zwx=OjuY$NH#gorA?R*|bA9|NmvbUY*5XXk=uzQ0nx(X$cPOt~LMl+)J?HLJgf6LT+ zT!-5cr(weeQyjl81y9b%!&lnB$cI#Y%s(Q5yax**?Ws4t;ZTo89y4)G^)A@EQXOZP zcrsq6i%ID6JTMd0#U{5tTC150S?cHMYt22V^>YlWIOM@EmlEodFGY_B7QnRauW99_ zQryCZ!mO7{^oU#?&U)4j`9G#W@$}IR&mQNKvKcnSWQ94|s1%93nW4N{|Es#qnEqGv zF!@(InEgjPm}AeI`ybI`-e1wf@~kA`rk!UJj!KgFKsowu?Rz#kP=R!rkETX^CHga6 zo}TNIqxBOc=(s2uY8B7pm`00}!kMZh;*tn$Qj((524~njB1g?0Ns@evYfRPdPi)3I zX`=B;hTfF;%8qMLAfFZq5#t_ZBIY1RB-Br^bMO6Pc&V-o&oq?B`M;{$f29xRU-;w? 
ze6Z(n|0{i%{VRRUTfPK$9bE|ar*;ux)i=aHc>)9;=ixikz0k6%6q;vQ;I?b$$fcY- zy4bM@8HE6{E~1m(%#`3xe-#Du8ZY6^!{IO(d6HNwN0V~@m5lnND3Z->qbDPGV#@3N zsM)TJ*5n7vS>4A3%ua%6vBl)p#70z4TSX^du!1H2mCVG^P2@3~1fExpQ_IwJY+9HI z!I>c>gl9{e6OgD09VCO7o90P=_(JM@ItgE+6o$S=L)Z9aBrx?2@vu=Qt4?Y|CdZ5> zZB?YJH{_vS+FsUMW<9xhG!!%K6RA+?TBzx%V8(nE~{RFh>D}t?S z-M}zo7Yuzr4R3W4>Bdh9gjdyq^XDs~dR`F@d?`eS9Az+jQi6pKCbF;Au7lpT01$Wm zPIf8yfUiEGrjPf+F@uFT(PcUsj@^m}?zYqCTSH-as{)idy=JT)UWTWG!X!dzJ#Mw0 zfd0e+#+XyG(Wjjmun!`$Tt|T4*xz71)I*o+&&K7i%gE*9Bk07(QH;J}9l1182%?46 z1PK|R&|S#{8;?x{xoM-oU9*ODySfk86erOH8Dq5Rm`=v}9f$czX;44zF-<})6<89_(ryXV$X;Kg8 zgUIeIhHVLJ;ML9}r0u2)afQsPkf6r%Rt z0JR-0sMD5&Z~6P_z>15orLBg}%*|tpM8u(|w-}GcD+~7SRDpow)2OxEW8!f-7)v(} zVz>4qBC_lWoi}nHzo_a3-K<;-vsb&L%!4Lo{Ki)#qFIbvIV`VaoodCdQWhf*eWkGn z3aNj{dfZUYGIvHMlCX$m(0clVj=={E&yGduz%!8CvJdw@Dn_@9k4ep?`PiR7nH(Hr zhJr)oaH}yDR!H3?IUSk|M{XKe*2Po3J`udXCx?1QYs2IAli;ka3u{{|f!XqmY-`^L znF3vOdsaoa*OicyGAYDV@;12qUIF{;He=M#LOjZ=hi}2>A!TJAto~+Cy6-y?=?6k! z^Wh6k^AND>`WAw{_3%E)Gi6ES zoTJcaSR{qtLR<5*E#`p(+PvfQs^cNNL%G8&2t?n1(P&Eq*~$eOyp$nKLLq zGsD;6IbhdYhsG%?Ovd%P z0<0P@NnYex!<{8^uxi{HdN0KobYcwQ;fPJ}!M>YlW{<*?Unh`JD_gMob38LDC667q zYj`X~8K~1|bZ*hdn4lBbsD7Ibi#EZaY-ubnwV@x+j>fZd4&bv3-puAn>5#>*Vyo}$ zCJ(#ZX;X$3ed5@I0}AH}HtwYdns&hPJu~67Md;= zzK3?=nt3@yu5KFKYY1K^Ph~u_Z$O^^O{LE zpPxw!O^(8lsWSRR*TK4cYxuHCmsIyoMn#8TjK>8zkd$b^tI-Ph#MX>9anq>X)zi$7 z+itST_Y-kFyb>2ZN=5AeV>se+7ZlFjrdEf3(yOa4poL2U7!LRN6$iMuGczB1&dq1S zmd=4rqYl`)kb&G1fCC>c(>?LpxF=E>Gv=n!$C?wMH#ifTA3vun*=RUktANXn?M0)d z6R_{fQuOUy3UHNFjyc((lr@(pS#xY48{=Tc>J;eT zT|x(KV^G_z2x>|jVeXuGi0~ljl3K~;HeDb~U!KKtVOr$&Tp2jhEyGMBY9vKFh;DV4 zAcqoO!@UpzhFRu8qOBh0m{x$+DUaLcI^5>9;iWcN; zmJKLP&ESY+E5krnDqiimj(PJn;pX+TFeP9&_<#FMX9)a&(_KKfC}rUFqy)I98w&#~ zqOtZ@J5>>K!uNYevKQ|ZL%4|#j&QAk1w+-~{n;8OEf_BtU9gp&`0)`Zj2U9e(oYk! zM|BuGdw|OG-CzZ|Pom~bWt_EqVd3z_ZH#|A+5F=g5$N7!U<+zZ)ggGH?0*hUp=nscsxn7vk332g6^+Yiq^w1`i24#$%w>9W( z9nDqiJVH*mh`^UUMIe750Me%j6UW_l7_=cB-)c_b?+u;=4eEKY+;JmlcMZ_xL51*U z_i1=EMVXe|5JS+v$X<#)0aLuiF~0vL@$)pnD1AH18@#02p{1M_NlQp9Od#yGSx_Hx zki$w#O(qVS0N+*5e4|>)R)Yv%NQ0^57__^*KkwjtisntJRoey8=^v)Iq%}9c~I1fsVL5wp{*5w@W5L_LO_<=%2#e zhLI-lHAVu)XeLtbR1MPNc@QoZmk?#gXl#-cg%^hdV9r}b_>tC&a}p<`nCxi`bZWJp#iIkY}B3Dtgw;5nhGDDbId)Sqj@p*?3wt9=~Q zSark971_*py)FC=b2FK>TX%v68owd$0x~XE&Ky*qBUT`kphSq zzClehE`at|49?!Fj8ZxU*e4JoDZMx7;n(q$v!RSxb|2BK!3Pp2a-laq2m(GCfHu4! 
z;@jM?vaAv(b4vK}+zV{k!$Qa`&4I-Vk}$J5o^hV#L<^=Hk^b+C;p66|aI9z|9^O5L z*|_FsgHc@&jrEfU{=h_Fvf{8w`vys{kHhX#MMmw!7rf{@kD<{ySgW`I!@J(%i14|v zFZn3+Di?tMxcMN>WZ{)}yYcMpchv7m0zEdr09NJBfu3*C?C-)xOj@@ZiUOs$!e>_C z(KEN8Owtgf<{9F)*JG_mNQZ$tVbFbVJ3PE{gXT|~hHtK=)5X?pB)k0z>#KPRw~dcL zeJ5S`V(pFtr)tUe!kFQ`#0m^Dw&E;o`%T_o)Fz*L&+^S5S>cXv*;Mn}0HYzNfb+E- z$nn+E(EWTa#N0eesu#x-`FH88(Pu8^WQ_ww8$IeAa{ybuxI*8$2%MqaO-g)UleNv~ z@KjPd%ejLMmn6TEjvdp%;nsWFU_Xf&abFW$@&Z94NEO0{T?X%cSPP|;raMK#_$U7OPe$6TvcU2iuGv>iG zjZ35=${k0ke}@@Ak5a#%-BiB4n7rFH84f4Kf%=ETq zb<X2rPGM$$KY$aZjF>nlE+_Td2~@ao2V+7e zle)I2?7Mjcm6y50puZ)_EWgR-O)nvfP6(lUD+?_%eCYSCeCSZ>C)~Wt5E5NU*FI~b z3L**A&S*blC%S~{d#5mCrKiIl;UzFb?L5=8)B=;LJkc@86jZ96aA|=CPTL-WPUEBC z?dK>U$5puUzo){SrP8=XTO1D_$Yd9tcuu2pjZku9J`?b27T9WCp^MJ_V%01!@I_~z zM^_^^a__PM^v(N9HsqG#yHk8rsc`_2#wTQwe?9fxmkXcfq|(v1SAvVo8iHf}>1pfk z;s1sWc+!-`m;3jj8h0J={pauv(>P!;sf9$$@1UCB-SMbH5L=g(4jQH_hxZTPsg3$A z`go)itE<-2#^uy(1Bg8gQyp7)vsaGrnId zF-klSGg}6UYKtBnYAJvq(ISY=tDq8D{X`6H(eS1*=(QWb!#xRy?vQ z(K0gmekHBESWdQkmXpESYRbx&(~RMR)u*B=Qu45zC@(rj{Rb|=THLIaF zpH$Pfuxjda(?eIHByg>L-^`ciwI%|GIYNG#9xulOdqHf83qhvQus$B!e(sGCmwFTKLF7#ld7TYs|WS3lqsi+*N*6m8;s z`0|n6ID0Y^RoKd=%vWX%Ec@BE+DrAbWM7zuFBsr#@@!?6i#GE;Oj?<%;i{~ddoA0U zv4lz9c!O=5qsXY%oMPQI0~qJa57_QEZ#Xx$jUh2lz4=*kBbm64R{rP}GIYM15mR-v zhdKJFjsI1(m7SC=%FH?1!CwCSjx)yoHVwtT~EX0%E@Grql&KfC*D!;GJ{EYB{Kx9opiGk@~;SNU50Z}#l}WY58#x8hIs zL}GZ3e+n;^r+S_#dN|&cw=$IH`jod1oo>Q9u__PjO!)Id0vxArfA z?%q8~!!Mtr+s%r}jm`pcxT27V`!&!$`C^i3UPB}u%V_!LQj)55h#dC1KpTCE=>d9z z#@U=ADS4&DnIK)Sd4l{BLsFC8K(3mepbxH=QolLX^v==}D$&5wtA0gPd}1YuuBaln zhK`f!O@8zLZ};qiGy+P)|B>0nkZ0Oi4I0rU(jdpF)RtL-+%Yt zz1)XFYcfX&!L5cyUZ6Bo3gwWMwIok4gM7o&d>a>n<22l{D2j1Xur!Id8W7)(NdpZt zvMQB(bjZ?*u|?WIt*NODJFDU8>yl|7^yVG1r}Noo2$6+Npq9E4@ZT8OrtU4xS`d_J eDkL{*{3bJt4ZG788r_|o5YI{&;2>81HTVIvZ#+T( literal 0 HcmV?d00001 diff --git a/tests/data/rllib_data/multi_agent/params.json b/tests/data/rllib_data/multi_agent/params.json new file mode 100644 index 000000000..9451d356e --- /dev/null +++ b/tests/data/rllib_data/multi_agent/params.json @@ -0,0 +1,151 @@ +{ + "batch_mode": "truncate_episodes", + "callbacks": { + "on_episode_end": ".on_episode_end at 0x1460752f0>", + "on_episode_start": ".on_episode_start at 0x11d064048>", + "on_episode_step": ".on_episode_step at 0x146075268>", + "on_train_result": ".on_train_result at 0x146075378>" + }, + "clip_actions": true, + "clip_param": 0.3, + "clip_rewards": null, + "collect_metrics_timeout": 180, + "compress_observations": false, + "custom_eval_function": null, + "custom_resources_per_worker": {}, + "eager": false, + "eager_tracing": false, + "entropy_coeff": 0.0, + "entropy_coeff_schedule": null, + "env": "MultiStraightRoad-v1", + "env_config": { + "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"accel_penalty\": 0.05,\n \"control_range\": [\n 500,\n 2300\n ],\n \"headway_curriculum\": false,\n \"headway_curriculum_iters\": 100,\n \"headway_reward_gain\": 2.0,\n \"lead_obs\": true,\n \"local_reward\": true,\n \"look_back_length\": 3,\n \"max_accel\": 2.6,\n \"max_decel\": 4.5,\n \"max_num_agents\": 10,\n \"min_time_headway\": 2.0,\n \"mpg_reward\": false,\n \"mpj_reward\": false,\n \"penalize_accel\": true,\n \"penalize_stops\": true,\n \"reroute_on_exit\": true,\n \"sort_vehicles\": false,\n \"speed_curriculum\": true,\n \"speed_curriculum_iters\": 20,\n \"speed_reward_gain\": 1.0,\n \"stop_penalty\": 0.05,\n \"target_velocity\": 6.0\n },\n \"clip_actions\": true,\n \"done_at_exit\": true,\n \"evaluate\": false,\n \"horizon\": 1000,\n \"sims_per_step\": 3,\n \"warmup_steps\": 
500\n },\n \"env_name\": \"flow.envs.multiagent.i210.MultiStraightRoad\",\n \"exp_tag\": \"multiagent_highway\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"boundary_cell_length\": 300,\n \"ghost_speed_limit\": 6.0,\n \"lanes\": 1,\n \"length\": 2500,\n \"num_edges\": 2,\n \"speed_limit\": 30,\n \"use_ghost_edge\": true\n },\n \"inflows\": {\n \"_InFlows__flows\": [\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": 24.1,\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"idm_highway_inflow_0\",\n \"vehsPerHour\": 1993,\n \"vtype\": \"human\"\n },\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": 24.1,\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"rl_highway_inflow_1\",\n \"vehsPerHour\": 221,\n \"vtype\": \"rl\"\n }\n ]\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"flow.networks.highway.HighwayNetwork\",\n \"sim\": {\n \"color_by_speed\": false,\n \"disable_collisions\": false,\n \"emission_path\": null,\n \"force_color_update\": false,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": true,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.4,\n \"teleport_time\": -1,\n \"use_ballistic\": true\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"a\": 1.3,\n \"b\": 2.0,\n \"noise\": 0.3\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 0.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"energy_model\": \"PDMCombustionEngine\",\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"SL2015\",\n \"lcAccelLat\": \"1.0\",\n \"lcAssertive\": \"1\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcLookaheadLeft\": \"2.0\",\n \"lcPushy\": \"0\",\n \"lcPushyGap\": \"0.6\",\n \"lcSpeedGain\": \"1.0\",\n \"lcSpeedGainRight\": \"1.0\",\n \"lcStrategic\": \"1.0\",\n \"lcSublane\": \"2.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"energy_model\": \"PDMCombustionEngine\",\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n 
\"routing_controller\": null,\n \"veh_id\": \"rl\"\n }\n ]\n}", + "run": "" + }, + "evaluation_config": {}, + "evaluation_interval": null, + "evaluation_num_episodes": 10, + "evaluation_num_workers": 0, + "exploration_config": { + "type": "StochasticSampling" + }, + "explore": true, + "gamma": 0.995, + "grad_clip": null, + "horizon": 1000, + "ignore_worker_failures": false, + "in_evaluation": false, + "input": "sampler", + "input_evaluation": [ + "is", + "wis" + ], + "kl_coeff": 0.2, + "kl_target": 0.02, + "lambda": 0.97, + "local_tf_session_args": { + "inter_op_parallelism_threads": 8, + "intra_op_parallelism_threads": 8 + }, + "log_level": "WARN", + "log_sys_usage": true, + "lr": 5e-05, + "lr_schedule": null, + "memory": 0, + "memory_per_worker": 0, + "metrics_smoothing_episodes": 100, + "min_iter_time_s": 0, + "model": { + "conv_activation": "relu", + "conv_filters": null, + "custom_action_dist": null, + "custom_model": null, + "custom_options": {}, + "custom_preprocessor": null, + "dim": 84, + "fcnet_activation": "tanh", + "fcnet_hiddens": [ + 32, + 32 + ], + "framestack": true, + "free_log_std": false, + "grayscale": false, + "lstm_cell_size": 256, + "lstm_use_prev_action_reward": false, + "max_seq_len": 20, + "no_final_linear": false, + "state_shape": null, + "use_lstm": false, + "vf_share_layers": true, + "zero_mean": true + }, + "monitor": false, + "multiagent": { + "policies": { + "av": [ + null, + "Box(3,)", + "Box(1,)", + {} + ] + }, + "policies_to_train": null, + "policy_mapping_fn": "" + }, + "no_done_at_end": false, + "no_eager_on_workers": false, + "normalize_actions": false, + "num_cpus_for_driver": 1, + "num_cpus_per_worker": 1, + "num_envs_per_worker": 1, + "num_gpus": 0, + "num_gpus_per_worker": 0, + "num_sgd_iter": 10, + "num_workers": 1, + "object_store_memory": 0, + "object_store_memory_per_worker": 0, + "observation_filter": "NoFilter", + "optimizer": {}, + "output": null, + "output_compress_columns": [ + "obs", + "new_obs" + ], + "output_max_file_size": 67108864, + "postprocess_inputs": false, + "preprocessor_pref": "deepmind", + "remote_env_batch_wait_ms": 0, + "remote_worker_envs": false, + "rollout_fragment_length": 200, + "sample_async": false, + "sample_batch_size": -1, + "seed": null, + "sgd_minibatch_size": 128, + "shuffle_buffer_size": 0, + "shuffle_sequences": true, + "simple_optimizer": false, + "soft_horizon": false, + "synchronize_filters": true, + "tf_session_args": { + "allow_soft_placement": true, + "device_count": { + "CPU": 1 + }, + "gpu_options": { + "allow_growth": true + }, + "inter_op_parallelism_threads": 2, + "intra_op_parallelism_threads": 2, + "log_device_placement": false + }, + "timesteps_per_iteration": 0, + "train_batch_size": 1000, + "use_critic": true, + "use_exec_api": false, + "use_gae": true, + "use_pytorch": false, + "vf_clip_param": 10.0, + "vf_loss_coeff": 1.0, + "vf_share_layers": false +} \ No newline at end of file diff --git a/tests/data/rllib_data/multi_agent/params.pkl b/tests/data/rllib_data/multi_agent/params.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d1260bcbb6d6d55e75c505cf2ad3232855d34219 GIT binary patch literal 32787 zcmeIb2Ut{B*DyMb-Vr+@Vgt)iMI`pYIzS9)?2f|}W)8#5IEA8Ev7nw)Wd+oJX-SBnpvUIaFrq{tBbAxx0E5n`mWT~9kxZ-9Lz3P^yJK&eS|fDL zqWujzu~Kc{O3FPh?Si#cCLF7`M5E3T$(1St4mOqcBi~|)L75{qC^c#m&C|(LMicEX zm#Ae1kwcC_tX7z4A0|hkluBi4y=kB+To@`0mC@dEolGWDX);B6gVZ#U_ET%1Behs1 zQYqCku?~}RlX#?QYb=m)?*K*te9MxNmkQKuF~X+v|^Z~-qdGj4Hj#dXDIxH 
zLAkKRQ09Xqh^0~`4i;8}DejQX6p;X=0cD|#As@1%_)#sIk>)TPf5M<^10b5MM#ARL11S@tuK=b#af2Gl66>qlo zAl@>vaIllfo;42_Ns34f#1v|UA=%+0hvR1(RR$%&OujNEI*Oly-|5JOG8Kk2jaVAS z%r;Dxrxh8*nONIaN2Gvau<0vUSP~ zBN9LCT&z-=dqc_;rV^{m7wQHC7Dke9w@}%z#WKYRGW#xozZ;A?;LuFtKr!vA=oLn} zTvgHX=(S=AGu|+xS}E7)vh9bI7lpkzz#cd;r2P|297t?fhQ_FtigiF#k$*Ch1~dYs z*fvH2f=q=*ZxAuKuL53$T>-v2l_G$GoM`8CBEmpMOgYjYM&uw|1rkqBm?e1w_zyv*e#%?6lsYmnT~`Cfe^>9tV#1U!^}HE6jfP^APGQA3Gs2x3u);$ z%(6?B(rn8H6ERS#yj7sI`W`Y}SB+5ztw*r@rM*8$=KtzUL<57X$gLCP~IU?tT?mCCmB z!VEIdG8mqLIi$9^W*86|RAQhKL*LEcdcGMY`CS=eooX(}G>a?+s@T>TghZ7_C(6iY zNDAaRvFp(%3QjZdJ1%+hUNXe}N0jE_1 z{jY%IYxA@SO*>*4rh^SH!%gW7Mjh%kAS5Z(@LMgh5J|g1>cu%Qz{=|D8JgrYLcIbs z3Y}Q0G*+}KdYr8-JH}2{MMNN=grP?9&AyHx7 zl2Qb=BDTR>3OKfHNevUj0HXPo(aH?SEvIf+>#8+MJuX@lKi0a7Et$g|QU*GXCrl#N ziR5TY$OSB_OrOK1ED9S;`%}e8(Mr`5%b2j+&ZO5AG$+8w5o(nYyGr#LYjetMpi!{5 z298J%x!D?~}dY$uW^P^tgNjgqd+M!82oN`HFGd>V=EJFUbdf$;e^D2z=q z1G8didU8xubc@Q6R3+(vN_nyvNp%>k)+Y+;^+55JIYjoUl*Owg0*wZ=0zhnY$xpQ@ zl*zPdX8!T%-jg+&EHP^L$uc<^E0C4S$J)cFSD*za?4Qaon(7y$g#9a4m6rTJ*>?Zs9k@ugpD@tunQajB zU*7Rw-T^Go$~%$;z&m0qr;PtQq~1zJ|Cua06Ho{0hA3`FaV!{5EM697|Nj+Xv{!5y zDPf=|xJS6)-7?X&!8ya1fTK*RRZ6l{GW&NE9VE&|wM(A`N`(OhdzDzPH_bGKEBLgJ zD8mSzEwqM%kpdEF4*_QP#IJFtH1KK(h<~R?k1u0jx}x zo2Af9s14%_l|;*9CGwaUX;eZ&TujS^gcwPzEIwWqpV6{KRLg`6Q>p=b3H`(jL~=3s zkZ3<<;Na~t5RVlMR?!w6beYL#eV!w2ZkJ4~{FZP8p(pJF^w*XRBMY#7*$5F`%`Vk}B(dlqvojq_h~(hiK{SD0y%83& zJefo!)+(W4Z!`SG`h2wnz5~IDiz6f!A11~D&?HsUnS?ullOD?{bbuY;#I(df6w!VL zh)MvDJbgK2`q z3D{~B8R@Ewm%4(9C>9Cb6^n)LP)4H3hMl9=qp#ZnCd9ewXo!_b3=jli0Qb9|*y%vo zF$p)*wJd4+Y>mc%UN*~a0U$`}K%FdG1Fl>K-$>94G+4u&iY`MYd?i<`gyu_^qn366ZyRM?ipb#!VDPgnP(gT=R`v#$ONCl;xinu7% zT7WC<#vm?8AO0l8e!oCG4b4$PU=KGJo-#Vno`8_jxMo$%-bIjjp}kZ9BZ)$Wl9ee{F#rfC#3HFQup2D>So-#XU8vJ&!EwqAfEf~t1r*CZ z!>&2nldvyh#exB!i6uXz2m6fn4a~E{_#=p00N+2r$^)j#1PnmcvB+hEhSC0D^~P{8 zTlCOGBkcyf1OT6JQbdK*Tycg3REMp-w&Cfj!IVJ)b*2@ztj%o$=m1d!FT z36xA*d>|R-R_UN*h|)sGL>ky-(k;LO!89sdaZ_Tdak}#sWqJ!cE zNC64~?Pu!^%seXG9U_Rnl7JWvv_~WoXXugF8h{=uG*WOXo5H7qFahZj5v~=)!5N1` z-{aGwm;uOTtt4^LlBk$ic^v#Cv}zd>n-CWp-$I?CHDh@m}`36Zesa^aBXE&$7(Ek)=m zWC`7b0m3Q*U6GqE(7-BkD`d&p1>7m@;oKswu0avEh*iKY;1qBR0qhfa6I>>+)Y$+?tT^FW=W<)HtCp6C~gnuHYk<@V}x+(}c zD14Zs4&^e42?eq?4mg9WWwUw!-3Eg`NDU}iFyV>jPgk?H2aHA?*$Y^`K2n?|)+;~? 
zHbw$Pi4?2FP(&(@jl>w9NQqL1K~|BV0!M1|0ln3+{Fae=Af3&j{VWhT^^ve%kwi(3 zWCjpv-GkQk;%oCko6e8{JH;?ipsZx^a&^ENkO=1qJXstb+n4RmcIN=+b1rEDsj`A; zBS%3YKqWU#FhvmkIu+G?sf`TqTNCj|-Z^0c~rgMiP%h1AUcT~dlq$?jn zoUPwfD_BxsfoOC-a!2kn@qM-z8oD)pp z)EDrZior)LKH~7v2_K!|@rm^@n5cLxVcT>mz07bYUj&>~B95mEmJCDyI9VyE4w#*U*=_`n0Kim7OzVn|_V9qg;ylLz z1k#91;w1t(o!~0kP5hn73#b7iVkd!IssYr*ZzAWwH%{A_ni{TX2i6y|&=@iW%n9~l znu!uYw(U&;oC=IK5GZHfWoR2Rm$Kz$BYQ)$lzm=pkT^g92j(GGOEKU;i702@+;&B6 zol)V?cet6Vuz;$tglwTe;A>z}K&mJ%{Ko^r0~J6ZfH{LTh-^|5?aJ7)K#C;1*EgwC z=MKG+dx`{KrgrYu1;7?T%gWJ)B#p#~I*2|pO{ND~K%%g|h*C6wnzD3=_y&ZI6m&}y zBzH=WM1IdaY)n7W#`FnEaBt0scWq56&DjE$DS~Wb;CNI&TnIlWnZZO2z~oPf64XE_ zK@EimEE~i4+R=elP0`+{MioM-;fNxnS!x)z?JW`wF^W|fCx!Sx()@tMtJNAH1E2;X zdj}!HS0u{HH6te`fd&dP0b^1l-wFXW8X5$)O;-n^V>MZd^u`Rvibvw(!8GXsDIg|5 z6a*jz?P0}F20^CbsO*r|ym$n8NM4Y&g2hOQ%WBInZ^VRrzNL(ehV7B5(mfKmBFRnTD@+L(JaXXc&`Y@XSU5eyx+S||vK2b~jCXrPN^ zc^R?~Y#mHAjP(RS!I0p}#vz;~aA&#jfPuQQ0XywS3nX$J!yT!Vqn$$b6{odxgtcZ3`-f2XE9M7HUaJdz9)D5UYd!uwj@+tt_T_|C} zJ71#tT1%Tz&K`!Y9Ea`x1}l^8{#jtOwOqk|{|7MoGV26#sVW~-?8;%u0*o_5kHC^$ zToEjNVIqKhD3AaweQ{(Ai_a0m5hn{&ojVn-wStxoWQJm$6T+6jy8_H)C@g~79+)AH zzzl*<2ADCQgOkqiD8Fz20iwZ-PYDBpRbQY#0#W#Iy}pV`G} zvrt!MX6Bp@T$0%oww{D#WJ6G5=+J2o3@1rwIABEIx$teLcFe#b5;>$^rEs)O=wHy5 zsn{^lxk!8n6LMm{&Sq1*b7DGmxjReI2!)hGtg%m1z!oFm!oa_SS z)nTH+E0P%Pe;q*6qWv@LxW#cDZR%rF^Exv9+jfIxK=#(ciiGmi)Cr(IDw4|UjtPoZ z7TwW>v35Bu4vOV2V34lTF$ktvm z#}onQ7O}xJ?1H879k_f~hPOjqu4WKQOHy`CmUfVoV*n#imMhq|-E;*7Tzx^28(;yn zK(V1V^dWLl*D7EYyIRsnU2aWQ3CWXU&u--DUc`lWj{>f))Zkg{CFPaiNFj~Fap0KT z)UAtLbr*|V?b6)U;Rf#lw$!C2ursMEyt&o{Sp+QZJcAFG1186tg0pZhVogIG4thiw zm~G}xUVYj(EjhVcM^UFfodlu|J-V3`Y&x%}LKX_a3o;$JiPZ+NDijamlT)2CDP+hb zG!|`q(Yom)Zwg03uo} zWi2L3uq)tpflx7GgB1=CbY`)^j6if^S7ML%#BccL$@b)eR0ndMC)j#@!O#HzK{+1V zmFo{reOB1O3n9NKnMri0psANu~7{2 zX1|l8U}@}ja&7ozc5=k$>|`>FilmDQB@4X9uYOu@_MpPcHg{uYL$diEckf(_X!CBTNmF(A-ioo=y zB;6*%cY|-cwdsguhzuY5*=u9B$12o}Dj>Mbgn7`JMGEv6nIUNoZj?Ei02pgF2NnvX zAdm*_B_Y7K2#cM|3?%2agqs9BG-ShuamvDdU{Q0JePH8m_Bx};5l+4S9h{sx{cLPD zGXZd(0o@^z6uR9i!r*j4Jfp^x0L~&2F6Nd1wo2Kpk*M=@G5Dlhid{0p9AudTqDKR<%Tx$h*m0Rv}< za|q4gf#R5GQ5;KycjfRcg0}58ej#>6HvZ?q*RI3Q-Sl?wMTM{z zPP0O@1{;K%ifw)L?T#405-JWwbPUzV8Fj>rzoBTQffQLN91E5x?r)Q}7 zr-Lq&@zy6=lQ73xG&CGTLn0DGLA5b=pt0tLiq+4(bbm3Oy5K_jdDsJ z#KfY~j)+{5n8mAOg?#`&@Inf~g{imN3sf2;06|j(m z4rgZqF{N0@pv-Sv>l@Gd=3;$wwZ6Go-`rbUtR&PS)JZ+WIdHooUM2)P%la-7=YZhu ziQmlS0AO;(OF=|%x1440|1e%imPZ2T^o&IiV}z(f(2ww`z@LRKCAXKk zNaaw^Rp>Nt|;ZkGolnO!XpyBupRgFEQbo{$4pe$ta?0jMKb z3&cGhv=oW<67&vka{j9>}hPhFRAH6YC;N%GtiPR zej9v2+U7|8r2gMPU2{$V$?>zz8%JW!3nY2&#s1hknQaxEQvrt=Z;7pBP`Q#-NlC7K zNwc++Y$ihY;IM{29N7X!Lax*#x7C@qGP&hb?dg-#R-GTPOPMWX~N z7&SgeOPRJQU!K#l1UfW)7#ltc58D|b9$x5*OzIfs9e3{89d=J=(sNtv znb|FjLYd{N4bK*^oCdo{%f#~63mC0BUz+w!bA8LqT@ka@dt6m75pW& zT!LL{v1@Tc;M<=a0;;P&yB0ebs3jN)IQRscuc!;$BBSj;lyRhRC=b>FRRw4UZ6#Z!N+Nu~jZo8qFVZOuUkCmi}I0QLwuAbMMKc$h;{K@7L1g8R~(3WB&b)yh zD4;X+kI*WzVUGaVK`FHg=HxgiuvilzWsE$I_cDWL6Yn}^NEsp}fmRCuDTCIQ3&$;{ zDhIr^b!tJ+zECkGlr0Ff;3wi}HS(XuP{p8`ouNwRh%nd$gD~Nrg)q43{)J%6Ot3d} zVzHc-J3U_x2N-4&Y{3CoL9}yoq};Kva$t`lt{7Sh$Jljf?~sKekqFfzHZmp_@F6pS z*3H2BAq|C-HE>QD{K=u4=TPb~!m8q$;zq#$IA$6C_11 z7HEKQq6~(G(cpIckPRF$U4Wh@kjt4!aX7z<&;wv8e*!t_-)O zQ$D#gWQ69s2X*mKgp50ni&X-*9C3iy?Zsh*gn5;=urI9>5p> zg8|^FK&lK40KS$^8MiqKpG5D}LWdSvBO5z!3X&QNoJznP&GJS$G^U}P9o@rfpF{WT zVf9HorZm{1gQu%4fx$pf{eBRprCB1V(blrBka)Enr;dCH` z_7HiGPozTTCJ7xms^|^839KIw$Yq2RtRNzi-rVrgdd#0mY#0J|2Jj{9kc~}JH*iD_ zQ1b;GX)JMxd5cM-?9^PaPS{GEH9>W9GPwEN0*!S=PzM0A+p)&bLD*KQN z_2KVLWWb+w$o;2Qm~m!A+rz7%0(eanz_wvIb^u`t;DY>zVY<8EFs+q6)LQ(jEK*H2 
zT#?2AocZL7m|N7qXDl+8U_7vRv<-kvyQTER>)u-ufgEGQeKO$4J!mL+2MZLTYCuoK zn}?ZPP<1~sv=lAJx=2PlWT|YCI_~ozcVdj%Kl*mH>fg;%hi|?e9XRjBnA>apj?Z3r z@6r!t?&)sr+v+r$;q~uFeb?h&z_S~-w`E68JLfS{>($FUqO7>XsTW_*)od6wsAlG)U@u{8IsOJ!;&SP&yL;yX5PmW-nsz41HRn#`QJT#+$lL_ z(Dv2GsFaXp_iqk5-oMeXZ>sEk$gkGFZE@?HO|z!`79%|=a+OzG?>lwy%@3K&R!I%- zOk3Y(j;PP|+}`lv9+&CK8MD9Yc3@=Cy1uJAPWa{P@tHm!61~~q>D~k#9PH6IV0&Qy zkE1rUFWKZj<3{@?FW0rw&fmkDGDm;tx%BCi9oM%uN!?qmb?tk@!ZX}v`Pb^~e`D6{ z{NX`!UOj5{$D5=tbwp6hF82SI{9$l$6IEGC9nDQlG>157Uwll zoV<0=ySl@Y6-`Vh6xXlxtD86P&w0ifev>Xe^saxyFStwktsW~UFFSDPUPRqTMXQPj zwdlOlb?T{~*YB8|o6GTypC3|HHLd>bA+Pw&UcUHd-+M4JUz%N7`p4ogTQzav6?)&f zl;qWaXThFd-}h^HeN=vMv)}Tg3ZzR}m;0wI?AKnAK1Ff7r^}m%DYswUU z82e6i@K4?kTZ(_v9I5|o-=x|eo7c8qO+TM;K_%KbO}VVq_dVVJ+;AhiX4~SQmruV;pI>kP+U4Sy^NnsZoF%O>~Z|HejTdJjohcnZu0PA>cjdIT1aCzbPw5!!u>)5n)5WZsUD zrC*p{b5`Hw7k^mGO6w5+TjIU`My}7)1?*{8Yo7A|dBHy4!7ulAGYwt#^3Bbw9~Q5B z`t60+vmdkCO;5huvfuh3FKPCg8zm2K)GY|uxa|6^OM5h-#~yrta^=N-Ic>WASte-z zaTsT+%f+o8ET8X|{xafQ;p;}O!)lf_>p5uch39kUoxR$-^h()UY-mJDd0y|0^O z!rMDb&R?yn-9nEpK2r>Xo)UL|k& zBSTJi*(h4!a(bMo=c>P4Cgc^IQMBvVe`D&Btk%oUgqALw)1|A#eaVmF=GTMwc^>JN z*TehEEmiX$O_{e~?A%wk&Soqhd$V!gt)5MGPZaKYwR+l<6>|f_-VDFNnd_H#zi!&l zunnzOe^;wZJLzzGK#vErjvAz=CF5s2AI)#nU|>PV1=n8BZTfr4MB&(>&71#31x1V% zbeXYzaZ#Plmw%erdZ~Kw4z^cN!0L6$Q&w<}Jmp4t?5eRcYVkMx)U7kt3sy(2mtQX) zF(Z4_7uOaETrYIa)gSM=y>al2(YIEX4N7tCdTDZ-T^&A3=l?Ws!t_0N7P{n~96M!Z z--*#B&mZskVtl{0$M$!UcbTi=9`JuOzo>t=#nY;+_^|WZSK`xoN7Enf_1K%xHf!6; zdeQs1W9Bvc`|i23+XEGo(%x39J!|9WplDw6Iww5be;QNd*?QbW?ZG9JS5II6Of7vk z%Sw=JIh^my=;trte!4x3f)h3?VJZ|*c@w~Sa^Hu21&VTV?wOunIg z-8G`{%fFLqxn>SJ5IFPP-K(AVZ{%Ih|kP?!D5D(if6e?y08S+atxspZC6f{725a(#5}Z`dD@H&V&w6 zxGk?J-{o~PxNZvU&~Nw5VY!o*&6@r5ZsFx-oMA7v z+-t3RQRB?p0TEOG+U$CDTj-c!-z_eTK3(URHPJ3*N!(Lx!AI#uWBTCTZ>t_G8B;dw z)SK%k+jPJEs?+zYm-I;R*uAUvs=wO(HDpq0_2wCM^-bsYVo&&xSo*Xv`zrTdBlcfj zjsM=YtM$4x@1LXVZu9E8Uib6a^ReH)yxG0@&}r^s*4mzDrmXR6zp!><_q_*Qdj44B zMz6Gq($3fFPTt@5>yYnP{B?8H(V520?61}5rB}1k+U`CmN<6Vqb#?c5Dcq`5^ss@q zS{M7RjLe>K@lddENovo~$s@n3-@9AVtVgxKiTi_haO_R~D@C1V)An53_~WWhE^R0O ze63;1FWVcQ`*Bn0PyA#2cE8U_t8w$eY_ENiHW|gF{8Cr1rNwXCydLxCPwO83xct(NDO<*zt>5T! 
zvnAd39uW`K+*|hTg!EmDGoG@q-sn_grZ1;%?W};ihpK%a(d1)big(o+-}YN0>H4NB z@57dop4Z#^G#lpU5jobcbp3KJ|QVo8y(4=H>7**VX}w zNczLBXWjp?0>Om+bxN}p#lf$79x?p>GVwx#%=G2;^{)RU|I;7=anTNleMYQ_v>26Y)^Y(~|fuE8sH z3mdj|Yk;@ykCso)e)_%GdM#g5dNqL{oEKjwA zf;gritWX^u%|v0=I=io7>pZLIs%WB%lWZ3;}A0)mcjRqzv59N9bT$pKBs@~gZpu|Y*GdbK`$vg4dlA%fr~a;_JZ~wcO_|)h^v!t6tTjt{wJ_oN=zWP7n9P{rbJB zx6dW~#J9COkD0uCz3G5-z}=$k+s_ZT9WtAxdvt303sJ<14su0l|JpwvFF7$|e(^6{ zy-cl+t~+kk#@)xLtM!|R2M1kS*~~qp^zO#{-*0!*DZOvWZ>?(cN6GtTcfQ)b^uxRW zMYBCeUB4{)@mBn-@v=^=VRPq1?b+7(hdHehUKm!y{Tb7;_tWpbtd?!+C28J<)8$cp(-1YdatmnAOs>_2Gp9`U&+mwR7~H1xStt8vDG{JQ;D+;2H^ zRqiP9J5jz@Xq5xn*WRUf)9%)9|Mr`8kH$afd*+pFa@#$D!D$@_@xGx}O9HwNcD?X7 ztHb_2nVNM4P13hNyZCWpRer$cv}=WrUp#v;_2u#SNt^en8|6_wJF#gI9ke3tGR>a8t8n@Y`zq{T+2)@`a(} zxBT*8@TiBOQ@!#UcUsbyyZvs1drNyL`Hx$@jyX}d=I23E2Ugviv+{{_Rojs5i?;W9 z)#%vT3m;Yn&lz4f2(yFvtYt7#SbN$s@^{@XRF5ia&bbJmqEXD%ia0I(=&Ho ztl1UKQw027ZDfRI-GKEMy-NpNc-MPZqF2zFt#J$2cb(2Y@=SQG}4>T@4F9goGTd`S7vi zLi=;S6p1@^A4d1d?z89BjlULjnpdyEhW33qRV1&4(;x4WUQXN9J$mSzHHBBda_#li zwqO6Fl2Ucy_N4K9DbCSrkfvgXJW^&qwZ}R9~^zW>8dJU z?2CAJw#`{8BB$h)YIw+*^B(<0=Soj(e3bO6eo_sO zd24u6y6!r8v{&s}-`3ds=R0lh{oK?szWdIeey^8KcsHQ<=lRPD z5{mPFAJyJ;$fb13+ii^>_6)k+qm8~sv%K>;rKM3F;@DS*MJ-zYxKQDHgD6q^*M90^TI8Csw5_S#c921 z?usR$J(uqu++th1r8WES{LYUaUl@P(XvCM(woXmk+C)@1?ojpEx~~k4@<*>basGDg z0jzM*$h?T>q6KTl-Z*&2w5|25eyjHXwPfMJpZhdhab?TAzDq|>UM!kab$R=e_unpT z)8az=i#pc{E4P^zU;K0Dn6oK!$9va^)P*#8xWRpE$E3eTt{*w;RsCVtS3m1?qp;hT zNxDAXTfg;sRDZ!AJzCwn(MVI%#Vc`O>M`Ze7w58ayh_~lWp($9I;^I59eYmIazFHV z>F=c{woZ7qb;Yi?n@?0-9@po->OzR(;hfRC8lL@XS;-mCt4%V0Saa>y;?vz8K6!Dq zn@@VikTZX*nX9buM`z`fh{hBCV+q|x4@0vAwKmWM<5Id>H zsjTZAr$-NT*H%3nwea9=_oYdTVrKoEIr9FYUA-T8?ct6{?EY+Wx9@*FJq0e-{By%i z&L1YXeNPIr_DgrreYCHtbql_?RuvPYD>B~ie{x2*L+QWn9$aO2_YEh%POjb{agY4? z!1JrRy|}#NLcfPCuk|=F(iqft*-~nLpHnF>rdlx_LFL3J|(S6|w z_a8l%KW#B|8K>vAXWUrX-Mqeor#(#CToe*<^w6`n(d?)uCG$8%;wD=sifW!%Ba)sT^m^my`d{~~eW$ZDo^Dg` z(di#2%{X1XH0Ot$d9APIFZ-ot#;@DI&zFw&Is0Mmde2Skmt>z*Cf6Tt{5AJPo8~j5 z_mXO!AANx%Nb1@q=Y7Kt)J@41)`iQx4ykW{S!eKsC5s{xL>Vo&CL{L-O{u0)^*gL_o*r(P)2|_hXOOz1YW>_(iDCG&P@q)x+Peu>UK( zw!50)Y#p3 Date: Wed, 22 Jul 2020 11:04:03 -0700 Subject: [PATCH 367/438] Fixed import issue --- examples/exp_configs/rl/multiagent/multiagent_merge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/exp_configs/rl/multiagent/multiagent_merge.py b/examples/exp_configs/rl/multiagent/multiagent_merge.py index bfc9fb3b7..312d3a0dd 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_merge.py +++ b/examples/exp_configs/rl/multiagent/multiagent_merge.py @@ -3,7 +3,7 @@ Trains a a small percentage of rl vehicles to dissipate shockwaves caused by on-ramp merge to a single lane open highway network. 
""" -from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy +from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy from ray.tune.registry import register_env from flow.core.params import SumoParams, EnvParams, InitialConfig From 04e9d968791b4cc2f31f4fc669b1486e5faf33c4 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 22 Jul 2020 11:26:06 -0700 Subject: [PATCH 368/438] Fix more import statements --- examples/exp_configs/rl/multiagent/lord_of_the_rings.py | 2 +- examples/exp_configs/rl/multiagent/multiagent_highway.py | 2 +- examples/exp_configs/rl/multiagent/multiagent_ring.py | 2 +- .../exp_configs/rl/multiagent/multiagent_traffic_light_grid.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/exp_configs/rl/multiagent/lord_of_the_rings.py b/examples/exp_configs/rl/multiagent/lord_of_the_rings.py index e7688c87d..866d915ce 100644 --- a/examples/exp_configs/rl/multiagent/lord_of_the_rings.py +++ b/examples/exp_configs/rl/multiagent/lord_of_the_rings.py @@ -3,7 +3,7 @@ Creates a set of stabilizing the ring experiments to test if more agents -> fewer needed batches """ -from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy +from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy from flow.controllers import ContinuousRouter from flow.controllers import IDMController from flow.controllers import RLController diff --git a/examples/exp_configs/rl/multiagent/multiagent_highway.py b/examples/exp_configs/rl/multiagent/multiagent_highway.py index cec0b3fba..353eccb2a 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_highway.py +++ b/examples/exp_configs/rl/multiagent/multiagent_highway.py @@ -3,7 +3,7 @@ Trains a non-constant number of agents, all sharing the same policy, on the highway with ramps network. """ -from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy +from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy from flow.controllers import RLController from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ VehicleParams, SumoParams, \ diff --git a/examples/exp_configs/rl/multiagent/multiagent_ring.py b/examples/exp_configs/rl/multiagent/multiagent_ring.py index a789174f4..c2fc52f14 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_ring.py +++ b/examples/exp_configs/rl/multiagent/multiagent_ring.py @@ -3,7 +3,7 @@ Trains a number of autonomous vehicles to stabilize the flow of 22 vehicles in a variable length ring road. 
""" -from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy +from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy from ray.tune.registry import register_env from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams diff --git a/examples/exp_configs/rl/multiagent/multiagent_traffic_light_grid.py b/examples/exp_configs/rl/multiagent/multiagent_traffic_light_grid.py index b8293f638..308dfa0d7 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_traffic_light_grid.py +++ b/examples/exp_configs/rl/multiagent/multiagent_traffic_light_grid.py @@ -1,6 +1,6 @@ """Multi-agent traffic light example (single shared policy).""" -from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy +from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy from flow.envs.multiagent import MultiTrafficLightGridPOEnv from flow.networks import TrafficLightGridNetwork from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams From 0581e48ccd3f2c16a1d345ae30cd5c326c30b188 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 22 Jul 2020 11:54:19 -0700 Subject: [PATCH 369/438] Fix import --- tests/fast_tests/test_examples.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index fbd78294d..6952b2aa9 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -391,7 +391,7 @@ def test_multiagent_i210(self): from examples.exp_configs.rl.multiagent.multiagent_i210 import POLICIES_TO_TRAIN as mi210pr from examples.exp_configs.rl.multiagent.multiagent_i210 import policy_mapping_fn as mi210mf - from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy + from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy from ray.tune.registry import register_env from flow.utils.registry import make_create_env # test observation space 1 From 0f288a959e8e0273aa241df66a942de36d5780c6 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 22 Jul 2020 12:36:25 -0700 Subject: [PATCH 370/438] Remove print statement --- flow/utils/rllib.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py index 9be55bbc0..fc3229e52 100644 --- a/flow/utils/rllib.py +++ b/flow/utils/rllib.py @@ -228,6 +228,5 @@ def get_rllib_pkl(path): "Could not find params.pkl in either the checkpoint dir or " "its parent directory.") with open(config_path, 'rb') as f: - print(f) config = cloudpickle.load(f) return config From 176debe95b0019dc75789d5263d237172d3e4749 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 22 Jul 2020 22:10:10 -0700 Subject: [PATCH 371/438] fix some issue with datapipeline utils --- examples/exp_configs/non_rl/i210_subnetwork.py | 4 ++-- flow/core/experiment.py | 8 +++++--- flow/data_pipeline/data_pipeline.py | 14 +++++++++----- flow/data_pipeline/lambda_function.py | 12 ++++++++---- 4 files changed, 24 insertions(+), 14 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 0c66f42e7..4d46c37c2 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -35,9 +35,9 @@ # the speed of inflowing vehicles from the main edge (in m/s) INFLOW_SPEED = 25.5 # fraction of vehicles that are follower-stoppers. 
From 0f288a959e8e0273aa241df66a942de36d5780c6 Mon Sep 17 00:00:00 2001
From: akashvelu
Date: Wed, 22 Jul 2020 12:36:25 -0700
Subject: [PATCH 370/438] Remove print statement

---
 flow/utils/rllib.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py
index 9be55bbc0..fc3229e52 100644
--- a/flow/utils/rllib.py
+++ b/flow/utils/rllib.py
@@ -228,6 +228,5 @@ def get_rllib_pkl(path):
             "Could not find params.pkl in either the checkpoint dir or "
             "its parent directory.")
     with open(config_path, 'rb') as f:
-        print(f)
         config = cloudpickle.load(f)
     return config

From 176debe95b0019dc75789d5263d237172d3e4749 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Wed, 22 Jul 2020 22:10:10 -0700
Subject: [PATCH 371/438] fix some issue with datapipeline utils

---
 examples/exp_configs/non_rl/i210_subnetwork.py |  4 ++--
 flow/core/experiment.py                        |  8 +++++---
 flow/data_pipeline/data_pipeline.py            | 14 +++++++++-----
 flow/data_pipeline/lambda_function.py          | 12 ++++++++----
 4 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py
index 0c66f42e7..4d46c37c2 100644
--- a/examples/exp_configs/non_rl/i210_subnetwork.py
+++ b/examples/exp_configs/non_rl/i210_subnetwork.py
@@ -35,9 +35,9 @@
 # the speed of inflowing vehicles from the main edge (in m/s)
 INFLOW_SPEED = 25.5
 # fraction of vehicles that are follower-stoppers. 0.10 corresponds to 10%
-PENETRATION_RATE = 0.0
+PENETRATION_RATE = 0.10
 # desired speed of the follower stopper vehicles
-V_DES = 5.0
+V_DES = 14.0
 # horizon over which to run the env
 HORIZON = 1500
 # steps to run before follower-stopper is allowed to take control
diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index 38599b002..c3089dc65 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -99,7 +99,8 @@ def run(self,
             convert_to_csv=False,
             to_aws=None,
             only_query="",
-            is_baseline=False):
+            is_baseline=False,
+            supplied_metadata=None):
         """Run the given network for a set number of runs.

         Parameters
         ----------
@@ -122,6 +123,8 @@
             then it implies no queries should be run on this.
         is_baseline: bool
             Specifies whether this is a baseline run.
+        supplied_metadata: dict
+            metadata provided by the caller

         Returns
         -------
@@ -244,8 +247,8 @@ def rl_actions(*_):
         self.env.terminate()

         if to_aws:
-            generate_trajectory_table(emission_files, trajectory_table_path, source_id)
             write_dict_to_csv(metadata_table_path, metadata, True)
+            generate_trajectory_table(emission_files, trajectory_table_path, source_id)
             tsd_main(
                 trajectory_table_path,
                 {
@@ -277,6 +280,5 @@ def rl_actions(*_):
                 trajectory_table_path.replace('csv', 'png')
             )
             os.remove(trajectory_table_path)
-            os.remove(metadata_table_path)

         return info_dict
diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py
index 71f72890f..c04c50e92 100644
--- a/flow/data_pipeline/data_pipeline.py
+++ b/flow/data_pipeline/data_pipeline.py
@@ -25,11 +25,15 @@ def generate_trajectory_table(emission_files, trajectory_table_path, source_id):
         a unique id for the simulation that generates these emissions
     """
     for i in range(len(emission_files)):
-        emission_output = pd.read_csv(emission_files[i])
-        emission_output['source_id'] = source_id
-        emission_output['run_id'] = "run_{}".format(i)
-        # add header row to the file only at the first run (when i==0)
-        emission_output.to_csv(trajectory_table_path, mode='a+', index=False, header=(i == 0))
+        # 1000000 rows are approximately 260 MB, which is an appropriate size to load into memory at once
+        emission_output = pd.read_csv(emission_files[i], iterator=True, chunksize=1000000)
+        chunk_count = 0
+        for chunk in emission_output:
+            chunk['source_id'] = source_id
+            chunk['run_id'] = "run_{}".format(i)
+            # add header row to the file only at the first run (when i==0) and the first chunk (chunk_count==0)
+            chunk.to_csv(trajectory_table_path, mode='a+', index=False, header=(chunk_count == 0) and (i == 0))
+            chunk_count += 1


 def write_dict_to_csv(data_path, extra_info, include_header=False):
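The data_pipeline.py rewrite above is the substantive fix in this patch: passing chunksize (iterator=True is implied by it) makes pandas return a TextFileReader that yields DataFrames of at most one million rows, so each block is tagged and appended to the output CSV before the next block is read, bounding peak memory. A self-contained sketch of the same pattern, with hypothetical file names and columns rather than code from the repo:

    # Sketch of the chunked-append pattern used in generate_trajectory_table.
    import pandas as pd

    def append_in_chunks(src_path, dst_path, run_id, chunksize=1000000):
        """Stream src_path into dst_path one chunk at a time."""
        for count, chunk in enumerate(pd.read_csv(src_path, chunksize=chunksize)):
            chunk['run_id'] = run_id
            # write the header only once, with the very first chunk
            chunk.to_csv(dst_path, mode='a', index=False, header=(count == 0))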
diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py
index 0fe01d583..673b363a7 100644
--- a/flow/data_pipeline/lambda_function.py
+++ b/flow/data_pipeline/lambda_function.py
@@ -8,7 +8,7 @@

 s3 = boto3.client('s3')
 queryEngine = AthenaQuery()
-
+sqs = boto3.client('sqs')

 def lambda_handler(event, context):
     """Handle S3 put event on AWS Lambda."""
@@ -24,7 +24,7 @@ def lambda_handler(event, context):
             s3_event = json.loads(event_record['body'])
             event_records.extend(s3_event['Records'])
     # do a pre-sweep to handle tasks other than initializing a query
-    for record in event['Records']:
+    for record in event_records:
         bucket = record['s3']['bucket']['name']
         key = unquote_plus(record['s3']['object']['key'])
         table = key.split('/')[0]
@@ -80,5 +80,9 @@
                                                           query_date,
                                                           source_id,
                                                           readied_query_name)
-        queryEngine.run_query(readied_query_name, result_location, query_date, partition, loc_filter=loc_filter,
-                              start_filter=start_filter, stop_filter=stop_filter)
+        message_body = (readied_query_name, result_location, query_date, partition, loc_filter, start_filter,
+                        stop_filter)
+        message_body = json.dumps(message_body)
+        response = sqs.send_message(
+            QueueUrl="https://sqs.us-west-2.amazonaws.com/409746595792/RunQueryRequests",
+            MessageBody=message_body)
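The Lambda change is architectural: instead of running the Athena query inline (and holding the Lambda until it finishes), the handler now serializes the run_query arguments as a JSON array and enqueues them on the RunQueryRequests SQS queue. Some separate worker has to drain that queue; that worker is not part of this patch, but a hedged sketch of what it might look like:

    # Sketch of a consumer for the RunQueryRequests queue. The polling loop
    # and the query_engine object are assumptions; only the queue URL and the
    # seven-field message layout come from the patch above.
    import json
    import boto3

    QUEUE_URL = "https://sqs.us-west-2.amazonaws.com/409746595792/RunQueryRequests"
    sqs = boto3.client('sqs')

    def drain_once(query_engine):
        resp = sqs.receive_message(QueueUrl=QUEUE_URL,
                                   MaxNumberOfMessages=10,
                                   WaitTimeSeconds=20)
        for msg in resp.get('Messages', []):
            (query_name, result_location, query_date, partition,
             loc_filter, start_filter, stop_filter) = json.loads(msg['Body'])
            query_engine.run_query(query_name, result_location, query_date,
                                   partition, loc_filter=loc_filter,
                                   start_filter=start_filter,
                                   stop_filter=stop_filter)
            # delete only after the query has been handed off successfully
            sqs.delete_message(QueueUrl=QUEUE_URL,
                               ReceiptHandle=msg['ReceiptHandle'])

Note that json.dumps on a tuple produces a JSON array, so json.loads on the consumer side returns a list, which unpacks cleanly into the seven arguments.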
From c7983d7b30f4c9b81ad5c4a82baa71f44b002a61 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Thu, 23 Jul 2020 13:13:37 -0700
Subject: [PATCH 372/438] v_Des change

---
 examples/exp_configs/non_rl/i210_subnetwork.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py
index 4d46c37c2..a01a266f2 100644
--- a/examples/exp_configs/non_rl/i210_subnetwork.py
+++ b/examples/exp_configs/non_rl/i210_subnetwork.py
@@ -35,9 +35,9 @@
 # the speed of inflowing vehicles from the main edge (in m/s)
 INFLOW_SPEED = 25.5
 # fraction of vehicles that are follower-stoppers. 0.10 corresponds to 10%
-PENETRATION_RATE = 0.10
+PENETRATION_RATE = 0.15
 # desired speed of the follower stopper vehicles
-V_DES = 14.0
+V_DES = 5.0
 # horizon over which to run the env
 HORIZON = 1500
 # steps to run before follower-stopper is allowed to take control

From a4685bd330fa4f327c945c9e1ce03472695f0ff8 Mon Sep 17 00:00:00 2001
From: Brent Zhao
Date: Thu, 23 Jul 2020 13:17:49 -0700
Subject: [PATCH 373/438] fix leaderboard chart agg

---
 flow/data_pipeline/query.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index f444c1154..ae23d3ce1 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -766,12 +766,12 @@ class QueryStrings(Enum):
             is_baseline,
             energy_model_id,
             efficiency_miles_per_gallon,
-            ROUND(efficiency_miles_per_gallon, 1) ||
-                ' (' || CASE(WHEN SIGN(fuel_improvement) = 1 THEN '+' END) ||
-                ROUND(fuel_improvement, 1) || ')' AS efficiency,
-            ROUND(throughput_per_hour, 1) ||
-                ' (' || CASE(WHEN SIGN(throughput_improvement) = 1 THEN '+' END) ||
-                ROUND(throughput_improvement, 1) || ')' AS inflow,
+            CAST (ROUND(efficiency_miles_per_gallon, 1) AS VARCHAR) ||
+                ' (' || (CASE WHEN SIGN(fuel_improvement) = 1 THEN '+' ELSE '' END) ||
+                CAST (ROUND(fuel_improvement, 1) AS VARCHAR) || ')' AS efficiency,
+            CAST (ROUND(throughput_per_hour, 1) AS VARCHAR) ||
+                ' (' || (CASE WHEN SIGN(throughput_improvement) = 1 THEN '+' ELSE '' END) ||
+                CAST (ROUND(throughput_improvement, 1) AS VARCHAR) || ')' AS inflow,
             ROUND(safety_rate, 1) AS safety_rate,
             ROUND(safety_value_max, 1) AS safety_value_max
         FROM joined_cols

From 5e23ec625d3dbdf46a00c5feb3cf530fd86af1a3 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Thu, 23 Jul 2020 14:41:36 -0700
Subject: [PATCH 374/438] add % signs to leaderboard output

---
 flow/data_pipeline/query.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py
index ae23d3ce1..f27db5d8d 100644
--- a/flow/data_pipeline/query.py
+++ b/flow/data_pipeline/query.py
@@ -768,10 +768,10 @@ class QueryStrings(Enum):
             efficiency_miles_per_gallon,
             CAST (ROUND(efficiency_miles_per_gallon, 1) AS VARCHAR) ||
                 ' (' || (CASE WHEN SIGN(fuel_improvement) = 1 THEN '+' ELSE '' END) ||
-                CAST (ROUND(fuel_improvement, 1) AS VARCHAR) || ')' AS efficiency,
+                CAST (ROUND(fuel_improvement, 1) AS VARCHAR) || '%)' AS efficiency,
             CAST (ROUND(throughput_per_hour, 1) AS VARCHAR) ||
                 ' (' || (CASE WHEN SIGN(throughput_improvement) = 1 THEN '+' ELSE '' END) ||
-                CAST (ROUND(throughput_improvement, 1) AS VARCHAR) || ')' AS inflow,
+                CAST (ROUND(throughput_improvement, 1) AS VARCHAR) || '%)' AS inflow,
             ROUND(safety_rate, 1) AS safety_rate,
             ROUND(safety_value_max, 1) AS safety_value_max
         FROM joined_cols

From e053e4c14c6f7b6c8ad190562a4ac4a1d50afa29 Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Thu, 23 Jul 2020 17:04:54 -0700
Subject: [PATCH 375/438] Add energy model documentation

---
 docs/Prius_EnergyModel.pdf  | Bin 0 -> 72195 bytes
 docs/Tacoma_EnergyModel.pdf | Bin 0 -> 78452 bytes
 2 files changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 docs/Prius_EnergyModel.pdf
 create mode 100644 docs/Tacoma_EnergyModel.pdf

diff --git a/docs/Prius_EnergyModel.pdf b/docs/Prius_EnergyModel.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..4884fb5cc2ac651906b3572ba81a3ab1f7030834
GIT binary patch
literal 72195
[GIT binary patch data for docs/Prius_EnergyModel.pdf and docs/Tacoma_EnergyModel.pdf omitted]
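Stepping back to the two leaderboard patches (373 and 374): the Presto expression builds display strings like 52.3 (+4.1%), where CAST ... AS VARCHAR makes the rounded number concatenable, the CASE prepends '+' only when SIGN(...) = 1 (negative deltas already carry their own minus sign), and patch 374 closes the parenthetical with '%'. The same formatting in Python, as a quick cross-check rather than code from the repo:

    # Sketch: Python equivalent of the leaderboard string-building.
    def format_metric(value, improvement):
        """Render e.g. '52.3 (+4.1%)'; '+' appears only for positive deltas."""
        sign = '+' if improvement > 0 else ''
        return "{:.1f} ({}{:.1f}%)".format(value, sign, improvement)

    assert format_metric(52.34, 4.06) == "52.3 (+4.1%)"
    assert format_metric(52.34, -4.06) == "52.3 (-4.1%)"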