diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 0000000..f53e4b7 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,32 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Python Debugger: gbnf decoding", + "type": "debugpy", + "request": "launch", + "program": "src/scenicNL/main.py", + "console": "integratedTerminal", + "args": [ + "--query_path", + "Scenic-CA-AV-Crash/crash_reports/uncategorized/", + "--llm_prompt_type", + "comp_gbnf", + "--count", + "1", + "-m", + "local", + "--verbose", + "--ignore-cache", + ], + "env": { + "PINECONE_API_KEY":"${env:PINECONE_API_KEY}", + "PINECONE_ENVIRONMENT":"gcp-starter", + "PINECONE_INDEX":"scenic-programs", + } + } + ] +} \ No newline at end of file diff --git a/setup.py b/setup.py index 52753ec..c72235d 100644 --- a/setup.py +++ b/setup.py @@ -6,11 +6,10 @@ packages=find_packages(where="src"), package_dir={"": "src"}, install_requires=[ - "anthropic", + "anthropic==0.19.1", "bs4", - "carla==0.9.15", "scenic==3.0.0b2", - "openai>=0.28,<=0.28.1", + "openai==1.13.3", "pdf2image", "pyocr", "SQLAlchemy", diff --git a/src/scenicNL/adapters/anthropic_adapter.py b/src/scenicNL/adapters/anthropic_adapter.py index dce8947..7743215 100644 --- a/src/scenicNL/adapters/anthropic_adapter.py +++ b/src/scenicNL/adapters/anthropic_adapter.py @@ -1,6 +1,7 @@ from enum import Enum import json -from typing import cast +import os +from typing import List, cast from anthropic import Anthropic, AI_PROMPT, HUMAN_PROMPT import httpx @@ -12,16 +13,22 @@ class AnthropicModel(Enum): CLAUDE_INSTANT = "claude-instant-1.2" CLAUDE_2 = "claude-2.0" + CLAUDE_3_STRONG = "claude-3-opus-20240229" + CLAUDE_3_MEDIUM = "claude-3-sonnet-20240229" class AnthropicAdapter(ModelAdapter): """ This class serves as a 
wrapper for the Anthropic API. """ - def __init__(self, model : AnthropicModel): + def __init__(self, model : AnthropicModel, use_index : bool = True): super().__init__() self._model = model - self.index = VectorDB(index_name='scenic-programs') + if use_index: + self.index = VectorDB(index_name='scenic-programs') + else: + self.index = None + self.client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY")) def get_cache_key( self, @@ -288,6 +295,19 @@ def _format_message( raise NotImplementedError(f"Prompt type {prompt_type} was not formatted for Anthropic model {self._model.value}") return msg + + def predict( + self, + messages: List[dict], + ) -> str: + claude_response = self.client.messages.create( + messages=messages, + model=self._model.value, + max_tokens=3600 # Claude's default max tokens + ) + + return claude_response.content[0].text + def _predict( self, diff --git a/src/scenicNL/adapters/lmql_adapter.py b/src/scenicNL/adapters/lmql_adapter.py index 4e0828f..791114f 100644 --- a/src/scenicNL/adapters/lmql_adapter.py +++ b/src/scenicNL/adapters/lmql_adapter.py @@ -2,7 +2,7 @@ from scenicNL.adapters.api_adapter import Scenic3 import os -import openai +from openai import OpenAI import json from enum import Enum from typing import Dict @@ -22,9 +22,12 @@ class LMQLAdapter(ModelAdapter): """ def __init__(self, model: LMQLModel): super().__init__() - openai.api_key = os.getenv("OPENAI_API_KEY") if os.getenv("OPENAI_ORGANIZATION") and len(os.getenv("OPENAI_ORGANIZATION")) > 0: - openai.organization = os.getenv("OPENAI_ORGANIZATION") + # TODO: The 'openai.organization' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 
'OpenAI(organization=os.getenv("OPENAI_ORGANIZATION"))' + organization = os.getenv("OPENAI_ORGANIZATION") + self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"), organization=organization) + else: + self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) self.PROMPT_PATH = os.path.join(os.curdir, 'src', 'scenicNL', 'adapters', 'prompts') self._model = model diff --git a/src/scenicNL/adapters/local_adapter.py b/src/scenicNL/adapters/local_adapter.py index 39e54e1..4af6425 100644 --- a/src/scenicNL/adapters/local_adapter.py +++ b/src/scenicNL/adapters/local_adapter.py @@ -3,34 +3,15 @@ import requests from scenicNL.adapters.model_adapter import ModelAdapter -from scenicNL.common import LLMPromptType, ModelInput, VectorDB +from scenicNL.common import LOCAL_MODEL_DEFAULT_PARAMS, LOCAL_MODEL_ENDPOINT, LLMPromptType, ModelInput, VectorDB +from scenicNL.constraints.gbnf_decoding import CompositionalScenic class LocalModel(Enum): - MIXTRAL = "mixtral-8x7b" + local = "local" class LocalAdapter(ModelAdapter): - ENDPOINT = "http://127.0.0.1:8080/completion" - DEFAULT_PARAMS = { - "cache_prompt": False, - "image_data": [], - "mirostat": 0, - "mirostat_eta": 0.1, - "mirostat_tau": 5, - "n_predict": -1, - "n_probs": 0, - "presence_penalty": 0, - "repeat_last_n": 241, - "repeat_penalty": 1.18, - "slot_id": 0, - "stop": ["Question:", "Answer:"], - #"stream": False, - "tfs_z": 1, - "top_k": 40, - "top_p": 0.5, - "typical_p": 1, - } def __init__(self, model : LocalModel): super().__init__() @@ -68,8 +49,8 @@ def _format_message( msg = None # TODO: Add more prompt types - if msg is None: - raise NotImplementedError(f"Prompt type {prompt_type} was not formatted for Anthropic model {self._model.value}") + # if msg is None: + # raise NotImplementedError(f"Prompt type {prompt_type} was not formatted for Local model {self._model.value}") return msg @@ -83,13 +64,22 @@ def _predict( prompt_type: LLMPromptType, verbose: bool ) -> str: + if prompt_type == 
LLMPromptType.COMPOSITIONAL_GBNF: + program_generator = CompositionalScenic() + return program_generator.compositionally_construct_scenic_program( + model_input=model_input, + temperature=temperature, + max_tokens=max_length_tokens, + verbose=verbose + ) + prompt = self._format_message( model_input=model_input, prompt_type=prompt_type, verbose=verbose, ) - data = {"prompt": prompt, "temperature": temperature} | self.DEFAULT_PARAMS + data = {"prompt": prompt, "temperature": temperature} | LOCAL_MODEL_DEFAULT_PARAMS if max_length_tokens > 0: data["max_tokens"] = max_length_tokens @@ -99,7 +89,7 @@ def _predict( # - Add logic to check the correctness of each partial scenic program # - Add logic to synthesize the full scenic program from all of the partial scenic programs - response = requests.post(self.ENDPOINT, json=data) + response = requests.post(LOCAL_MODEL_ENDPOINT, json=data) if response.status_code != 200: raise ValueError(f"Local model {self._model} returned status code {response.status_code}") response_body = response.json() diff --git a/src/scenicNL/adapters/model_adapter.py b/src/scenicNL/adapters/model_adapter.py index 15ad0c1..c9ab922 100644 --- a/src/scenicNL/adapters/model_adapter.py +++ b/src/scenicNL/adapters/model_adapter.py @@ -156,7 +156,7 @@ def predict_batch( if verbose: print(f"Starting batch prediction using {self.__class__.__name__} " + - "with {num_workers} workers") + f"with {num_workers} workers") with Cache(cache_path) as cache: processor = self._batch_processor( diff --git a/src/scenicNL/adapters/openai_adapter.py b/src/scenicNL/adapters/openai_adapter.py index 8ed4e95..e1a5348 100644 --- a/src/scenicNL/adapters/openai_adapter.py +++ b/src/scenicNL/adapters/openai_adapter.py @@ -2,34 +2,44 @@ from scenicNL.adapters.model_adapter import ModelAdapter import json -from typing import Dict, List, cast +from typing import Dict, List, Optional, cast from enum import Enum from tenacity import retry, stop_after_attempt, wait_exponential_jitter 
import os -import openai +from openai import OpenAI + from scenicNL.common import DISCUSSION_TEMPERATURE, NUM_EXPERTS, LLMPromptType, ModelInput, PromptFiles, VectorDB, few_shot_prompt_with_rag, get_discussion_prompt, get_discussion_to_program_prompt, get_expert_synthesis_prompt class OpenAIModel(Enum): - GPT_35_TURBO = "gpt-3.5-turbo-0613" + GPT_35_TURBO = "gpt-3.5-turbo-0125" + GPT_35_TURBO_16k = "gpt-3.5-turbo-16k" GPT_4 = "gpt-4-0613" GPT_4_TURBO = "gpt-4-1106-preview" GPT_4_32K = "gpt-4-32k-0613" + GPT_4_128K_TURBO = "gpt-4-0125-preview" + GPT_4_PREVIEW = "gpt-4-turbo-preview" class OpenAIAdapter(ModelAdapter): """ This class serves as a wrapper for the OpenAI API. """ - def __init__(self, model: OpenAIModel): + def __init__(self, model: OpenAIModel, use_index : bool = True): super().__init__() - openai.api_key = os.getenv("OPENAI_API_KEY") if os.getenv("OPENAI_ORGANIZATION") and len(os.getenv("OPENAI_ORGANIZATION")) > 0: - openai.organization = os.getenv("OPENAI_ORGANIZATION") + # TODO: The 'openai.organization' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 
'OpenAI(organization=os.getenv("OPENAI_ORGANIZATION"))' + organization = os.getenv("OPENAI_ORGANIZATION") + self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"), organization=organization) + else: + self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) self.model = model - self.index = VectorDB(index_name='scenic-programs') + if use_index: + self.index = VectorDB(index_name='scenic-programs') + else: + self.index = None def _zero_shot_prompt( self, @@ -361,6 +371,69 @@ def get_cache_key( }, sort_keys=True, ) + + def predict( + self, + messages: List[dict], + temperature: float = 0.7, + ) -> str: + response = self.client.chat.completions.create(model=self.model.value, + messages=messages, + temperature=temperature, + max_tokens=1000, + top_p=1, + frequency_penalty=0.1, + presence_penalty=0 + ) + return response.choices[0].message.content + + + def vision_predit( + self, + images: List[str], + temperature: float = 0.7, + ) -> str: + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {self.api_key}", + } + system_message = { + "role": "system", + "content": [ + { + "type": "text", + "text": "Fill in some stuff about Scenic and figure out what to say" + } + ] + } + user_message = { + "role": "user", + "content": [ + { + "type": "text", + "text": "Figure out the user prompt" + }, + { + "type": "image_url", + "image_url": { + "url": f"data:image/jpeg;base64,{images[0]}" + } + }, + { + "type": "image_url", + "image_url": { + "url": f"data:image/jpeg;base64,{images[1]}" + } + } + ] + } + payload = { + "model": "gpt-4-vision-preview", + "messages": [system_message, user_message], + "max_tokens": 1000, + } + + @retry( wait=wait_exponential_jitter(initial=10, max=60), stop=stop_after_attempt(5) @@ -378,12 +451,10 @@ def _predict( # 1. 
Use tree of thought to answer all questions in the prompt panel_answers = [] for _ in range(NUM_EXPERTS): - response = openai.ChatCompletion.create( - temperature=DISCUSSION_TEMPERATURE, - model=self.model.value, - max_tokens=max_length_tokens, - messages=self._format_message(model_input=model_input, prompt_type=LLMPromptType.EXPERT_DISCUSSION, verbose=verbose), - ) + response = self.client.chat.completions.create(temperature=DISCUSSION_TEMPERATURE, + model=self.model.value, + max_tokens=max_length_tokens, + messages=self._format_message(model_input=model_input, prompt_type=LLMPromptType.EXPERT_DISCUSSION, verbose=verbose)) panel_answers.append(response.choices[0].message.content) if len(panel_answers) != NUM_EXPERTS: @@ -402,24 +473,20 @@ def _predict( f"Tree of thought answers: {panel_answers}\n") # 2. Ask an expert to synthesize the answers into a single program - expert_response = openai.ChatCompletion.create( - temperature=DISCUSSION_TEMPERATURE, - model=self.model.value, - max_tokens=max_length_tokens, - messages=self._format_message(model_input=model_input, prompt_type=LLMPromptType.EXPERT_SYNTHESIS, verbose=verbose), - ) + expert_response = self.client.chat.completions.create(temperature=DISCUSSION_TEMPERATURE, + model=self.model.value, + max_tokens=max_length_tokens, + messages=self._format_message(model_input=model_input, prompt_type=LLMPromptType.EXPERT_SYNTHESIS, verbose=verbose)) expert_synthesis = expert_response.choices[0].message.content if verbose: print(f"GPT model {self.model.value}\n" f"Expert synthesis: {expert_synthesis}\n") # 3. 
Do a few shot predict on the natural language description - response = openai.ChatCompletion.create( - temperature=temperature, - model=self.model.value, - max_tokens=max_length_tokens, - messages=self._format_message(model_input=model_input, prompt_type=LLMPromptType.PREDICT_FEW_SHOT, verbose=verbose), - ) + response = self.client.chat.completions.create(temperature=temperature, + model=self.model.value, + max_tokens=max_length_tokens, + messages=self._format_message(model_input=model_input, prompt_type=LLMPromptType.PREDICT_FEW_SHOT, verbose=verbose)) # 4. Use the resulting program to query the index to do HyDE thus obtaining the top k programs new_model_input = ModelInput( @@ -431,44 +498,36 @@ def _predict( ) # 5. Use the top k programs as examples for the few shot prediction along with the answer from the tree of thought - response = openai.ChatCompletion.create( - temperature=temperature, - model=self.model.value, - max_tokens=max_length_tokens, - messages=self._format_message(model_input=new_model_input, prompt_type=prompt_type, verbose=verbose), - ) + response = self.client.chat.completions.create(temperature=temperature, + model=self.model.value, + max_tokens=max_length_tokens, + messages=self._format_message(model_input=new_model_input, prompt_type=prompt_type, verbose=verbose)) # TODO: 6. 
Compile the program and loop feedback to GPT until the program compiles return response.choices[0].message.content elif prompt_type != LLMPromptType.PREDICT_FEW_SHOT_WITH_HYDE: messages = self._format_message(model_input=model_input, prompt_type=prompt_type, verbose=verbose) - response = openai.ChatCompletion.create( - temperature=temperature, - model=self.model.value, - max_tokens=max_length_tokens, - messages=messages - ) + response = self.client.chat.completions.create(temperature=temperature, + model=self.model.value, + max_tokens=max_length_tokens, + messages=messages) return response.choices[0].message.content else: # HyDE - response = openai.ChatCompletion.create( - temperature=temperature, - model=self.model.value, - max_tokens=max_length_tokens, - messages=self._format_message(model_input=model_input, prompt_type=LLMPromptType.PREDICT_FEW_SHOT, verbose=verbose) - ) + response = self.client.chat.completions.create(temperature=temperature, + model=self.model.value, + max_tokens=max_length_tokens, + messages=self._format_message(model_input=model_input, prompt_type=LLMPromptType.PREDICT_FEW_SHOT, verbose=verbose)) # We need to call GPT again new_model_input = ModelInput( examples=model_input.examples, # this will get overwritten by the search query nat_lang_scene_des=model_input.nat_lang_scene_des, first_attempt_scenic_program=response.choices[0].message.content, ) - response = openai.ChatCompletion.create( - temperature=temperature, - model=self.model.value, - max_tokens=max_length_tokens, - messages=self._format_message(model_input=new_model_input, prompt_type=prompt_type, verbose=verbose) - ) + response = self.client.chat.completions.create(temperature=temperature, + model=self.model.value, + max_tokens=max_length_tokens, + messages=self._format_message(model_input=new_model_input, prompt_type=prompt_type, verbose=verbose)) return response.choices[0].message.content def _format_scenic_tutorial_prompt( diff --git 
a/src/scenicNL/adapters/prompts/comp_gbnf_prompt.yml b/src/scenicNL/adapters/prompts/comp_gbnf_prompt.yml new file mode 100644 index 0000000..1b12474 --- /dev/null +++ b/src/scenicNL/adapters/prompts/comp_gbnf_prompt.yml @@ -0,0 +1,1342 @@ +--- + context: | + Scenic is a probabilistic programming language for modeling the environments of autonomous cars. A Scenic program defines a distribution over scenes, configurations of physical objects and agents. Scenic can also define (probabilistic) policies for dynamic agents, allowing modeling scenarios where agents take actions over time in response to the state of the world. We use CARLA to render the scenes and simulate the agents. + + We are going to play a game. For the following question, imagine that you are 3 different autonomous driving experts. For every question, each expert must briefly provide a step-by-step explanation for how they came up with their answer. After all the experts have answered the question, you will need to provide a final answer using the best parts of each expert's explanation. The total of the responses and final answer should be brief (2-3 sentences) and the whole response should be no more than 500, words. 
Use the following format: + EXPERT_1: + + EXPERT_2: + + EXPERT_3: + + FINAL_ANSWER: + + + discussion_grammar: | + root ::= experts final + + experts ::= expert1 expert2 expert3 + + expert1 ::= "EXPERT_1: " expert1answer + expert2 ::= "EXPERT_2: " expert2answer + expert3 ::= "EXPERT_3: " expert3answer + final ::= "FINAL_ANSWER: " finalanswer + + expert1answer ::= [^"EXPERT_1:"]+ + expert2answer ::= [^"EXPERT_2:"]+ + expert3answer ::= [^"EXPERT_3:"]+ + finalanswer ::= [^"FINAL_ANSWER:"]+ + + single_answer_grammar: | + root ::= work answer + work ::= "REASONING: " [^"FINAL_ANSWER:"]+ + answer ::= "FINAL_ANSWER: " [^"FINAL_ANSWER:"]+ + + examples: + nat_lang_descriptions: + one: |- + An autonomous Ford Explorer SUV, operating in full autonomous mode, was navigating the ramp to merge onto Sand Hill Road amidst a heavy rainstorm. The vehicle's sensors detected the wet road conditions and adjusted speed accordingly. However, the driver claims' there was debris on the road and they adjusted accordingly. They encountered an unexpected large puddle which caused the vehicle to hydroplane, leading to a temporary loss of traction. The autonomous system attempted corrective steering maneuvers, but the vehicle ultimately made contact with the guardrail on the right side of the ramp. The collision resulted in moderate damage to the vehicle's right rear quarter panel and bumper. No injuries were reported, as the vehicle was unoccupied except for the presence of a safety operator, who did not sustain any injuries. The California Highway Patrol was notified and arrived on the scene to document the incident and assist in the vehicle's recovery. The incident highlighted the challenges faced by autonomous vehicles in severe weather conditions and the need for ongoing improvements in sensor and navigation technologies to handle such situations. 
+ + two: |- + At approximately 12:05 PM, an autonomous BMW i8 convertible was eastbound on University Avenue when it collided with a city bus running a red light at Hamilton Avenue. At the same time, a scooter rider and a bicyclist, legally crossing Hamilton, narrowly avoided the incident, while two pedestrians were nearby on the sidewalk. Despite the BMW’s attempt to avoid the collision through emergency braking and evasive maneuvers, it sustained significant front-end damage, and the bus minor damage on its right side. Three bus passengers reported minor injuries. The complex scenario, involving multiple road users, highlighted the challenges autonomous vehicles face in dynamic urban environments. The primary cause was identified as the bus driver's failure to obey the traffic signal. + + three: |- + A Chevy Cruise autonomous vehicle, while in autonomous mode, was attempting to merge onto northbound Maple Ave from 5th Street when a bicyclist unexpectedly entered the vehicle's path, causing the vehicle to apply emergency braking. The bicyclist made minor contact with the front sensor array of the vehicle but managed to remain upright and uninjured. The vehicle sustained minimal damage to its front sensor array. No law enforcement was called to the scene, and the incident was recorded by the vehicle's onboard cameras for further analysis. + + four: |- + An autonomous Audi A4 convertible, operating in full autonomous mode, was traveling north on 4th Street approaching Grand Avenue under sunny conditions. As the vehicle entered the intersection on a green light, a delivery truck running a red light from Grand Avenue intersected its path. Simultaneously, a skateboarder was crossing 4th Street within the crosswalk, and a dog, leashed to a nearby lamppost, became startled and attempted to run across the intersection. 
The Audi's advanced detection systems initiated an emergency stop, avoiding a collision with the truck and ensuring the skateboarder and dog remained unharmed. The incident caused minor traffic disruption but highlighted the vehicle's ability to respond to multiple hazards effectively. No damage or injuries were reported, and the truck driver was cited for running the red light. + + five: |- + During a light rain, a Tesla Model S in autonomous mode was driving west on Elm Street. As it approached the junction with 5th Avenue, a cyclist unexpectedly swerved into the lane from the sidewalk to avoid a puddle. At the same time, a pedestrian, distracted by their phone, stepped off the curb against the "Don't Walk" signal. The Tesla, utilizing its automatic emergency braking and evasive maneuver capabilities, managed to avoid contact with both the cyclist and pedestrian, coming to a stop in the intersection. The rapid response of the vehicle's systems prevented potential injuries. No damages were reported, and after a brief pause to ensure the safety of all parties, the vehicle continued on its route. + + complier_system_prompt: | + You are a Python programmer. User's will give you their programs and exception messages they encounter. Your task is to fix the syntax errors and respond with the full correct python code. Here are the only objects that are supported: + + {objects} + + Answer only with the full python code as plain text without code block syntax around it. Variables should be in all caps. + + complier_user_prompt: | + Your python program failed to compile. Please fix the syntax errors and try again. 
+ Error message: + {error} + + Your program: + {program} + + objects: + + dynamic: + object_info: | + carModels = [ + "Audi - A2": "vehicle.audi.a2", + "Audi - E-Tron": "vehicle.audi.etron", + "Audi - TT": "vehicle.audi.tt", + "BMW - Gran Tourer": "vehicle.bmw.grandtourer", + "Chevrolet - Impala": "vehicle.chevrolet.impala", + "Citroen - C3": "vehicle.citroen.c3", + "Dodge - Charger 2020": "vehicle.dodge.charger_2020", + "Dodge - Police Charger": "vehicle.dodge.charger_police", + "Dodge - Police Charger 2020": "vehicle.dodge.charger_police_2020", + "Ford - Crown (taxi)": "vehicle.ford.crown", + "Ford - Mustang": "vehicle.ford.mustang", + "Jeep - Wrangler Rubicon": "vehicle.jeep.wrangler_rubicon", + "Lincoln - MKZ 2017": "vehicle.lincoln.mkz_2017", + "Lincoln - MKZ 2020": "vehicle.lincoln.mkz_2020", + "Mercedes - Coupe": "vehicle.mercedes.coupe", + "Mercedes - Coupe 2020": "vehicle.mercedes.coupe_2020", + "Micro - Microlino": "vehicle.micro.microlino", + "Mini - Cooper S": "vehicle.mini.cooper_s", + "Mini - Cooper S 2021": "vehicle.mini.cooper_s_2021", + "Nissan - Micra": "vehicle.nissan.micra", + "Nissan - Patrol": "vehicle.nissan.patrol", + "Nissan - Patrol 2021": "vehicle.nissan.patrol_2021", + "Seat - Leon": "vehicle.seat.leon", + "Tesla - Model 3": "vehicle.tesla.model3", + "Toyota - Prius": "vehicle.toyota.prius" + ] + truckModels = [ + "CARLA Motors - CarlaCola": "vehicle.carlamotors.carlacola", + "CARLA Motors - European HGV (cab-over-engine type)": "vehicle.carlamotors.european_hgv", + "CARLA Motors - Firetruck": "vehicle.carlamotors.firetruck", + "Tesla - Cybertruck": "vehicle.tesla.cybertruck" + ] + vanModels = [ + "Ford - Ambulance": "vehicle.ford.ambulance", + "Mercedes - Sprinter": "vehicle.mercedes.sprinter", + "Volkswagen - T2": "vehicle.volkswagen.t2", + "Volkswagen - T2 2021": "vehicle.volkswagen.t2_2021" + ] + busModels = [ + "Mitsubishi - Fusorosa": "vehicle.mitsubishi.fusorosa", + ] + motorcycleAndScooterModels = [ + "Harley Davidson - Low 
Rider": "vehicle.harley-davidson.low_rider", + "Kawasaki - Ninja": "vehicle.kawasaki.ninja", + "Vespa - ZX 125": "vehicle.vespa.zx125", + "Yamaha - YZF": "vehicle.yamaha.yzf" + ] + bicycleModels = [ + "BH - Crossbike": "vehicle.bh.crossbike", + "Diamondback - Century": "vehicle.diamondback.century", + "Gazelle - Omafiets": "vehicle.gazelle.omafiets" + ] + pedestrianModels = [ + "walker.pedestrian.0001", + "walker.pedestrian.0002", + "walker.pedestrian.0003", + "walker.pedestrian.0004", + "walker.pedestrian.0005", + "walker.pedestrian.0006", + "walker.pedestrian.0007", + "walker.pedestrian.0008", + "walker.pedestrian.0009", + "walker.pedestrian.0010", + "walker.pedestrian.0011", + "walker.pedestrian.0012", + "walker.pedestrian.0013", + "walker.pedestrian.0014", + ] + + system_prompt: | + You are a python programming writer. You only respond with python code as plain text without code block syntax around it. Your variables should be in all caps. + + A user will give you a description. From the description identify all cars, trucks, vans, buses, motorcycles, bicycles, or pedestrians. Identify how many of each there are. Then for each object, find all values that could be considered a match. You must pick at least one value per object instance even if there is no direct match (so do your best). If you find only one, directly assign it otherwise it should be in a list. + + Format your answer as: + + + My answer is: + + + {object_info} + + assistant_answer_1: | + The description involves an autonomous vehicle, specifically a Ford Explorer SUV. In the provided lists of vehicle models, there is no direct match for a "Ford Explorer SUV." However, we can select the closest match based on the brand and the type of vehicle, which in this case would be a car (SUV). The closest matches from the car models list under the Ford brand are "Ford - Crown (taxi)" and "Ford - Mustang." 
Since neither of these is an SUV, but are the only Ford options provided, we'll include them as the closest available matches. + + My answer is: + SUV = ["vehicle.ford.crown", "vehicle.ford.mustang"] + + assistant_answer_2: | + The description involves multiple entities: an autonomous BMW i8 convertible, a city bus, a scooter rider, a bicyclist, and two pedestrians. + + 1. For the BMW i8 convertible, there's no exact match in the car models list, but we can select the closest match based on the brand, which is BMW. The closest BMW model available is "BMW - Gran Tourer." + 2. The city bus is mentioned, and there is a direct match in the bus models list as "Mitsubishi - Fusorosa." + 3. A scooter rider is mentioned. In the motorcycle and scooter models list, we have options like "Harley Davidson - Low Rider," "Kawasaki - Ninja," "Vespa - ZX 125," and "Yamaha - YZF." Since it specifically mentions a scooter, "Vespa - ZX 125" is the closest match. + 4. A bicyclist is mentioned. The bicycle models list includes "BH - Crossbike," "Diamondback - Century," and "Gazelle - Omafiets." Since no specific model of bicycle is mentioned, we'll include all as possible matches. + 5. Two pedestrians are mentioned. Since there are multiple pedestrian models and no specific descriptions are given, we will include all pedestrian models as possible matches. 
+ + My answer is: + CAR = "vehicle.bmw.grandtourer" + BUS = "vehicle.mitsubishi.fusorosa" + SCOOTER = "vehicle.vespa.zx125" + BICYCLE = ["vehicle.bh.crossbike", "vehicle.diamondback.century", "vehicle.gazelle.omafiets"] + PEDESTRIAN_ONE = ["walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014"] + PEDESTRIAN_TWO = ["walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014"] + + assistant_answer_3: | + The description involves a Chevy Cruise autonomous vehicle and a bicyclist. + + 1. For the Chevy Cruise, there's no exact match in the car models list, but we can select the closest match based on the brand, which would be "Chevrolet - Impala" as it is the only Chevrolet model listed. + 2. The bicyclist can be matched with any of the bicycle models. To keep it general, we'll select all available bicycle models. + + My answer is: + CAR = "vehicle.chevrolet.impala" + BICYCLE = ["vehicle.bh.crossbike", "vehicle.diamondback.century", "vehicle.gazelle.omafiets"] + + assistant_answer_4: | + The description involves several entities: an autonomous Audi A4 convertible, a delivery truck, a skateboarder, and a dog. + + 1. For the autonomous Audi A4 convertible, there's no exact match in the car models list, but we can select the closest match based on the brand and model, which is Audi. 
Since "Audi - A4" is not listed, we'll consider the closest available Audi models: "Audi - A2," "Audi - E-Tron," and "Audi - TT." + 2. The delivery truck is mentioned. In the truck models list, we have options like "CARLA Motors - CarlaCola," "CARLA Motors - European HGV (cab-over-engine type)," "CARLA Motors - Firetruck," and "Tesla - Cybertruck." Since no specific model of truck is mentioned, we'll include all as possible matches. + 3. A skateboarder is mentioned, but there are no skateboard models in the provided lists. Since the skateboarder is a pedestrian, we can include all pedestrian models as possible matches. + 4. A dog is mentioned, but there are no models for animals in the provided lists, and thus it cannot be directly matched to any of the given models. + + My answer is: + CAR = ["vehicle.audi.a2", "vehicle.audi.etron", "vehicle.audi.tt"] + TRUCK = ["vehicle.carlamotors.carlacola", "vehicle.carlamotors.european_hgv", "vehicle.carlamotors.firetruck", "vehicle.tesla.cybertruck"] + SKATEBOARDER = ["walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014"] + + assistant_answer_5: | + The description involves three entities: a Tesla Model S, a cyclist, and a pedestrian. + + 1. For the Tesla Model S, there is a direct match in the car models list as "Tesla - Model 3." Although the specific model mentioned is "Model S," the closest available match provided is "Tesla - Model 3." + 2. A cyclist is mentioned. The bicycle models list includes "BH - Crossbike," "Diamondback - Century," and "Gazelle - Omafiets." Since no specific model of bicycle is mentioned, we'll include all as possible matches. + 3. A pedestrian is mentioned. 
Since there are multiple pedestrian models and no specific descriptions are given, we will include all pedestrian models as possible matches. + + My answer is: + CAR = "vehicle.tesla.model3" + BICYCLE = ["vehicle.bh.crossbike", "vehicle.diamondback.century", "vehicle.gazelle.omafiets"] + PEDESTRIAN = ["walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014"] + + static: + object_info: | + trashCanModels = [ + "static.prop.trashcan01", + "static.prop.trashcan02", + "static.prop.trashcan03", + "static.prop.trashcan04", + "static.prop.trashcan05", + "static.prop.bin", + ] + trafficConeModels = [ + "static.prop.constructioncone", + "static.prop.trafficcone01", + "static.prop.trafficcone02", + ] + roadDebrisModels = [ + "static.prop.dirtdebris01", + "static.prop.dirtdebris02", + "static.prop.dirtdebris03", + ] + vendingMachineModels = [ + "static.prop.vendingmachine", + ] + chairModels = [ + "static.prop.plasticchair", + ] + busStopModels = [ + "static.prop.busstop", + ] + roadsideAdvertisementBillboardModels = [ + "static.prop.advertisement", + "static.prop.streetsign", + "static.prop.streetsign01", + "static.prop.streetsign04", + ] + garbageYouFindOnTheStreetModels = [ + "static.prop.colacan", + "static.prop.garbage01", + "static.prop.garbage02", + "static.prop.garbage03", + "static.prop.garbage04", + "static.prop.garbage05", + "static.prop.garbage06", + "static.prop.plasticbag", + "static.prop.trashbag", + ] + containerModels = [ + "static.prop.container", + "static.prop.clothcontainer", + "static.prop.glasscontainer", + ] + tableModels = [ + "static.prop.table", + "static.prop.plastictable", + ] + trafficBarrierModels = [ + "static.prop.streetbarrier", + 
"static.prop.chainbarrier", + "static.prop.chainbarrierend", + ] + plantpotModels = [ + "static.prop.plantpot01", + "static.prop.plantpot02", + "static.prop.plantpot03", + "static.prop.plantpot04", + "static.prop.plantpot05", + "static.prop.plantpot06", + "static.prop.plantpot07", + "static.prop.plantpot08", + ] + mailboxModels = [ + "static.prop.mailbox", + ] + gnomeModels = [ + "static.prop.gnome", + ] + creasedBoxModels = [ + "static.prop.creasedbox01", + "static.prop.creasedbox02", + "static.prop.creasedbox03", + ] + travelCaseModels = [ + "static.prop.travelcase", + "static.prop.briefcase", + "static.prop.guitarcase", + ] + boxModels = [ + "static.prop.box01", + "static.prop.box02", + "static.prop.box03", + ] + pedestrianBenchModels = [ + "static.prop.bench01", + "static.prop.bench02", + "static.prop.bench03", + ] + oilBarrelModels = [ + "static.prop.barrel", + ] + atmModels = [ + "static.prop.atm", + ] + kioskModels = [ + "static.prop.kiosk_01", + ] + ironPlateOnRoadModels = [ + "static.prop.ironplank", + ] + trafficwarningModels = [ + "static.prop.trafficwarning", + ] + weather = ['ClearNoon', 'CloudyNoon', 'WetNoon', 'WetCloudyNoon', 'SoftRainNoon', 'MidRainyNoon', 'HardRainNoon', 'ClearSunset', 'CloudySunset', 'WetSunset', 'WetCloudySunset', 'SoftRainSunset', 'MidRainSunset', 'HardRainSunset', + 'ClearNight', 'CloudyNight', 'WetNight', 'WetCloudyNight', 'SoftRainNight', 'MidRainyNight', 'HardRainNight' ,'DustStorm'] + + system_prompt: | + You are a python programming writer. You only respond with python code as plain text without code block syntax around it. Your variables should be in all caps. + + A user will give you a description. From the description identify the weather and any static objects. Reply with all values that could be considered a match. You must pick at least one value per object even if there is no direct match (so pick the closest ones). If you only pick one, you can directly assign it otherwise it should be in a list. 
+ + Format your answer as: + + + My answer is: + + + {object_info} + + assistant_answer_1: | + Identifying static objects and conditions from the description: + + 1. **Weather Condition**: Heavy rainstorm implies a weather condition that could be classified as "HardRainNoon" or "HardRainSunset" or "HardRainNight" depending on the time of day the incident occurred. Since the time of day is not specified, all three possibilities are considered. + 2. **Road Debris**: The driver claimed there was debris on the road, which the autonomous vehicle had to adjust for. The closest matches from the provided models would be items from the `roadDebrisModels` list. + 3. **Large Puddle**: There's no direct match for a puddle, but it contributes to the wet road conditions already accounted for in the weather condition. + 4. **Guardrail**: While a guardrail is mentioned, there's no direct model match in the provided lists. The closest related object might be something that serves as a barrier, so "trafficBarrierModels" could be considered the closest match. + + My answer is: + WEATHER_CONDITIONS = ["HardRainNoon", "HardRainSunset", "HardRainNight"] + ROAD_DEBRIS = ["static.prop.dirtdebris01", "static.prop.dirtdebris02", "static.prop.dirtdebris03"] + TRAFFIC_BARRIER = ["static.prop.streetbarrier", "static.prop.chainbarrier", "static.prop.chainbarrierend"] + + assistant_answer_2: | + Identifying static objects and conditions from the description: + + 1. **Weather Condition**: The incident occurred at approximately 12:05 PM, which suggests a daytime scenario. However, there's no specific mention of weather conditions, so the closest match considering the time would be "ClearNoon" as the default assumption in the absence of any mention of adverse weather. + 2. **City Bus**: There's no direct match for a city bus in the provided lists, so no static object model can be directly assigned for the bus. + 3. 
**Scooter Rider and Bicyclist**: These are dynamic objects (users of the road) rather than static objects, so they don't match any of the provided models. + 4. **Pedestrians**: Similarly, pedestrians are dynamic objects and do not match any static object models provided. + 5. **Traffic Signal**: While a traffic signal is crucial to the incident, there's no direct match in the provided lists. The closest related objects might be something that serves as a road sign or indicator, so "roadsideAdvertisementBillboardModels" could be considered for its role in road signaling, albeit not a perfect match. + + My answer is: + WEATHER_CONDITION = "ClearNoon" + ROAD_SIGN = ["static.prop.advertisement", "static.prop.streetsign", "static.prop.streetsign01", "static.prop.streetsign04"] + + assistant_answer_3: | + Identifying static objects and conditions from the description: + + 1. **Weather Condition**: There's no mention of weather conditions in the incident description. Without any indication of adverse weather, the default assumption could be clear weather. However, without a specific time of day mentioned, it's challenging to choose between "ClearNoon", "ClearSunset", or "ClearNight". In the absence of further details, all three are considered possibilities. + 2. **Bicyclist**: The bicyclist is a dynamic object rather than a static one, so it doesn't match any of the provided static object models. + 3. **Front Sensor Array Damage**: The damage to the vehicle's front sensor array is specific to the vehicle and does not match any of the provided static object models. + + Given the information, the only variable that can be somewhat determined is the weather condition, with a broad assumption due to the lack of specific details about the time of day or weather. + + My answer is: + WEATHER_CONDITIONS = ["ClearNoon", "ClearSunset", "ClearNight"] + + assistant_answer_4: | + Identifying static objects and conditions from the description: + + 1. 
**Weather Condition**: The incident occurred under sunny conditions, which directly matches "ClearNoon" if we assume the incident happened around noon given the typical association of sunny conditions with midday. + 2. **Delivery Truck**: The delivery truck is a dynamic object and does not match any of the provided static object models. + 3. **Skateboarder**: The skateboarder is a dynamic object, not a static one, so it doesn't match any of the provided static object models. + 4. **Dog Leashed to a Lamppost**: The dog is a dynamic object, but the lamppost to which it was leashed can be considered a static object. However, there's no direct match for a lamppost in the provided lists. The closest related objects might be something that serves as a street fixture, but without a direct match, this detail cannot be accurately represented with the provided models. + 5. **Advanced Detection Systems**: This refers to the vehicle's technology and does not match any of the provided static object models. + + Given the information, the only variable that can be directly determined is the weather condition. + + My answer is: + WEATHER_CONDITION = "ClearNoon" + + assistant_answer_5: | + Identifying static objects and conditions from the description: + + 1. **Weather Condition**: The incident occurred during a light rain, which directly matches "SoftRainNoon" if we assume the incident happened around noon, or "SoftRainSunset" if in the evening, or "SoftRainNight" for nighttime, as the specific time of day is not mentioned. + 2. **Cyclist**: The cyclist is a dynamic object and does not match any of the provided static object models. + 3. **Pedestrian**: Similarly, the pedestrian is a dynamic object and does not match any of the provided static object models. + 4. 
**Puddle**: While the puddle is mentioned as part of the scenario, it is not a static object in the context of the provided models and is more a condition of the wet road, which is already accounted for in the weather condition. + 5. **Automatic Emergency Braking and Evasive Maneuver Capabilities**: These refer to the vehicle's technology and do not match any of the provided static object models. + + Given the information, the only variable that can be directly determined is the weather condition. + + My answer is: + WEATHER_CONDITIONS = ["SoftRainNoon", "SoftRainSunset", "SoftRainNight"] + + + one: + system_question: | + Based on the description, what are the main objects that need to be included in the scene? Provide step-by-step reasoning then provide your final answer as a numbered list. Be concise in your reasoning (no more than 1-2 sentences per object). Only respond with this exact format: + REASONING: + + + FINAL_ANSWER: + + + user_question_1: | + Original description: + A Chevy Cruise autonomous vehicle, while in autonomous mode, was attempting to merge onto northbound Maple Ave from 5th Street when a bicyclist unexpectedly entered the vehicle's path, causing the vehicle to apply emergency braking. The bicyclist made minor contact with the front sensor array of the vehicle but managed to remain upright and uninjured. The vehicle sustained minimal damage to its front sensor array. No law enforcement was called to the scene, and the incident was recorded by the vehicle's onboard cameras for further analysis. + + assistant_answer_1: | + REASONING: + 1. The Chevy Cruise autonomous vehicle is mentioned as attempting to merge, indicating it's moving and thus a movable object. + 2. The bicyclist entered the vehicle's path and made contact with it, indicating the bicyclist is also a movable object. + + FINAL_ANSWER: + 1. Chevy Cruise autonomous vehicle + 2. 
Bicyclist + + user_question_2: | + Original description: + At approximately 12:05 PM, an autonomous BMW i8 convertible was eastbound on University Avenue when it collided with a city bus running a red light at Hamilton Avenue. At the same time, a scooter rider and a bicyclist, legally crossing Hamilton, narrowly avoided the incident, while two pedestrians were nearby on the sidewalk. Despite the BMW’s attempt to avoid the collision through emergency braking and evasive maneuvers, it sustained significant front-end damage, and the bus minor damage on its right side. Three bus passengers reported minor injuries. The complex scenario, involving multiple road users, highlighted the challenges autonomous vehicles face in dynamic urban environments. The primary cause was identified as the bus driver's failure to obey the traffic signal. + + assistant_answer_2: | + REASONING: + 1. The autonomous BMW i8 convertible is described as moving eastbound and attempting to avoid a collision, indicating it's a movable object. + 2. The city bus is mentioned as running a red light and colliding with the BMW, indicating it is also a movable object. + 3. The scooter rider and bicyclist are described as legally crossing the intersection and narrowly avoiding the incident, signifying they are moving through the scene. + 4. The two pedestrians were nearby on the sidewalk, which doesn't inherently mean they were moving, but pedestrians are generally considered movable objects in traffic scenarios. + + FINAL_ANSWER: + 1. Autonomous BMW i8 convertible + 2. City bus + 3. Scooter rider + 4. Bicyclist + 5. Pedestrian one + 6. Pedestrian two + + user_question_3: | + Original description: + An autonomous Ford Explorer SUV, operating in full autonomous mode, was navigating the ramp to merge onto Sand Hill Road amidst a heavy rainstorm. The vehicle's sensors detected the wet road conditions and adjusted speed accordingly. 
However, the driver claims' there was debris on the road and they adjusted accordingly. They encountered an unexpected large puddle which caused the vehicle to hydroplane, leading to a temporary loss of traction. The autonomous system attempted corrective steering maneuvers, but the vehicle ultimately made contact with the guardrail on the right side of the ramp. The collision resulted in moderate damage to the vehicle's right rear quarter panel and bumper. No injuries were reported, as the vehicle was unoccupied except for the presence of a safety operator, who did not sustain any injuries. The California Highway Patrol was notified and arrived on the scene to document the incident and assist in the vehicle's recovery. The incident highlighted the challenges faced by autonomous vehicles in severe weather conditions and the need for ongoing improvements in sensor and navigation technologies to handle such situations. + + assistant_answer_3: | + REASONING: + 1. The autonomous Ford Explorer SUV is described as navigating and hydroplaning, indicating it is a moving object. + 2. The presence of debris on the road is a movable object in the scene. + + FINAL_ANSWER: + 1. Autonomous Ford Explorer SUV + 2. Road debris + + user_question: |- + Original description: + {description} + + one_static: + system_question: | + Based on the description, what are the main static objects that need to be included in the scene? Provide step-by-step reasoning then provide your final answer as a numbered list. Be concise in your reasoning (no more than 1-2 sentences per object). Only respond with this exact format: + REASONING: + + + FINAL_ANSWER: + + + user_question_1: | + Original description: + A Chevy Cruise autonomous vehicle, while in autonomous mode, was attempting to merge onto northbound Maple Ave from 5th Street when a bicyclist unexpectedly entered the vehicle's path, causing the vehicle to apply emergency braking. 
The bicyclist made minor contact with the front sensor array of the vehicle but managed to remain upright and uninjured. The vehicle sustained minimal damage to its front sensor array. No law enforcement was called to the scene, and the incident was recorded by the vehicle's onboard cameras for further analysis. + + assistant_answer_1: | + REASONING: + 1. There are no static objects mentioned in the description therefore my final answer will be empty. + + FINAL_ANSWER: + + + user_question_2: | + Original description: + At approximately 12:05 PM, an autonomous BMW i8 convertible was eastbound on University Avenue when it collided with a city bus running a red light at Hamilton Avenue. At the same time, a scooter rider and a bicyclist, legally crossing Hamilton, narrowly avoided the incident, while two pedestrians were nearby on the sidewalk. Despite the BMW’s attempt to avoid the collision through evasive maneuvers the presence of trash cans forced the vehicle to apply emergency braking instead, it sustained significant front-end damage, and the bus had minor damage on its right side. Three bus passengers reported minor injuries. + + assistant_answer_2: | + REASONING: + 1. Trash cans are a static object in the scene which affected the BMW's movement. + + FINAL_ANSWER: + 1. Trash cans + + + user_question_3: | + Original description: + An autonomous Ford Explorer SUV, operating in full autonomous mode, was navigating the ramp to merge onto Sand Hill Road amidst a heavy rainstorm. The vehicle's sensors detected the wet road conditions and adjusted speed accordingly. However, the driver claims' there was debris on the road and they adjusted accordingly. They encountered an unexpected large puddle which caused the vehicle to hydroplane, leading to a temporary loss of traction. The autonomous system attempted corrective steering maneuvers, but the vehicle ultimately made contact with the guardrail on the right side of the ramp. 
The collision resulted in moderate damage to the vehicle's right rear quarter panel and bumper. No injuries were reported, as the vehicle was unoccupied except for the presence of a safety operator, who did not sustain any injuries. The California Highway Patrol was notified and arrived on the scene to document the incident and assist in the vehicle's recovery. The incident highlighted the challenges faced by autonomous vehicles in severe weather conditions and the need for ongoing improvements in sensor and navigation technologies to handle such situations. + + assistant_answer_3: | + REASONING: + 1. The description mentions that there is debris on the road so I will include that. + + FINAL_ANSWER: + 1. Road debris + + user_question: |- + Original description: + {description} + + nine: # with help from src/scenic/simulators/carla/blueprints.py + system_question: | + A user will provide you with a list of main objects from a description. For each of the main objects, find the closest matching models from the list below. If there are any objects in the original description that you see a match for (e.g. a traffic cone), include them in your answer even if they are not listed as a main object. Specify your answer as the string value of that model. You CANNOT under any circumstance pick models that are not listed here. You must select only from the list provided. If there is no direct match or you are unsure, pick ALL eligible similar models as a python list. Never leave empty choices. You should prefer lists over single choices. Provide step-by-step reasoning for any decision you make. Specify your final answer as python code with variables in all caps. In your final answer, only respond with python code as plain text without code block syntax around it. 
+ + Here are the models you can choose from: + carModels = [ + "Audi - A2": "vehicle.audi.a2", + "Audi - E-Tron": "vehicle.audi.etron", + "Audi - TT": "vehicle.audi.tt", + "BMW - Gran Tourer": "vehicle.bmw.grandtourer", + "Chevrolet - Impala": "vehicle.chevrolet.impala", + "Citroen - C3": "vehicle.citroen.c3", + "Dodge - Charger 2020": "vehicle.dodge.charger_2020", + "Dodge - Police Charger": "vehicle.dodge.charger_police", + "Dodge - Police Charger 2020": "vehicle.dodge.charger_police_2020", + "Ford - Crown (taxi)": "vehicle.ford.crown", + "Ford - Mustang": "vehicle.ford.mustang", + "Jeep - Wrangler Rubicon": "vehicle.jeep.wrangler_rubicon", + "Lincoln - MKZ 2017": "vehicle.lincoln.mkz_2017", + "Lincoln - MKZ 2020": "vehicle.lincoln.mkz_2020", + "Mercedes - Coupe": "vehicle.mercedes.coupe", + "Mercedes - Coupe 2020": "vehicle.mercedes.coupe_2020", + "Micro - Microlino": "vehicle.micro.microlino", + "Mini - Cooper S": "vehicle.mini.cooper_s", + "Mini - Cooper S 2021": "vehicle.mini.cooper_s_2021", + "Nissan - Micra": "vehicle.nissan.micra", + "Nissan - Patrol": "vehicle.nissan.patrol", + "Nissan - Patrol 2021": "vehicle.nissan.patrol_2021", + "Seat - Leon": "vehicle.seat.leon", + "Tesla - Model 3": "vehicle.tesla.model3", + "Toyota - Prius": "vehicle.toyota.prius" + ] + truckModels = [ + "CARLA Motors - CarlaCola": "vehicle.carlamotors.carlacola", + "CARLA Motors - European HGV (cab-over-engine type)": "vehicle.carlamotors.european_hgv", + "CARLA Motors - Firetruck": "vehicle.carlamotors.firetruck", + "Tesla - Cybertruck": "vehicle.tesla.cybertruck" + ] + vanModels = [ + "Ford - Ambulance": "vehicle.ford.ambulance", + "Mercedes - Sprinter": "vehicle.mercedes.sprinter", + "Volkswagen - T2": "vehicle.volkswagen.t2", + "Volkswagen - T2 2021": "vehicle.volkswagen.t2_2021" + ] + busModels = [ + "Mitsubishi - Fusorosa": "vehicle.mitsubishi.fusorosa", + ] + motorcycleModels = [ + "Harley Davidson - Low Rider": "vehicle.harley-davidson.low_rider", + "Kawasaki - Ninja": 
"vehicle.kawasaki.ninja", + "Vespa - ZX 125": "vehicle.vespa.zx125", + "Yamaha - YZF": "vehicle.yamaha.yzf" + ] + bicycleModels = [ + "BH - Crossbike": "vehicle.bh.crossbike", + "Diamondback - Century": "vehicle.diamondback.century", + "Gazelle - Omafiets": "vehicle.gazelle.omafiets" + ] + trashCanModels = [ + "static.prop.trashcan01", + "static.prop.trashcan02", + "static.prop.trashcan03", + "static.prop.trashcan04", + "static.prop.trashcan05", + "static.prop.bin", + ] + trafficConeModels = [ + "static.prop.constructioncone", + "static.prop.trafficcone01", + "static.prop.trafficcone02", + ] + roadDebrisModels = [ + "static.prop.dirtdebris01", + "static.prop.dirtdebris02", + "static.prop.dirtdebris03", + ] + vendingMachineModels = [ + "static.prop.vendingmachine", + ] + chairModels = [ + "static.prop.plasticchair", + ] + busStopModels = [ + "static.prop.busstop", + ] + roadsideAdvertisementBillboardModels = [ + "static.prop.advertisement", + "static.prop.streetsign", + "static.prop.streetsign01", + "static.prop.streetsign04", + ] + garbageYouFindOnTheStreetModels = [ + "static.prop.colacan", + "static.prop.garbage01", + "static.prop.garbage02", + "static.prop.garbage03", + "static.prop.garbage04", + "static.prop.garbage05", + "static.prop.garbage06", + "static.prop.plasticbag", + "static.prop.trashbag", + ] + containerModels = [ + "static.prop.container", + "static.prop.clothcontainer", + "static.prop.glasscontainer", + ] + tableModels = [ + "static.prop.table", + "static.prop.plastictable", + ] + trafficBarrierModels = [ + "static.prop.streetbarrier", + "static.prop.chainbarrier", + "static.prop.chainbarrierend", + ] + plantpotModels = [ + "static.prop.plantpot01", + "static.prop.plantpot02", + "static.prop.plantpot03", + "static.prop.plantpot04", + "static.prop.plantpot05", + "static.prop.plantpot06", + "static.prop.plantpot07", + "static.prop.plantpot08", + ] + mailboxModels = [ + "static.prop.mailbox", + ] + gnomeModels = [ + "static.prop.gnome", + ] + 
creasedBoxModels = [ + "static.prop.creasedbox01", + "static.prop.creasedbox02", + "static.prop.creasedbox03", + ] + travelCaseModels = [ + "static.prop.travelcase", + "static.prop.briefcase", + "static.prop.guitarcase", + ] + boxModels = [ + "static.prop.box01", + "static.prop.box02", + "static.prop.box03", + ] + pedestrianBenchModels = [ + "static.prop.bench01", + "static.prop.bench02", + "static.prop.bench03", + ] + oilBarrelModels = [ + "static.prop.barrel", + ] + atmModels = [ + "static.prop.atm", + ] + kioskModels = [ + "static.prop.kiosk_01", + ] + ironPlateOnRoadModels = [ + "static.prop.ironplank", + ] + trafficwarningModels = [ + "static.prop.trafficwarning", + ] + pedestrianModels = [ + "walker.pedestrian.0001", + "walker.pedestrian.0002", + "walker.pedestrian.0003", + "walker.pedestrian.0004", + "walker.pedestrian.0005", + "walker.pedestrian.0006", + "walker.pedestrian.0007", + "walker.pedestrian.0008", + "walker.pedestrian.0009", + "walker.pedestrian.0010", + "walker.pedestrian.0011", + "walker.pedestrian.0012", + "walker.pedestrian.0013", + "walker.pedestrian.0014", + ] + + For example, if the main objects are a tesla sedan, and road debris, a valid response could be: + REASONING: + The closest matching model to a tesla sedan is 'vehicle.tesla.model3'. + We do not know what kind of debris is on the road so we list all of them. + + FINAL_ANSWER: + CAR = 'vehicle.tesla.model3' + DEBRIS = ["static.prop.dirtdebris01", "static.prop.dirtdebris02", "static.prop.dirtdebris03"] + + Answer as follows: + REASONING: + + + FINAL_ANSWER: + + + user_question: | + Original description: + {description} + + Main objects: + {objects_from_one} + + user_question_1: | + Original description: + An autonomous Ford Explorer SUV, operating in full autonomous mode, was navigating the ramp to merge onto Sand Hill Road amidst a heavy rainstorm. The vehicle's sensors detected the wet road conditions and adjusted speed accordingly. 
However, the driver claims' there was debris on the road and they adjusted accordingly. They encountered an unexpected large puddle which caused the vehicle to hydroplane, leading to a temporary loss of traction. The autonomous system attempted corrective steering maneuvers, but the vehicle ultimately made contact with the guardrail on the right side of the ramp. The collision resulted in moderate damage to the vehicle's right rear quarter panel and bumper. No injuries were reported, as the vehicle was unoccupied except for the presence of a safety operator, who did not sustain any injuries. The California Highway Patrol was notified and arrived on the scene to document the incident and assist in the vehicle's recovery. The incident highlighted the challenges faced by autonomous vehicles in severe weather conditions and the need for ongoing improvements in sensor and navigation technologies to handle such situations. + + Main objects: + 1. Autonomous Ford Explorer SUV + 2. Road debris + + assistant_answer_1: | + REASONING: + 1. Autonomous Ford Explorer SUV: This is a specific model of a vehicle, so we need to find the closest matching model from the list of car models. The closest match would be "Ford - Crown (taxi)" as both are Ford vehicles. + + 2. Road debris: This is not listed as a main object, but it is mentioned in the description. We need to find the closest matching model from the list of road debris models. The description mentions "debris on the road" and "large puddle", so the closest match would be "static.prop.dirtdebris01" or "static.prop.dirtdebris02" from the road debris models. + + FINAL_ANSWER: + AUTONOMOUS_VEHICLE_MODEL = "vehicle.ford.crown" + ROAD_DEBRIS_MODEL = ["static.prop.dirtdebris01", "static.prop.dirtdebris02"] + + user_question_2: | + Original description: + At approximately 12:05 PM, an autonomous BMW i8 convertible was eastbound on University Avenue when it collided with a city bus running a red light at Hamilton Avenue. 
At the same time, a scooter rider and a bicyclist, legally crossing Hamilton, narrowly avoided the incident, while two pedestrians were nearby on the sidewalk. Despite the BMW’s attempt to avoid the collision through emergency braking and evasive maneuvers, it sustained significant front-end damage, and the bus minor damage on its right side. Three bus passengers reported minor injuries. The complex scenario, involving multiple road users, highlighted the challenges autonomous vehicles face in dynamic urban environments. The primary cause was identified as the bus driver's failure to obey the traffic signal. + + Main objects: + 1. Autonomous BMW i8 convertible + 2. City bus + 3. Scooter rider + 4. Bicyclist + 5. Pedestrian one + 6. Pedestrian two + + assistant_answer_2: | + REASONING: + 1. Autonomous BMW i8 convertible: This is a specific model of a vehicle, so we need to find the closest matching model from the list of car models. The closest match would be "BMW - Gran Tourer" as both are BMW vehicles. + + 2. City bus: We need to find the closest matching model from the list of bus models. The description does not provide specific details about the bus, so the closest match would be "Mitsubishi - Fusorosa" from the bus models. + + 3. Scooter rider: We need to find the closest matching model from the list of motorcycle models. The description does not provide specific details about the scooter, so the closest match would be "Vespa - ZX 125" from the motorcycle models. + + 4. Bicyclist: We need to find the closest matching model from the list of bicycle models. The description does not provide specific details about the bicycle, so we must pick all eligible bicycle models "BH - Crossbike", "Diamondback - Century", or "Gazelle - Omafiets". + + 5. Pedestrian one: We need to find the closest matching model from the list of pedestrian models. The description does not provide specific details about the pedestrian, so we can choose any pedestrian model from the list. 
Since all pedestrian models are eligible, we will use all of them. + + 6. Pedestrian two: We need to find the closest matching model from the list of pedestrian models. The description does not provide specific details about the pedestrian, so we can choose any pedestrian model from the list. Since all pedestrian models are eligible, we will use all of them. + + FINAL_ANSWER: + AUTONOMOUS_VEHICLE_MODEL = "vehicle.bmw.grandtourer" + BUS_MODEL = "vehicle.mitsubishi.fusorosa" + SCOOTER_MODEL = "vehicle.vespa.zx125" + BICYCLE_MODEL = ["vehicle.bh.crossbike", "vehicle.diamondback.century", "vehicle.gazelle.omafiets"] + PEDESTRIAN_ONE_MODEL = ["walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014"] + PEDESTRIAN_TWO_MODEL = ["walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014"] + + user_question_3: | + Original description: + A Chevy Cruise autonomous vehicle, while in autonomous mode, was attempting to merge onto northbound Maple Ave from 5th Street when a bicyclist unexpectedly entered the vehicle's path, causing the vehicle to apply emergency braking. The bicyclist made minor contact with the front sensor array of the vehicle but managed to remain upright and uninjured. The vehicle sustained minimal damage to its front sensor array. No law enforcement was called to the scene, and the incident was recorded by the vehicle's onboard cameras for further analysis. 
+ + Main objects: + 1. Chevy Cruise autonomous vehicle + 2. Bicyclist + + assistant_answer_3: | + REASONING: + 1. Chevy Cruise autonomous vehicle: This is a specific model of a vehicle, so we need to find the closest matching model from the list of car models. The closest match would be "Chevrolet - Impala" as both are Chevrolet vehicles. + + 2. Bicyclist: We need to find the closest matching model from the list of bicycle models. The description does not provide specific details about the bicycle, so we must pick all eligible bicycle models "BH - Crossbike", "Diamondback - Century", or "Gazelle - Omafiets". + + FINAL_ANSWER: + AUTONOMOUS_VEHICLE_MODEL = "vehicle.chevrolet.impala" + BICYCLE_MODEL = ["vehicle.bh.crossbike", "vehicle.diamondback.century", "vehicle.gazelle.omafiets"] + + nine_b: + system_question: | + The user will input python variables that represent values that we will use for a probabilistic program. If any of the values are a list, your task is to replace the list with one of the supported probability distributions specified below. If the values are constants, leave them as is and repeat them in your answer. Use Uniform when the values all have equal probabilities; otherwise, use Discrete when some values are more likely than others. + + Distributions: + Uniform(value, …) - Uniform distribution over the values provided. To be used when there is an equal probability of all values. + Discrete({value: weight, … }) - Discrete distribution over the values provided with the given weights. To be used when some values have higher probabilities than others. The weights must add up to 1. + + Only respond with code as plain text without code block syntax around it. 
+ + user_question: | + Original description: + {description} + + Program: + {program} + + user_question_1: | + Original description: + A Chevy Cruise autonomous vehicle, while in autonomous mode, was attempting to merge onto northbound Maple Ave from 5th Street when a bicyclist unexpectedly entered the vehicle's path, causing the vehicle to apply emergency braking. The bicyclist made minor contact with the front sensor array of the vehicle but managed to remain upright and uninjured. The vehicle sustained minimal damage to its front sensor array. No law enforcement was called to the scene, and the incident was recorded by the vehicle's onboard cameras for further analysis. + + Program: + AUTONOMOUS_VEHICLE_MODEL = "vehicle.chevrolet.impala" + BICYCLE_MODEL = ["vehicle.bh.crossbike", "vehicle.diamondback.century", "vehicle.gazelle.omafiets"] + + assistant_answer_1: | + AUTONOMOUS_VEHICLE_MODEL = "vehicle.chevrolet.impala" + BICYCLE_MODEL = Discrete({"vehicle.bh.crossbike": 0.4, "vehicle.diamondback.century": 0.3, "vehicle.gazelle.omafiets": 0.3}) + + user_question_2: | + Original description: + At approximately 12:05 PM, an autonomous BMW i8 convertible was eastbound on University Avenue when it collided with a city bus running a red light at Hamilton Avenue. At the same time, a scooter rider and a bicyclist, legally crossing Hamilton, narrowly avoided the incident, while two pedestrians were nearby on the sidewalk. Despite the BMW’s attempt to avoid the collision through emergency braking and evasive maneuvers, it sustained significant front-end damage, and the bus minor damage on its right side. Three bus passengers reported minor injuries. The complex scenario, involving multiple road users, highlighted the challenges autonomous vehicles face in dynamic urban environments. The primary cause was identified as the bus driver's failure to obey the traffic signal. 
+ + Program: + AUTONOMOUS_VEHICLE_MODEL = "vehicle.bmw.grandtourer" + BUS_MODEL = "vehicle.mitsubishi.fusorosa" + SCOOTER_MODEL = "vehicle.vespa.zx125" + BICYCLE_MODEL = ["vehicle.bh.crossbike", "vehicle.diamondback.century", "vehicle.gazelle.omafiets"] + PEDESTRIAN_ONE_MODEL = ["walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014"] + PEDESTRIAN_TWO_MODEL = ["walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014"] + + assistant_answer_2: | + AUTONOMOUS_VEHICLE_MODEL = "vehicle.bmw.grandtourer" + BUS_MODEL = "vehicle.mitsubishi.fusorosa" + SCOOTER_MODEL = "vehicle.vespa.zx125" + BICYCLE_MODEL = Uniform("vehicle.bh.crossbike", "vehicle.diamondback.century", "vehicle.gazelle.omafiets") + PEDESTRIAN_ONE_MODEL = Uniform("walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014") + PEDESTRIAN_TWO_MODEL = Uniform("walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", 
"walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014") + + user_question_3: | + Original description: + An autonomous Ford Explorer SUV, operating in full autonomous mode, was navigating the ramp to merge onto Sand Hill Road amidst a heavy rainstorm. The vehicle's sensors detected the wet road conditions and adjusted speed accordingly. However, the driver claims' there was debris on the road and they adjusted accordingly. They encountered an unexpected large puddle which caused the vehicle to hydroplane, leading to a temporary loss of traction. The autonomous system attempted corrective steering maneuvers, but the vehicle ultimately made contact with the guardrail on the right side of the ramp. The collision resulted in moderate damage to the vehicle's right rear quarter panel and bumper. No injuries were reported, as the vehicle was unoccupied except for the presence of a safety operator, who did not sustain any injuries. The California Highway Patrol was notified and arrived on the scene to document the incident and assist in the vehicle's recovery. The incident highlighted the challenges faced by autonomous vehicles in severe weather conditions and the need for ongoing improvements in sensor and navigation technologies to handle such situations. + + Program: + AUTONOMOUS_VEHICLE_MODEL = "vehicle.ford.crown" + ROAD_DEBRIS_MODEL = ["static.prop.dirtdebris01", "static.prop.dirtdebris02"] + + assistant_answer_3: | + AUTONOMOUS_VEHICLE_MODEL = "vehicle.ford.crown" + ROAD_DEBRIS_MODEL = Uniform("static.prop.dirtdebris01", "static.prop.dirtdebris02") + + four_a: + question: |- + Original description: + {description} + + What details about the world and environment are missing from the description? (e.g. weather, time of day, etc.) 
+ + Each expert and the final answer should be provided in the following format: + MISSING_ENV_INFO: + + four_b: + system_question: | + For each of the relevant objects, what details about the objects are missing from the description that you would need to ask the author about in order to create a more accurate scene? What are the main environmental factors that need to be included in the scene? Your questions should cover dynamics of objects in motion (e.g. speed), distances between every pair of objects, and environmental conditions (e.g. weather). Provide your questions as a numbered list, but do not ask about personal details of any individuals involved. + user_question: | + Original description: + {description} + + Relevant objects: + {objects_from_nine} + + user_question_1: | + Original description: + An autonomous Ford Explorer SUV, operating in full autonomous mode, was navigating the ramp to merge onto Sand Hill Road amidst a heavy rainstorm. The vehicle's sensors detected the wet road conditions and adjusted speed accordingly. However, the driver claims' there was debris on the road and they adjusted accordingly. They encountered an unexpected large puddle which caused the vehicle to hydroplane, leading to a temporary loss of traction. The autonomous system attempted corrective steering maneuvers, but the vehicle ultimately made contact with the guardrail on the right side of the ramp. The collision resulted in moderate damage to the vehicle's right rear quarter panel and bumper. No injuries were reported, as the vehicle was unoccupied except for the presence of a safety operator, who did not sustain any injuries. The California Highway Patrol was notified and arrived on the scene to document the incident and assist in the vehicle's recovery. The incident highlighted the challenges faced by autonomous vehicles in severe weather conditions and the need for ongoing improvements in sensor and navigation technologies to handle such situations. 
+ + Relevant objects: + 1. Autonomous Ford Explorer SUV + 2. Road debris + + assistant_answer_1: | + Missing information: + 1. Can you provide more details about the speed at which the autonomous Ford Explorer SUV was traveling on the ramp during the heavy rainstorm? + 2. How large was the unexpected puddle that caused the vehicle to hydroplane? + 3. What type of debris was present on the road that the driver had to avoid? + 4. Were there any other vehicles in the vicinity on the ramp at the time of the incident? + 5. What specific weather conditions were present during the heavy rainstorm (e.g., visibility, wind speed)? + 6. How did the autonomous system respond to the hydroplaning situation before the collision occurred? + + user_question_2: | + Original description: + At approximately 12:05 PM, an autonomous BMW i8 convertible was eastbound on University Avenue when it collided with a city bus running a red light at Hamilton Avenue. At the same time, a scooter rider and a bicyclist, legally crossing Hamilton, narrowly avoided the incident, while two pedestrians were nearby on the sidewalk. Despite the BMW’s attempt to avoid the collision through emergency braking and evasive maneuvers, it sustained significant front-end damage, and the bus minor damage on its right side. Three bus passengers reported minor injuries. The complex scenario, involving multiple road users, highlighted the challenges autonomous vehicles face in dynamic urban environments. The primary cause was identified as the bus driver's failure to obey the traffic signal. + + Relevant objects: + 1. Autonomous BMW i8 convertible + 2. City bus + 3. Scooter rider + 4. Bicyclist + 5. Pedestrian one + 6. Pedestrian two + + assistant_answer_2: | + Missing information: + 1. What was the approximate speed of the autonomous BMW i8 convertible when it was traveling eastbound on University Avenue? + 2. Can you provide more details about the evasive maneuvers that the BMW attempted to avoid the collision? 
+ 3. How close were the scooter rider and bicyclist to the point of impact between the BMW and the city bus?
+ 4. Were there any specific actions taken by the scooter rider and bicyclist to avoid the collision?
+ 5. How far away were the two pedestrians on the sidewalk from the intersection where the collision occurred?
+ 6. What were the weather conditions like at the time of the incident?
+ 7. Were there any specific road markings or signs at the intersection of University Avenue and Hamilton Avenue that may have influenced the events leading up to the collision?
+ 8. How did the autonomous system of the BMW respond to the situation when it detected the city bus running a red light?
+ 9. Were there any traffic congestion or other vehicles around the intersection that could have affected the incident?
+
+ five:
+ system_question: |-
+ Based on the missing object information from the user, provide a reasonable probability distribution over the missing values. Answer only the questions that are about distance between objects, speed, weather, or time. For example, if the time of day is missing but you know that the scene is in the morning, you could use a normal distribution with mean 8am and standard deviation 1 hour (Normal(8, 1)). If the color of the car is missing, you could use a uniform distribution over common car color string names. If the car speed is missing, you could use a normal distribution with mean around a reasonable speed limit for area of the scene and reasonable standard deviation, etc.
+
+ First provide step-by-step reasoning as to why you choose such a distribution then provide your final answer as a numbered list.
Be concise in your reasoning (no more than 1-2 sentences per object) and specify your answer in the following format: + REASONING: + + + FINAL_ANSWER: + + + user_question: |- + Original description: + {description} + + Relevant objects: + {objects_from_nine} + + Missing object information: + {missing_object_info} + + user_question_1: | + Original description: + An autonomous Ford Explorer SUV, operating in full autonomous mode, was navigating the ramp to merge onto Sand Hill Road amidst a heavy rainstorm. The vehicle's sensors detected the wet road conditions and adjusted speed accordingly. However, the driver claims' there was debris on the road and they adjusted accordingly. They encountered an unexpected large puddle which caused the vehicle to hydroplane, leading to a temporary loss of traction. The autonomous system attempted corrective steering maneuvers, but the vehicle ultimately made contact with the guardrail on the right side of the ramp. The collision resulted in moderate damage to the vehicle's right rear quarter panel and bumper. No injuries were reported, as the vehicle was unoccupied except for the presence of a safety operator, who did not sustain any injuries. The California Highway Patrol was notified and arrived on the scene to document the incident and assist in the vehicle's recovery. The incident highlighted the challenges faced by autonomous vehicles in severe weather conditions and the need for ongoing improvements in sensor and navigation technologies to handle such situations. + + Relevant objects: + AUTONOMOUS_VEHICLE_MODEL = "vehicle.ford.crown" + ROAD_DEBRIS_MODEL = ["static.prop.dirtdebris01", "static.prop.dirtdebris02"] + + Missing information: + 1. Can you provide more details about the speed at which the autonomous Ford Explorer SUV was traveling on the ramp during the heavy rainstorm? + 2. How large was the unexpected puddle that caused the vehicle to hydroplane? + 3. 
What type of debris was present on the road that the driver had to avoid?
+ 4. Were there any other vehicles in the vicinity on the ramp at the time of the incident?
+ 5. What specific weather conditions were present during the heavy rainstorm (e.g., visibility, wind speed)?
+ 6. How did the autonomous system respond to the hydroplaning situation before the collision occurred?
+
+ assistant_answer_1: |
+ REASONING:
+ 1. The speed at which the autonomous Ford Explorer SUV was traveling on the ramp during the heavy rainstorm can be modeled using a normal distribution with a mean around the speed limit for ramps (e.g., 35-45 mph) and a reasonable standard deviation to account for variations in driving behavior and road conditions.
+
+ 2. The size of the unexpected puddle that caused the vehicle to hydroplane can be modeled using a normal distribution with a mean based on the fact that the puddle must have been at least as wide as the vehicle and on common puddle sizes on roads (e.g., 6-8 feet in diameter) and a standard deviation to capture variations in puddle sizes.
+
+ 3. The type of debris present on the road that the driver had to avoid can be modeled using a uniform distribution over the provided ROAD_DEBRIS_MODEL options ["static.prop.dirtdebris01", "static.prop.dirtdebris02"].
+
+ 4. The presence of other vehicles in the vicinity on the ramp at the time of the incident can be modeled using a Bernoulli distribution with a parameter reflecting the likelihood of other vehicles being present (e.g., low probability due to heavy rainstorm and specific location).
+
+ 5. The specific weather conditions present during the heavy rainstorm (e.g., visibility, wind speed) can be modeled using a combination of distributions such as normal distribution for visibility range and wind speed based on historical weather data for the area.
+
+ 6.
The response of the autonomous system to the hydroplaning situation before the collision occurred can be modeled as a categorical distribution with options such as "applied corrective steering maneuvers," "adjusted speed," "issued warnings to safety operator," etc. + + FINAL_ANSWER: + 1. Speed at which the autonomous Ford Explorer SUV was traveling: Normal distribution with mean around 40 mph and standard deviation 5 mph. + 2. Size of the unexpected puddle: Normal distribution with mean around 7 feet in diameter and standard deviation 0.5 feet. + 3. Type of debris present on the road: Uniform distribution over ["static.prop.dirtdebris01", "static.prop.dirtdebris02"]. + 4. Presence of other vehicles in the vicinity: Bernoulli distribution with low probability. + 5. Specific weather conditions: Modeled using appropriate distributions based on historical data. + 6. Autonomous system response to hydroplaning situation: Categorical distribution with relevant options. + + user_question_2: | + Original description: + At approximately 12:05 PM, an autonomous BMW i8 convertible was eastbound on University Avenue when it collided with a city bus running a red light at Hamilton Avenue. At the same time, a scooter rider and a bicyclist, legally crossing Hamilton, narrowly avoided the incident, while two pedestrians were nearby on the sidewalk. Despite the BMW’s attempt to avoid the collision through emergency braking and evasive maneuvers, it sustained significant front-end damage, and the bus minor damage on its right side. Three bus passengers reported minor injuries. The complex scenario, involving multiple road users, highlighted the challenges autonomous vehicles face in dynamic urban environments. The primary cause was identified as the bus driver's failure to obey the traffic signal. 
+
+ Relevant objects:
+ AUTONOMOUS_VEHICLE_MODEL = "vehicle.bmw.grandtourer"
+ BUS_MODEL = "vehicle.mitsubishi.fusorosa"
+ SCOOTER_MODEL = "vehicle.vespa.zx125"
+ BICYCLE_MODEL = ["vehicle.bh.crossbike", "vehicle.diamondback.century", "vehicle.gazelle.omafiets"]
+ PEDESTRIAN_ONE_MODEL = ["walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014"]
+ PEDESTRIAN_TWO_MODEL = ["walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014"]
+
+ Missing information:
+ 1. What was the approximate speed of the autonomous BMW i8 convertible when it was traveling eastbound on University Avenue?
+ 2. Can you provide more details about the evasive maneuvers that the BMW attempted to avoid the collision?
+ 3. How close were the scooter rider and bicyclist to the point of impact between the BMW and the city bus?
+ 4. Were there any specific actions taken by the scooter rider and bicyclist to avoid the collision?
+ 5. How far away were the two pedestrians on the sidewalk from the intersection where the collision occurred?
+ 6. What were the weather conditions like at the time of the incident?
+ 7. Were there any specific road markings or signs at the intersection of University Avenue and Hamilton Avenue that may have influenced the events leading up to the collision?
+ 8. How did the autonomous system of the BMW respond to the situation when it detected the city bus running a red light?
+ 9. Were there any traffic congestion or other vehicles around the intersection that could have affected the incident? + + assistant_answer_2: | + REASONING: + 1. The approximate speed of the autonomous BMW i8 convertible when traveling eastbound on University Avenue can be modeled using a normal distribution with a mean around the speed limit for urban roads (e.g., 25-35 mph) and a standard deviation to account for variations in driving conditions. + + 2. Details about the evasive maneuvers that the BMW attempted to avoid the collision can be modeled as a categorical distribution with options such as "emergency braking," "swerve to the left," "swerve to the right," etc. + + 3. The proximity of the scooter rider and bicyclist to the point of impact between the BMW and the city bus can be modeled using a normal distribution with a mean distance based on typical reaction times and speeds of the vehicles involved and a standard deviation to capture variations. + + 4. Specific actions taken by the scooter rider and bicyclist to avoid the collision can be modeled as a categorical distribution with options such as "braking," "changing direction," "accelerating," etc. + + 5. The distance of the two pedestrians on the sidewalk from the intersection where the collision occurred can be modeled using a normal distribution with a mean distance based on typical sidewalk widths and locations relative to intersections, with an appropriate standard deviation. + + 6. Weather conditions at the time of the incident can be modeled using appropriate distributions based on historical weather data for the area, such as visibility range, precipitation intensity, and wind speed. + + 7. Specific road markings or signs at the intersection of University Avenue and Hamilton Avenue that may have influenced the events leading up to the collision can be modeled as a categorical distribution with options like "traffic lights," "stop signs," "pedestrian crossings," etc. + + 8. 
The response of the autonomous system of the BMW when it detected the city bus running a red light can be modeled as a categorical distribution with options like "alerted safety operator," "initiated emergency braking," "attempted evasive maneuvers," etc.
+
+ 9. Traffic congestion or other vehicles around the intersection that could have affected the incident can be modeled using a Poisson distribution to estimate the number of vehicles in the vicinity.
+
+ FINAL_ANSWER:
+ 1. Approximate speed of the autonomous BMW i8 convertible: Normal distribution with mean around 30 mph and standard deviation 5 mph.
+ 2. Evasive maneuvers attempted by the BMW: Categorical distribution with relevant options.
+ 3. Proximity of scooter rider and bicyclist to point of impact: Normal distribution with appropriate parameters.
+ 4. Actions taken by scooter rider and bicyclist: Categorical distribution with relevant options.
+ 5. Distance of pedestrians on sidewalk from intersection: Normal distribution with mean distance and standard deviation.
+ 6. Weather conditions at the time: Modeled using appropriate weather data distributions.
+ 7. Influence of road markings/signs at intersection: Categorical distribution with relevant options.
+ 8. Response of BMW's autonomous system to bus running red light: Categorical distribution with relevant options.
+ 9. Traffic congestion or other vehicles around intersection: Poisson distribution for estimating vehicle presence.
+
+
+ six:
+ dist_info: |
+ Range(low, high) # Uniform distribution over the real range [low, high]
+ DiscreteRange(low, high) # Uniform distribution over the discrete integer range [low, high]
+ Normal(mean, std) # Normal distribution with mean and standard deviation
+ TruncatedNormal(mean, stdDev, low, high) # Normal distribution with mean and standard deviation truncated to the range [low, high]
+ Uniform(value, …) - Uniform distribution over the list of values provided.
+ Discrete([value: weight, … ]) - Discrete distribution over the list of values provided with the given weights (e.g., [value: 0.5, value: 0.2, value: 0.3]) + + complier_user_prompt: | + Your python program failed to compile. Please fix the syntax errors and try again. Ensure that the program is using supported distributions. If there are any unsupported distributions, replace them with the closest supported distribution. + Error message: + {error} + + Your program: + {program} + + system_prompt: | + You are a specialized agent for writing Scenic, a probabilistic programming language. + + A user will provide you with probability distributions for missing information in a vehicle crash description. Your task is to interpret the probability distributions and express them as a Scenic program. + + Scenic can only support the following distributions so you must pick the closest matching distribution. Under no circumstance should you use any of the other distributions: + {dist_info} + + You will answer the user by first providing step-by-step reasoning as to why you choose such a distribution then provide your final answer as code. Be concise in your reasoning (no more than 1-2 sentences per object). Provide your final answer as Python variables in all capital letters. Only respond with code as plain text without code block syntax around it. Answer in the following format: + REASONING: + + + FINAL_ANSWER: + + + sys_prompt: | + You are a specialized agent for writing Scenic, a probabilistic programming language. + + A user will provide you with probability distributions for missing information in a vehicle crash description. Your task is to interpret the probability distributions and express them as a Scenic program. + + Scenic can only support the following distributions so you must pick the closest matching distribution. 
Under no circumstance should you use any of the other distributions:
+ Range(low, high) - Uniform distribution over the real range [low, high]
+ DiscreteRange(low, high) - Uniform distribution over the discrete integer range [low, high]
+ Normal(mean, std) - Normal distribution with mean and standard deviation
+ TruncatedNormal(mean, stdDev, low, high) - Normal distribution with mean and standard deviation truncated to the range [low, high]
+ Uniform(value, …) - Uniform distribution over the list of values provided.
+ Discrete([value: weight, … ]) - Discrete distribution over the list of values provided with the given weights (e.g., [value: 0.5, value: 0.2, value: 0.3])
+
+ For weather, Scenic can only support a Uniform or Discrete distribution over the following values:
+ ['ClearNoon', 'CloudyNoon', 'WetNoon', 'WetCloudyNoon', 'SoftRainNoon', 'MidRainyNoon', 'HardRainNoon', 'ClearSunset', 'CloudySunset', 'WetSunset', 'WetCloudySunset', 'SoftRainSunset', 'MidRainSunset', 'HardRainSunset',
+ 'ClearNight', 'CloudyNight', 'WetNight', 'WetCloudyNight', 'SoftRainNight', 'MidRainyNight', 'HardRainNight' ,'DustStorm']
+
+ Based on the distributions and original description, define Scenic distributions over the uncertain values. Provide values for the parameters to your distributions. You may not use any of the other distributions. If you cannot find a distribution that matches the missing information, you must choose the closest matching distribution.
+
+ You will answer the user by first providing step-by-step reasoning as to why you choose such a distribution then provide your final answer as code. Be concise in your reasoning (no more than 1-2 sentences per object). Provide your final answer as Python variables in all capital letters. Only respond with code as plain text without code block syntax around it.
Answer in the following format:
+ REASONING:
+
+
+ FINAL_ANSWER:
+
+
+ system_question: |
+ A user will provide you with probability distributions for missing information. You must interpret the distributions, replace them with the closest matching Scenic distributions.
+
+ Scenic only supports the following distributions so you must only use these in your final answer:
+ Range(low, high) - Uniform distribution over the range [low, high]
+ DiscreteRange(low, high) - Uniform distribution over the discrete integer range [low, high]
+ Normal(mean, std) - Normal distribution with mean and standard deviation
+ TruncatedNormal(mean, stdDev, low, high) - Normal distribution with mean and standard deviation truncated to the range [low, high]
+ Uniform(value, …) - Uniform distribution over the values provided
+ Discrete([value: weight, … ]) - Discrete distribution over the values provided with the given weights (e.g., [value: 0.5, value: 0.2, value: 0.3])
+
+ Any other distribution (Categorical, Bernoulli, Poisson, etc.) IS NOT SUPPORTED. They CANNOT be in your FINAL_ANSWER.
Replace them with the closest matching supported distribution (Range, DiscreteRange, Normal, TruncatedNormal, Uniform, or Discrete) + + Note that for weather, you can only provide a Uniform or Discrete distribution over the following values: + ['ClearNoon', 'CloudyNoon', 'WetNoon', 'WetCloudyNoon', 'SoftRainNoon', 'MidRainyNoon', 'HardRainNoon', 'ClearSunset', 'CloudySunset', 'WetSunset', 'WetCloudySunset', 'SoftRainSunset', 'MidRainSunset', 'HardRainSunset', + 'ClearNight', 'CloudyNight', 'WetNight', 'WetCloudyNight', 'SoftRainNight', 'MidRainyNight', 'HardRainNight' ,'DustStorm'] + So for example, if the description says that the road was wet around the evening, then the weather was most likely rainy, you could use a Discrete distribution as: + WEATHER = Discrete(['WetSunset': 0.7, 'WetCloudySunset': 0.2, 'WetNight': 0.1]) + + Based on the distributions and original description, define Scenic distributions over the uncertain values. Provide values for the parameters to your distributions. DO NOT use any of the other distributions. If you cannot find a distribution that matches the missing information, you must choose the closest matching distribution. If you are unsure of the parameter values, pick the best numerical values as appropriate for the scene. + + Answer by providing step-by-step reasoning as to why you choose such a distribution. Remember to replace unsupported distributions. Then provide your final answer as code. Provide your final answer as Python variables in all capital letters. Only respond with code as plain text without code block syntax around it. 
Answer in the following format: + REASONING: + + + FINAL_ANSWER: + + + user_question: | + Original description: + {description} + + Missing information: + {missing_info} + + Distributions for missing information: + {distributions} + + user_question_1: | + Original description: + An autonomous Ford Explorer SUV, operating in full autonomous mode, was navigating the ramp to merge onto Sand Hill Road amidst a heavy rainstorm. The vehicle's sensors detected the wet road conditions and adjusted speed accordingly. However, the driver claims' there was debris on the road and they adjusted accordingly. They encountered an unexpected large puddle which caused the vehicle to hydroplane, leading to a temporary loss of traction. The autonomous system attempted corrective steering maneuvers, but the vehicle ultimately made contact with the guardrail on the right side of the ramp. The collision resulted in moderate damage to the vehicle's right rear quarter panel and bumper. No injuries were reported, as the vehicle was unoccupied except for the presence of a safety operator, who did not sustain any injuries. The California Highway Patrol was notified and arrived on the scene to document the incident and assist in the vehicle's recovery. The incident highlighted the challenges faced by autonomous vehicles in severe weather conditions and the need for ongoing improvements in sensor and navigation technologies to handle such situations. + + Relevant objects: + AUTONOMOUS_VEHICLE_MODEL = "vehicle.ford.crown" + ROAD_DEBRIS_MODEL = ["static.prop.dirtdebris01", "static.prop.dirtdebris02"] + + Missing information: + 1. Can you provide more details about the speed at which the autonomous Ford Explorer SUV was traveling on the ramp during the heavy rainstorm? + 2. How large was the unexpected puddle that caused the vehicle to hydroplane? + 3. What type of debris was present on the road that the driver had to avoid? + 4. 
Were there any other vehicles in the vicinity on the ramp at the time of the incident?
+ 5. What specific weather conditions were present during the heavy rainstorm (e.g., visibility, wind speed)?
+ 6. How did the autonomous system respond to the hydroplaning situation before the collision occurred?
+
+ assistant_answer_1: |
+ REASONING:
+ 1. For the speed of the autonomous Ford Explorer SUV during the heavy rainstorm, we can use a TruncatedNormal distribution with a mean around a reasonable speed for a ramp area and a standard deviation to capture variability while ensuring the speed is within possible values.
+ 2. To estimate the size of the unexpected puddle that caused the vehicle to hydroplane, we can use a Normal distribution with a mean and standard deviation based on common sizes of puddles that can lead to hydroplaning.
+ 3. The type of debris present on the road that the driver had to avoid can be modeled using a Uniform distribution over the provided ROAD_DEBRIS_MODEL options.
+ 4. Since there is no explicit mention of other vehicles in the vicinity, we can use a Uniform distribution to represent uncertainty about the presence of other vehicles on the ramp at the time of the incident.
+ 5. The specific weather conditions during the heavy rainstorm can be represented using a Discrete distribution over the possible weather conditions listed in Scenic for heavy rain events.
+
+ FINAL_ANSWER:
+ AUTONOMOUS_VEHICLE_SPEED = TruncatedNormal(mean=45, stdDev=7.5, low=30, high=60)
+ PUDDLE_SIZE = Normal(mean=2, std=0.5)
+ DEBRIS_TYPE = Uniform(["static.prop.dirtdebris01", "static.prop.dirtdebris02"])
+ OTHER_VEHICLES_PRESENT = Uniform("Yes", "No")
+ WEATHER_CONDITIONS = Uniform(['MidRainyNoon', 'HardRainNoon', 'WetCloudyNoon'])
+
+ user_question_2: |
+ Original description:
+ At approximately 12:05 PM, an autonomous BMW i8 convertible was eastbound on University Avenue when it collided with a city bus running a red light at Hamilton Avenue.
At the same time, a scooter rider and a bicyclist, legally crossing Hamilton, narrowly avoided the incident, while two pedestrians were nearby on the sidewalk. Despite the BMW’s attempt to avoid the collision through emergency braking and evasive maneuvers, it sustained significant front-end damage, and the bus minor damage on its right side. Three bus passengers reported minor injuries. The complex scenario, involving multiple road users, highlighted the challenges autonomous vehicles face in dynamic urban environments. The primary cause was identified as the bus driver's failure to obey the traffic signal. + + Relevant objects: + AUTONOMOUS_VEHICLE_MODEL = "vehicle.bmw.grandtourer" + BUS_MODEL = "vehicle.mitsubishi.fusorosa" + SCOOTER_MODEL = "vehicle.vespa.zx125" + BICYCLE_MODEL = ["vehicle.bh.crossbike", "vehicle.diamondback.century", "vehicle.gazelle.omafiets"] + PEDESTRIAN_ONE_MODEL = ["walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014"] + PEDESTRIAN_TWO_MODEL = ["walker.pedestrian.0001", "walker.pedestrian.0002", "walker.pedestrian.0003", "walker.pedestrian.0004", "walker.pedestrian.0005", "walker.pedestrian.0006", "walker.pedestrian.0007", "walker.pedestrian.0008", "walker.pedestrian.0009", "walker.pedestrian.0010", "walker.pedestrian.0011", "walker.pedestrian.0012", "walker.pedestrian.0013", "walker.pedestrian.0014"] + + Missing information: + 1. What was the approximate speed of the autonomous BMW i8 convertible when it was traveling eastbound on University Avenue? + 2. Can you provide more details about the evasive maneuvers that the BMW attempted to avoid the collision? + 3. 
How close were the scooter rider and bicyclist to the point of impact between the BMW and the city bus? + 4. Were there any specific actions taken by the scooter rider and bicyclist to avoid the collision? + 5. How far away were the two pedestrians on the sidewalk from the intersection where the collision occurred? + 6. What were the weather conditions like at the time of the incident? + 7. Were there any specific road markings or signs at the intersection of University Avenue and Hamilton Avenue that may have influenced the events leading up to the collision? + 8. How did the autonomous system of the BMW respond to the situation when it detected the city bus running a red light? + 9. Were there any traffic congestion or other vehicles around the intersection that could have affected the incident? + + assistant_answer_2: | + REASONING: + 1. The speed of the autonomous BMW i8 convertible traveling eastbound on University Avenue is likely within the typical speed range for urban roads. A normal distribution with a mean of 30 mph and a standard deviation of 5 mph captures this variability in driving conditions. + + 2. The evasive maneuvers attempted by the BMW to avoid the collision can be categorized into options such as emergency braking, swerving left, swerving right, etc., which fits a categorical distribution. However, Scenic does not support categorical so we can replace it with Uniform. + + 3. The proximity of the scooter rider and bicyclist to the point of impact between the BMW and the city bus is unknown except that they narrowly avoided the incident. We can model this distance between them as a Range between really close (1 foot) and close (6ft standard car width). + + 4. The actions taken by the scooter rider and bicyclist to avoid the collision can be categorized into options like braking, changing direction, accelerating, etc., fitting a categorical distribution. However, Scenic does not support categorical so we can replace it with Uniform. + + 5. 
The distance of the two pedestrians on the sidewalk from the intersection where the collision occurred can be modeled with a normal distribution. The mean distance would be based on typical sidewalk widths and locations relative to intersections, which is between 8 to 12 feet. + + FINAL_ANSWER: + SPEED_DISTRIBUTION = Normal(30, 5) # 30 mph with std 5 mph + EVASIVE_MANEUVERS_DISTRIBUTION = Uniform(["emergency braking", "swerve to the left", "swerve to the right"]) + PROXIMITY_TO_IMPACT_DISTRIBUTION = Range(1, 6) # Between 1 foot and 6 feet + ACTIONS_TAKEN_DISTRIBUTION = Uniform(["braking", "changing direction", "accelerating"]) + PEDESTRIAN_DISTANCE_DISTRIBUTION = Normal(10, 2) # Mean distance of 10 feet with std 2 feet + + seven: + question: |- + Original description: + {description} + + Based on the original description, pick from the following the best matching town. You may not choose any other town. If you cannot find a town that matches the original description, you must choose the closest matching town. Then after selecting a town, provide a high-level description (ignoring road names) of where in the town we should replicate the original description. For example, if the original description specified a highway such as US-101, provide a description about the properties of that highway, such as it is a 4 lane road. + Town07 - imitates a quiet rural community, a green landscape filled with cornfields, barns, grain silos and windmills. + Town06 - is a low density town set into a coniferous landscape exhibiting a multitude of large, 4-6 lane roads and special junctions like the Michigan Left. + Town05 - is an urban environment set into a backdrop of conifer-covered hills with a raised highway and large multilane roads and junctions. + Town04 - is a small town with a backdrop of snow-capped mountains and conifers. A multi-lane road circumnavigates the town in a "figure of 8". + Town03 - is a larger town with features of a downtown urban area. 
The map includes some interesting road network features such as a roundabout, underpasses and overpasses. The town also includes a raised metro track and a large building under construction. + Town02 - is a small town with numerous T-junctions and a variety of buildings, there are patches of coniferous trees, a park and a residential and commercial area. + Town01 - is a small town with numerous T-junctions and a variety of buildings, surrounded by coniferous trees and featuring several small bridges spanning across a river that divides the town into 2 halves. + + Each expert and the final answer should be provided in the following format: + TOWN: + + + LOCATION_IN_TOWN: + + + three: + a: + question: |- + Original description: + {description} + + What are the main events that happened in the scene? (e.g. car stopped when pedestrian crossed the street, a car was driving in a lane then switched lanes then made a left turn, etc.). Describe these events in natural language. + + Each expert and the final answer should be provided in the following format: + EVENTS: + + + b: + question: |- + Original description: + {description} + + Scenic only allows certain properties to be described in Linear Temporal Logic (LTL) formula (the end of the events or time invariant properties). So for the events that we can, describe the end of the events in LTL formula for them. Here are some examples of valid LTL formulas that are supported in Scenic: + car2 not in intersection until car1 in intersection + eventually car2 in intersection + eventually ego in intersection + (always car.speed < 30) implies (always distance to car > 10) + always not ((ego can see car1) and (ego can see car2)) + + LTL_FORMULA: + + + two: + question: |- + Original description: + {description} + + Relevant Objects: + {relevant_objects} + + Based on the relevant objects selected from the original description, what are the spacial relationships between the objects? (e.g. car is in front of pedestrian, etc.) 
Are the objects moving or stationary? Are they visible or occluded? You can only use the following terms to describe spatial relationships: in front of, behind, left of, right of, facing, ahead of, behind, visible, and not visible. + + Each expert and the final answer should be provided in the following format: + SPATIAL_RELATIONSHIPS: + + + MOVEMENT: + + + VISIBILITY: + + + eight: + question: |- + Original description: + {description} + + Relevant Objects: + {relevant_objects} + + Important Events: + {important_events} + + Here is a list of the supported behaviors in Scenic. Based on the relevant objects and important events, which behaviors do we need to use to recreate the original description? You may select more than one behavior as they are composable. If you cannot find a behavior that matches the original description, you must choose the closest matching behavior. + + Here are the only behaviors that are allowed for vehicles, buses, motorcycles, and bicycles: + behavior ConstantThrottleBehavior(x : float): + behavior DriveAvoidingCollisions(target_speed : float = 25, avoidance_threshold : float = 10): + # Drive at a target speed, avoiding collisions with other vehicles + # Throttle is off and braking is applied if the distance to the nearest vehicle is less + # than the avoidance threshold + behavior AccelerateForwardBehavior(): # Accelerate forward with throttle set to 0.5 + behavior FollowLaneBehavior(target_speed : float = 10, laneToFollow : Lane = None, is_oppositeTraffic : bool = False): + # Follow's the lane on which the vehicle is at, unless the laneToFollow is specified. + # Once the vehicle reaches an intersection, by default, the vehicle will take the straight route. + # If straight route is not available, then any available turn route will be taken, uniformly randomly. + # If turning at the intersection, the vehicle will slow down to make the turn, safely. + # This behavior does not terminate. 
A recommended use of the behavior is to accompany it with condition, + # e.g. do FollowLaneBehavior() until ... + # :param target_speed: Its unit is in m/s. By default, it is set to 10 m/s + # :param laneToFollow: If the lane to follow is different from the lane that the vehicle is on, this parameter can be used to specify that lane. By default, this variable will be set to None, which means that the vehicle will follow the lane that it is currently on. + behavior FollowTrajectoryBehavior(target_speed : float = 10, trajectory : List[Lane] = None, turn_speed : float = None): + # Follows the given trajectory. The behavior terminates once the end of the trajectory is reached. + # :param target_speed: Its unit is in m/s. By default, it is set to 10 m/s + # :param trajectory: It is a list of sequential lanes to track, from the lane that the vehicle is initially on to the lane it should end up on. + behavior TurnBehavior(trajectory : List[Lane] = None, target_speed : float = 6): + # This behavior uses a controller specifically tuned for turning at an intersection. + # This behavior is only operational within an intersection, it will terminate if the vehicle is outside of an intersection. 
+ behavior LaneChangeBehavior(laneSectionToSwitchTo : Lane, is_oppositeTraffic : bool = False, target_speed : float = 10): + # is_oppositeTraffic should be specified as True only if the laneSectionToSwitch to has + # the opposite traffic direction to the initial lane from which the vehicle started LaneChangeBehavior + + Here are the only behaviors that are allowed for pedestrians: + behavior WalkForwardBehavior(speed=0.5): + take SetWalkingDirectionAction(self.heading), SetWalkingSpeedAction(speed) + # Walk forward behavior for pedestrians by uniformly sampling either side of the sidewalk for the pedestrian to walk on + behavior WalkBehavior(maxSpeed=1.4): + take SetWalkAction(True, maxSpeed) + behavior CrossingBehavior(reference_actor, min_speed=1, threshold=10, final_speed=None): + # This behavior dynamically controls the speed of an actor that will perpendicularly (or close to) + # cross the road, so that it arrives at a spot in the road at the same time as a reference actor. + # Args: + # min_speed (float): minimum speed of the crossing actor. As this is a type of "synchronization action", + # a minimum speed is needed, to allow the actor to keep moving even if the reference actor has stopped + # threshold (float): starting distance at which the crossing actor starts moving + # final_speed (float): speed of the crossing actor after the reference one surpasses it + + Each expert and the final answer should be provided in the following format: + BEHAVIOR: + + + constants: + prompt: |- + Scenic is a probabilistic programming language for modeling the environments of autonomous cars. A Scenic program defines a distribution over scenes, configurations of physical objects and agents. Scenic can also define (probabilistic) policies for dynamic agents, allowing modeling scenarios where agents take actions over time in response to the state of the world. We use CARLA to render the scenes and simulate the agents. 
+ + Your task is to write the first part of a Scenic program, the declaration section. This section defines, objects, agents, and distributions that are used in the program. Here are a few examples: + {example_1} + + {example_2} + + {example_3} + + Please write your own declaration section from the following input. Provide only the correct Scenic program. + {objects} + {distributions} + + FINAL_ANSWER: + + + grammar: |- + root ::= talking program + talking ::= [^"FINAL_ANSWER:"]+ + program ::= ["FINAL_ANSWER:"]+ + diff --git a/src/scenicNL/common.py b/src/scenicNL/common.py index 5dbd28e..678bf49 100644 --- a/src/scenicNL/common.py +++ b/src/scenicNL/common.py @@ -1,6 +1,8 @@ +import base64 from enum import Enum import json import os +import cv2 import pinecone import random from dataclasses import dataclass @@ -27,6 +29,7 @@ class LLMPromptType(Enum): PREDICT_TOT_THEN_HYDE = "predict_tot_then_hyde" EXPERT_DISCUSSION = "expert_discussion" EXPERT_SYNTHESIS = "expert_synthesis" + COMPOSITIONAL_GBNF = "comp_gbnf" class PromptFiles(Enum): @@ -38,6 +41,7 @@ class PromptFiles(Enum): SCENIC_TUTORIAL = os.path.join(PROMPT_PATH, 'scenic_tutorial_prompt.txt') TOT_EXPERT_DISCUSSION = os.path.join(PROMPT_PATH, 'tot_questions.txt') EXPERT_SYNTHESIS = os.path.join(PROMPT_PATH, 'expert_synthesis.txt') + COMPOSITIONAL_GBNF = os.path.join(PROMPT_PATH, 'comp_gbnf_prompt.yml') @dataclass(frozen=True) @@ -286,4 +290,67 @@ def few_shot_prompt_with_rag( nat_lang_scene_des=model_input.nat_lang_scene_des, first_attempt_scenic_program=model_input.first_attempt_scenic_program, ) - return few_shot_prompt_generator(relevant_model_input, False) \ No newline at end of file + return few_shot_prompt_generator(relevant_model_input, False) + + +def sample_frames_from_video(video_path: str, num_frames : int = 10) -> List[str]: + if not os.path.isfile(video_path): + raise FileNotFoundError(f"Video file not found: {video_path}") + + # Open the video file + cap = cv2.VideoCapture(video_path) + + if not 
cap.isOpened(): + raise ValueError(f"Unable to open video file: {video_path}") + + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + # Calculate the step size between frames to sample evenly + step_size = total_frames // num_frames + + frames = [] + + # Loop through the video and sample frames at the calculated step size + for i in range(num_frames): + frame_idx = (i + 1) * step_size + cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx) + ret, frame = cap.read() + + if ret: + frames.append(frame) + + # Release the video capture object + cap.release() + + return frames + + +def encode_saved_image(image_path): + with open(image_path, "rb") as image_file: + return base64.b64encode(image_file.read()).decode('utf-8') + + +def encode_image(image): + return base64.b64encode(image).decode('utf-8') + + +LOCAL_MODEL_ENDPOINT = "http://127.0.0.1:8080/completion" +LOCAL_MODEL_DEFAULT_PARAMS = { + "cache_prompt": False, + "image_data": [], + "mirostat": 0, + "mirostat_eta": 0.1, + "mirostat_tau": 5, + "n_predict": -1, + "n_probs": 2, + "presence_penalty": 0, + "repeat_last_n": 241, + "repeat_penalty": 1.18, + "slot_id": 0, + # "stop": ["Question:", "Answer:"], + #"stream": False, + "tfs_z": 1, + "top_k": 40, + "top_p": 0.5, + "typical_p": 1, +} \ No newline at end of file diff --git a/src/scenicNL/constraints/gbnf_decoding.py b/src/scenicNL/constraints/gbnf_decoding.py new file mode 100644 index 0000000..c021a8e --- /dev/null +++ b/src/scenicNL/constraints/gbnf_decoding.py @@ -0,0 +1,655 @@ + + +from typing import Dict, List +import httpx +from openai import OpenAI +import os +import requests + +import yaml +import scenic +from scenicNL.adapters.anthropic_adapter import AnthropicAdapter, AnthropicModel +from scenicNL.adapters.openai_adapter import OpenAIAdapter, OpenAIModel +from scenicNL.common import LOCAL_MODEL_DEFAULT_PARAMS, LOCAL_MODEL_ENDPOINT, ModelInput, PromptFiles +import tempfile + +class CompositionalScenic(): + + DEFAULT_PARAMS = { + "cache_prompt": True, + 
"image_data": [], + "mirostat": 0, + "mirostat_eta": 0.1, + "mirostat_tau": 5, + "n_predict": -1, + "n_probs": 0, + "presence_penalty": 0, + "repeat_last_n": 241, + "repeat_penalty": 1.18, + "slot_id": 0, + "temperature": 0.7, + "tfs_z": 1, + "top_k": 40, + "top_p": 0.9, + "typical_p": 1, +} + + def __init__(self): + super().__init__() + self.mixtral = OpenAI( + base_url="http://localhost:8080/v1", + api_key="sk-no-key-required" + ) + self.coder = OpenAI( + base_url="http://localhost:8079/v1", + api_key="sk-no-key-required" + ) + self.anthropic = AnthropicAdapter(model=AnthropicModel.CLAUDE_3_MEDIUM, use_index=False) + self.gpt = OpenAIAdapter(model=OpenAIModel.GPT_35_TURBO_16k, use_index=False) + + + def _query_and_parse_for_final_answer( + self, + messages: List[Dict], + temperature: float, + question: str, + grammar: str, + coding: bool, + verbose: bool, + ) -> str: + response = None + if coding: + # response = self.coder.chat.completions.create( + # model="gpt-3.5-turbo", + # messages=messages, + # temperature=temperature, + # grammar=grammar, + # ) + response = httpx.post( + "http://localhost:8079/v1/chat/completions", + json={ + "model": "wizardCoder33B", + "messages": messages, + "temperature": temperature, + # "grammar": grammar, + "max_tokens": 1000, + "top_p": 1, + "frequency_penalty": 0.1, + "presence_penalty": 0, + }, + timeout=1000 + ).json() + else: + # response = self.mixtral.chat.completions.create( + # model="LLama_CPP", + # messages=messages, + # temperature=temperature, + # grammar=grammar, + # ) + response = requests.post( + "http://localhost:8080/v1/chat/completions", + json={ + "model": "mixtral-8x7b", + "messages": messages, + "temperature": temperature, + "grammar": grammar, + "max_tokens": 1000, + "top_p": 1, + "frequency_penalty": 0.1, + "presence_penalty": 0, + }, + timeout=1000 + ).json() + answer = response['choices'][0]['message']['content'] + if verbose: + print(f"--- BEGIN: Question {question} Response ---") + print(answer) + 
print(f"--- END: Question {question} Response ---") + + # the answer is provided as 3 experts and 1 final where each of them + # says JUSTIFICATION and FINAL_ANSWER + # we just want the FINAL_ANSWER of the final + answer = answer.split("FINAL_ANSWER:")[-1] + + return answer + + + def validate_with_feedback(self, program: str) -> None: + for _ in range(3): + with tempfile.NamedTemporaryFile(mode="w", delete=False) as f: + f.write(program) + temp_file_path = f.name + try: + scenic.scenarioFromFile(temp_file_path, mode2D=True) + os.remove(temp_file_path) + return program + except Exception as e: + feedback = str(e) + prompt = f"The following Scenic program is invalid: {program}\n\n" + prompt += f"Given the following compiler feedback: {feedback}\n\n" + prompt += "Please correct the program to make it valid. Do not include any other text." + prompt += "Your response will be taken as your corrected program." + new_program = openai.ChatCompletion.create( + model="LLama_CPP", + messages=prompt, + ) + new_program = new_program.choices[0].message.content + program = new_program + + + + + def _step_one( + self, + model_input: ModelInput, + temperature: float, + system_message: dict, + questions: dict, + verbose: bool, + ) -> str: + question_num = "one" + messages = self._get_messages(questions, question_num) + messages[-1]["content"] = messages[-1]["content"].format( + description=model_input.nat_lang_scene_des + ) + # TODO: re-enable gpt + answer = self.gpt.predict(messages=messages) + # after a few tests, it seems that GPT_35_TURBO does much better than mixtral here + # mixtral_answer = httpx.post( + # "http://localhost:8080/v1/chat/completions", + # json={ + # "model": "mixtral-8x7b", + # "messages": messages, + # "temperature": temperature, + # "max_tokens": 1000, + # "top_p": 1, + # "frequency_penalty": 0.1, + # "presence_penalty": 0, + # "grammar": questions["single_answer_grammar"], + # }, + # timeout=1000 + # ).json() + # mixtral_answer = 
mixtral_answer['choices'][0]['message']['content'] + if verbose: + print(f"--- BEGIN: Question {question_num} Response ---") + print("GPT answer:") + print(answer) + # print("\nMixTral answer:") + # print(mixtral_answer) + print(f"--- END: Question {question_num} Response ---") + + answer = answer.split("FINAL_ANSWER:")[-1].strip() + # mixtral_answer = mixtral_answer.split("FINAL_ANSWER:")[-1].strip() + + return answer + + + def _get_messages( + self, + questions: dict, + get_objs_q_num: str, + num_examples: int = 3, + ) -> List[Dict]: + user_message = { + "role": "user", + "content": questions[get_objs_q_num]["user_question"], + } + system_msg = { + "role": "system", + "content": questions[get_objs_q_num]["system_question"], + } + example_question_1 = { + "role": "user", + "content": questions[get_objs_q_num]["user_question_1"] + } + assistant_answer_1 = { + "role": "assistant", + "content": questions[get_objs_q_num]["assistant_answer_1"] + } + if num_examples == 1: + messages = [ + system_msg, + example_question_1, + assistant_answer_1, + user_message + ] + return messages + example_question_2 = { + "role": "user", + "content": questions[get_objs_q_num]["user_question_2"] + } + assistant_answer_2 = { + "role": "assistant", + "content": questions[get_objs_q_num]["assistant_answer_2"] + } + if num_examples == 2: + messages = [ + system_msg, + example_question_1, + assistant_answer_1, + example_question_2, + assistant_answer_2, + user_message + ] + return messages + example_question_3 = { + "role": "user", + "content": questions[get_objs_q_num]["user_question_3"] + } + assistant_answer_3 = { + "role": "assistant", + "content": questions[get_objs_q_num]["assistant_answer_3"] + } + messages = [ + system_msg, + example_question_1, + assistant_answer_1, + example_question_2, + assistant_answer_2, + example_question_3, + assistant_answer_3, + user_message + ] + + return messages + + + def _build_constants( + self, + model_input: ModelInput, + temperature: float, + 
system_message: dict, + objects: str, + questions: dict, + verbose: bool, + ) -> str: + # get_objs_q_num = "nine" + # single_question_grammar = questions["single_answer_grammar"] + # messages = self._get_messages(questions, get_objs_q_num) + # messages[-1]["content"] = messages[-1]["content"].format( + # description=model_input.nat_lang_scene_des, + # objects_from_one=objects + # ) + # response = self.gpt.predict(messages=messages) + # # local_response = self._query_and_parse_for_final_answer( + # # messages=messages, + # # temperature=temperature, + # # question=get_objs_q_num, + # # grammar=single_question_grammar, + # # coding=True, + # # verbose=verbose, + # # ) + # objects = response.split("FINAL_ANSWER:")[-1].strip() + # other_objects = local_response.split("FINAL_ANSWER:")[-1].strip() + + # now in case there are any lists of objects, we need to convert them to probability distributions + get_objs_q_num = "nine_b" + messages = self._get_messages(questions, get_objs_q_num) + messages[-1]["content"] = messages[-1]["content"].format( + description=model_input.nat_lang_scene_des, + program=objects + ) + + response = self.gpt.predict(messages=messages) + + current_program = f"# Scenic Program for the description:\n\"\"\"{model_input.nat_lang_scene_des}\"\"\"\n\n\n" + current_program += f"{objects}\n\n\n" + + """ + objects_list = objects.split("\n") + objects_list = map(lambda x: x.strip(), objects_list) + objects_list = [obj for obj in objects_list if obj != ""] + program_objects = "" + for object in objects_list: + object = object.strip() + user_message = { + "role": "user", + "content": questions[get_objs_q_num]["question"].format(objects_from_one=object), + } + single_question_grammar = questions["single_answer_grammar"] + messages = [user_message] + program_object = self._query_and_parse_for_final_answer( + messages=messages, + temperature=temperature, + verbose=verbose, + question=get_objs_q_num, + grammar=single_question_grammar, + ) + program_object = 
program_object.strip() + program_objects = program_objects + program_object + "\n" + + self.validate_with_feedback(current_program) + """ + + # Question 4 b + get_objs_q_num = "four_b" + messages = self._get_messages(questions, get_objs_q_num, num_examples=2) + messages[-1]["content"] = messages[-1]["content"].format( + description=model_input.nat_lang_scene_des, + objects_from_nine=objects + ) + missing_objects = self.gpt.predict(messages=messages) + + # Question 5 + generic_dists_q_num = "five" + messages = self._get_messages(questions, generic_dists_q_num, num_examples=2) + messages[-1]["content"] = messages[-1]["content"].format( + description=model_input.nat_lang_scene_des, + objects_from_nine=objects, + missing_object_info=missing_objects + ) + generic_dists = self.gpt.predict(messages=messages) + # generic_dists = generic_dists.split("FINAL_ANSWER:")[-1].strip() + + scenic_dists_q_num = "six" + messages = self._get_messages(questions, scenic_dists_q_num, num_examples=2) + messages[-1]["content"] = messages[-1]["content"].format( + description=model_input.nat_lang_scene_des, + missing_info=missing_objects, + distributions=generic_dists + ) + # scenic_dists = self._query_and_parse_for_final_answer( + # messages=messages, + # temperature=temperature, + # verbose=verbose, + # question=scenic_dists_q_num, + # ) + scenic_dists = self.gpt.predict(messages=messages) + program_dists = scenic_dists.split("FINAL_ANSWER:")[-1].strip() + updated_program = self.compile_scenic_program( + questions["complier_system_prompt"].format( + dist_info=questions[scenic_dists_q_num]["dist_info"], + ), + questions[scenic_dists_q_num]["complier_user_prompt"], + program_dists, + temperature, + verbose + ) + + current_program += "\n# PARAMETERS:\n" + updated_program + + return current_program + + + def get_dynamic_objects( + self, + model_input: ModelInput, + questions: dict, + temperature: float, + verbose: bool, + ) -> str: + temperature = 0.23 + messages=[ + { + "role": "system", + 
"content": questions["objects"]["dynamic"]["system_prompt"].format( + object_info=questions["objects"]["dynamic"]["object_info"] + ) + }, + { + "role": "user", + "content": questions["examples"]["nat_lang_descriptions"]["one"] + }, + { + "role": "assistant", + "content": questions["objects"]["dynamic"]["assistant_answer_1"] + }, + { + "role": "user", + "content": questions["examples"]["nat_lang_descriptions"]["two"] + }, + { + "role": "assistant", + "content": questions["objects"]["dynamic"]["assistant_answer_2"] + }, + { + "role": "user", + "content": questions["examples"]["nat_lang_descriptions"]["three"] + }, + { + "role": "assistant", + "content": questions["objects"]["dynamic"]["assistant_answer_3"] + }, + { + "role": "user", + "content": questions["examples"]["nat_lang_descriptions"]["four"] + }, + { + "role": "assistant", + "content": questions["objects"]["dynamic"]["assistant_answer_4"] + }, + { + "role": "user", + "content": questions["examples"]["nat_lang_descriptions"]["five"] + }, + { + "role": "assistant", + "content": questions["objects"]["dynamic"]["assistant_answer_5"] + }, + { + "role": "user", + "content": model_input.nat_lang_scene_des + } + ] + + response = self.gpt.predict( + messages=messages, + temperature=temperature, + ) + + if verbose: + print(f"Response: {response}") + + response = response.split("My answer is:")[-1].strip() + return response + + + def get_static_objects( + self, + model_input: ModelInput, + questions: dict, + temperature: float, + verbose: bool, + ) -> str: + temperature = 0.23 + messages=[ + { + "role": "system", + "content": questions["objects"]["static"]["system_prompt"].format( + object_info=questions["objects"]["static"]["object_info"] + ) + }, + { + "role": "user", + "content": questions["examples"]["nat_lang_descriptions"]["one"] + }, + { + "role": "assistant", + "content": questions["objects"]["static"]["assistant_answer_1"] + }, + { + "role": "user", + "content": 
questions["examples"]["nat_lang_descriptions"]["two"] + }, + { + "role": "assistant", + "content": questions["objects"]["static"]["assistant_answer_2"] + }, + { + "role": "user", + "content": questions["examples"]["nat_lang_descriptions"]["three"] + }, + { + "role": "assistant", + "content": questions["objects"]["static"]["assistant_answer_3"] + }, + { + "role": "user", + "content": questions["examples"]["nat_lang_descriptions"]["four"] + }, + { + "role": "assistant", + "content": questions["objects"]["static"]["assistant_answer_4"] + }, + { + "role": "user", + "content": questions["examples"]["nat_lang_descriptions"]["five"] + }, + { + "role": "assistant", + "content": questions["objects"]["static"]["assistant_answer_5"] + }, + { + "role": "user", + "content": model_input.nat_lang_scene_des + } + ] + + response = self.gpt.predict( + messages=messages, + temperature=temperature, + ) + + if verbose: + print(f"Response: {response}") + + response = response.split("My answer is:")[-1].strip() + + # TODO: complie check the response + + + return response + + + def compile_scenic_program( + self, + system_prompt: str, + user_prompt: str, + scenic_program: str, + temperature: float, + verbose: bool, + ) -> str: + for _ in range(5): + try: + with tempfile.NamedTemporaryFile(mode="w", delete=True, suffix=".scenic") as f: + f.write(scenic_program) + f.flush() + temp_file_path = f.name + scenic.scenarioFromFile(temp_file_path, mode2D=True) + + return scenic_program + except Exception as e: + if verbose: + print("Error compiling Scenic program.") + print(e) + + messages = [ + { + "role": "system", + "content": system_prompt + }, + { + "role": "user", + "content": user_prompt.format( + error=str(e), + program=scenic_program + ) + }, + ] + + response = self.gpt.predict( + messages=messages, + temperature=temperature, + ) + + if verbose: + print(f"Updated scenic program:\n{response}") + + scenic_program = response + + print("Error compiling Scenic program.") + print(e) + 
print(scenic_program) + print("Retrying...") + + # We could not compile the program so return the original + # TODO: do a GPT-Judge thing here and ask which was the best then return that one? + return scenic_program + + + + def compositionally_construct_scenic_program( + self, + model_input: ModelInput, + temperature: float, + max_tokens: int, + verbose: bool, + ) -> str: + """ + Constructs a scenic program by parts + """ + questions = None + with open(PromptFiles.COMPOSITIONAL_GBNF.value, "r") as f: + contents = f.read() + questions = yaml.safe_load(contents) + + system_message = { + "role": "system", + "content": questions["context"], + } + + # Step 1: getting the objects + # objects = self._step_one( + # model_input=model_input, + # temperature=temperature, + # system_message=system_message, + # questions=questions, + # verbose=verbose, + # ) + + dynamic_objects = self.get_dynamic_objects( + model_input=model_input, + temperature=temperature, + questions=questions, + verbose=verbose, + ) + + dynamic_objects = self.compile_scenic_program( + system_prompt=questions["complier_system_prompt"], + user_prompt=questions["complier_user_prompt"], + scenic_program=dynamic_objects, + temperature=temperature, + verbose=verbose + ) + + static_objects = self.get_static_objects( + model_input=model_input, + temperature=temperature, + questions=questions, + verbose=verbose, + ) + + static_objects = self.compile_scenic_program( + system_prompt=questions["objects"]["complier_system_prompt"], + user_prompt=questions["objects"]["complier_user_prompt"], + scenic_program=static_objects, + temperature=temperature, + verbose=verbose + ) + + objects = "# Dynamic Objects:\n" + dynamic_objects + "\n\n# Static Objects:\n" + static_objects + + # Step 2: write the constants and variables section of the scenic program + constants = self._build_constants( + model_input=model_input, + temperature=temperature, + system_message=system_message, + objects=objects, + questions=questions, + 
verbose=verbose, + ) + + # Step 3: + + +