Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ The code has been verified to work under `Python 3.10.13` with the following dep
- google.generativeai (0.1.0)
- immutabledict (3.0.0)
- openai (0.27.2)
- boto3 (1.34.162)
```

## Usage
Expand Down
23 changes: 23 additions & 0 deletions opro/evaluation/evaluate_instructions.py
Original file line number Diff line number Diff line change
Expand Up @@ -269,6 +269,29 @@ def main(_):
scorer_llm_dict.update(scorer_finetuned_palm_dict)
call_scorer_server_func = call_scorer_finetuned_palm_server_func

elif scorer_llm_name.startswith("bedrock"):
# Amazon Bedrock models
scorer_bedrock_max_decode_steps = 1024
scorer_bedrock_temperature = 0.0

scorer_bedrock_dict = dict()
scorer_bedrock_dict["max_decode_steps"] = scorer_bedrock_max_decode_steps
scorer_bedrock_dict["temperature"] = scorer_bedrock_temperature
scorer_bedrock_dict["num_decodes"] = 1
scorer_bedrock_dict["batch_size"] = 1
scorer_bedrock_dict["num_servers"] = 1

scorer_llm_dict = {
"model_type": scorer_llm_name,
}
scorer_llm_dict.update(scorer_bedrock_dict)
call_scorer_server_func = functools.partial(
prompt_utils.call_amazon_bedrock_func,
model=scorer_llm_name.split("/")[-1],
max_decode_steps=scorer_bedrock_max_decode_steps,
temperature=scorer_bedrock_temperature,
)

else:
# GPT models
assert scorer_llm_name.lower() in {"gpt-3.5-turbo", "gpt-4"}
Expand Down
55 changes: 51 additions & 4 deletions opro/optimization/optimize_instructions.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,12 +160,12 @@ def main(_):
"text-bison",
"gpt-3.5-turbo",
"gpt-4",
}
} or scorer_llm_name.startswith("bedrock")
assert optimizer_llm_name in {
"text-bison",
"gpt-3.5-turbo",
"gpt-4",
}
} or optimizer_llm_name.startswith("bedrock")
assert meta_prompt_type in {
"both_instructions_and_exemplars",
"instructions_only",
Expand All @@ -191,6 +191,10 @@ def main(_):
if scorer_llm_name in {"gpt-3.5-turbo", "gpt-4"}:
assert openai_api_key, "The OpenAI API key must be provided."
openai.api_key = openai_api_key
elif scorer_llm_name.startswith("bedrock"):
# For more information on how to configure boto3 credentials, see
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
pass
else:
assert scorer_llm_name == "text-bison"
assert (
Expand All @@ -201,6 +205,8 @@ def main(_):
if optimizer_llm_name in {"gpt-3.5-turbo", "gpt-4"}:
assert openai_api_key, "The OpenAI API key must be provided."
openai.api_key = openai_api_key
elif optimizer_llm_name.startswith("bedrock"):
pass
else:
assert optimizer_llm_name == "text-bison"
assert (
Expand Down Expand Up @@ -274,6 +280,29 @@ def main(_):
scorer_llm_dict.update(scorer_finetuned_palm_dict)
call_scorer_server_func = call_scorer_finetuned_palm_server_func

elif scorer_llm_name.startswith("bedrock"):
# Amazon Bedrock models
scorer_bedrock_max_decode_steps = 1024
scorer_bedrock_temperature = 0.0

scorer_bedrock_dict = dict()
scorer_bedrock_dict["max_decode_steps"] = scorer_bedrock_max_decode_steps
scorer_bedrock_dict["temperature"] = scorer_bedrock_temperature
scorer_bedrock_dict["num_decodes"] = 1
scorer_bedrock_dict["batch_size"] = 1
scorer_bedrock_dict["num_servers"] = 1

scorer_llm_dict = {
"model_type": scorer_llm_name,
}
scorer_llm_dict.update(scorer_bedrock_dict)
call_scorer_server_func = functools.partial(
prompt_utils.call_amazon_bedrock_func,
model=scorer_llm_name.split("/")[-1],
max_decode_steps=scorer_bedrock_max_decode_steps,
temperature=scorer_bedrock_temperature,
)

else:
assert scorer_llm_name.lower() in {"gpt-3.5-turbo", "gpt-4"}
scorer_gpt_max_decode_steps = 1024
Expand Down Expand Up @@ -335,6 +364,22 @@ def main(_):
optimizer_llm_dict.update(optimizer_finetuned_palm_dict)
call_optimizer_server_func = call_optimizer_finetuned_palm_server_func

elif optimizer_llm_name.startswith("bedrock"):
optimizer_bedrock_max_decode_steps = 512
optimizer_bedrock_temperature = 1.0

optimizer_llm_dict = dict()
optimizer_llm_dict["max_decode_steps"] = optimizer_bedrock_max_decode_steps
optimizer_llm_dict["temperature"] = optimizer_bedrock_temperature
optimizer_llm_dict["batch_size"] = 1
optimizer_llm_dict["num_decodes"] = 1
call_optimizer_server_func = functools.partial(
prompt_utils.call_amazon_bedrock_func,
model=optimizer_llm_name.split("/")[-1],
max_decode_steps=optimizer_bedrock_max_decode_steps,
temperature=optimizer_bedrock_temperature,
)

else:
assert optimizer_llm_name in {"gpt-3.5-turbo", "gpt-4"}
optimizer_gpt_max_decode_steps = 512
Expand Down Expand Up @@ -679,14 +724,16 @@ def main(_):
)

# ========== set other optimization experiment hyperparameters ==============
if scorer_llm_name == "text-bison":
if scorer_llm_name == "text-bison" or \
scorer_llm_name.startswith("bedrock"):
old_instruction_score_threshold = 0.0
# old_instruction_score_threshold = 0.15 # for GSM8K
else:
assert scorer_llm_name in {"gpt-3.5-turbo", "gpt-4"}
old_instruction_score_threshold = 0.3

if scorer_llm_name == "text-bison":
if scorer_llm_name == "text-bison" or \
scorer_llm_name.startswith("bedrock"):
extract_final_answer_by_prompting_again = False
include_qa = False
evaluate_in_parallel = False
Expand Down
19 changes: 18 additions & 1 deletion opro/optimization/optimize_linear_regression.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,13 +75,15 @@ def main(_):
"text-bison",
"gpt-3.5-turbo",
"gpt-4",
}
} or optimizer_llm_name.startswith("bedrock")
openai_api_key = _OPENAI_API_KEY.value
palm_api_key = _PALM_API_KEY.value

if optimizer_llm_name in {"gpt-3.5-turbo", "gpt-4"}:
assert openai_api_key, "The OpenAI API key must be provided."
openai.api_key = openai_api_key
elif optimizer_llm_name.startswith("bedrock"):
pass
else:
assert optimizer_llm_name == "text-bison"
assert (
Expand Down Expand Up @@ -139,6 +141,21 @@ def main(_):
optimizer_llm_dict.update(optimizer_finetuned_palm_dict)
call_optimizer_server_func = call_optimizer_finetuned_palm_server_func

elif optimizer_llm_name.startswith("bedrock"):
optimizer_bedrock_max_decode_steps = 1024
optimizer_bedrock_temperature = 1.0

optimizer_llm_dict = dict()
optimizer_llm_dict["max_decode_steps"] = optimizer_bedrock_max_decode_steps
optimizer_llm_dict["temperature"] = optimizer_bedrock_temperature
optimizer_llm_dict["batch_size"] = 1
call_optimizer_server_func = functools.partial(
prompt_utils.call_amazon_bedrock_func,
model=optimizer_llm_name.split("/")[-1],
max_decode_steps=optimizer_bedrock_max_decode_steps,
temperature=optimizer_bedrock_temperature,
)

else:
assert optimizer_llm_name in {"gpt-3.5-turbo", "gpt-4"}
optimizer_gpt_max_decode_steps = 1024
Expand Down
19 changes: 18 additions & 1 deletion opro/optimization/optimize_tsp.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,13 +78,15 @@ def main(_):
"text-bison",
"gpt-3.5-turbo",
"gpt-4",
}
} or optimizer_llm_name.startswith("bedrock")
openai_api_key = _OPENAI_API_KEY.value
palm_api_key = _PALM_API_KEY.value

if optimizer_llm_name in {"gpt-3.5-turbo", "gpt-4"}:
assert openai_api_key, "The OpenAI API key must be provided."
openai.api_key = openai_api_key
elif optimizer_llm_name.startswith("bedrock"):
pass
else:
assert optimizer_llm_name == "text-bison"
assert (
Expand Down Expand Up @@ -142,6 +144,21 @@ def main(_):
optimizer_llm_dict.update(optimizer_finetuned_palm_dict)
call_optimizer_server_func = call_optimizer_finetuned_palm_server_func

elif optimizer_llm_name.startswith("bedrock"):
optimizer_bedrock_max_decode_steps = 1024
optimizer_bedrock_temperature = 1.0

optimizer_llm_dict = dict()
optimizer_llm_dict["max_decode_steps"] = optimizer_bedrock_max_decode_steps
optimizer_llm_dict["temperature"] = optimizer_bedrock_temperature
optimizer_llm_dict["batch_size"] = 1
call_optimizer_server_func = functools.partial(
prompt_utils.call_amazon_bedrock_func,
model=optimizer_llm_name.split("/")[-1],
max_decode_steps=optimizer_bedrock_max_decode_steps,
temperature=optimizer_bedrock_temperature,
)

else:
assert optimizer_llm_name in {"gpt-3.5-turbo", "gpt-4"}
optimizer_gpt_max_decode_steps = 1024
Expand Down
61 changes: 61 additions & 0 deletions opro/prompt_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
import time
import google.generativeai as palm
import openai
import boto3


def call_openai_server_single_prompt(
Expand Down Expand Up @@ -130,3 +131,63 @@ def call_palm_server_from_cloud(
return call_palm_server_from_cloud(
input_text, max_decode_steps=max_decode_steps, temperature=temperature
)


def call_amazon_bedrock_single_prompt(
    prompt,
    model="anthropic.claude-3-sonnet-20240229-v1:0",
    max_decode_steps=20,
    temperature=0.8,
):
  """Makes a single call to Amazon Bedrock via the Converse API.

  Retries (recursively) on service throttling/timeout/unavailability and on
  connection-level OS errors.

  Args:
    prompt: the user prompt text to send to the model.
    model: the Bedrock model ID to invoke.
    max_decode_steps: maximum number of tokens to generate.
    temperature: sampling temperature.

  Returns:
    The model's text response (first content item of the output message).
  """
  bedrock_runtime = boto3.client("bedrock-runtime")
  try:
    response = bedrock_runtime.converse(
        modelId=model,
        inferenceConfig={
            "temperature": temperature,
            "maxTokens": max_decode_steps,
        },
        messages=[{"role": "user", "content": [{"text": prompt}]}],
    )
    return response["output"]["message"]["content"][0]["text"]

  except (
      bedrock_runtime.exceptions.ModelTimeoutException,
      bedrock_runtime.exceptions.ThrottlingException,
      bedrock_runtime.exceptions.ServiceUnavailableException,
  ) as e:
    # Honor a service-suggested backoff when the exception carries one.
    retry_time = e.retry_after if hasattr(e, "retry_after") else 30
    print(f"{e} occurred. Retrying in {retry_time} seconds...")
    time.sleep(retry_time)
    # Bug fix: forward `model` so retries keep using the caller's model
    # instead of silently falling back to the default model ID.
    return call_amazon_bedrock_single_prompt(
        prompt,
        model=model,
        max_decode_steps=max_decode_steps,
        temperature=temperature,
    )

  except OSError as e:
    retry_time = 5  # Adjust the retry time as needed
    print(
        f"Connection error occurred: {e}. Retrying in {retry_time} seconds..."
    )
    time.sleep(retry_time)
    # Same fix as above: preserve the requested model across retries.
    return call_amazon_bedrock_single_prompt(
        prompt,
        model=model,
        max_decode_steps=max_decode_steps,
        temperature=temperature,
    )


def call_amazon_bedrock_func(
    inputs, model="anthropic.claude-3-sonnet-20240229-v1:0", max_decode_steps=20, temperature=0.8
):
  """Calls Amazon Bedrock on one string or a list of strings.

  A bare string is treated as a single-element batch. Returns the list of
  model responses, one per input, in order.
  """
  prompts = [inputs] if isinstance(inputs, str) else inputs
  return [
      call_amazon_bedrock_single_prompt(
          prompt,
          model=model,
          max_decode_steps=max_decode_steps,
          temperature=temperature,
      )
      for prompt in prompts
  ]