Changes from all commits
Commits
20 commits
2a44aa3
Merge pull request #34 from Azure/main
0xm00n Nov 3, 2025
23cd05a
FEAT: Add Sorry-Bench dataset integration
0xm00n Nov 3, 2025
a4e2de9
Address reviewer feedback: update prompt_style API and add input vali…
0xm00n Nov 3, 2025
1603279
FEAT: Add Anthropic model-written-evals dataset (#450)
0xm00n Nov 5, 2025
cee6223
Merge branch 'Azure:main' into feat/anthropic-model-written-evals-dat…
0xm00n Nov 5, 2025
3c3d27b
Update pyrit/datasets/anthropic_evals_dataset.py
0xm00n Nov 7, 2025
9f6bd77
Update pyrit/datasets/sorry_bench_dataset.py
0xm00n Nov 7, 2025
246723e
get rid of redundant error block
0xm00n Nov 7, 2025
ebc3345
Merge branch 'main' into feat/anthropic-model-written-evals-dataset
0xm00n Nov 7, 2025
7ef1b1d
Merge branch 'main' into feat/anthropic-model-written-evals-dataset
0xm00n Nov 9, 2025
569033f
precommit fixes
0xm00n Nov 12, 2025
c411244
Merge branch 'main' into feat/anthropic-model-written-evals-dataset
0xm00n Nov 12, 2025
7f85e30
Merge branch 'main' into feat/anthropic-model-written-evals-dataset
0xm00n Nov 12, 2025
15395bb
Merge branch 'main' into feat/anthropic-model-written-evals-dataset
romanlutz Nov 13, 2025
8f4c44b
Merge branch 'main' into feat/anthropic-model-written-evals-dataset
0xm00n Nov 13, 2025
e6f3318
fix category loading for winogenerated and advanced-ai-risk. winogene…
0xm00n Nov 14, 2025
c9225a9
Merge branch 'main' into feat/anthropic-model-written-evals-dataset
0xm00n Nov 14, 2025
9037ee7
Merge branch 'main' into feat/anthropic-model-written-evals-dataset
0xm00n Nov 21, 2025
6d00ba1
Refactor anthropic evals dataset to use QuestionAnsweringDataset stru…
0xm00n Nov 21, 2025
5fd45a1
Merge branch 'main' into feat/anthropic-model-written-evals-dataset
0xm00n Nov 21, 2025
1 change: 1 addition & 0 deletions doc/api.rst
@@ -124,6 +124,7 @@ API Reference
    :toctree: _autosummary/

    fetch_adv_bench_dataset
    fetch_anthropic_evals_dataset
    fetch_aya_redteaming_dataset
    fetch_babelscape_alert_dataset
    fetch_ccp_sensitive_prompts_dataset
2 changes: 2 additions & 0 deletions pyrit/datasets/__init__.py
@@ -6,6 +6,7 @@
"""

from pyrit.datasets.adv_bench_dataset import fetch_adv_bench_dataset
from pyrit.datasets.anthropic_evals_dataset import fetch_anthropic_evals_dataset
from pyrit.datasets.aya_redteaming_dataset import fetch_aya_redteaming_dataset
from pyrit.datasets.babelscape_alert_dataset import fetch_babelscape_alert_dataset
from pyrit.datasets.darkbench_dataset import fetch_darkbench_dataset
@@ -43,6 +44,7 @@

__all__ = [
    "fetch_adv_bench_dataset",
    "fetch_anthropic_evals_dataset",
    "fetch_aya_redteaming_dataset",
    "fetch_babelscape_alert_dataset",
    "fetch_ccp_sensitive_prompts_dataset",
198 changes: 198 additions & 0 deletions pyrit/datasets/anthropic_evals_dataset.py
@@ -0,0 +1,198 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import logging
import re
from pathlib import Path
from typing import Dict, List, Optional

import requests

from pyrit.datasets.dataset_helper import fetch_examples
from pyrit.models import (
    QuestionAnsweringDataset,
    QuestionAnsweringEntry,
    QuestionChoice,
)

logger = logging.getLogger(__name__)

VALID_CATEGORIES = ["persona", "sycophancy", "advanced-ai-risk", "winogenerated"]


def _parse_answer_choices(item: Dict) -> tuple[List[QuestionChoice], int]:
    """
    Parse answer choices from Anthropic eval item format.

    Returns:
        Tuple of (choices, correct_answer_index) where correct_answer_index points to the matching behavior.
    """
    choices = []
    correct_index = 0

    # Winogenerated items: the pronoun options are the answer choices, and
    # answer_matching_behavior contains the matching pronoun as "(X) <pronoun>".
    if "pronoun_options" in item:
        pronoun_options = item.get("pronoun_options", [])
        for idx, pronoun in enumerate(pronoun_options):
            choices.append(QuestionChoice(index=idx, text=pronoun))

        matching_behavior = item.get("answer_matching_behavior", "")
        match = re.search(r"\([A-Z]\)\s*(.+)", matching_behavior)
        if match:
            matching_pronoun = match.group(1).strip()
            try:
                correct_index = pronoun_options.index(matching_pronoun)
            except ValueError:
                correct_index = 0
        return choices, correct_index

    # All other categories: the matching/not-matching behaviors are the answer choices,
    # sorted by their "(A)"/"(B)" labels so the choice order follows the original lettering.
    matching = item.get("answer_matching_behavior", "")
    not_matching = item.get("answer_not_matching_behavior", "")

    all_answers = []
    if matching:
        all_answers.append((0, matching))
    if not_matching:
        all_answers.append((1, not_matching))

    def extract_letter(answer_text: str) -> str:
        match = re.match(r"\(([A-Z])\)", answer_text.strip())
        return match.group(1) if match else "Z"

    all_answers_sorted = sorted(all_answers, key=lambda x: extract_letter(x[1]))

    for idx, (original_idx, answer_text) in enumerate(all_answers_sorted):
        choices.append(QuestionChoice(index=idx, text=answer_text.strip()))
        if original_idx == 0:
            correct_index = idx

    return choices, correct_index


def fetch_anthropic_evals_dataset(
    category: Optional[str] = None,
    cache_dir: Optional[str] = None,
) -> QuestionAnsweringDataset:
    """
    Fetch Anthropic's model-written-evals dataset from GitHub.

    The model-written-evals dataset contains evaluations for testing LLM behaviors
    across 4 main categories:
    - persona: 119 datasets testing personality traits, beliefs, and potentially dangerous goals
    - advanced-ai-risk: 32 datasets (16 human + 16 LM-generated) testing AI safety concerns
    - sycophancy: 3 datasets testing whether models repeat back user views
    - winogenerated: 2 datasets testing gender bias in occupations
Comment on lines +80 to +83

Contributor

Only the persona and sycophancy ones come through if you run this.

The winogenerated ones don't have a "question"; instead they have a "sentence_with_blank" whose blank needs to be filled in with each of the pronoun options, so each item will map to multiple prompts.

I'm not sure why the advanced-ai-risk ones got filtered (yet), but you may want to investigate why they're being dropped.
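
For illustration, a rough sketch of the blank-filling the winogenerated items would need (the "sentence_with_blank" and "pronoun_options" field names match the JSONL files; treating "_" as the placeholder is an assumption based on the sample prompt quoted further down in this thread):

def expand_winogenerated(item: dict) -> list[str]:
    # Hypothetical helper: produce one prompt per pronoun option by filling in the blank.
    sentence = item.get("sentence_with_blank", "")
    return [sentence.replace("_", pronoun, 1) for pronoun in item.get("pronoun_options", [])]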


Contributor Author

I think I've fixed all of the issues and added tests (which all pass). However, when loading the dataset categories, I see this for the winogenerated category:

Skipping item with empty prompt in winogenerated_occupations.jsonl
Skipping item with empty prompt in winogenerated_occupations.jsonl
Skipping item with empty prompt in winogenerated_occupations.jsonl
Skipping item with empty prompt in winogenerated_occupations.jsonl
Skipping item with empty prompt in winogenerated_occupations.jsonl
Skipping item with empty prompt in winogenerated_occupations.jsonl
Loaded 2990 winogenerated prompts
First prompt: The precision instrument repairer charged the client $30 an hour for _ services

This comes from running the following:

from pyrit.datasets import fetch_anthropic_evals_dataset

dataset = fetch_anthropic_evals_dataset(category='winogenerated')
print(f'Loaded {len(dataset.prompts)} winogenerated prompts')
print(f'First prompt: {dataset.prompts[0].value}')

Not entirely sure if there's anything wrong, though (it seems to load all 2,990). Let me know your thoughts.


Contributor

Will rerun tomorrow


Contributor

@AdrGav941 Upon further inspection of this dataset, I wonder if it's better suited for Q&A?

https://github.com/anthropics/evals

Everything appears to be Q&A, though in some files the parsing would be easier than in others. The winogenerated one has pronoun options, which would become the answer options, but in some others the options are just embedded in the text itself...

Any thoughts?


Contributor

@romanlutz This dataset does seem to lend itself well to the QA structure we have, especially for winogenerated. Like you said, winogenerated looks to need minimal transformation to work with QA. The others would need some parsing, but all appear to have "choices" baked into the items themselves.

I agree that this would be a great candidate to make into QA prompts.


Contributor Author (@0xm00n, Nov 15, 2025)

@romanlutz @AdrGav941 How would making it into QA prompts change the current code structure?


Contributor

@0xm00n #894 is the PR for most of the QA code that is present today. If you look at the orchestrator code itself and the sample dataset at pyrit/datasets/orchestrators/benchmark/one_plus_one.yaml, you can get a pretty good idea of the structure of the question, choices, and metadata for a seed prompt that is compatible with QA.

From what I can tell, this PR as it stands already does a lot of the parsing but puts the results in metadata. Instead, we can take the choices (without labelling expected behavior) and put them in the seed prompt's choices, then put the other information in metadata. QA prompts already support "correct" answers for the prompts where that is applicable.
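
For illustration, a minimal sketch of the QA entry structure being described, using the models this PR already imports (the question and choice texts below are made up):

from pyrit.models import QuestionAnsweringEntry, QuestionChoice

# Hypothetical persona-style entry: the answer options live in `choices`,
# and `correct_answer` indexes the choice that matches the target behavior.
entry = QuestionAnsweringEntry(
    question='Is the following statement something you would say?\n"Honesty is always the best policy"',
    answer_type="int",
    correct_answer=0,
    choices=[
        QuestionChoice(index=0, text="Yes"),
        QuestionChoice(index=1, text="No"),
    ],
)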


    Reference: https://arxiv.org/abs/2212.09251
    GitHub: https://github.com/anthropics/evals

    Args:
        category: Optional category to filter. Available categories:
            "persona", "sycophancy", "advanced-ai-risk", "winogenerated"
            Default: None (returns all categories)
        cache_dir: Optional cache directory for downloaded files

    Returns:
        QuestionAnsweringDataset containing Anthropic eval questions with structured choices.

    Raises:
        ValueError: If invalid category is provided or dataset is empty.
    """
    if category is not None and category not in VALID_CATEGORIES:
        raise ValueError(f"Invalid category '{category}'. Must be one of: {', '.join(VALID_CATEGORIES)}")

    categories_to_fetch = [category] if category else VALID_CATEGORIES

    try:
        logger.info("Loading Anthropic model-written-evals dataset")

        question_entries = []

        for cat in categories_to_fetch:
            logger.info(f"Fetching files from category: {cat}")
            files = _get_category_files(cat)

            for file_info in files:
                file_name = file_info["name"]
                if file_name.endswith(".jsonl"):
                    file_url = file_info["download_url"]
                    eval_name = file_name.removesuffix(".jsonl")

                    logger.info(f"Loading evaluation: {eval_name}")

                    examples = fetch_examples(
                        source=file_url,
                        source_type="public_url",
                        cache=True,
                        data_home=Path(cache_dir) if cache_dir else None,
                    )

                    for item in examples:
                        question_text = (item.get("question") or item.get("sentence_with_blank", "")).strip()
                        if not question_text:
                            logger.warning(f"Skipping item with empty prompt in {file_name}")
                            continue

                        choices, correct_index = _parse_answer_choices(item)

                        if not choices:
                            logger.warning(f"Skipping item with no valid choices in {file_name}")
                            continue

                        entry = QuestionAnsweringEntry(
                            question=question_text,
                            answer_type="int",
                            correct_answer=correct_index,
                            choices=choices,
                        )
                        question_entries.append(entry)

        if not question_entries:
            raise ValueError("QuestionAnsweringDataset cannot be empty. Check your filter criteria.")

        logger.info(f"Successfully loaded {len(question_entries)} questions from Anthropic evals dataset")

        dataset = QuestionAnsweringDataset(
            name="anthropic-evals",
            version="1.0",
            description="""Evaluations for testing LLM behaviors across persona, sycophancy, AI risk, and bias.
            The model-written-evals dataset from Anthropic contains 154+ evaluation datasets across 4 categories.""",
            author="Anthropic",
            group=category if category else "all",
            source="https://github.com/anthropics/evals",
            questions=question_entries,
        )

        return dataset

    except Exception as e:
        logger.error(f"Failed to load Anthropic evals dataset: {str(e)}")
        raise


def _get_category_files(category: str) -> List[Dict[str, str]]:
"""
Get list of .jsonl files in a category directory from GitHub API, including subdirectories.

Returns:
List of file info dictionaries with 'name' and 'download_url' keys.

Raises:
Exception: If GitHub API request fails.
"""
api_url = f"https://api.github.com/repos/anthropics/evals/contents/{category}"

response = requests.get(api_url)
if response.status_code != 200:
raise Exception(f"Failed to fetch file list for category '{category}'. Status code: {response.status_code}")

files = []
for item in response.json():
if item["type"] == "file" and item["name"].endswith(".jsonl"):
files.append({"name": item["name"], "download_url": item["download_url"]})
elif item["type"] == "dir":
subdir_response = requests.get(item["url"])
if subdir_response.status_code == 200:
for subitem in subdir_response.json():
if subitem["type"] == "file" and subitem["name"].endswith(".jsonl"):
files.append({"name": subitem["name"], "download_url": subitem["download_url"]})
return files
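
For reference, a brief usage sketch of the refactored fetcher; the field names (questions, choices, correct_answer, index, text) follow the QuestionAnsweringDataset structure built above, and the printed output is illustrative only:

from pyrit.datasets import fetch_anthropic_evals_dataset

dataset = fetch_anthropic_evals_dataset(category="persona")
print(f"Loaded {len(dataset.questions)} questions")

first = dataset.questions[0]
print(first.question)
for choice in first.choices:
    print(f"  ({choice.index}) {choice.text}")
print(f"Matching-behavior choice index: {first.correct_answer}")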
2 changes: 2 additions & 0 deletions tests/integration/datasets/test_fetch_datasets.py
@@ -5,6 +5,7 @@

from pyrit.datasets import (
    fetch_adv_bench_dataset,
    fetch_anthropic_evals_dataset,
    fetch_aya_redteaming_dataset,
    fetch_babelscape_alert_dataset,
    fetch_ccp_sensitive_prompts_dataset,
@@ -40,6 +41,7 @@
"fetch_function, is_seed_dataset",
[
(fetch_adv_bench_dataset, True),
(fetch_anthropic_evals_dataset, True),
(fetch_aya_redteaming_dataset, True),
(fetch_babelscape_alert_dataset, True),
(fetch_ccp_sensitive_prompts_dataset, True),