Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 12 additions & 2 deletions .github/scripts/benchmark_formatter.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,11 @@
lines = p.read_text(encoding="utf-8").splitlines()
processed_lines = []
in_code = False
delta_col = None # record "Diff" column start per table
align_hint = None # derived from benchstat header last pipe position

ALIGN_COLUMN = 60 # fallback alignment when header not found

def strip_worker_suffix(text: str) -> str:
    """Strip a trailing "-<digits>" run (Go's GOMAXPROCS/worker suffix) from
    every whitespace-delimited token in *text*.

    E.g. "BenchmarkEnforce-8   100" -> "BenchmarkEnforce   100".
    NOTE(review): this matches ANY token ending in "-<digits>", not just
    benchmark names — confirm data columns never contain such tokens.
    """
    return re.sub(r'(\S+?)-\d+(\s|$)', r'\1\2', text)

Expand Down Expand Up @@ -117,8 +122,6 @@ def extract_two_numbers(tokens):
# Diff column width ~12 chars (e.g. "+100.00% 🚀")
right_boundary = diff_col_start + 14

# Reset code fence tracking state for Pass 1
in_code = False
for line in lines:

if line.strip() == "```":
Expand Down Expand Up @@ -146,6 +149,12 @@ def extract_two_numbers(tokens):
stripped_header = re.sub(r'\s+Delta\b', '', stripped_header, flags=re.IGNORECASE)

# Pad to diff_col_start
padding = diff_col_start - len(stripped_header)
if padding < 2:
padding = 2 # minimum spacing
# If header is wider than data (unlikely but possible), adjust diff_col_start
# But for now let's trust max_content_width or just append

if len(stripped_header) < diff_col_start:
new_header = stripped_header + " " * (diff_col_start - len(stripped_header))
else:
Expand All @@ -169,6 +178,7 @@ def extract_two_numbers(tokens):
processed_lines.append(line)
continue

original_line = line
line = strip_worker_suffix(line)
tokens = line.split()
if not tokens:
Expand Down
101 changes: 101 additions & 0 deletions .github/scripts/format_benchmark_data.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
import json
import os
import sys
import datetime
import re


def normalize_name(name):
    """Turn a raw Go benchmark name into a compact CamelCase identifier.

    Drops the leading "Benchmark" prefix and the trailing "-N" GOMAXPROCS
    suffix, splits the remainder on "/" and "_", upper-cases well-known
    acronyms, capitalizes every other segment, and joins them back together.
    """
    # Acronyms that must stay fully upper-case in the normalized name.
    acronyms = {"rbac", "abac", "acl", "api", "rest"}

    # "BenchmarkRBAC/Enforce-16" -> "RBAC/Enforce"
    stripped = re.sub(r"-\d+$", "", re.sub(r"^Benchmark", "", name))
    segments = re.split(r"[/_]", stripped)
    return "".join(
        seg.upper() if seg.lower() in acronyms else seg.capitalize()
        for seg in segments
    )


def main():
    """Convert raw `go test -bench` output into a benchmark JSON file.

    Usage: python format_benchmark_data.py input.txt output.json

    Commit metadata is pulled from COMMIT_* environment variables, which the
    GitHub Action is expected to export.
    """
    if len(sys.argv) < 3:
        print("Usage: python format_benchmark_data.py input.txt output.json")
        sys.exit(1)

    input_path, output_path = sys.argv[1], sys.argv[2]

    env = os.environ.get
    # Commit metadata, as exported by the workflow environment.
    commit_info = {
        "author": {
            "email": env("COMMIT_AUTHOR_EMAIL", ""),
            "name": env("COMMIT_AUTHOR_NAME", ""),
            "username": env("COMMIT_AUTHOR_USERNAME", ""),
        },
        "committer": {
            "email": env("COMMIT_COMMITTER_EMAIL", ""),
            "name": env("COMMIT_COMMITTER_NAME", ""),
            "username": env("COMMIT_COMMITTER_USERNAME", ""),
        },
        "distinct": True,  # assumed true for a push to master
        "id": env("COMMIT_ID", ""),
        "message": env("COMMIT_MESSAGE", ""),
        "timestamp": env("COMMIT_TIMESTAMP", ""),
        "tree_id": env("COMMIT_TREE_ID", ""),
        "url": env("COMMIT_URL", ""),
    }

    # Standard Go benchmark line: "BenchmarkName-8   10000   123 ns/op".
    bench_re = re.compile(r'^(Benchmark\S+)\s+(\d+)\s+([\d\.]+)\s+ns/op')

    benches = []
    try:
        with open(input_path, "r", encoding="utf-8") as f:
            for line in f:
                m = bench_re.search(line)
                if m is None:
                    continue
                iterations = int(m.group(2))
                benches.append({
                    "name": normalize_name(m.group(1)),
                    "value": round(float(m.group(3)), 2),
                    "unit": "ns/op",
                    "extra": f"{iterations} times",
                })
    except Exception as e:
        print(f"Error processing {input_path}: {e}")
        sys.exit(1)

    output_data = {
        "commit": commit_info,
        "date": int(datetime.datetime.now().timestamp() * 1000),  # ms since epoch
        "tool": "go",
        "procs": os.cpu_count() or 1,
        "benches": benches,
    }

    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(output_data, f, indent=2)

    print(f"Successfully formatted benchmark data to {output_path}")


# Script entry point.
if __name__ == "__main__":
    main()
36 changes: 36 additions & 0 deletions .github/scripts/merge_benchmarks.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
import json
import sys
import glob


def merge_jsons(output_file, input_files):
    """Merge several benchmark JSON files into one.

    The first input file supplies the top-level structure; the "benchmarks"
    lists of all inputs are concatenated into it. Nothing is written when
    *input_files* is empty.

    NOTE(review): this reads the "benchmarks" key, while
    format_benchmark_data.py emits "benches" — confirm the intended schema.
    """
    base = None
    combined = []

    for path in input_files:
        with open(path, "r", encoding="utf-8") as fh:
            payload = json.load(fh)
        if base is None:
            base = payload
        combined.extend(payload.get("benchmarks", []))

    if base:
        base["benchmarks"] = combined
        with open(output_file, "w", encoding="utf-8") as fh:
            json.dump(base, fh, indent=4)


if __name__ == "__main__":
    # CLI entry point: first argument is the output file, the rest are
    # input files (glob patterns are expanded in case the shell did not).
    if len(sys.argv) < 3:
        print("Usage: python merge_benchmarks.py output.json input1.json input2.json ...")
        sys.exit(1)

    target = sys.argv[1]
    patterns = sys.argv[2:]

    matched = []
    for pattern in patterns:
        matched += glob.glob(pattern)

    merge_jsons(target, matched)
82 changes: 82 additions & 0 deletions .github/scripts/update_data_js.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
import json
import sys
import os
import re
import time


def load_data_js(filepath):
    """Load the chart data embedded in a data.js file.

    The file is expected to contain "window.BENCHMARK_DATA = {...};".
    Returns a fresh empty structure when the file is missing or contains no
    recognizable assignment; exits with status 1 on malformed JSON.
    """
    empty = {"lastUpdate": 0, "repoUrl": "https://github.com/casbin/casbin", "entries": {}}

    if not os.path.exists(filepath):
        return empty

    with open(filepath, "r", encoding="utf-8") as fh:
        raw = fh.read()

    # Peel off the "window.BENCHMARK_DATA = " wrapper to get the JSON body.
    m = re.search(r"window\.BENCHMARK_DATA\s*=\s*({.*});?", raw, re.DOTALL)
    if not m:
        return empty

    try:
        return json.loads(m.group(1))
    except json.JSONDecodeError:
        print("Error decoding JSON from data.js", file=sys.stderr)
        sys.exit(1)


def save_data_js(filepath, data):
    """Serialize *data* to *filepath* as a "window.BENCHMARK_DATA = ...;" assignment."""
    with open(filepath, "w", encoding="utf-8") as fh:
        fh.write(f"window.BENCHMARK_DATA = {json.dumps(data, indent=4)};")


def main():
    """Append one benchmark result to the chart data in data.js.

    Usage: python update_data_js.py benchmark_result.json data.js

    Reads the result produced by format_benchmark_data.py, converts it into
    the entry shape the benchmark chart expects, appends it to the "Casbin"
    series, and refreshes lastUpdate.
    """
    if len(sys.argv) < 3:
        print("Usage: python update_data_js.py benchmark_result.json data.js")
        sys.exit(1)

    bench_file = sys.argv[1]
    data_js_file = sys.argv[2]

    with open(bench_file, "r", encoding="utf-8") as f:
        bench_data = json.load(f)

    js_data = load_data_js(data_js_file)

    # BUG FIX: format_benchmark_data.py stores metadata under "commit", not
    # "commit_info"; accept both so older result files still work.
    commit_info = bench_data.get("commit") or bench_data.get("commit_info") or {}

    # Author may be a nested {"name": ...} dict (format_benchmark_data.py
    # shape) or a flat "author_name" string (legacy shape).
    author = commit_info.get("author")
    if isinstance(author, dict):
        author = author.get("name", "unknown")
    elif author is None:
        author = commit_info.get("author_name", "unknown")

    entry = {
        "commit": {
            # New shape uses "timestamp"; legacy shape used "time".
            "time": commit_info.get("time") or commit_info.get("timestamp"),
            "id": commit_info.get("id", "unknown"),
            "author": author,
            "message": commit_info.get("message", "unknown"),
        },
        "date": int(time.time() * 1000),  # current time in ms
        "tool": "casbin",
        "benchmarks": [],
    }

    # BUG FIX: format_benchmark_data.py lists results under "benches" with a
    # flat "value"; the legacy shape used "benchmarks" with stats.mean.
    for b in bench_data.get("benchmarks") or bench_data.get("benches") or []:
        # Name is already normalized and value already in ns/op
        # (see format_benchmark_data.py).
        value = b["stats"]["mean"] if "stats" in b else b["value"]
        entry["benchmarks"].append({"name": b["name"], "unit": "ns/op", "value": value})

    # Append the entry to the "Casbin" series, creating it if necessary.
    group_name = "Casbin"
    if group_name not in js_data["entries"]:
        js_data["entries"][group_name] = []

    js_data["entries"][group_name].append(entry)
    js_data["lastUpdate"] = int(time.time() * 1000)

    save_data_js(data_js_file, js_data)
    print(f"Updated {data_js_file} with {len(entry['benchmarks'])} benchmarks.")


# Script entry point.
if __name__ == "__main__":
    main()
58 changes: 58 additions & 0 deletions .github/workflows/benchmark-push.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# Runs the Go benchmark suite on every push to master and commits the
# formatted results into the casbin/casbin-benchmark-data repository.
name: Push Benchmark Data

on:
  push:
    branches:
      - master

jobs:
  benchmark:
    name: Run Benchmark & Push Data
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.21'

      # Capture raw `go test -bench` output for the formatter step below.
      - name: Run Benchmark
        run: |
          go test -bench '.' -benchmem ./... > benchmark_result.txt

      # Requires a token with push access to the data repository.
      - name: Checkout Data Repo
        uses: actions/checkout@v4
        with:
          repository: casbin/casbin-benchmark-data
          token: ${{ secrets.CASBIN_BENCHMARK_DATA_TOKEN }}
          path: benchmark-data

      # Commit metadata is handed to format_benchmark_data.py via COMMIT_*
      # environment variables.
      - name: Format Benchmark Data
        env:
          COMMIT_AUTHOR_EMAIL: ${{ github.event.head_commit.author.email }}
          COMMIT_AUTHOR_NAME: ${{ github.event.head_commit.author.name }}
          COMMIT_AUTHOR_USERNAME: ${{ github.event.head_commit.author.username }}
          COMMIT_COMMITTER_EMAIL: ${{ github.event.head_commit.committer.email }}
          COMMIT_COMMITTER_NAME: ${{ github.event.head_commit.committer.name }}
          COMMIT_COMMITTER_USERNAME: ${{ github.event.head_commit.committer.username }}
          COMMIT_MESSAGE: ${{ github.event.head_commit.message }}
          COMMIT_TIMESTAMP: ${{ github.event.head_commit.timestamp }}
          COMMIT_URL: ${{ github.event.head_commit.url }}
          COMMIT_ID: ${{ github.event.head_commit.id }}
        run: |
          python .github/scripts/format_benchmark_data.py benchmark_result.txt formatted_result.json

      # NOTE(review): a concurrent push to the data repo would make this
      # `git push` fail; consider `git pull --rebase` + retry.
      - name: Push Benchmark Result
        working-directory: benchmark-data
        run: |
          mkdir -p casbin
          cp ../formatted_result.json casbin/benchmark-${{ github.sha }}.json

          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

          git add casbin/benchmark-${{ github.sha }}.json
          git commit -m "Add benchmark result for casbin commit ${{ github.sha }}"
          git push
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -28,3 +28,4 @@ _testmain.go

# vendor files
vendor
yml-test/
Loading