-
Notifications
You must be signed in to change notification settings - Fork 36
⚡ Bolt: Fix duplicate cache clear & syntax error #572
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -125,7 +125,9 @@ def _calculate_urgency(self, text: str, severity_score: int): | |
| # Pre-extract literal keywords for fast substring pre-filtering | ||
| # Only apply this optimization if the pattern is a simple list of words like \b(word1|word2)\b | ||
| keywords = [] | ||
| if re.fullmatch(r'\\b\([a-zA-Z0-9\s|]+\)\\b', pattern): | ||
| # Optimization: Extract literal keywords from simple regex strings like "\b(word1|word2)\b" | ||
| # This allows us to use a fast substring check (`in text`) before executing the regex engine. | ||
| if pattern.startswith('\\b(') and pattern.endswith(')\\b') and not any(c in pattern[3:-3] for c in ['.', '*', '+', '?', '^', '$', '[', ']', '{', '}']): | ||
| clean_pattern = pattern.replace('\\b', '').replace('(', '').replace(')', '') | ||
| keywords = [k.strip() for k in clean_pattern.split('|') if k.strip()] | ||
| self._regex_cache.append((re.compile(pattern), weight, pattern, keywords)) | ||
|
Comment on lines
+128
to
133
|
||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| @@ -0,0 +1,102 @@ | ||||||||||||||
| import time | ||||||||||||||
| from sqlalchemy.orm import Session | ||||||||||||||
| from sqlalchemy import func, create_engine | ||||||||||||||
| from backend.database import Base | ||||||||||||||
| from backend.models import Grievance, GrievanceFollower, ClosureConfirmation, Issue, Jurisdiction, JurisdictionLevel, SeverityLevel | ||||||||||||||
| from sqlalchemy import case, distinct | ||||||||||||||
| import datetime | ||||||||||||||
|
|
||||||||||||||
| # Create a temporary in-memory database for testing | ||||||||||||||
| engine = create_engine("sqlite:///:memory:") | ||||||||||||||
| Base.metadata.create_all(bind=engine) | ||||||||||||||
| SessionLocal = Session(bind=engine) | ||||||||||||||
|
|
||||||||||||||
|
Comment on lines
+9
to
+13
|
||||||||||||||
| def populate_db(db: Session, grievance_id: int): | ||||||||||||||
| # Add Jurisdiction | ||||||||||||||
| j = Jurisdiction(id=1, level=JurisdictionLevel.STATE, geographic_coverage={"states": ["Maharashtra"]}, responsible_authority="PWD", default_sla_hours=48) | ||||||||||||||
| db.add(j) | ||||||||||||||
|
|
||||||||||||||
| # Add Grievance | ||||||||||||||
| g = Grievance( | ||||||||||||||
| id=grievance_id, | ||||||||||||||
| current_jurisdiction_id=1, | ||||||||||||||
| sla_deadline=datetime.datetime.now(datetime.timezone.utc), | ||||||||||||||
| status="open", | ||||||||||||||
| category="Road", | ||||||||||||||
| unique_id="123", | ||||||||||||||
| severity=SeverityLevel.LOW, | ||||||||||||||
| assigned_authority="PWD" | ||||||||||||||
| ) | ||||||||||||||
| db.add(g) | ||||||||||||||
|
|
||||||||||||||
| # Add Followers | ||||||||||||||
| for i in range(50): | ||||||||||||||
| db.add(GrievanceFollower(grievance_id=grievance_id, user_email=f"user{i}@test.com")) | ||||||||||||||
|
|
||||||||||||||
| # Add Confirmations | ||||||||||||||
| for i in range(30): | ||||||||||||||
| db.add(ClosureConfirmation(grievance_id=grievance_id, user_email=f"conf_user{i}@test.com", confirmation_type="confirmed")) | ||||||||||||||
| for i in range(10): | ||||||||||||||
| db.add(ClosureConfirmation(grievance_id=grievance_id, user_email=f"disp_user{i}@test.com", confirmation_type="disputed")) | ||||||||||||||
|
|
||||||||||||||
| db.commit() | ||||||||||||||
|
|
||||||||||||||
| def benchmark_old(db: Session, grievance_id: int, iterations=1000): | ||||||||||||||
| start = time.perf_counter() | ||||||||||||||
| for _ in range(iterations): | ||||||||||||||
| total_followers = db.query(func.count(GrievanceFollower.id)).filter( | ||||||||||||||
| GrievanceFollower.grievance_id == grievance_id | ||||||||||||||
| ).scalar() | ||||||||||||||
|
|
||||||||||||||
| counts = db.query( | ||||||||||||||
| ClosureConfirmation.confirmation_type, | ||||||||||||||
| func.count(ClosureConfirmation.id) | ||||||||||||||
| ).filter(ClosureConfirmation.grievance_id == grievance_id).group_by(ClosureConfirmation.confirmation_type).all() | ||||||||||||||
|
|
||||||||||||||
| counts_dict = {ctype: count for ctype, count in counts} | ||||||||||||||
| confirmations_count = counts_dict.get("confirmed", 0) | ||||||||||||||
| disputes_count = counts_dict.get("disputed", 0) | ||||||||||||||
| end = time.perf_counter() | ||||||||||||||
| if iterations > 10: | ||||||||||||||
| print(f"Old approach ({iterations} iters): {end - start:.4f}s") | ||||||||||||||
| return total_followers, confirmations_count, disputes_count | ||||||||||||||
|
|
||||||||||||||
| def benchmark_new_agg(db: Session, grievance_id: int, iterations=1000): | ||||||||||||||
| start = time.perf_counter() | ||||||||||||||
| for _ in range(iterations): | ||||||||||||||
| total_followers = db.query(func.count(GrievanceFollower.id)).filter( | ||||||||||||||
| GrievanceFollower.grievance_id == grievance_id | ||||||||||||||
| ).scalar() | ||||||||||||||
|
|
||||||||||||||
| # Optimize the two counts into one aggregate without group_by | ||||||||||||||
| stats = db.query( | ||||||||||||||
| func.sum(case((ClosureConfirmation.confirmation_type == 'confirmed', 1), else_=0)).label('confirmed'), | ||||||||||||||
| func.sum(case((ClosureConfirmation.confirmation_type == 'disputed', 1), else_=0)).label('disputed') | ||||||||||||||
| ).filter(ClosureConfirmation.grievance_id == grievance_id).first() | ||||||||||||||
|
|
||||||||||||||
| confirmations_count = stats.confirmed or 0 | ||||||||||||||
| disputes_count = stats.disputed or 0 | ||||||||||||||
| end = time.perf_counter() | ||||||||||||||
| if iterations > 10: | ||||||||||||||
| print(f"New approach (Agg) ({iterations} iters): {end - start:.4f}s") | ||||||||||||||
| return total_followers, confirmations_count, disputes_count | ||||||||||||||
|
|
||||||||||||||
| if __name__ == "__main__": | ||||||||||||||
| db = SessionLocal | ||||||||||||||
| populate_db(db, 1) | ||||||||||||||
|
|
||||||||||||||
| # Warm up | ||||||||||||||
| benchmark_old(db, 1, 10) | ||||||||||||||
| benchmark_new_agg(db, 1, 10) | ||||||||||||||
|
|
||||||||||||||
| res_old = benchmark_old(db, 1) | ||||||||||||||
| res_agg = benchmark_new_agg(db, 1) | ||||||||||||||
|
|
||||||||||||||
| print(f"Old Results: {res_old}") | ||||||||||||||
| print(f"New Agg Results: {res_agg}") | ||||||||||||||
| def benchmark_new_single(db: Session, grievance_id: int, iterations=1000): | ||||||||||||||
| start = time.perf_counter() | ||||||||||||||
| for _ in range(iterations): | ||||||||||||||
| # We can't easily join them perfectly without cross product, but what if we do subqueries? | ||||||||||||||
| # Actually it's probably better to just leave it. Let's look for N+1 queries instead. | ||||||||||||||
| pass | ||||||||||||||
|
Comment on lines
+97
to
+102
|
||||||||||||||
| def benchmark_new_single(db: Session, grievance_id: int, iterations=1000): | |
| start = time.perf_counter() | |
| for _ in range(iterations): | |
| # We can't easily join them perfectly without cross product, but what if we do subqueries? | |
| # Actually it's probably better to just leave it. Let's look for N+1 queries instead. | |
| pass |
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
| @@ -0,0 +1,51 @@ | ||||||
| import time | ||||||
| from backend.priority_engine import priority_engine | ||||||
| import cProfile | ||||||
| import pstats | ||||||
| import io | ||||||
|
|
||||||
| # We create a sample text that does not contain any of the urgency keywords | ||||||
| # but is long enough to simulate a real-world scenario. | ||||||
| sample_text = ( | ||||||
| "There is a small pothole on the corner of 5th and Main. " | ||||||
| "It has been there for a few days and is causing some inconvenience to the drivers. " | ||||||
| "Please send someone to look at it when possible. " | ||||||
| "The road condition is generally poor in this area and needs attention. " | ||||||
| "We have noticed an increase in traffic recently, which might be contributing to the wear and tear. " | ||||||
| "No one has been injured, but we would like to avoid any accidents." | ||||||
| ) * 10 # Make it reasonably long | ||||||
|
|
||||||
| def benchmark(iterations=10000): | ||||||
| start_time = time.perf_counter() | ||||||
| for _ in range(iterations): | ||||||
| # We only benchmark _calculate_urgency. We give it a base severity of 10. | ||||||
| priority_engine._calculate_urgency(sample_text, 10) | ||||||
| end_time = time.perf_counter() | ||||||
|
|
||||||
| total_time = end_time - start_time | ||||||
| avg_time_ms = (total_time / iterations) * 1000 | ||||||
|
|
||||||
| print(f"Benchmark: _calculate_urgency") | ||||||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. 🧩 Analysis chain🏁 Script executed: cat -n backend/tests/benchmark_urgency.py | sed -n '20,35p'Repository: RohanExploit/VishwaGuru Length of output: 736 Remove unnecessary f-string prefix. Line 28 contains a non-interpolated string and should not use the `f` prefix. Minimal fix- print(f"Benchmark: _calculate_urgency")
+ print("Benchmark: _calculate_urgency")📝 Committable suggestion
Suggested change
🧰 Tools🪛 Ruff (0.15.6)[error] 28-28: f-string without any placeholders Remove extraneous (F541) 🤖 Prompt for AI Agents |
||||||
| print(f"Iterations: {iterations}") | ||||||
| print(f"Total time: {total_time:.4f} seconds") | ||||||
| print(f"Average time per call: {avg_time_ms:.4f} ms") | ||||||
| return avg_time_ms | ||||||
|
|
||||||
| if __name__ == "__main__": | ||||||
| # Warm up | ||||||
| priority_engine._calculate_urgency(sample_text, 10) | ||||||
|
|
||||||
| print("--- Running Benchmark ---") | ||||||
| benchmark() | ||||||
|
|
||||||
| # Profile to show where time is spent | ||||||
| print("\n--- Running Profiler ---") | ||||||
| pr = cProfile.Profile() | ||||||
| pr.enable() | ||||||
| for _ in range(5000): | ||||||
| priority_engine._calculate_urgency(sample_text, 10) | ||||||
| pr.disable() | ||||||
| s = io.StringIO() | ||||||
| ps = pstats.Stats(pr, stream=s).sort_stats('cumulative') | ||||||
| ps.print_stats(15) | ||||||
| print(s.getvalue()) | ||||||
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
| @@ -0,0 +1,58 @@ | ||||||
| import time | ||||||
| from backend.priority_engine import priority_engine | ||||||
| import cProfile | ||||||
| import pstats | ||||||
| import io | ||||||
| import re | ||||||
|
|
||||||
| # We create a sample text that does not contain any of the urgency keywords | ||||||
| # but is long enough to simulate a real-world scenario. | ||||||
| sample_text = ( | ||||||
| "There is a small pothole on the corner of 5th and Main. " | ||||||
| "It has been there for a few days and is causing some inconvenience to the drivers. " | ||||||
| "Please send someone to look at it when possible. " | ||||||
| "The road condition is generally poor in this area and needs attention. " | ||||||
| "We have noticed an increase in traffic recently, which might be contributing to the wear and tear. " | ||||||
| "No one has been injured, but we would like to avoid any accidents." | ||||||
| ) * 10 # Make it reasonably long | ||||||
|
|
||||||
| def benchmark(iterations=10000): | ||||||
| start_time = time.perf_counter() | ||||||
| for _ in range(iterations): | ||||||
| priority_engine._calculate_urgency(sample_text, 10) | ||||||
| end_time = time.perf_counter() | ||||||
|
|
||||||
| total_time = end_time - start_time | ||||||
| avg_time_ms = (total_time / iterations) * 1000 | ||||||
|
|
||||||
| print(f"Benchmark: _calculate_urgency") | ||||||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 🧩 Analysis chain🏁 Script executed: # Find and read the file
find . -name "benchmark_urgency_unoptimized.py" -type fRepository: RohanExploit/VishwaGuru Length of output: 115 🏁 Script executed: # Read the file to check line 28
cat -n ./backend/tests/benchmark_urgency_unoptimized.py | sed -n '20,35p'Repository: RohanExploit/VishwaGuru Length of output: 761 Remove unnecessary f-string prefix. Line 28 is a plain string and triggers Ruff F541. Minimal fix- print(f"Benchmark: _calculate_urgency")
+ print("Benchmark: _calculate_urgency")📝 Committable suggestion
Suggested change
🧰 Tools🪛 Ruff (0.15.6)[error] 28-28: f-string without any placeholders Remove extraneous (F541) 🤖 Prompt for AI Agents |
||||||
| print(f"Iterations: {iterations}") | ||||||
| print(f"Total time: {total_time:.4f} seconds") | ||||||
| print(f"Average time per call: {avg_time_ms:.4f} ms") | ||||||
| return avg_time_ms | ||||||
|
|
||||||
| if __name__ == "__main__": | ||||||
| # Force the engine to clear its cache and simulate the old unoptimized behavior | ||||||
| # where the keywords list is empty and regex.search is always called. | ||||||
| from backend.adaptive_weights import adaptive_weights | ||||||
| priority_engine._regex_cache = [] | ||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. P2: The benchmark setup is ineffective: Prompt for AI agents |
||||||
| for pattern, weight in adaptive_weights.get_urgency_patterns(): | ||||||
| priority_engine._regex_cache.append((re.compile(pattern), weight, pattern, [])) | ||||||
|
|
||||||
| # Warm up | ||||||
| priority_engine._calculate_urgency(sample_text, 10) | ||||||
|
Comment on lines
+35
to
+43
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. “Unoptimized” cache is being overwritten before measurement. Line [43] calls Fix to preserve unoptimized cache during benchmark if __name__ == "__main__":
@@
from backend.adaptive_weights import adaptive_weights
priority_engine._regex_cache = []
for pattern, weight in adaptive_weights.get_urgency_patterns():
priority_engine._regex_cache.append((re.compile(pattern), weight, pattern, []))
+ priority_engine._last_reload_count = adaptive_weights.reload_count🤖 Prompt for AI Agents |
||||||
|
|
||||||
| print("--- Running Unoptimized Benchmark ---") | ||||||
| benchmark() | ||||||
|
|
||||||
| # Profile to show where time is spent | ||||||
| print("\n--- Running Profiler ---") | ||||||
| pr = cProfile.Profile() | ||||||
| pr.enable() | ||||||
| for _ in range(5000): | ||||||
| priority_engine._calculate_urgency(sample_text, 10) | ||||||
| pr.disable() | ||||||
| s = io.StringIO() | ||||||
| ps = pstats.Stats(pr, stream=s).sort_stats('cumulative') | ||||||
| ps.print_stats(15) | ||||||
| print(s.getvalue()) | ||||||
| Original file line number | Diff line number | Diff line change | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| @@ -0,0 +1,26 @@ | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| import time | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| from backend.database import SessionLocal | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| from backend.models import Grievance, GrievanceFollower, ClosureConfirmation | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| from backend.routers.grievances import get_closure_status | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| from sqlalchemy import func | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| def bench(): | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| db = SessionLocal() | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. P2: Close the SQLAlchemy session (or use a context manager) to prevent connection/resource leaks. Prompt for AI agents |
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| start = time.perf_counter() | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| for _ in range(100): | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| total_followers = db.query(func.count(GrievanceFollower.id)).filter( | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| GrievanceFollower.grievance_id == 1 | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| ).scalar() | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| counts = db.query( | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| ClosureConfirmation.confirmation_type, | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| func.count(ClosureConfirmation.id) | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| ).filter(ClosureConfirmation.grievance_id == 1).group_by(ClosureConfirmation.confirmation_type).all() | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| print(f"Old approach: {time.perf_counter() - start}") | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| start = time.perf_counter() | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| for _ in range(100): | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| # Instead of two queries, we could potentially do this in one, or just measure DB hits | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| pass | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
Comment on lines
+22
to
+24
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The “new approach” benchmark does no work. Line [22]-Line [24] uses Use the intended code path in the loop start = time.perf_counter()
for _ in range(100):
- # Instead of two queries, we could potentially do this in one, or just measure DB hits
- pass
+ get_closure_status(grievance_id=1, db=db)
+ print(f"New approach: {time.perf_counter() - start}")🤖 Prompt for AI Agents |
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| bench() | ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
Comment on lines
+9
to
+26
|
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
| start = time.perf_counter() | |
| for _ in range(100): | |
| total_followers = db.query(func.count(GrievanceFollower.id)).filter( | |
| GrievanceFollower.grievance_id == 1 | |
| ).scalar() | |
| counts = db.query( | |
| ClosureConfirmation.confirmation_type, | |
| func.count(ClosureConfirmation.id) | |
| ).filter(ClosureConfirmation.grievance_id == 1).group_by(ClosureConfirmation.confirmation_type).all() | |
| print(f"Old approach: {time.perf_counter() - start}") | |
| start = time.perf_counter() | |
| for _ in range(100): | |
| # Instead of two queries, we could potentially do this in one, or just measure DB hits | |
| pass | |
| bench() | |
| try: | |
| start = time.perf_counter() | |
| for _ in range(100): | |
| total_followers = db.query(func.count(GrievanceFollower.id)).filter( | |
| GrievanceFollower.grievance_id == 1 | |
| ).scalar() | |
| counts = db.query( | |
| ClosureConfirmation.confirmation_type, | |
| func.count(ClosureConfirmation.id) | |
| ).filter(ClosureConfirmation.grievance_id == 1).group_by(ClosureConfirmation.confirmation_type).all() | |
| print(f"Old approach: {time.perf_counter() - start}") | |
| start = time.perf_counter() | |
| for _ in range(100): | |
| # Instead of two queries, we could potentially do this in one, or just measure DB hits | |
| pass | |
| finally: | |
| db.close() | |
| if __name__ == "__main__": | |
| bench() |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
P1: Avoid executing benchmark code at module import time in a test_*.py file; it will run during pytest collection.
Prompt for AI agents
Check if this issue is valid — if so, understand the root cause and fix it. At test_grievances_opt.py, line 26:
<comment>Avoid executing benchmark code at module import time in a `test_*.py` file; it will run during pytest collection.</comment>
<file context>
@@ -0,0 +1,26 @@
+ # Instead of two queries, we could potentially do this in one, or just measure DB hits
+ pass
+
+bench()
</file context>
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Avoid executing benchmarks at import time in a test module.
Line [26] runs bench() immediately on import. In a test_*.py file, this can execute during pytest collection and cause unwanted DB/network/runtime side effects.
Guard execution
-bench()
+if __name__ == "__main__":
+ bench()📝 Committable suggestion
‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.
| bench() | |
| if __name__ == "__main__": | |
| bench() |
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.
In `@test_grievances_opt.py` at line 26, The test module currently calls bench()
at import time which triggers side effects during pytest collection; change this
so bench() is only executed intentionally — either remove the top-level call and
invoke bench() from a dedicated test function or script, or wrap the call in an
if __name__ == "__main__": guard, or convert it into a pytest test/fixture
(e.g., def test_bench(): bench()) so execution only happens when explicitly run;
locate the standalone bench() invocation and apply one of these guards or moves
to prevent import-time execution.
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
P2: The new “simple regex” detection is too permissive and can skip valid regex matches, causing urgency false negatives.
Prompt for AI agents