Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions backend/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Modular routers for VishwaGuru Backend

# Fix for googletrans compatibility with newer httpcore (Issue #290)
# This monkeypatch must happen before any imports of googletrans or httpx
try:
    import httpcore
except ImportError:
    # httpcore is absent, so googletrans (its only consumer here) cannot be
    # imported either — nothing to patch.
    pass
else:
    # googletrans 4.x references httpcore.SyncHTTPTransport in a type
    # annotation evaluated at class-definition time; newer httpcore releases
    # removed that attribute. Any placeholder satisfies the lookup.
    if not hasattr(httpcore, "SyncHTTPTransport"):
        httpcore.SyncHTTPTransport = object
1 change: 1 addition & 0 deletions backend/cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,3 +174,4 @@ def invalidate(self):
nearby_issues_cache = ThreadSafeCache(ttl=60, max_size=100) # 1 minute TTL, max 100 entries
user_upload_cache = ThreadSafeCache(ttl=3600, max_size=1000) # 1 hour TTL for upload limits
blockchain_last_hash_cache = ThreadSafeCache(ttl=3600, max_size=1)
# Holds the most recent grievance integrity hash for blockchain-style chaining.
# NOTE(review): this cache is process-local — with multiple workers each process
# keeps its own "last hash", which can fork the chain; confirm single-worker
# deployment or move the last-hash state to a shared store.
grievance_last_hash_cache = ThreadSafeCache(ttl=3600, max_size=1)
23 changes: 22 additions & 1 deletion backend/grievance_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,13 @@

import json
import uuid
import hashlib
from typing import Dict, Any, Optional, List
from sqlalchemy.orm import Session, joinedload
from datetime import datetime, timezone, timedelta

from backend.models import Grievance, Jurisdiction, GrievanceStatus, SeverityLevel, Issue
from backend.cache import grievance_last_hash_cache
from backend.database import SessionLocal
from backend.routing_service import RoutingService
from backend.sla_config_service import SLAConfigService
Expand Down Expand Up @@ -84,6 +86,19 @@ def create_grievance(self, grievance_data: Dict[str, Any], db: Session = None) -
# Generate unique ID
unique_id = str(uuid.uuid4())[:8].upper()

# Blockchain chaining logic (Issue #290 optimization)
# Performance Boost: Use thread-safe cache to eliminate DB query for last hash
prev_hash = grievance_last_hash_cache.get("last_hash")
if prev_hash is None:
# Cache miss: Fetch only the last hash from DB
prev_grievance = db.query(Grievance.integrity_hash).order_by(Grievance.id.desc()).first()
prev_hash = prev_grievance[0] if prev_grievance and prev_grievance[0] else ""
grievance_last_hash_cache.set(data=prev_hash, key="last_hash")
Comment on lines +89 to +96
Copy link

Copilot AI Mar 20, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

grievance_last_hash_cache is process-local. In a multi-worker deployment (or if grievances are created from multiple app instances), different workers can compute prev_hash from stale cache values, producing non-linear/forked chains that depend on which worker handled the request. If you need a single global chain, derive prev_hash from the DB within the same transaction (or use a DB-backed/centralized last-hash store with appropriate locking) rather than an in-memory cache.

Suggested change
# Blockchain chaining logic (Issue #290 optimization)
# Performance Boost: Use thread-safe cache to eliminate DB query for last hash
prev_hash = grievance_last_hash_cache.get("last_hash")
if prev_hash is None:
# Cache miss: Fetch only the last hash from DB
prev_grievance = db.query(Grievance.integrity_hash).order_by(Grievance.id.desc()).first()
prev_hash = prev_grievance[0] if prev_grievance and prev_grievance[0] else ""
grievance_last_hash_cache.set(data=prev_hash, key="last_hash")
# Blockchain chaining logic
# Derive prev_hash from the database to ensure a single, global chain across workers
prev_grievance = (
db.query(Grievance.integrity_hash)
.order_by(Grievance.id.desc())
.first()
)
prev_hash = prev_grievance[0] if prev_grievance and prev_grievance[0] else ""

Copilot uses AI. Check for mistakes.
Comment on lines +89 to +96
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

The last-hash cache breaks chain correctness across workers.

grievance_last_hash_cache is process-local, so another worker can keep a stale "last_hash" for up to an hour and keep chaining new grievances off an old predecessor. Even inside one worker, the separate get() and set() calls are not an atomic read-modify-write, so concurrent requests can compute different integrity_hash values from the same prev_hash. This forks the chain instead of preserving a single linear history.

Also applies to: 132-133

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@backend/grievance_service.py` around lines 89 - 96, The process-local
grievance_last_hash_cache causes stale/unsafe chaining; replace the separate
get()/set() logic by obtaining the previous hash from the single authoritative
store inside the same insertion transaction so it is atomic: during grievance
creation, query Grievance.integrity_hash using a DB-level lock (e.g., SELECT ...
ORDER BY id DESC FOR UPDATE) or otherwise fetch the last hash in the same
transactional scope that computes and persists the new integrity_hash, and
remove the process-local grievance_last_hash_cache.get()/set() use (or—if you
must keep a cache—switch to a centralized cache with an atomic compare-and-set
operation such as Redis GETSET/WATCH+MULTI to ensure no concurrent worker can
produce a forked chain).


# SHA-256 chaining based on key grievance fields
hash_content = f"{unique_id}|{grievance_data.get('category', 'general')}|{severity.value}|{prev_hash}"
integrity_hash = hashlib.sha256(hash_content.encode()).hexdigest()
Comment on lines +89 to +100
Copy link

Copilot AI Mar 20, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The integrity hash is computed from unique_id|category|severity|prev_hash only. As a result, changes to other persisted grievance fields (e.g., description, city/pincode, address, current_jurisdiction_id, sla_deadline) will not be detected by /blockchain-verify, even though the endpoint message implies full grievance integrity verification. Consider hashing a canonical representation of the immutable grievance payload (at least the user-supplied fields like description + location) so tampering with any of those fields invalidates the seal.

Copilot uses AI. Check for mistakes.

# Extract location data
location_data = grievance_data.get('location', {})
latitude = location_data.get('latitude') if isinstance(location_data, dict) else None
Expand All @@ -106,11 +121,17 @@ def create_grievance(self, grievance_data: Dict[str, Any], db: Session = None) -
assigned_authority=assigned_authority,
sla_deadline=sla_deadline,
status=GrievanceStatus.OPEN,
issue_id=grievance_data.get('issue_id')
issue_id=grievance_data.get('issue_id'),
integrity_hash=integrity_hash,
previous_integrity_hash=prev_hash
)

db.add(grievance)
db.commit()

# Update cache for next grievance only AFTER successful commit (Issue #290 optimization)
grievance_last_hash_cache.set(data=integrity_hash, key="last_hash")

db.refresh(grievance)

return grievance
Expand Down
11 changes: 11 additions & 0 deletions backend/init_db.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,17 @@ def index_exists(table, index_name):
if not index_exists("grievances", "ix_grievances_category_status"):
conn.execute(text("CREATE INDEX IF NOT EXISTS ix_grievances_category_status ON grievances (category, status)"))

if not column_exists("grievances", "integrity_hash"):
conn.execute(text("ALTER TABLE grievances ADD COLUMN integrity_hash VARCHAR"))
logger.info("Added integrity_hash column to grievances")

if not column_exists("grievances", "previous_integrity_hash"):
conn.execute(text("ALTER TABLE grievances ADD COLUMN previous_integrity_hash VARCHAR"))
logger.info("Added previous_integrity_hash column to grievances")
Comment on lines +169 to +175
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

These ALTER TABLE checks are not safe under concurrent startup.

column_exists() reads from the inspector snapshot created before engine.begin() on Line 32, so two instances can both conclude these columns are missing and then race on the same ALTER TABLE grievances ADD COLUMN .... Now that backend/main.py runs migrate_db() during boot, one replica can fail startup or leave the app serving against a partially migrated schema.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@backend/init_db.py` around lines 169 - 175, The pre-check using
column_exists(...) before conn.execute(...) can race under concurrent startup;
instead remove the pre-check and run the ALTER TABLE statements inside
migrate_db with a safe retry/ignore pattern: call conn.execute(text("ALTER TABLE
grievances ADD COLUMN integrity_hash VARCHAR")) (and similarly for
previous_integrity_hash) inside try/except that catches the DB-specific "column
already exists" / duplicate column error (or SQLAlchemy's
ProgrammingError/OperationalError) and only suppresses that specific error while
re-raising other failures; reference the existing functions column_exists,
migrate_db, and the conn.execute/text calls so you update the ALTER logic there
to be idempotent under concurrency.


if not index_exists("grievances", "ix_grievances_previous_integrity_hash"):
conn.execute(text("CREATE INDEX IF NOT EXISTS ix_grievances_previous_integrity_hash ON grievances (previous_integrity_hash)"))

# Field Officer Visits Table (Issue #288)
# This table is newly created for field officer check-in system
if not inspector.has_table("field_officer_visits"):
Expand Down
21 changes: 15 additions & 6 deletions backend/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,15 @@
from pathlib import Path
from dotenv import load_dotenv

# Fix for googletrans compatibility with newer httpcore (Issue #290)
# This monkeypatch must happen before any imports of googletrans or httpx
try:
    import httpcore
    # Newer httpcore releases removed SyncHTTPTransport, which googletrans
    # still references in a type annotation at import time; any attribute
    # value satisfies that lookup, so a bare `object` placeholder suffices.
    if not hasattr(httpcore, "SyncHTTPTransport"):
        httpcore.SyncHTTPTransport = object
except ImportError:
    # httpcore not installed: googletrans is unavailable too, nothing to patch.
    pass

load_dotenv()

# Add project root to sys.path to ensure 'backend.*' imports work
Expand Down Expand Up @@ -85,9 +94,10 @@ async def lifespan(app: FastAPI):
logger.info("Starting database initialization...")
await run_in_threadpool(Base.metadata.create_all, bind=engine)
logger.info("Base.metadata.create_all completed.")
# Temporarily disabled - comment out to debug startup issues
# await run_in_threadpool(migrate_db)
logger.info("Database initialized successfully (migrations skipped for local dev).")

# Run migrations to ensure schema is up-to-date (Issue #290)
await run_in_threadpool(migrate_db)
logger.info("Database initialized and migrations applied successfully.")
except Exception as e:
logger.error(f"Database initialization failed: {e}", exc_info=True)
# We continue to allow health checks even if DB has issues (for debugging)
Expand Down Expand Up @@ -126,9 +136,8 @@ async def lifespan(app: FastAPI):
app = FastAPI(
title="VishwaGuru Backend",
description="AI-powered civic issue reporting and resolution platform",
version="1.0.0"
# Temporarily disable lifespan for local dev debugging
# lifespan=lifespan
version="1.0.0",
lifespan=lifespan
Copy link
Contributor

@cubic-dev-ai cubic-dev-ai bot Mar 20, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1: Re-enabling the lifespan handler means migrate_db() now runs on every application boot. The underlying migration logic in init_db.py uses a check-then-act pattern (`column_exists()` → `ALTER TABLE ADD COLUMN`) that is not atomic. When multiple replicas start concurrently (e.g., in a scaled deployment), they can both observe the column as missing and race on the same ALTER TABLE, causing one replica to fail at startup or leave the schema partially migrated. Consider wrapping the migration in an advisory lock or using a proper migration tool (e.g., Alembic) that handles concurrent execution safely.

Prompt for AI agents
Check if this issue is valid — if so, understand the root cause and fix it. At backend/main.py, line 140:

<comment>Re-enabling the `lifespan` handler means `migrate_db()` now runs on every application boot. The underlying migration logic in `init_db.py` uses a check-then-act pattern (`column_exists()` → `ALTER TABLE ADD COLUMN`) that is not atomic. When multiple replicas start concurrently (e.g., in a scaled deployment), they can both observe the column as missing and race on the same `ALTER TABLE`, causing one replica to fail at startup or leave the schema partially migrated. Consider wrapping the migration in an advisory lock or using a proper migration tool (e.g., Alembic) that handles concurrent execution safely.</comment>

<file context>
@@ -127,9 +136,8 @@ async def lifespan(app: FastAPI):
-    # Temporarily disable lifespan for local dev debugging
-    # lifespan=lifespan
+    version="1.0.0",
+    lifespan=lifespan
 )
 
</file context>
Fix with Cubic

)

# Add centralized exception handlers
Expand Down
4 changes: 3 additions & 1 deletion backend/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,9 @@ class Grievance(Base):
created_at = Column(DateTime, default=lambda: datetime.datetime.now(datetime.timezone.utc), index=True)
updated_at = Column(DateTime, default=lambda: datetime.datetime.now(datetime.timezone.utc), onupdate=lambda: datetime.datetime.now(datetime.timezone.utc))
resolved_at = Column(DateTime, nullable=True)

integrity_hash = Column(String, nullable=True) # Blockchain integrity seal
previous_integrity_hash = Column(String, nullable=True, index=True) # Linked hash for O(1) verification

# Closure confirmation fields
closure_requested_at = Column(DateTime, nullable=True)
closure_confirmation_deadline = Column(DateTime, nullable=True)
Expand Down
1 change: 1 addition & 0 deletions backend/requirements-render.txt
Original file line number Diff line number Diff line change
Expand Up @@ -22,3 +22,4 @@ googletrans==4.0.2
langdetect
numpy
scikit-learn
httpcore
2 changes: 1 addition & 1 deletion backend/routers/detection.py
Original file line number Diff line number Diff line change
Expand Up @@ -459,7 +459,7 @@ async def detect_abandoned_vehicle_endpoint(image: UploadFile = File(...)):
@router.post("/api/detect-emotion")
async def detect_emotion_endpoint(
image: UploadFile = File(...),
client: httpx.AsyncClient = backend.dependencies.Depends(get_http_client)
client = backend.dependencies.Depends(get_http_client)
):
Comment on lines 459 to 463
Copy link

Copilot AI Mar 20, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This dependency declaration is inconsistent with the rest of the file and hard to read: backend.dependencies.Depends(get_http_client) relies on re-exported Depends from another module. Prefer importing Depends from fastapi (or adding request: Request and calling get_http_client(request) like other endpoints here) so the signature clearly communicates this is a FastAPI dependency and remains maintainable.

Copilot uses AI. Check for mistakes.
"""
Analyze facial emotions in the image using Hugging Face inference.
Expand Down
52 changes: 51 additions & 1 deletion backend/routers/grievances.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import os
import json
import logging
import hashlib
from datetime import datetime, timezone

from backend.database import get_db
Expand All @@ -15,7 +16,7 @@
FollowGrievanceRequest, FollowGrievanceResponse,
RequestClosureRequest, RequestClosureResponse,
ConfirmClosureRequest, ConfirmClosureResponse,
ClosureStatusResponse
ClosureStatusResponse, GrievanceBlockchainVerificationResponse
)
from backend.grievance_service import GrievanceService
from backend.closure_service import ClosureService
Expand Down Expand Up @@ -436,3 +437,52 @@ def get_closure_status(
except Exception as e:
logger.error(f"Error getting closure status for grievance {grievance_id}: {e}", exc_info=True)
raise HTTPException(status_code=500, detail="Failed to get closure status")


@router.get("/grievances/{grievance_id}/blockchain-verify", response_model=GrievanceBlockchainVerificationResponse)
def verify_grievance_blockchain(grievance_id: int, db: Session = Depends(get_db)):
    """
    Verify the cryptographic integrity of a grievance using blockchain-style chaining.
    Optimized: Uses previous_integrity_hash column for O(1) verification.
    """
    # Project only the columns needed for verification so we avoid loading the
    # full ORM entity or triggering secondary lookups.
    row = (
        db.query(
            Grievance.id,
            Grievance.unique_id,
            Grievance.category,
            Grievance.severity,
            Grievance.integrity_hash,
            Grievance.previous_integrity_hash,
        )
        .filter(Grievance.id == grievance_id)
        .first()
    )

    if row is None:
        raise HTTPException(status_code=404, detail="Grievance not found")

    # Prefer the stored chain link; legacy rows written before the O(1)
    # optimization fall back to the closest earlier grievance's hash.
    prev_hash = row.previous_integrity_hash
    if prev_hash is None:
        legacy = (
            db.query(Grievance.integrity_hash)
            .filter(Grievance.id < grievance_id)
            .order_by(Grievance.id.desc())
            .first()
        )
        prev_hash = legacy[0] if legacy and legacy[0] else ""

    # Recompute the seal exactly as grievance_service.py does:
    # sha256("unique_id|category|severity|prev_hash")
    severity_value = row.severity.value if hasattr(row.severity, 'value') else str(row.severity)
    computed_hash = hashlib.sha256(
        f"{row.unique_id}|{row.category}|{severity_value}|{prev_hash}".encode()
    ).hexdigest()

    is_valid = computed_hash == row.integrity_hash

    if is_valid:
        message = "Integrity verified. This grievance is cryptographically sealed and has not been tampered with."
    else:
        message = "Integrity check failed! The grievance data does not match its cryptographic seal."

    return GrievanceBlockchainVerificationResponse(
        is_valid=is_valid,
        current_hash=row.integrity_hash,
        computed_hash=computed_hash,
        message=message,
    )
6 changes: 6 additions & 0 deletions backend/schemas.py
Original file line number Diff line number Diff line change
Expand Up @@ -306,6 +306,12 @@ class BlockchainVerificationResponse(BaseModel):
computed_hash: str = Field(..., description="Hash computed from current issue data and previous issue's hash")
message: str = Field(..., description="Verification result message")

class GrievanceBlockchainVerificationResponse(BaseModel):
    """Response payload for the grievance blockchain-verify endpoint.

    Mirrors BlockchainVerificationResponse (used for issues) but for
    grievances: the stored integrity hash is compared against a hash
    recomputed from the grievance's chained fields.
    """
    is_valid: bool = Field(..., description="Whether the grievance integrity is intact")
    current_hash: Optional[str] = Field(None, description="Current integrity hash stored in DB")
    computed_hash: str = Field(..., description="Hash computed from current grievance data and previous grievance's hash")
    message: str = Field(..., description="Verification result message")


# Resolution Proof Schemas (Issue #292)

Expand Down
95 changes: 95 additions & 0 deletions backend/tests/test_grievance_blockchain.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
from fastapi.testclient import TestClient
import pytest
import hashlib
from backend.main import app
from backend.database import get_db, Base, engine
from backend.models import Grievance, Jurisdiction, JurisdictionLevel, SeverityLevel
from sqlalchemy.orm import Session
from datetime import datetime, timedelta, timezone

@pytest.fixture
def db_session():
    """Yield a Session bound to a disposable in-memory SQLite engine.

    The original fixture ran create_all/drop_all against the application's
    global engine, which could wipe a real configured database when tests run
    with DATABASE_URL set. Binding to a throwaway engine isolates every test.
    Yields a session pre-seeded with one Jurisdiction (required FK target for
    grievances).
    """
    # Local imports so the module-level import block is untouched.
    from sqlalchemy import create_engine
    from sqlalchemy.pool import StaticPool

    # StaticPool shares the single in-memory database across all connections
    # opened from this engine (TestClient requests + this session).
    test_engine = create_engine(
        "sqlite://",
        connect_args={"check_same_thread": False},
        poolclass=StaticPool,
    )
    Base.metadata.create_all(bind=test_engine)
    session = Session(bind=test_engine)
    # Create a jurisdiction which is required for grievance
    jurisdiction = Jurisdiction(
        level=JurisdictionLevel.LOCAL,
        geographic_coverage={"cities": ["Mumbai"]},
        responsible_authority="Mumbai MC",
        default_sla_hours=24
    )
    session.add(jurisdiction)
    session.commit()
    try:
        yield session
    finally:
        session.close()
        Base.metadata.drop_all(bind=test_engine)
        test_engine.dispose()
Comment on lines +10 to +25
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Use a dedicated test database here.

Lines 12 and 25 call Base.metadata.create_all/drop_all on backend.database.engine, so this fixture operates on whatever database the app is configured to use. If DATABASE_URL is set in CI or on a developer machine, these tests can create/drop the real schema instead of an isolated test schema. Please bind the fixture to a disposable test engine and point the get_db override at sessions from that engine.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@backend/tests/test_grievance_blockchain.py` around lines 10 - 25, The
db_session fixture currently calls Base.metadata.create_all/drop_all against the
global engine, which can modify a real DB; change it to create a disposable test
engine and sessionmaker (e.g., create an in-memory or temp test engine and a
SessionLocal bound to that engine), use that test Session (the fixture's
Session) to add the Jurisdiction and yield the test session, then drop only the
test engine's metadata; additionally override the application's get_db
dependency in the test (or test client) to return sessions from this test
Session so all DB operations in tests use the isolated test engine instead of
the global engine.


Comment on lines +11 to +26
Copy link

Copilot AI Mar 20, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The db_session fixture uses the global app engine (defaulting to the on-disk sqlite:///./data/issues.db) and calls Base.metadata.drop_all() after each test. This can interfere with other tests (and can accidentally wipe a local dev DB if tests are run against the default config). Prefer creating an isolated test engine (e.g., a temporary SQLite file / in-memory DB) and binding a SessionLocal to it, or use transactions/rollback per test instead of drop_all on the shared engine.

Copilot uses AI. Check for mistakes.
@pytest.fixture
def client(db_session):
    """TestClient whose get_db dependency resolves to the test session."""
    def _get_test_db():
        return db_session

    app.dependency_overrides[get_db] = _get_test_db
    with TestClient(app) as test_client:
        yield test_client
    app.dependency_overrides = {}

def test_grievance_blockchain_chaining(client, db_session):
    """Two grievances created via the service must form a linked hash chain,
    and both must pass the blockchain-verify endpoint.

    Fixes Ruff E712: use direct truthiness checks instead of `== True`.
    """
    from backend.grievance_service import GrievanceService
    service = GrievanceService()

    # Reset the process-local cache so prev_hash is derived from the DB.
    from backend.cache import grievance_last_hash_cache
    grievance_last_hash_cache.clear()

    # First grievance anchors the chain (empty previous hash).
    g1_data = {
        "category": "Road",
        "severity": "medium",
        "city": "Mumbai",
        "description": "Pothole in sector 5"
    }
    g1 = service.create_grievance(g1_data, db=db_session)
    assert g1 is not None
    assert g1.integrity_hash is not None
    assert g1.previous_integrity_hash == ""

    # Second grievance must link back to the first one's hash.
    g2_data = {
        "category": "Garbage",
        "severity": "high",
        "city": "Mumbai",
        "description": "Waste overflow"
    }
    g2 = service.create_grievance(g2_data, db=db_session)
    assert g2 is not None
    assert g2.integrity_hash is not None
    assert g2.previous_integrity_hash == g1.integrity_hash

    # Verify both seals through the public API.
    response = client.get(f"/grievances/{g1.id}/blockchain-verify")
    assert response.status_code == 200
    assert response.json()["is_valid"]

    response = client.get(f"/grievances/{g2.id}/blockchain-verify")
    assert response.status_code == 200
    assert response.json()["is_valid"]
Comment on lines +68 to +74
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Switch these assertions to direct truthiness checks.

Ruff E712 flags the == True/False comparisons on Lines 70, 74, and 94. assert response.json()["is_valid"] and assert not response.json()["is_valid"] keep the same intent and clear the lint error.

🧹 Minimal fix
-    assert response.json()["is_valid"] == True
+    assert response.json()["is_valid"]
 ...
-    assert response.json()["is_valid"] == True
+    assert response.json()["is_valid"]
 ...
-    assert response.json()["is_valid"] == False
+    assert not response.json()["is_valid"]

Also applies to: 92-95

🧰 Tools
🪛 Ruff (0.15.6)

[error] 70-70: Avoid equality comparisons to True; use response.json()["is_valid"]: for truth checks

Replace with response.json()["is_valid"]

(E712)


[error] 74-74: Avoid equality comparisons to True; use response.json()["is_valid"]: for truth checks

Replace with response.json()["is_valid"]

(E712)

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@backend/tests/test_grievance_blockchain.py` around lines 68 - 74, Replace
explicit boolean comparisons in the test assertions with direct truthiness
checks: change assertions that use == True to simply assert
response.json()["is_valid"] and those that use == False to assert not
response.json()["is_valid"]; update the assertions following the client.get
calls that fetch "/grievances/{g1.id}/blockchain-verify" and
"/grievances/{g2.id}/blockchain-verify" in this test (and the similar assertions
referenced at lines 92-95) to remove the "== True/False" comparisons while
preserving the same semantic checks.


def test_grievance_blockchain_failure(client, db_session):
    """A grievance whose stored hash cannot be reproduced from its fields
    must fail verification.

    Fixes Ruff E712: use `not ...` instead of `== False`.
    """
    # Manually insert a grievance with a deliberately bogus integrity hash.
    jurisdiction = db_session.query(Jurisdiction).first()
    g = Grievance(
        unique_id="TAMPERED",
        category="Road",
        severity=SeverityLevel.MEDIUM,
        current_jurisdiction_id=jurisdiction.id,
        assigned_authority="Mumbai MC",
        sla_deadline=datetime.now(timezone.utc) + timedelta(hours=24),
        integrity_hash="fakehash",
        previous_integrity_hash=""
    )
    db_session.add(g)
    db_session.commit()

    response = client.get(f"/grievances/{g.id}/blockchain-verify")
    assert response.status_code == 200
    assert not response.json()["is_valid"]
    assert "Integrity check failed" in response.json()["message"]
3 changes: 3 additions & 0 deletions render.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,9 @@ services:
value: production
- key: DEBUG
value: false
# Disable local ML on Render to prevent OOM errors (Issue #290)
- key: USE_LOCAL_ML
value: false
# CORS settings
- key: CORS_ORIGINS
sync: false # Set to your frontend URL
Expand Down
53 changes: 53 additions & 0 deletions server_log.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
2026-03-20 14:51:01,153 - backend.adaptive_weights - INFO - Adaptive weights loaded/reloaded.
2026-03-20 14:51:01,225 - backend.rag_service - INFO - Loaded 5 civic policies for RAG.
Starting server on port 10000
Traceback (most recent call last):
File "/app/start-backend.py", line 13, in <module>
uvicorn.run("backend.main:app", host="0.0.0.0", port=port, log_level="info")
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/site-packages/uvicorn/main.py", line 606, in run
server.run()
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/site-packages/uvicorn/server.py", line 75, in run
return asyncio_run(self.serve(sockets=sockets), loop_factory=self.config.get_loop_factory())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/asyncio/runners.py", line 195, in run
return runner.run(main)
^^^^^^^^^^^^^^^^
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/asyncio/runners.py", line 118, in run
return self._loop.run_until_complete(task)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/asyncio/base_events.py", line 691, in run_until_complete
return future.result()
^^^^^^^^^^^^^^^
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/site-packages/uvicorn/server.py", line 79, in serve
await self._serve(sockets)
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/site-packages/uvicorn/server.py", line 86, in _serve
config.load()
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/site-packages/uvicorn/config.py", line 441, in load
self.loaded_app = import_from_string(self.app)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/site-packages/uvicorn/importer.py", line 19, in import_from_string
module = importlib.import_module(module_str)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/importlib/__init__.py", line 90, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "<frozen importlib._bootstrap>", line 1387, in _gcd_import
File "<frozen importlib._bootstrap>", line 1360, in _find_and_load
File "<frozen importlib._bootstrap>", line 1331, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 935, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 999, in exec_module
File "<frozen importlib._bootstrap>", line 488, in _call_with_frames_removed
File "/app/backend/main.py", line 35, in <module>
from backend.routers import issues, detection, grievances, utility, auth, admin, analysis, voice, field_officer, hf, resolution_proof
File "/app/backend/routers/voice.py", line 27, in <module>
from backend.voice_service import get_voice_service
File "/app/backend/voice_service.py", line 14, in <module>
from googletrans import Translator
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/site-packages/googletrans/__init__.py", line 6, in <module>
from googletrans.client import Translator
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/site-packages/googletrans/client.py", line 30, in <module>
class Translator:
File "/home/jules/.pyenv/versions/3.12.13/lib/python3.12/site-packages/googletrans/client.py", line 62, in Translator
proxies: typing.Dict[str, httpcore.SyncHTTPTransport] = None,
^^^^^^^^^^^^^^^^^^^^^^^^^^
AttributeError: module 'httpcore' has no attribute 'SyncHTTPTransport'
Loading
Loading