|
| 1 | +from __future__ import annotations |
| 2 | + |
| 3 | +from contextlib import contextmanager |
| 4 | +from multiprocessing import Semaphore |
| 5 | + |
| 6 | +import typing_extensions as te |
| 7 | +from sanic import Sanic |
| 8 | +from sanic.config import Config |
| 9 | +from sanic.exceptions import ( |
| 10 | + BadRequest, |
| 11 | + NotFound, |
| 12 | +) |
| 13 | +from sanic.request import Request |
| 14 | +from sanic.response import ( |
| 15 | + empty, |
| 16 | + HTTPResponse, |
| 17 | + text, |
| 18 | +) |
| 19 | + |
| 20 | +from .context import BenchKitContext |
| 21 | +from .env import env |
| 22 | +from .workloads import Workload |
| 23 | + |
| 24 | + |
# Concrete alias for the Sanic app parameterized with this service's
# config and custom per-app context (forward reference kept as a string
# because the generic Sanic[...] form is typing-only).
T_App: te.TypeAlias = "Sanic[Config, BenchKitContext]"
| 26 | + |
| 27 | + |
def create_app() -> T_App:
    """Build and configure the BenchKit Sanic application.

    The returned app exposes endpoints to store (``POST /workload``),
    run (``PUT /workload``, ``GET /workload/<name>``), patch
    (``PATCH /workload/<name>``), and delete (``DELETE /workload/<name>``)
    workloads, plus a ``GET /ready`` health check. A shared semaphore
    enforces that the server runs with a single worker process.

    Returns:
        The fully wired Sanic application instance.
    """
    app: T_App = Sanic("Python_BenchKit", ctx=BenchKitContext())

    @app.main_process_start
    async def main_process_start(app: T_App) -> None:
        # Inter-process semaphore shared with all workers; used below to
        # detect (and reject) multi-worker deployments.
        app.shared_ctx.running = Semaphore(1)

    @app.before_server_start
    async def before_server_start(app: T_App) -> None:
        if env.driver_debug:
            # Imported lazily so driver debug logging is only set up
            # when explicitly requested via the environment.
            from neo4j.debug import watch
            watch("neo4j")

        running = app.shared_ctx.running
        acquired = running.acquire(block=False)
        if not acquired:
            # A second worker would share app.ctx state unsafely;
            # fail fast instead.
            raise RuntimeError(
                "The server does not support multiple worker processes"
            )

    @app.after_server_stop
    async def after_server_stop(app: T_App) -> None:
        # Release driver/database resources before freeing the
        # single-worker slot.
        await app.ctx.shutdown()
        running = app.shared_ctx.running
        running.release()

    @contextmanager
    def _loading_workload():
        """Translate workload parsing errors into HTTP 400 responses."""
        try:
            yield
        except (ValueError, TypeError) as e:
            print(e)
            # Chain the original exception so the root cause stays
            # visible in tracebacks/logs.
            raise BadRequest(str(e)) from e

    def _get_workload(app: T_App, name: str) -> Workload:
        """Look up a stored workload by name or raise HTTP 404."""
        try:
            workload = app.ctx.workloads[name]
        except KeyError:
            # The internal KeyError is an implementation detail;
            # suppress it from the HTTP error's context.
            raise NotFound(f"Workload {name} not found") from None
        return workload

    @app.get("/ready")
    async def ready(_: Request) -> HTTPResponse:
        await app.ctx.get_db()  # check that the database is available
        return empty()

    @app.post("/workload")
    async def post_workload(request: Request) -> HTTPResponse:
        data = request.json
        with _loading_workload():
            name = app.ctx.workloads.store_workload(data)
        location = f"/workload/{name}"
        # Fixed: the original used status 204 (No Content) while also
        # sending a body, which violates HTTP semantics. 201 (Created)
        # is the correct status for resource creation and allows the
        # descriptive body alongside the Location header.
        return text(f"created at {location}",
                    status=201,
                    headers={"location": location})

    @app.put("/workload")
    async def put_workload(request: Request) -> HTTPResponse:
        # One-shot workload: parse and execute without storing it.
        data = request.json
        with _loading_workload():
            workload = app.ctx.workloads.parse_workload(data)
        driver = await app.ctx.get_db()
        await workload(driver)
        return empty()

    @app.get("/workload/<name>")
    async def get_workload(_: Request, name: str) -> HTTPResponse:
        # Execute a previously stored workload against the database.
        workload = _get_workload(app, name)
        driver = await app.ctx.get_db()
        await workload(driver)
        return empty()

    @app.patch("/workload/<name>")
    async def patch_workload(request: Request, name: str) -> HTTPResponse:
        data = request.json
        workload = _get_workload(app, name)
        with _loading_workload():
            workload.patch(data)
        return empty()

    @app.delete("/workload/<name>")
    async def delete_workload(_: Request, name: str) -> HTTPResponse:
        # Raises 404 first if the workload does not exist.
        _get_workload(app, name)
        del app.ctx.workloads[name]
        return empty()

    return app
0 commit comments