diff --git a/configs/cargo/Cargo.lock b/configs/cargo/Cargo.lock
index 24fe2fb4e9..190f43f182 100644
--- a/configs/cargo/Cargo.lock
+++ b/configs/cargo/Cargo.lock
@@ -2613,7 +2613,7 @@ dependencies = [
 
 [[package]]
 name = "hive-apollo-router-plugin"
-version = "2.3.3"
+version = "2.3.4"
 dependencies = [
  "anyhow",
  "apollo-router",
@@ -2647,7 +2647,7 @@ dependencies = [
 
 [[package]]
 name = "hive-console-sdk"
-version = "0.2.0"
+version = "0.2.1"
 dependencies = [
  "anyhow",
  "async-trait",
diff --git a/packages/web/docs/src/content/router/guides/_meta.ts b/packages/web/docs/src/content/router/guides/_meta.ts
index 61130150e8..4f667027d8 100644
--- a/packages/web/docs/src/content/router/guides/_meta.ts
+++ b/packages/web/docs/src/content/router/guides/_meta.ts
@@ -2,4 +2,5 @@ export default {
   'dynamic-subgraph-routing': 'Dynamic Subgraph Routing',
   'header-manipulation': 'Header Manipulation',
   'performance-tuning': 'Performance Tuning & Traffic Shaping',
+  'extending-the-router': 'Extending the Router',
 };
diff --git a/packages/web/docs/src/content/router/guides/extending-the-router/index.mdx b/packages/web/docs/src/content/router/guides/extending-the-router/index.mdx
new file mode 100644
index 0000000000..d766286b88
--- /dev/null
+++ b/packages/web/docs/src/content/router/guides/extending-the-router/index.mdx
@@ -0,0 +1,840 @@
+---
+title: 'Extending the Router'
+---
+
+import { Tabs } from '@theguild/components'
+
+# Extending the Router
+
+Hive Router is designed to be flexible and extensible, allowing you to customize its behavior to fit
+your specific needs. This guide explores various ways to extend the router's functionality,
+including custom plugins.
+
+## Custom Builds with Rust Plugins
+
+Hive Router is built using Rust, which allows for high performance and safety. One of the powerful
+features of Hive Router is the ability to create custom builds with your own Rust plugins. This
+enables you to add new capabilities or modify existing ones to better suit your requirements.
+
+### Create a new Rust project
+
+First, ensure you have the necessary development environment set up for
+[Rust 1.91.1 or later](https://rust-lang.org/tools/install/). Then, create a new Rust project for
+your custom router:
+
+```bash
+cargo new --bin my_custom_router
+cd my_custom_router
+```
+
+Install `hive-router` as a dependency by adding it to your `Cargo.toml` file:
+
+```toml
+[dependencies]
+hive-router = "0.17"
+```
+
+You can use our example supergraph as a starting point:
+
+```bash
+curl -sSL https://federation-demo.theguild.workers.dev/supergraph.graphql > supergraph.graphql
+```
+
+Then point to that supergraph in your `router.config.yaml`:
+
+```yaml filename="router.config.yaml"
+supergraph:
+  source: file
+  path: ./supergraph.graphql
+```
+
+Or you can use other ways to provide the supergraph; see
+[Supergraph Sources](https://the-guild.dev/graphql/hive/docs/router/supergraph).
+
+### Create an entrypoint for your custom router
+
+Next, you need to create an entrypoint for your custom router. This is where you'll initialize the
+router and register your plugins.
+Create a new file `src/main.rs` and add the following code:
+
+```rust
+use hive_router::{PluginRegistry, router_entrypoint};
+
+#[ntex::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // This is where you can register your custom plugins
+    let plugin_registry = PluginRegistry::new();
+    // Start the Hive Router with the plugin registry
+    match router_entrypoint(Some(plugin_registry)).await {
+        Ok(_) => Ok(()),
+        Err(err) => {
+            eprintln!("Failed to start Hive Router:\n {}", err);
+
+            Err(err)
+        }
+    }
+}
+```
+
+### Run your custom router
+
+Finally, you can build and run your custom router using Cargo:
+
+```bash
+cargo run
+```
+
+### Configure your plugins in `router.config.yaml`
+
+The `plugins` section in your `router.config.yaml` allows you to configure your custom plugins.
+Here is an example configuration:
+
+```yaml filename="router.config.yaml"
+plugins:
+  my_custom_plugin:
+    option1: value1
+    option2: value2
+```
+
+At this point, you need to learn more about the Hive Router plugin system and how the hooks are
+structured.
+
+## Hooks Lifecycle
+
+We have the following lifecycle hooks available for plugins:
+
+```mermaid
+flowchart TD
+  A[on_http_request] --> B[on_graphql_params]
+  B --> D[on_graphql_parse]
+  D --> F[on_graphql_validation]
+  F --> H[on_query_plan]
+  H --> J[on_execute]
+  J --> K[on_subgraph_execute]
+  K --> L[on_subgraph_http_request]
+  L --> K
+  K --> J
+  J --> A
+```
+
+In addition, there is also an `on_supergraph_load` hook that is called when the supergraph is
+loaded or reloaded.
+
+### `on_http_request`
+
+This hook is called immediately after the router receives an HTTP request. It allows you to inspect
+or modify the request before any further processing occurs. Remember that we don't know yet whether
+the request is a GraphQL request or not.
+
+At the start of this hook, you can, for example:
+
+- Implement custom authentication and authorization logic based on HTTP headers, method, path, etc.
+- Short-circuit the request by providing a custom response (for example for health checks or
+  metrics, or custom playgrounds like Apollo Sandbox, Altair etc.)
+
+At the end of this hook, you can, for example:
+
+- Propagate headers from the incoming request to the final response.
+- Set custom HTTP caching headers for your response caching plugin that also uses those headers.
+- Handle deserialization for content types other than JSON.
+
+You can check the following example implementations:
+
+- `apollo-sandbox` plugin that serves Apollo Sandbox on a custom endpoint using the
+  `on_http_request` hook
+- `propagate-status-code` plugin that propagates status codes from subgraphs to clients using the
+  end phase of the `on_http_request` hook by manipulating the final response.
+
+### `on_graphql_params`
+
+This hook is called after the router has determined that the incoming request is a GraphQL request
+and it decides to parse the GraphQL parameters (query, variables, operation name, etc.).
+
+At the start of this hook, you can, for example:
+
+- Handle a specific validation based on the GraphQL parameters
+- Inspect or modify the raw body
+- Short-circuit the request by providing a custom response (for example for caching)
+- Implement custom request parsing, such as multipart requests
+
+At the end of this hook, you can, for example:
+
+- Implement Persisted Operations or Trusted Documents, so that you can get the operation key from
+  the body and put the actual `query` string into the GraphQL parameters. Then the router can
+  continue processing the request as usual.
+- Implement a Max Tokens security check that counts the tokens in the operation and rejects the
+  request if it exceeds a certain limit.
+- Apply any auth logic that relies on `extensions` or other GraphQL parameters instead of
+  HTTP-specific `headers`.
+- Build a response caching plugin that caches based on GraphQL parameters, so that it returns the
+  response before any further steps like parsing, validation and execution.
+
+You can check the following example implementations:
+
+- `forbid-anon-ops` example that checks the parsed `operation_name` in the end payload of this hook
+  and rejects operations without an operation name.
+- `apq` example that shows how to implement a simple Automatic Persisted Queries plugin using this
+  hook. It takes the hash from the extensions of the parsed body, looks up the actual query from a
+  map and puts it into the GraphQL parameters.
+- `multipart` plugin that parses the body using `multer` and holds the file bytes in the context,
+  then fetches them lazily when needed during the subgraph execution. It shows how to override the
+  parsing logic using this hook.
+- `async_auth` plugin that shows how to implement custom auth logic
+
+### `on_graphql_parse`
+
+This hook is called after the deserialization of the request, once the router has parsed the
+GraphQL parameters body expected by the GraphQL-over-HTTP spec, but before the operation is parsed
+into an AST.
+
+At the start of this hook, you can, for example:
+
+- Implement a trusted documents variant that holds not the string representation of the query, but
+  the parsed AST in a map. When you get the query string from the GraphQL parameters, you can look
+  up the AST from the map and put it into the context, so the router can skip the parsing step.
+- Replace or extend the parser for future RFCs like Fragment Variables etc.
+
+At the end of this hook, you can, for example:
+
+- Prevent certain operations from being executed by checking the HTTP headers or other request
+  properties along with the parsed AST.
+- Collect logs or metrics based on the parsed AST, like counting the usage of certain fields or
+  directives.
+
+### `on_graphql_validation`
+
+This hook is called when the router is ready to validate the operation against the supergraph. At
+this stage, the supergraph and the operation are ready for validation, followed by query planning
+and execution if the operation is valid.
+
+At the start of this hook, you can, for example:
+
+- Skip validation for certain operations based on request properties like headers, method, etc.
+- Skip validation for trusted documents/persisted operations since they are already trusted
+- Add custom validation rules like `@oneOf` directive validation etc.
+- Add custom rules for max depth analysis or rate limiting based on the operation structure
+- Prevent introspection queries based on request properties
+- Cache the validation result in some other way; on a cache hit, you can skip validation
+
+At the end of this hook, you can, for example:
+
+- Bypass certain errors based on request properties
+- Cache the validation result, again in some other way
+
+You can check the following example implementations:
+
+- `root-field-limit` plugin that implements a new `ValidationRule` to limit the number of root
+  fields in an operation using this hook.
+- `one-of` plugin, a more involved example that combines this hook and the `on_execute` hook to
+  implement custom validation and execution logic for the `@oneOf` directive.
+
+### `on_query_plan`
+
+This hook is invoked before the query planner starts the planning process. At this point, we have
+the query planner, the normalized document, the supergraph, the public schema and all the other
+parameters needed for planning.
+
+At the start of this hook, you can, for example:
+
+- Modify the normalized document that is used for planning, or do some validation based on it,
+  since this is the normalized, flattened version of the operation.
+
+At the end of this hook, you can, for example:
+
+- Implement demand control or cost limiting based on the subgraph requests that would be made for
+  the operation.
+
+You can check the following example implementations:
+
+- `root-field-limit` plugin that implements another variant of root field limiting using this hook
+  besides `on_graphql_validation`.
+
+### `on_execute`
+
+This hook is called right before execution starts, once the operation is valid and the variables
+have been coerced just like the other parameters. At this point, we have the query plan ready along
+with the coerced variables, so you can block the operation, manipulate the result, the variables,
+etc.
+
+This is different from the end of the `on_query_plan` hook, because there we don't yet have all the
+parameters ready, such as the coerced variables or the introspection fields, which are resolved
+separately from the actual planning and execution.
+
+At the start of this hook, you can, for example:
+
+- Implement response caching based on the query plan or the operation AST together with the coerced
+  variables, and some auth info from the request.
+- Block certain operations based on the query plan structure together with the coerced variables.
+
+At the end of this hook, you can, for example:
+
+- Add extra metadata to the response based on the execution result, like response cache info,
+  tracing info, some information about the query plan used etc.
+
+You can check the following example implementations:
+
+- `one_of` plugin that implements the execution logic for the `@oneOf` directive using this hook.
+  `on_graphql_validation` is not enough by itself because the coerced variables need to be checked
+  as well.
+- `response_caching` plugin that implements a simple response caching mechanism using this hook.
+
+### `on_subgraph_execute`
+
+This hook is called before an execution request is prepared for a subgraph based on the query plan.
+At this point, we have the subgraph name, the execution request that would be sent to the subgraph,
+and other contextual information.
+
+However, we don't yet have the actual HTTP request that would be sent to the subgraph, which is
+what makes this hook different from the `on_subgraph_http_request` hook: it runs before the
+serialization into an HTTP request. Likewise, at the end of this hook we have the deserialized
+version of the subgraph response, but not the actual HTTP response.
+
+At the start of this hook, you can, for example:
+
+- Mock the subgraph response based on the execution request and other request properties.
+- Implement demand control and cost limiting based on the execution request and other request
+  properties.
+- Handle custom subgraph auth, like HMAC- and JWT-based auth, by manipulating the execution request
+  headers.
+- Implement custom subgraph execution logic for different transports, like protobuf instead of JSON
+  over HTTP.
+- Implement APQ for subgraph requests by storing the persisted queries for subgraphs.
+- Block some subgraph operations based on some policy. This is the right time, before the
+  serialization.
+
+At the end of this hook, you can, for example:
+
+- Do any work that is needed on the deserialized result, such as collecting `extensions` data from
+  subgraph responses for logging, metrics, tracing etc.
+
+You can check the following example implementations:
+
+- `subgraph_response_cache` plugin that implements a simple subgraph response caching mechanism
+  using this hook.
+- `context_data` plugin that shows how to pass data between the main request lifecycle and subgraph
+  execution using this hook.
+
+### `on_subgraph_http_request`
+
+After the subgraph execution request is serialized into an actual HTTP request, this hook is
+invoked. At this point, you have access to the full HTTP request that will be sent to the subgraph.
+
+At the start of this hook, you can, for example:
+
+- Send custom headers to subgraphs based on the main request properties.
+- Implement custom HTTP-based subgraph auth mechanisms like AWS SigV4, which generate signatures
+  based on HTTP properties and the payload.
+- Change the HTTP method, URL, or other properties based on some logic.
+- Choose different endpoints based on request parameters like region, version, etc.
+- Implement HTTP-based caching.
+- Limit the request size based on some policy.
+- Replace the default HTTP transport with a custom transport like multipart/form-data for file
+  uploads.
+
+At the end of this hook, you can, for example:
+
+- Propagate headers, this time from the subgraph response to the main response.
+- Forward cookies from subgraph responses to the main response.
+- Implement the response side of HTTP caching, based on HTTP response headers like ETag,
+  Cache-Control etc.
+- Respect the TTL returned by the subgraph in your response caching plugin, so you can decide on
+  the final TTL based on subgraph response headers.
+
+You can check the following example implementations:
+
+- `propagate-status-code` plugin that propagates status codes from subgraphs to clients using the
+  end phase of this hook by manipulating the final response.
+- `multipart` plugin that overrides the default deserialization logic of Hive Router to handle
+  `multipart/form-data` requests from the client, and holds the files in the context for lazy
+  fetching during subgraph execution. In this hook, it re-uses the files from the context to
+  prepare the subgraph HTTP request, and replaces the default JSON-based HTTP transport with
+  `multipart/form-data` when needed.
+
+### `on_supergraph_load`
+
+This hook is called whenever the supergraph is loaded or reloaded. This can happen at startup or
+when the supergraph configuration changes.
+
+You can, for example:
+
+- Precalculate some data based on the supergraph schema that will be used later during request
+  processing, such as TTL calculation based on `@cacheControl` directives for a response caching
+  plugin.
+- In addition to the above, precalculate cost analysis data for cost limiting plugins.
+- Refresh the state of the plugin based on the supergraph changes; for example, caching plugins can
+  clear their caches.
+
+You can check the following example implementations:
+
+- `one_of` plugin that precalculates the `@oneOf` directive locations in the supergraph schema for
+  faster access during request processing. It avoids traversing the schema for each request, and
+  refreshes the state whenever the supergraph is reloaded.
+- `response_cache` plugin that precalculates the TTL information based on `@cacheControl`
+  directives in the supergraph schema for response caching.
+
+## Short Circuit Responses
+
+In many of the hooks mentioned above, you have the ability to short-circuit the request processing
+by providing a custom response.
+
+Let's say you want to implement a plugin that returns an early error response if the request
+doesn't specify an operation name; in other words, a plugin that rejects anonymous operations. See
+the highlighted code below:
+
+```rust filename="src/forbid_anon_ops.rs" {33}
+#[async_trait::async_trait]
+impl RouterPlugin for ForbidAnonymousOperationsPlugin {
+    async fn on_graphql_params<'exec>(
+        &'exec self,
+        payload: OnGraphQLParamsStartHookPayload<'exec>,
+    ) -> OnGraphQLParamsStartHookResult<'exec> {
+        // After the GraphQL parameters have been parsed, we can check if the operation is anonymous
+        // So we use `on_end`
+        payload.on_end(|payload| {
+            let maybe_operation_name = &payload
+                .graphql_params
+                .operation_name
+                .as_ref();
+
+            if maybe_operation_name
+                .is_none_or(|operation_name| operation_name.is_empty())
+            {
+                // let's log the error
+                tracing::error!("Operation is not allowed!");
+
+                // Prepare an HTTP 400 response with a GraphQL error message
+                let body = json!({
+                    "errors": [
+                        {
+                            "message": "Anonymous operations are not allowed",
+                            "extensions": {
+                                "code": "ANONYMOUS_OPERATION"
+                            }
+                        }
+                    ]
+                });
+                // Here we short-circuit the request processing by returning an early response
+                return payload.end_response(HttpResponse {
+                    body: body.to_string().into(),
+                    headers: http::HeaderMap::new(),
+                    status: StatusCode::BAD_REQUEST,
+                });
+            }
+            // we're good to go!
+            tracing::info!("operation is allowed!");
+            payload.cont()
+        })
+    }
+}
+```
+
+Here we use the `end_response` method on the `OnGraphQLParamsEndHookPayload` to provide a custom
+HTTP response. The router will then skip all further processing and return this response to the
+client.
+
+## Overriding Default Behavior
+
+Instead of short-circuiting the entire HTTP request and returning an early HTTP response, you might
+want to override the default behavior at certain stages.
+
+For example, in the case of automatic persisted queries, you basically need to manipulate the
+parsed request body received from the client. In this case, you need to modify the `query` field in
+the `GraphQLParams` struct to put the actual query string into it, based on the hash received from
+the client.
+
+In the following example, we implement a simple APQ plugin that uses an in-memory cache to store
+the persisted queries. When a request comes in with a hash, it looks up the query from the cache
+and puts it into the `GraphQLParams`. If the query is not found, it returns an error response.
+
+```rust filename="src/apq.rs" {1,2,12-88}
+struct APQPlugin {
+    cache: DashMap<String, String>,
+}
+
+#[async_trait::async_trait]
+impl RouterPlugin for APQPlugin {
+    async fn on_graphql_params<'exec>(
+        &'exec self,
+        payload: OnGraphQLParamsStartHookPayload<'exec>,
+    ) -> OnGraphQLParamsStartHookResult<'exec> {
+        payload.on_end(|mut payload| {
+            let persisted_query_ext = payload
+                .graphql_params
+                .extensions
+                .as_ref()
+                .and_then(|ext| ext.get("persistedQuery"))
+                .and_then(|pq| pq.as_object());
+            if let Some(persisted_query_ext) = persisted_query_ext {
+                match persisted_query_ext.get(&"version").and_then(|v| v.as_i64()) {
+                    Some(1) => {}
+                    _ => {
+                        let body = json!({
+                            "errors": [
+                                {
+                                    "message": "Unsupported persisted query version",
+                                    "extensions": {
+                                        "code": "UNSUPPORTED_PERSISTED_QUERY_VERSION"
+                                    }
+                                }
+                            ]
+                        });
+                        return payload.end_response(HttpResponse {
+                            body: body.to_string().into_bytes().into(),
+                            status: StatusCode::BAD_REQUEST,
+                            headers: http::HeaderMap::new(),
+                        });
+                    }
+                }
+                let sha256_hash = match persisted_query_ext
+                    .get(&"sha256Hash")
+                    .and_then(|h| h.as_str())
+                {
+                    Some(h) => h,
+                    None => {
+                        let body = json!({
+                            "errors": [
+                                {
+                                    "message": "Missing sha256Hash in persisted query",
+                                    "extensions": {
+                                        "code": "MISSING_PERSISTED_QUERY_HASH"
+                                    }
+                                }
+                            ]
+                        });
+                        return payload.end_response(HttpResponse {
+                            body: body.to_string().into_bytes().into(),
+                            status: StatusCode::BAD_REQUEST,
+                            headers: http::HeaderMap::new(),
+                        });
+                    }
+                };
+                if let Some(query_param) = &payload.graphql_params.query {
+                    // Store the query in the cache
+                    self.cache
+                        .insert(sha256_hash.to_string(), query_param.to_string());
+                } else {
+                    // Try to get the query from the cache
+                    if let Some(cached_query) = self.cache.get(sha256_hash) {
+                        // Update the graphql_params with the cached query
+                        payload.graphql_params.query = Some(cached_query.value().to_string());
+                    } else {
+                        let body = json!({
+                            "errors": [
+                                {
+                                    "message": "PersistedQueryNotFound",
+                                    "extensions": {
+                                        "code": "PERSISTED_QUERY_NOT_FOUND"
+                                    }
+                                }
+                            ]
+                        });
+                        return payload.end_response(HttpResponse {
+                            body: body.to_string().into_bytes().into(),
+                            status: StatusCode::NOT_FOUND,
+                            headers: http::HeaderMap::new(),
+                        });
+                    }
+                }
+            }
+
+            payload.cont()
+        })
+    }
+}
+```
+
+## Context Data Sharing
+
+Sometimes, you might want to share data between different hooks or stages of the request
+processing. Hive Router provides a way to store and retrieve custom data in the request context,
+allowing you to pass information between hooks.
+
+For example, you can store some data in the context during the `on_graphql_params` hook and
+retrieve it later in the `on_subgraph_execute` hook.
+
+```rust filename="src/context_data.rs" {1,2,12-55}
+pub struct ContextData {
+    incoming_data: String,
+    response_count: u64,
+}
+
+#[async_trait::async_trait]
+impl RouterPlugin for ContextDataPlugin {
+    async fn on_graphql_params<'exec>(
+        &'exec self,
+        payload: OnGraphQLParamsStartHookPayload<'exec>,
+    ) -> OnGraphQLParamsStartHookResult<'exec> {
+        let context_data = ContextData {
+            incoming_data: "world".to_string(),
+            response_count: 0,
+        };
+
+        payload.context.insert(context_data);
+
+        payload.on_end(|payload| {
+            let context_data = payload.context.get_mut::<ContextData>();
+            if let Some(mut context_data) = context_data {
+                context_data.response_count += 1;
+                tracing::info!("subrequest count {}", context_data.response_count);
+            }
+            payload.cont()
+        })
+    }
+    async fn on_subgraph_execute<'exec>(
+        &'exec self,
+        mut payload: OnSubgraphExecuteStartHookPayload<'exec>,
+    ) -> OnSubgraphExecuteStartHookResult<'exec> {
+        let context_data_entry = payload.context.get_ref::<ContextData>();
+        if let Some(ref context_data_entry) = context_data_entry {
+            tracing::info!("hello {}", context_data_entry.incoming_data); // Hello world!
+            let new_header_value = format!("Hello {}", context_data_entry.incoming_data);
+            payload.execution_request.headers.insert(
+                "x-hello",
+                http::HeaderValue::from_str(&new_header_value).unwrap(),
+            );
+        }
+        payload.on_end(|payload: OnSubgraphExecuteEndHookPayload<'exec>| {
+            let context_data = payload.context.get_mut::<ContextData>();
+            if let Some(mut context_data) = context_data {
+                context_data.response_count += 1;
+                tracing::info!("subrequest count {}", context_data.response_count);
+            }
+            payload.cont()
+        })
+    }
+}
+```
+
+In the example above, we define a `ContextData` struct to hold our custom data. In the
+`on_graphql_params` hook, we create an instance of `ContextData` and insert it into the request
+context. Later, in the `on_subgraph_execute` hook, we retrieve the `ContextData` from the context
+and use its values to modify the subgraph execution request.
+
+The `context` provides a convenient way to share data between different hooks and stages of the
+request processing, enabling more complex and stateful plugin behavior.
+
+The `context.insert`, `context.get_ref` and `context.get_mut` methods are used to store and
+retrieve data of type `T` in the request context:
+
+- `insert<T>(&self, data: T)` - Inserts data of type `T` into the context.
+- `get_ref<T>(&self) -> Option<&T>` - Retrieves a reference to the data of type `T` from the
+  context.
+- `get_mut<T>(&mut self) -> Option<&mut T>` - Retrieves a mutable reference to the data of type `T`
+  from the context.
+
+## Refresh State on Supergraph Reload
+
+Plugins can refresh their internal state whenever the supergraph is reloaded. This is useful for
+plugins that depend on the supergraph schema or configuration.
+
+The following code, from the `response_cache` example plugin, refreshes the `ttl_per_type` map
+whenever the supergraph is reloaded by visiting the schema and looking for `@cacheControl`
+directives.
+
+```rust
+pub struct ResponseCachePlugin {
+    /// ...
+    ttl_per_type: DashMap<String, u64>,
+}
+
+
+/// ...
+    fn on_supergraph_reload<'a>(
+        &'a self,
+        payload: OnSupergraphLoadStartHookPayload,
+    ) -> OnSupergraphLoadStartHookResult<'a> {
+        // Visit the schema and update ttl_per_type based on some directive
+        payload.new_ast.definitions.iter().for_each(|def| {
+            if let graphql_parser::schema::Definition::TypeDefinition(type_def) = def {
+                if let graphql_parser::schema::TypeDefinition::Object(obj_type) = type_def {
+                    for directive in &obj_type.directives {
+                        if directive.name == "cacheControl" {
+                            for arg in &directive.arguments {
+                                if arg.0 == "maxAge" {
+                                    if let graphql_parser::query::Value::Int(max_age) = &arg.1 {
+                                        if let Some(max_age) = max_age.as_i64() {
+                                            self.ttl_per_type
+                                                .insert(obj_type.name.clone(), max_age as u64);
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        });
+
+        payload.cont()
+    }
+```
+
+## Configuration of Plugins
+
+Plugins can be configured via the `router.config.yaml` file. Each plugin can have its own entry
+under the `plugins` section, where you can specify various options and settings specific to that
+plugin.
+
+The configuration `struct` should be `serde` compliant, so that it can be deserialized from the
+YAML file.
+
+Let's say we have custom auth logic that checks the incoming client id header against a list of
+allowed values read from a file dynamically.
+
+```rust
+pub struct AllowClientIdFromFilePlugin {
+    header_key: String,
+    allowed_ids_path: PathBuf,
+}
+
+#[async_trait::async_trait]
+impl RouterPlugin for AllowClientIdFromFilePlugin {
+    // We don't use `on_http_request` here because we want to run this logic
+    // only when the request is a GraphQL request
+    async fn on_graphql_params<'exec>(
+        &'exec self,
+        payload: OnGraphQLParamsStartHookPayload<'exec>,
+    ) -> OnGraphQLParamsStartHookResult<'exec> {
+        let header = payload.router_http_request.headers.get(&self.header_key);
+        match header {
+            Some(client_id) => {
+                let client_id_str = client_id.to_str();
+                match client_id_str {
+                    Ok(client_id) => {
+                        let allowed_clients: Vec<String> = serde_json::from_str(
+                            std::fs::read_to_string(self.allowed_ids_path.clone())
+                                .unwrap()
+                                .as_str(),
+                        )
+                        .unwrap();
+
+                        if !allowed_clients.contains(&client_id.to_string()) {
+                            // Prepare an HTTP 403 response with a GraphQL error message
+                            let body = json!(
+                                {
+                                    "errors": [
+                                        {
+                                            "message": "client-id is not allowed",
+                                            "extensions": {
+                                                "code": "UNAUTHORIZED_CLIENT_ID"
+                                            }
+                                        }
+                                    ]
+                                }
+                            );
+                            return payload.end_response(HttpResponse {
+                                body: sonic_rs::to_vec(&body).unwrap_or_default().into(),
+                                headers: http::HeaderMap::new(),
+                                status: http::StatusCode::FORBIDDEN,
+                            });
+                        }
+                    }
+                    Err(_not_a_string_error) => {
+                        let message = format!("'{}' value is not a string", &self.header_key);
+                        tracing::error!(message);
+                        let body = json!(
+                            {
+                                "errors": [
+                                    {
+                                        "message": message,
+                                        "extensions": {
+                                            "code": "BAD_CLIENT_ID"
+                                        }
+                                    }
+                                ]
+                            }
+                        );
+                        return payload.end_response(HttpResponse {
+                            body: sonic_rs::to_vec(&body).unwrap_or_default().into(),
+                            headers: http::HeaderMap::new(),
+                            status: http::StatusCode::BAD_REQUEST,
+                        });
+                    }
+                }
+            }
+            None => {
+                let message = format!("Missing '{}' header", &self.header_key);
+                tracing::error!(message);
+                let body = json!(
+                    {
+                        "errors": [
+                            {
+                                "message": message,
+                                "extensions": {
+                                    "code": "AUTH_ERROR"
+                                }
+                            }
+                        ]
+                    }
+                );
+                return payload.end_response(HttpResponse {
+                    body: sonic_rs::to_vec(&body).unwrap_or_default().into(),
+                    headers: http::HeaderMap::new(),
+                    status: http::StatusCode::UNAUTHORIZED,
+                });
+            }
+        }
+        payload.cont()
+    }
+}
+```
+
+So we can have a configuration struct like below:
+
filename="src/dynamic_auth.rs" +use serde::Deserialize; + +#[derive(Deserialize)] +pub struct AllowClientIdConfig { + pub enabled: bool, + pub header: String, + pub path: String, +} +``` + +Then attach it to the plugin struct; + +```rust filename="src/dynamic_auth.rs" +impl RouterPluginWithConfig for AllowClientIdFromFilePlugin { + type Config = AllowClientIdConfig; + fn plugin_name() -> &'static str { + "allow_client_id_from_file" + } + fn from_config(config: AllowClientIdConfig) -> Option { + if config.enabled { + Some(AllowClientIdFromFilePlugin { + header_key: config.header, + allowed_ids_path: PathBuf::from(config.path), + }) + } else { + None + } + } +} +``` + +`plugin_name` method should return the name of the plugin as it appears in the `router.config.yaml` +file. The `from_config` method is responsible for creating an instance of the plugin from the +provided configuration. If `from_config` returns `None`, the plugin will not be registered. + +With this setup, you can now configure the `allow_client_id_from_file` plugin in your +`router.config.yaml` file like this: + +```yaml filename="router.config.yaml" +plugins: + allow_client_id_from_file: + enabled: true + header: 'x-client-id' +``` + +## Registration of Plugins + +Finally, to use your custom plugin, you need to register it with the `PluginRegistry` in your +`main.rs` file. + +```rust filename="src/main.rs" {9-14} + let mut plugin_registry = PluginRegistry::new(); + // Register your custom plugin + plugin_registry.register_plugin::(); +``` + +Then pass the `plugin_registry` to the `router_entrypoint` function as shown earlier. + +```rust filename="src/main.rs" {15} + match router_entrypoint(Some(plugin_registry)).await { +```