diff --git a/.bleep b/.bleep
index bcde257d..a9416ff6 100644
--- a/.bleep
+++ b/.bleep
@@ -1 +1 @@
-ed8657309187516d2e673037821a9fbd8405d703
\ No newline at end of file
+d2c25d726c5738e6a8028dc3e7642ecfe6c1824e
diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644
index 00000000..3c1f3636
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,2 @@
+[resolver]
+incompatible-rust-versions = "fallback"
\ No newline at end of file
diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml
index cd10b8c0..6fe67dea 100644
--- a/.github/workflows/audit.yml
+++ b/.github/workflows/audit.yml
@@ -24,7 +24,7 @@ jobs:
- name: Generate Cargo.lock
# https://github.com/rustsec/audit-check/issues/27
- run: cargo generate-lockfile
+ run: cargo generate-lockfile --ignore-rust-version
- name: Audit Check
# https://github.com/rustsec/audit-check/issues/2
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 43c8aa9d..22a4c458 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -8,7 +8,7 @@ jobs:
fail-fast: false
matrix:
# nightly, msrv, and latest stable
- toolchain: [nightly, 1.83.0, 1.87.0]
+ toolchain: [nightly, 1.84.0, 1.91.1]
runs-on: ubuntu-latest
# Only run on "pull_request" event for external PRs. This is to avoid
# duplicate builds for PRs created from internal branches.
@@ -48,12 +48,12 @@ jobs:
- name: Run cargo clippy
run: |
- [[ ${{ matrix.toolchain }} != 1.87.0 ]] || cargo clippy --all-targets --all -- --allow=unknown-lints --deny=warnings
+ [[ ${{ matrix.toolchain }} != 1.91.1 ]] || cargo clippy --all-targets --all -- --allow=unknown-lints --deny=warnings
- name: Run cargo audit
run: |
- [[ ${{ matrix.toolchain }} != 1.87.0 ]] || (cargo install --locked cargo-audit && cargo audit)
+ [[ ${{ matrix.toolchain }} != 1.91.1 ]] || (cargo install --locked cargo-audit && cargo generate-lockfile --ignore-rust-version && cargo audit)
- name: Run cargo machete
run: |
- [[ ${{ matrix.toolchain }} != 1.87.0 ]] || (cargo install cargo-machete --version 0.7.0 && cargo machete)
+ [[ ${{ matrix.toolchain }} != 1.91.1 ]] || (cargo install cargo-machete --version 0.7.0 && cargo machete)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e7c25c7f..e8cb8dbe 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,10 +2,125 @@
All notable changes to this project will be documented in this file.
+## [0.8.0](https://github.com/cloudflare/pingora/compare/0.7.0...0.8.0) - 2026-03-02
+
+
+**🚀 Features**
+
+* Add support for client certificate verification in mTLS configuration.
+* Add upstream\_write\_pending\_time to Session for upload diagnostics.
+* Pipe subrequests utility: creates a state machine to treat subrequests as a "pipe," enabling direct sending of request body and writing of response tasks, with a handler for error propagation and support for reusing a preset or captured input body for chained subrequests.
+* Add the ability to limit the number of times a downstream connection can be reused
+* Add a system for specifying and using service-level dependencies
+* Add a builder for pingora proxy service, e.g. to specify ServerOptions.
+
+**🐛 Bug Fixes**
+
+* Fix various Windows compiler issues.
+* Handle custom ALPNs in s2n impl of ALPN::to\_wire\_protocols() to fix s2n compile issues.
+* Fix: don't use "all" permissions for socket.
+* Fix a bug with the ketama load balancing where configurations were not persisted after updates.
+* Ensure http1 downstream session is not reused on more body bytes than expected.
+* Send RST\_STREAM CANCEL on application read timeouts for h2 client.
+* Start close-delimited body mode after 101 is received for WebSocket upgrades. `UpgradedBody` is now an explicit HttpTask.
+* Avoid close delimit mode on http/1.0 req.
+* Reject invalid content-length http/1 requests to eliminate ambiguous request framing.
+* Validate invalid content-length on http/1 resp by default, and removes content-length from the response if transfer-encoding is present, per RFC.
+* Correct the custom protocol code for shutdown: changed the numeric code passed on shutdown to 0 to indicate an explicit shutdown rather than a transport error.
+
+**⚙️ Miscellaneous Tasks**
+
+* Remove `CacheKey::default` impl, users of caching should implement `cache_key_callback` themselves
+* Allow server bootstrapping to take place in the context of services with dependents and dependencies
+* Don't consider "bytes=" a valid range header: added an early check for an empty/whitespace-only range-set after the `bytes=` prefix, returning 416 Range Not Satisfiable, consistent with RFC 9110 14.1.2.
+* Strip {content, transfer}-encoding from 416s to mirror the behavior for 304 Not Modified responses.
+* Disable CONNECT method proxying by default, with an option to enable via server options; unsupported requests will now be automatically rejected.
+
+## [0.7.0](https://github.com/cloudflare/pingora/compare/0.6.0...0.7.0) - 2026-01-30
+
+### Highlights
+
+- Extensible SslDigest to save user-defined TLS context
+- Add ConnectionFilter trait for early TCP connection filtering
+
+### 🚀 Features
+
+- Add ConnectionFilter trait for early TCP connection filtering
+- Introduce a virtual L4 stream abstraction
+- Add support for verify_cert and verify_hostname using rustls
+- Exposes the HttpProxy struct to allow external crates to customize the proxy logic.
+- Exposes a new_mtls method for creating a HttpProxy with a client_cert_key to enable mtls peers.
+- Add SSLKEYLOGFILE support to rustls connector
+- Allow spawning background subrequests from main session
+- Allow Extensions in cache LockCore and user tracing
+- Add body-bytes tracking across H1/H2 and proxy metrics
+- Allow setting max_weight on MissFinishType::Appended
+- Allow adding SslDigestExtensions on downstream and upstream
+- Add Custom session support for encapsulated HTTP
+
+### 🐛 Bug Fixes
+
+- Use write timeout consistently for h2 body writes
+- Prevent downstream error prior to header from canceling cache fill
+- Fix debug log and new tests
+- Fix size calculation for buffer capacity
+- Fix cache admission on header only misses
+- Fix duplicate zero-size chunk on cache hit
+- Fix chunked trailer end parsing
+- Lock age timeouts cause lock reacquisition
+- Fix transfer fd compile error for non linux os
+
+### Sec
+
+- Removed atty
+- Upgrade lru to >= 0.16.3 crate version because of RUSTSEC-2026-0002
+
+### Everything Else
+
+- Add tracing to log reason for not caching an asset on cache put
+- Evict when asset count exceeds optional watermark
+- Remove trailing comma from Display for HttpPeer
+- Make ProxyHTTP::upstream_response_body_filter return an optional duration for rate limiting
+- Restore daemonize STDOUT/STDERR when error log file is not specified
+- Log task info when upstream header failed to send
+- Check cache enablement to determine cache fill
+- Update meta when revalidating before lock release
+- Add ForceFresh status to cache hit filter
+- Pass stale status to cache lock
+- Bump max multipart ranges to 200
+- Downgrade Expires header warn to debug log
+- CI and effective msrv bump to 1.83
+- Add default noop custom param to client Session
+- Use static str in ErrorSource or ErrorType as_str
+- Use bstr for formatting byte strings
+- Tweak the implementation of and documentation of `connection_filter` feature
+- Set h1.1 when proxying cacheable responses
+- Add or remove accept-ranges on range header filter
+- Update msrv in github ci, fixup .bleep
+- Override request keepalive on process shutdown
+- Add shutdown flag to proxy session
+- Add ResponseHeader in pingora_http crate's prelude
+- Add a configurable upgrade for pingora-ketama that reduces runtime cpu and memory
+- Add to cache api spans
+- Increase visibility of multirange items
+- Use seek_multipart on body readers
+- Log read error when reading trailers end
+- Re-add the warning about cache-api volatility
+- Default to close on downstream response before body finish
+- Ensure idle_timeout is polled even if idle_timeout is unset so notify events are registered for h2 idle pool, filter out closed connections when retrieving from h2 in use pool.
+- Add simple read test for invalid extra char in header end
+- Allow customizing lock status on Custom NoCacheReasons
+- Close h1 conn by default if req header unfinished
+- Add configurable retries for upgrade sock connect/accept
+- Deflake test by increasing write size
+- Make the version restrictions on rmp and rmp-serde more strict to prevent forcing consumers to use 2024 edition
+- Rewind preread bytes when parsing next H1 response
+- Add epoch and epoch_override to CacheMeta
+
## [0.6.0](https://github.com/cloudflare/pingora/compare/0.5.0...0.6.0) - 2025-08-15
-
+
### Highlights
-- This release bumps the minimum h2 crate dependency to guard against the [MadeYouReset]((https://blog.cloudflare.com/madeyoureset-an-http-2-vulnerability-thwarted-by-rapid-reset-mitigations/)) H2 attack
+- This release bumps the minimum h2 crate dependency to guard against the [MadeYouReset](https://blog.cloudflare.com/madeyoureset-an-http-2-vulnerability-thwarted-by-rapid-reset-mitigations/) H2 attack
### 🚀 Features
@@ -63,7 +178,7 @@ All notable changes to this project will be documented in this file.
## [0.5.0](https://github.com/cloudflare/pingora/compare/0.4.0...0.5.0) - 2025-05-09
-
+
### 🚀 Features
- [Add tweak_new_upstream_tcp_connection hook to invoke logic on new upstream TCP sockets prior to connection](https://github.com/cloudflare/pingora/commit/be4a023d18c2b061f64ad5efd0868f9498199c91)
@@ -76,7 +191,7 @@ All notable changes to this project will be documented in this file.
- [Add get_stale and get_stale_while_update for memory-cache](https://github.com/cloudflare/pingora/commit/bb28044cbe9ac9251940b8a313d970c7d15aaff6)
### 🐛 Bug Fixes
-
+
- [Fix deadloop if proxy_handle_upstream exits earlier than proxy_handle_downstream](https://github.com/cloudflare/pingora/commit/bb111aaa92b3753e650957df3a68f56b0cffc65d)
- [Check on h2 stream end if error occurred for forwarding HTTP tasks](https://github.com/cloudflare/pingora/commit/e18f41bb6ddb1d6354e824df3b91d77f3255bea2)
- [Check for content-length underflow on end of stream h2 header](https://github.com/cloudflare/pingora/commit/575d1aafd7c679a50a443701a4c55dcfdbc443b2)
@@ -91,9 +206,9 @@ All notable changes to this project will be documented in this file.
- [Always drain v1 request body before session reuse](https://github.com/cloudflare/pingora/commit/fda3317ec822678564d641e7cf1c9b77ee3759ff)
- [Fixes HTTP1 client reads to properly timeout on initial read](https://github.com/cloudflare/pingora/commit/3c7db34acb0d930ae7043290a88bc56c1cd77e45)
- [Fixes issue where if TLS client never sends any bytes, hangs forever](https://github.com/cloudflare/pingora/commit/d1bf0bcac98f943fd716278d674e7d10dce2223e)
-
+
### Everything Else
-
+
- [Add builder api for pingora listeners](https://github.com/cloudflare/pingora/commit/3f564af3ae56e898478e13e71d67d095d7f5dbbd)
- [Better handling for h1 requests that contain both transfer-encoding and content-length](https://github.com/cloudflare/pingora/commit/9287b82645be4a52b0b63530ba38aa0c7ddc4b77)
- [Allow setting raw path in request to support non-UTF8 use cases](https://github.com/cloudflare/pingora/commit/e6b823c5d89860bb97713fdf14f197f799aed6af)
@@ -209,7 +324,7 @@ All notable changes to this project will be documented in this file.
## [0.1.1](https://github.com/cloudflare/pingora/compare/0.1.0...0.1.1) - 2024-04-05
### 🚀 Features
-- `Server::new` now accepts `Into>`
+- `Server::new` now accepts `Into >`
- Implemented client `HttpSession::get_keepalive_values` for Keep-Alive parsing
- Expose `ListenFds` and `Fds` to fix a voldemort types issue
- Expose config options in `ServerConf`, provide new `Server` constructor
diff --git a/Cargo.toml b/Cargo.toml
index ce057972..d3c8603b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -29,16 +29,18 @@ members = [
]
[workspace.dependencies]
+bstr = "1.12.0"
tokio = "1"
+tokio-stream = { version = "0.1" }
async-trait = "0.1.42"
httparse = "1"
bytes = "1.0"
derivative = "2.2.0"
-http = "1.0.0"
+http = "1"
log = "0.4"
h2 = ">=0.4.11"
once_cell = "1"
-lru = "0.14"
+lru = "0.16.3"
ahash = ">=0.8.9"
[profile.bench]
diff --git a/README.md b/README.md
index 1cc716dc..94fd1a59 100644
--- a/README.md
+++ b/README.md
@@ -59,11 +59,11 @@ Both x86_64 and aarch64 architectures will be supported.
## Rust version
-Pingora keeps a rolling MSRV (minimum supported Rust version) policy of 6 months. This means we will accept PRs that upgrade the MSRV as long as the new Rust version used is at least 6 months old.
+Pingora keeps a rolling MSRV (minimum supported Rust version) policy of 6 months. This means we will accept PRs that upgrade the MSRV as long as the new Rust version used is at least 6 months old. However, we generally will not bump the highest MSRV across the workspace without a sufficiently compelling reason.
-Our current MSRV is effectively 1.83.
+Our current MSRV is 1.84.
-Previously Pingora advertised an MSRV of 1.72. Older Rust versions may still be able to compile via `cargo update` pinning dependencies such as `backtrace@0.3.74`. The advertised MSRV in config files will be officially bumped to 1.83 in an upcoming release.
+Currently not all crates enforce `rust-version` as it is possible to use some crates on lower versions.
## Build Requirements
diff --git a/clippy.toml b/clippy.toml
index ebba0354..83a5e087 100644
--- a/clippy.toml
+++ b/clippy.toml
@@ -1 +1 @@
-msrv = "1.72"
+msrv = "1.84"
diff --git a/docs/user_guide/rate_limiter.md b/docs/user_guide/rate_limiter.md
index fe337a19..31a6b5a9 100644
--- a/docs/user_guide/rate_limiter.md
+++ b/docs/user_guide/rate_limiter.md
@@ -20,7 +20,6 @@ Pingora provides a crate `pingora-limits` which provides a simple and easy to us
```rust
use async_trait::async_trait;
use once_cell::sync::Lazy;
-use pingora::http::ResponseHeader;
use pingora::prelude::*;
use pingora_limits::rate::Rate;
use std::sync::Arc;
@@ -135,11 +134,11 @@ impl ProxyHttp for LB {
```
## Testing
-To use the example above,
+To use the example above,
-1. Run your program with `cargo run`.
+1. Run your program with `cargo run`.
2. Verify the program is working with a few executions of ` curl localhost:6188 -H "appid:1" -v`
- - The first request should work and any later requests that arrive within 1s of a previous request should fail with:
+ - The first request should work and any later requests that arrive within 1s of a previous request should fail with:
```
* Trying 127.0.0.1:6188...
* Connected to localhost (127.0.0.1) port 6188 (#0)
@@ -148,20 +147,20 @@ To use the example above,
> User-Agent: curl/7.88.1
> Accept: */*
> appid:1
- >
+ >
< HTTP/1.1 429 Too Many Requests
< X-Rate-Limit-Limit: 1
< X-Rate-Limit-Remaining: 0
< X-Rate-Limit-Reset: 1
< Date: Sun, 14 Jul 2024 20:29:02 GMT
< Connection: close
- <
+ <
* Closing connection 0
```
## Complete Example
-You can run the pre-made example code in the [`pingora-proxy` examples folder](https://github.com/cloudflare/pingora/tree/main/pingora-proxy/examples/rate_limiter.rs) with
+You can run the pre-made example code in the [`pingora-proxy` examples folder](https://github.com/cloudflare/pingora/tree/main/pingora-proxy/examples/rate_limiter.rs) with
```
cargo run --example rate_limiter
-```
\ No newline at end of file
+```
diff --git a/pingora-boringssl/Cargo.toml b/pingora-boringssl/Cargo.toml
index ec0b7bc0..03086460 100644
--- a/pingora-boringssl/Cargo.toml
+++ b/pingora-boringssl/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "pingora-boringssl"
-version = "0.6.0"
+version = "0.8.0"
authors = ["Yuchen Wu "]
license = "Apache-2.0"
edition = "2021"
diff --git a/pingora-boringssl/src/boring_tokio.rs b/pingora-boringssl/src/boring_tokio.rs
index 4dd2f91e..ef5d60c2 100644
--- a/pingora-boringssl/src/boring_tokio.rs
+++ b/pingora-boringssl/src/boring_tokio.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -263,9 +263,7 @@ where
return Poll::Pending;
}
Err(e) => {
- return Poll::Ready(Err(e
- .into_io_error()
- .unwrap_or_else(|e| io::Error::new(io::ErrorKind::Other, e))));
+ return Poll::Ready(Err(e.into_io_error().unwrap_or_else(io::Error::other)));
}
}
diff --git a/pingora-boringssl/src/ext.rs b/pingora-boringssl/src/ext.rs
index 0af2bb0b..256e4ac5 100644
--- a/pingora-boringssl/src/ext.rs
+++ b/pingora-boringssl/src/ext.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-boringssl/src/lib.rs b/pingora-boringssl/src/lib.rs
index dd560a84..9701c598 100644
--- a/pingora-boringssl/src/lib.rs
+++ b/pingora-boringssl/src/lib.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-cache/Cargo.toml b/pingora-cache/Cargo.toml
index cd51b638..401d827c 100644
--- a/pingora-cache/Cargo.toml
+++ b/pingora-cache/Cargo.toml
@@ -1,9 +1,10 @@
[package]
name = "pingora-cache"
-version = "0.6.0"
+version = "0.8.0"
authors = ["Yuchen Wu "]
license = "Apache-2.0"
edition = "2021"
+rust-version = "1.84"
repository = "https://github.com/cloudflare/pingora"
categories = ["asynchronous", "network-programming"]
keywords = ["async", "http", "cache"]
@@ -17,19 +18,20 @@ name = "pingora_cache"
path = "src/lib.rs"
[dependencies]
-pingora-core = { version = "0.6.0", path = "../pingora-core", default-features = false }
-pingora-error = { version = "0.6.0", path = "../pingora-error" }
-pingora-header-serde = { version = "0.6.0", path = "../pingora-header-serde" }
-pingora-http = { version = "0.6.0", path = "../pingora-http" }
-pingora-lru = { version = "0.6.0", path = "../pingora-lru" }
-pingora-timeout = { version = "0.6.0", path = "../pingora-timeout" }
+pingora-core = { version = "0.8.0", path = "../pingora-core", default-features = false }
+pingora-error = { version = "0.8.0", path = "../pingora-error" }
+pingora-header-serde = { version = "0.8.0", path = "../pingora-header-serde" }
+pingora-http = { version = "0.8.0", path = "../pingora-http" }
+pingora-lru = { version = "0.8.0", path = "../pingora-lru" }
+pingora-timeout = { version = "0.8.0", path = "../pingora-timeout" }
+bstr = { workspace = true }
http = { workspace = true }
indexmap = "1"
once_cell = { workspace = true }
regex = "1"
blake2 = "0.10"
serde = { version = "1.0", features = ["derive"] }
-rmp-serde = "1"
+rmp-serde = "1.3.0"
bytes = { workspace = true }
httpdate = "1.0.2"
log = { workspace = true }
@@ -37,7 +39,7 @@ async-trait = { workspace = true }
parking_lot = "0.12"
cf-rustracing = "1.0"
cf-rustracing-jaeger = "1.0"
-rmp = "0.8"
+rmp = "0.8.14"
tokio = { workspace = true }
lru = { workspace = true }
ahash = { workspace = true }
@@ -49,7 +51,7 @@ rand = "0.8"
[dev-dependencies]
tokio-test = "0.4"
tokio = { workspace = true, features = ["fs"] }
-env_logger = "0.9"
+env_logger = "0.11"
dhat = "0"
futures = "0.3"
diff --git a/pingora-cache/benches/lru_memory.rs b/pingora-cache/benches/lru_memory.rs
index 1d0678dc..67428671 100644
--- a/pingora-cache/benches/lru_memory.rs
+++ b/pingora-cache/benches/lru_memory.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-cache/benches/lru_serde.rs b/pingora-cache/benches/lru_serde.rs
index 5c0809e4..237a827e 100644
--- a/pingora-cache/benches/lru_serde.rs
+++ b/pingora-cache/benches/lru_serde.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-cache/benches/simple_lru_memory.rs b/pingora-cache/benches/simple_lru_memory.rs
index 30500c72..fa1199e3 100644
--- a/pingora-cache/benches/simple_lru_memory.rs
+++ b/pingora-cache/benches/simple_lru_memory.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-cache/src/cache_control.rs b/pingora-cache/src/cache_control.rs
index 8083298e..98af7fbb 100644
--- a/pingora-cache/src/cache_control.rs
+++ b/pingora-cache/src/cache_control.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -255,13 +255,13 @@ impl CacheControl {
self.has_key_without_value("private")
}
- fn get_field_names(&self, key: &str) -> Option {
+ fn get_field_names(&self, key: &str) -> Option> {
let value = self.directives.get(key)?.as_ref()?;
Some(ListValueIter::from(value))
}
/// Get the values of `private=`
- pub fn private_field_names(&self) -> Option {
+ pub fn private_field_names(&self) -> Option> {
self.get_field_names("private")
}
@@ -271,7 +271,7 @@ impl CacheControl {
}
/// Get the values of `no-cache=`
- pub fn no_cache_field_names(&self) -> Option {
+ pub fn no_cache_field_names(&self) -> Option> {
self.get_field_names("no-cache")
}
diff --git a/pingora-cache/src/eviction/lru.rs b/pingora-cache/src/eviction/lru.rs
index 7b4846b9..d241ee69 100644
--- a/pingora-cache/src/eviction/lru.rs
+++ b/pingora-cache/src/eviction/lru.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -62,6 +62,29 @@ impl Manager {
Manager(Lru::with_capacity_and_watermark(limit, capacity, watermark))
}
+ /// Get the number of shards
+ pub fn shards(&self) -> usize {
+ self.0.shards()
+ }
+
+ /// Get the weight (total size) of a specific shard
+ pub fn shard_weight(&self, shard: usize) -> usize {
+ self.0.shard_weight(shard)
+ }
+
+ /// Get the number of items in a specific shard
+ pub fn shard_len(&self, shard: usize) -> usize {
+ self.0.shard_len(shard)
+ }
+
+ /// Get the shard index for a given cache key
+ ///
+ /// This allows callers to know which shard was affected by an operation
+ /// without acquiring any locks.
+ pub fn get_shard_for_key(&self, key: &CompactCacheKey) -> usize {
+ (u64key(key) % N as u64) as usize
+ }
+
/// Serialize the given shard
pub fn serialize_shard(&self, shard: usize) -> Result> {
use rmp_serde::encode::Serializer;
@@ -101,6 +124,12 @@ impl Manager {
.or_err(InternalError, "when deserializing LRU")?;
Ok(())
}
+
+ /// Peek the weight associated with a cache key without changing its LRU order.
+ pub fn peek_weight(&self, item: &CompactCacheKey) -> Option {
+ let key = u64key(item);
+ self.0.peek_weight(key)
+ }
}
struct InsertToManager<'a, const N: usize> {
@@ -171,9 +200,14 @@ impl EvictionManager for Manager {
.collect()
}
- fn increment_weight(&self, item: CompactCacheKey, delta: usize) -> Vec {
- let key = u64key(&item);
- self.0.increment_weight(key, delta);
+ fn increment_weight(
+ &self,
+ item: &CompactCacheKey,
+ delta: usize,
+ max_weight: Option,
+ ) -> Vec {
+ let key = u64key(item);
+ self.0.increment_weight(key, delta, max_weight);
self.0
.evict_to_limit()
.into_iter()
diff --git a/pingora-cache/src/eviction/mod.rs b/pingora-cache/src/eviction/mod.rs
index cd48cd4a..0e78fbe1 100644
--- a/pingora-cache/src/eviction/mod.rs
+++ b/pingora-cache/src/eviction/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -60,10 +60,18 @@ pub trait EvictionManager: Send + Sync {
/// Adjust an item's weight upwards by a delta. If the item is not already admitted,
/// nothing will happen.
///
+ /// An optional `max_weight` hint indicates the known max weight of the current key in case the
+ /// weight should not be incremented above this amount.
+ ///
/// Return one or more items to evict. The sizes of these items are deducted
/// from the total size already. The caller needs to make sure that these assets are actually
/// removed from the storage.
- fn increment_weight(&self, item: CompactCacheKey, delta: usize) -> Vec;
+ fn increment_weight(
+ &self,
+ item: &CompactCacheKey,
+ delta: usize,
+ max_weight: Option,
+ ) -> Vec;
/// Remove an item from the eviction manager.
///
diff --git a/pingora-cache/src/eviction/simple_lru.rs b/pingora-cache/src/eviction/simple_lru.rs
index 3125dfb4..1c887552 100644
--- a/pingora-cache/src/eviction/simple_lru.rs
+++ b/pingora-cache/src/eviction/simple_lru.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -124,7 +124,7 @@ impl Manager {
if self.used.load(Ordering::Relaxed) <= self.limit
&& self
.items_watermark
- .map_or(true, |w| self.items.load(Ordering::Relaxed) <= w)
+ .is_none_or(|w| self.items.load(Ordering::Relaxed) <= w)
{
return vec![];
}
@@ -235,8 +235,13 @@ impl EvictionManager for Manager {
self.evict()
}
- fn increment_weight(&self, item: CompactCacheKey, delta: usize) -> Vec {
- let key = u64key(&item);
+ fn increment_weight(
+ &self,
+ item: &CompactCacheKey,
+ delta: usize,
+ _max_weight: Option,
+ ) -> Vec {
+ let key = u64key(item);
self.increase_weight(key, delta);
self.evict()
}
diff --git a/pingora-cache/src/filters.rs b/pingora-cache/src/filters.rs
index 20202ea2..607e6303 100644
--- a/pingora-cache/src/filters.rs
+++ b/pingora-cache/src/filters.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -89,7 +89,7 @@ pub fn calculate_fresh_until(
if authorization_present {
let uncacheable = cache_control
.as_ref()
- .map_or(true, |cc| !cc.allow_caching_authorized_req());
+ .is_none_or(|cc| !cc.allow_caching_authorized_req());
if uncacheable {
return None;
}
diff --git a/pingora-cache/src/hashtable.rs b/pingora-cache/src/hashtable.rs
index 52292046..07ca5f3f 100644
--- a/pingora-cache/src/hashtable.rs
+++ b/pingora-cache/src/hashtable.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -49,11 +49,11 @@ where
}
#[allow(dead_code)]
- pub fn read(&self, key: u128) -> RwLockReadGuard> {
+ pub fn read(&self, key: u128) -> RwLockReadGuard<'_, HashMap> {
self.get(key).read()
}
- pub fn write(&self, key: u128) -> RwLockWriteGuard> {
+ pub fn write(&self, key: u128) -> RwLockWriteGuard<'_, HashMap> {
self.get(key).write()
}
@@ -103,7 +103,7 @@ where
pub fn new(shard_capacity: usize) -> Self {
use std::num::NonZeroUsize;
// safe, 1 != 0
- const ONE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(1) };
+ const ONE: NonZeroUsize = NonZeroUsize::new(1).unwrap();
let mut cache = ConcurrentLruCache {
lrus: Default::default(),
};
@@ -119,11 +119,11 @@ where
}
#[allow(dead_code)]
- pub fn read(&self, key: u128) -> RwLockReadGuard> {
+ pub fn read(&self, key: u128) -> RwLockReadGuard<'_, LruCache> {
self.get(key).read()
}
- pub fn write(&self, key: u128) -> RwLockWriteGuard> {
+ pub fn write(&self, key: u128) -> RwLockWriteGuard<'_, LruCache> {
self.get(key).write()
}
diff --git a/pingora-cache/src/key.rs b/pingora-cache/src/key.rs
index 0e2d51a6..c606d85d 100644
--- a/pingora-cache/src/key.rs
+++ b/pingora-cache/src/key.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,6 @@
//! Cache key
-use super::*;
-
use blake2::{Blake2b, Digest};
use http::Extensions;
use serde::{Deserialize, Serialize};
@@ -214,18 +212,6 @@ impl CacheKey {
hasher
}
- /// Create a default [CacheKey] from a request, which just takes its URI as the primary key.
- pub fn default(req_header: &ReqHeader) -> Self {
- CacheKey {
- namespace: Vec::new(),
- primary: format!("{}", req_header.uri).into_bytes(),
- primary_bin_override: None,
- variance: None,
- user_tag: "".into(),
- extensions: Extensions::new(),
- }
- }
-
/// Create a new [CacheKey] from the given namespace, primary, and user_tag input.
///
/// Both `namespace` and `primary` will be used for the primary hash
diff --git a/pingora-cache/src/lib.rs b/pingora-cache/src/lib.rs
index 98b466c2..867cff08 100644
--- a/pingora-cache/src/lib.rs
+++ b/pingora-cache/src/lib.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -398,8 +398,7 @@ impl HttpCache {
OriginNotCache | ResponseTooLarge | PredictedResponseTooLarge => {
LockStatus::GiveUp
}
- // not sure which LockStatus make sense, we treat it as GiveUp for now
- Custom(_) => LockStatus::GiveUp,
+ Custom(reason) => lock_ctx.cache_lock.custom_lock_status(reason),
// should never happen, NeverEnabled shouldn't hold a lock
NeverEnabled => panic!("NeverEnabled holds a write lock"),
CacheLockGiveUp | CacheLockTimeout => {
@@ -688,7 +687,7 @@ impl HttpCache {
self.inner_mut()
.max_file_size_tracker
.as_mut()
- .map_or(true, |t| t.add_body_bytes(bytes_len))
+ .is_none_or(|t| t.add_body_bytes(bytes_len))
}
/// Check if the max file size has been exceeded according to max file size tracker.
@@ -823,6 +822,18 @@ impl HttpCache {
}
}
+ /// Return whether the underlying storage backend supports streaming partial write.
+ ///
+ /// Returns None if cache is not enabled.
+ pub fn support_streaming_partial_write(&self) -> Option {
+ self.inner.as_ref().and_then(|inner| {
+ inner
+ .enabled_ctx
+ .as_ref()
+ .map(|c| c.storage.support_streaming_partial_write())
+ })
+ }
+
/// Call this when cache hit is fully read.
///
/// This call will release resource if any and log the timing in tracing if set.
@@ -969,8 +980,8 @@ impl HttpCache {
MissFinishType::Created(size) => {
eviction.admit(cache_key, size, meta.0.internal.fresh_until)
}
- MissFinishType::Appended(size) => {
- eviction.increment_weight(cache_key, size)
+ MissFinishType::Appended(size, max_size) => {
+ eviction.increment_weight(&cache_key, size, max_size)
}
};
// actual eviction can be done async
@@ -1250,6 +1261,18 @@ impl HttpCache {
}
}
+ /// Return the [`CacheKey`] of this asset if any.
+ ///
+ /// This is allowed to be called in any phase. If the cache key callback was not called,
+ /// this will return None.
+ pub fn maybe_cache_key(&self) -> Option<&CacheKey> {
+ (!matches!(
+ self.phase(),
+ CachePhase::Disabled(NoCacheReason::NeverEnabled) | CachePhase::Uninit
+ ))
+ .then(|| self.cache_key())
+ }
+
/// Perform the cache lookup from the given cache storage with the given cache key
///
/// A cache hit will return [CacheMeta] which contains the header and meta info about
@@ -1426,7 +1449,7 @@ impl HttpCache {
let mut span = inner_enabled.traces.child("cache_lock");
// should always call is_cache_locked() before this function, which should guarantee that
// the inner cache has a read lock and lock ctx
- if let Some(lock_ctx) = inner_enabled.lock_ctx.as_mut() {
+ let (read_lock, status) = if let Some(lock_ctx) = inner_enabled.lock_ctx.as_mut() {
let lock = lock_ctx.lock.take(); // remove the lock from self
if let Some(Locked::Read(r)) = lock {
let now = Instant::now();
@@ -1437,23 +1460,26 @@ impl HttpCache {
wait_timeout.saturating_sub(self.lock_duration().unwrap_or(Duration::ZERO));
match timeout(wait_timeout, r.wait()).await {
Ok(()) => r.lock_status(),
- // TODO: need to differentiate WaitTimeout vs. Lock(Age)Timeout (expired)?
- Err(_) => LockStatus::Timeout,
+ Err(_) => LockStatus::WaitTimeout,
}
} else {
r.wait().await;
r.lock_status()
};
self.digest.add_lock_duration(now.elapsed());
- let tag_value: &'static str = status.into();
- span.set_tag(|| Tag::new("status", tag_value));
- status
+ (r, status)
} else {
panic!("cache_lock_wait on wrong type of lock")
}
} else {
panic!("cache_lock_wait without cache lock")
+ };
+ if let Some(lock_ctx) = self.inner_enabled().lock_ctx.as_ref() {
+ lock_ctx
+ .cache_lock
+ .trace_lock_wait(&mut span, &read_lock, status);
}
+ status
}
/// How long did this request wait behind the read lock
diff --git a/pingora-cache/src/lock.rs b/pingora-cache/src/lock.rs
index 680f609e..5633b09c 100644
--- a/pingora-cache/src/lock.rs
+++ b/pingora-cache/src/lock.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,12 +15,14 @@
//! Cache lock
use crate::{hashtable::ConcurrentHashTable, key::CacheHashKey, CacheKey};
+use crate::{Span, Tag};
+use http::Extensions;
use pingora_timeout::timeout;
use std::sync::Arc;
use std::time::Duration;
-pub type CacheKeyLockImpl = (dyn CacheKeyLock + Send + Sync);
+pub type CacheKeyLockImpl = dyn CacheKeyLock + Send + Sync;
pub trait CacheKeyLock {
/// Try to lock a cache fetch
@@ -37,6 +39,19 @@ pub trait CacheKeyLock {
/// When the write lock is dropped without being released, the read lock holders will consider
/// it to be failed so that they will compete for the write lock again.
fn release(&self, key: &CacheKey, permit: WritePermit, reason: LockStatus);
+
+ /// Set tags on a trace span for the cache lock wait.
+ fn trace_lock_wait(&self, span: &mut Span, _read_lock: &ReadLock, lock_status: LockStatus) {
+ let tag_value: &'static str = lock_status.into();
+ span.set_tag(|| Tag::new("status", tag_value));
+ }
+
+ /// Set a lock status for a custom `NoCacheReason`.
+ fn custom_lock_status(&self, _custom_no_cache: &'static str) -> LockStatus {
+ // treat custom no cache reasons as GiveUp by default
+ // (like OriginNotCache)
+ LockStatus::GiveUp
+ }
}
const N_SHARDS: usize = 16;
@@ -106,7 +121,7 @@ impl CacheKeyLock for CacheLock {
// requests ought to recreate the lock.
if !matches!(
lock.0.lock_status(),
- LockStatus::Dangling | LockStatus::Timeout
+ LockStatus::Dangling | LockStatus::AgeTimeout
) {
return Locked::Read(lock.read_lock());
}
@@ -119,12 +134,13 @@ impl CacheKeyLock for CacheLock {
if let Some(lock) = table.get(&key) {
if !matches!(
lock.0.lock_status(),
- LockStatus::Dangling | LockStatus::Timeout
+ LockStatus::Dangling | LockStatus::AgeTimeout
) {
return Locked::Read(lock.read_lock());
}
}
- let (permit, stub) = WritePermit::new(self.age_timeout_default, stale_writer);
+ let (permit, stub) =
+ WritePermit::new(self.age_timeout_default, stale_writer, Extensions::new());
table.insert(key, stub);
Locked::Write(permit)
}
@@ -132,13 +148,13 @@ impl CacheKeyLock for CacheLock {
fn release(&self, key: &CacheKey, mut permit: WritePermit, reason: LockStatus) {
let hash = key.combined_bin();
let key = u128::from_be_bytes(hash); // endianness doesn't matter
- if permit.lock.lock_status() == LockStatus::Timeout {
+ if permit.lock.lock_status() == LockStatus::AgeTimeout {
// if lock age timed out, then readers are capable of
// replacing the lock associated with this permit from the lock table
// (see lock() implementation)
// keep the lock status as Timeout accordingly when unlocking
// (because we aren't removing it from the lock_table)
- permit.unlock(LockStatus::Timeout);
+ permit.unlock(LockStatus::AgeTimeout);
} else if let Some(_lock) = self.lock_table.write(key).remove(&key) {
permit.unlock(reason);
}
@@ -150,25 +166,28 @@ impl CacheKeyLock for CacheLock {
use log::warn;
use std::sync::atomic::{AtomicU8, Ordering};
use std::time::Instant;
-use strum::IntoStaticStr;
+use strum::{FromRepr, IntoStaticStr};
use tokio::sync::Semaphore;
/// Status which the read locks could possibly see.
-#[derive(Debug, Copy, Clone, PartialEq, Eq, IntoStaticStr)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq, IntoStaticStr, FromRepr)]
+#[repr(u8)]
pub enum LockStatus {
/// Waiting for the writer to populate the asset
- Waiting,
+ Waiting = 0,
/// The writer finishes, readers can start
- Done,
+ Done = 1,
/// The writer encountered error, such as network issue. A new writer will be elected.
- TransientError,
+ TransientError = 2,
/// The writer observed that no cache lock is needed (e.g., uncacheable), readers should start
/// to fetch independently without a new writer
- GiveUp,
+ GiveUp = 3,
/// The write lock is dropped without being unlocked
- Dangling,
- /// The lock is held for too long
- Timeout,
+ Dangling = 4,
+ /// Reader has held onto cache locks for too long, give up
+ WaitTimeout = 5,
+ /// The lock is held for too long by the writer
+ AgeTimeout = 6,
}
impl From for u8 {
@@ -179,22 +198,15 @@ impl From for u8 {
LockStatus::TransientError => 2,
LockStatus::GiveUp => 3,
LockStatus::Dangling => 4,
- LockStatus::Timeout => 5,
+ LockStatus::WaitTimeout => 5,
+ LockStatus::AgeTimeout => 6,
}
}
}
impl From for LockStatus {
fn from(v: u8) -> Self {
- match v {
- 0 => Self::Waiting,
- 1 => Self::Done,
- 2 => Self::TransientError,
- 3 => Self::GiveUp,
- 4 => Self::Dangling,
- 5 => Self::Timeout,
- _ => Self::GiveUp, // placeholder
- }
+ Self::from_repr(v).unwrap_or(Self::GiveUp)
}
}
@@ -206,16 +218,18 @@ pub struct LockCore {
// use u8 for Atomic enum
lock_status: AtomicU8,
stale_writer: bool,
+ extensions: Extensions,
}
impl LockCore {
- pub fn new_arc(timeout: Duration, stale_writer: bool) -> Arc {
+ pub fn new_arc(timeout: Duration, stale_writer: bool, extensions: Extensions) -> Arc {
Arc::new(LockCore {
lock: Semaphore::new(0),
age_timeout: timeout,
lock_start: Instant::now(),
lock_status: AtomicU8::new(LockStatus::Waiting.into()),
stale_writer,
+ extensions,
})
}
@@ -224,6 +238,10 @@ impl LockCore {
}
pub fn unlock(&self, reason: LockStatus) {
+ assert!(
+ reason != LockStatus::WaitTimeout,
+ "WaitTimeout is not stored in LockCore"
+ );
self.lock_status.store(reason.into(), Ordering::SeqCst);
// Any small positive number will do, 10 is used for RwLock as well.
// No need to wake up all at once.
@@ -238,6 +256,10 @@ impl LockCore {
pub fn stale_writer(&self) -> bool {
self.stale_writer
}
+
+ pub fn extensions(&self) -> &Extensions {
+ &self.extensions
+ }
}
// all 3 structs below are just Arc with different interfaces
@@ -268,14 +290,14 @@ impl ReadLock {
Err(_) => {
self.0
.lock_status
- .store(LockStatus::Timeout.into(), Ordering::SeqCst);
+ .store(LockStatus::AgeTimeout.into(), Ordering::SeqCst);
}
}
} else {
// expiration has already occurred, store timeout status
self.0
.lock_status
- .store(LockStatus::Timeout.into(), Ordering::SeqCst);
+ .store(LockStatus::AgeTimeout.into(), Ordering::SeqCst);
}
}
@@ -295,11 +317,15 @@ impl ReadLock {
pub fn lock_status(&self) -> LockStatus {
let status = self.0.lock_status();
if matches!(status, LockStatus::Waiting) && self.expired() {
- LockStatus::Timeout
+ LockStatus::AgeTimeout
} else {
status
}
}
+
+ pub fn extensions(&self) -> &Extensions {
+ self.0.extensions()
+ }
}
/// WritePermit: requires who get it need to populate the cache and then release it
@@ -311,8 +337,12 @@ pub struct WritePermit {
impl WritePermit {
/// Create a new lock, with a permit to be given to the associated writer.
- pub fn new(timeout: Duration, stale_writer: bool) -> (WritePermit, LockStub) {
- let lock = LockCore::new_arc(timeout, stale_writer);
+ pub fn new(
+ timeout: Duration,
+ stale_writer: bool,
+ extensions: Extensions,
+ ) -> (WritePermit, LockStub) {
+ let lock = LockCore::new_arc(timeout, stale_writer, extensions);
let stub = LockStub(lock.clone());
(
WritePermit {
@@ -336,6 +366,10 @@ impl WritePermit {
pub fn lock_status(&self) -> LockStatus {
self.lock.lock_status()
}
+
+ pub fn extensions(&self) -> &Extensions {
+ self.lock.extensions()
+ }
}
impl Drop for WritePermit {
@@ -354,6 +388,10 @@ impl LockStub {
pub fn read_lock(&self) -> ReadLock {
ReadLock(self.0.clone())
}
+
+ pub fn extensions(&self) -> &Extensions {
+ &self.0.extensions
+ }
}
#[cfg(test)]
@@ -417,7 +455,7 @@ mod test {
let handle = tokio::spawn(async move {
// timed out
lock.wait().await;
- assert_eq!(lock.lock_status(), LockStatus::Timeout);
+ assert_eq!(lock.lock_status(), LockStatus::AgeTimeout);
});
tokio::time::sleep(Duration::from_millis(2100)).await;
@@ -462,7 +500,7 @@ mod test {
let handle = tokio::spawn(async move {
// timed out
lock.wait().await;
- assert_eq!(lock.lock_status(), LockStatus::Timeout);
+ assert_eq!(lock.lock_status(), LockStatus::AgeTimeout);
});
tokio::time::sleep(Duration::from_millis(1100)).await; // let lock age time out
@@ -512,9 +550,9 @@ mod test {
};
// reader expires write permit
lock.wait().await;
- assert_eq!(lock.lock_status(), LockStatus::Timeout);
- assert_eq!(permit.lock.lock_status(), LockStatus::Timeout);
- permit.unlock(LockStatus::Timeout);
+ assert_eq!(lock.lock_status(), LockStatus::AgeTimeout);
+ assert_eq!(permit.lock.lock_status(), LockStatus::AgeTimeout);
+ permit.unlock(LockStatus::AgeTimeout);
}
#[tokio::test]
diff --git a/pingora-cache/src/max_file_size.rs b/pingora-cache/src/max_file_size.rs
index 106b012e..7c9eccd9 100644
--- a/pingora-cache/src/max_file_size.rs
+++ b/pingora-cache/src/max_file_size.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-cache/src/memory.rs b/pingora-cache/src/memory.rs
index 786cf453..6ab57c80 100644
--- a/pingora-cache/src/memory.rs
+++ b/pingora-cache/src/memory.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-cache/src/meta.rs b/pingora-cache/src/meta.rs
index 9c6bd6fc..4545ee22 100644
--- a/pingora-cache/src/meta.rs
+++ b/pingora-cache/src/meta.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -93,8 +93,10 @@ mod internal_meta {
// schema to decode it
// After full releases, remove `skip_serializing_if` so that we can add the next extended field.
#[serde(default)]
- #[serde(skip_serializing_if = "Option::is_none")]
pub(crate) variance: Option,
+ #[serde(default)]
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub(crate) epoch_override: Option,
}
impl Default for InternalMetaV2 {
@@ -108,6 +110,7 @@ mod internal_meta {
stale_while_revalidate_sec: 0,
stale_if_error_sec: 0,
variance: None,
+ epoch_override: None,
}
}
}
@@ -258,35 +261,75 @@ mod internal_meta {
assert_eq!(meta2.created, meta2.updated);
}
- #[test]
- fn test_internal_meta_serde_v2_extend_fields() {
- // make sure that v2 format is backward compatible
- // this is the base version of v2 without any extended fields
- #[derive(Deserialize, Serialize)]
- pub(crate) struct InternalMetaV2Base {
- pub(crate) version: u8,
- pub(crate) fresh_until: SystemTime,
- pub(crate) created: SystemTime,
- pub(crate) updated: SystemTime,
- pub(crate) stale_while_revalidate_sec: u32,
- pub(crate) stale_if_error_sec: u32,
+ // make sure that v2 format is backward compatible
+ // this is the base version of v2 without any extended fields
+ #[derive(Deserialize, Serialize)]
+ struct InternalMetaV2Base {
+ version: u8,
+ fresh_until: SystemTime,
+ created: SystemTime,
+ updated: SystemTime,
+ stale_while_revalidate_sec: u32,
+ stale_if_error_sec: u32,
+ }
+
+ impl InternalMetaV2Base {
+ pub const VERSION: u8 = 2;
+ pub fn serialize(&self) -> Result> {
+ assert!(self.version >= Self::VERSION);
+ rmp_serde::encode::to_vec(self).or_err(InternalError, "failed to encode cache meta")
+ }
+ fn deserialize(buf: &[u8]) -> Result {
+ rmp_serde::decode::from_slice(buf)
+ .or_err(InternalError, "failed to decode cache meta v2")
}
+ }
- impl InternalMetaV2Base {
- pub const VERSION: u8 = 2;
- pub fn serialize(&self) -> Result> {
- assert!(self.version >= Self::VERSION);
- rmp_serde::encode::to_vec(self)
- .or_err(InternalError, "failed to encode cache meta")
- }
- fn deserialize(buf: &[u8]) -> Result {
- rmp_serde::decode::from_slice(buf)
- .or_err(InternalError, "failed to decode cache meta v2")
+ // this is the base version of v2 with variance but without epoch_override
+ #[derive(Deserialize, Serialize)]
+ struct InternalMetaV2BaseWithVariance {
+ version: u8,
+ fresh_until: SystemTime,
+ created: SystemTime,
+ updated: SystemTime,
+ stale_while_revalidate_sec: u32,
+ stale_if_error_sec: u32,
+ #[serde(default)]
+ #[serde(skip_serializing_if = "Option::is_none")]
+ variance: Option,
+ }
+
+ impl Default for InternalMetaV2BaseWithVariance {
+ fn default() -> Self {
+ let epoch = SystemTime::UNIX_EPOCH;
+ InternalMetaV2BaseWithVariance {
+ version: InternalMetaV2::VERSION,
+ fresh_until: epoch,
+ created: epoch,
+ updated: epoch,
+ stale_while_revalidate_sec: 0,
+ stale_if_error_sec: 0,
+ variance: None,
}
}
+ }
+ impl InternalMetaV2BaseWithVariance {
+ pub const VERSION: u8 = 2;
+ pub fn serialize(&self) -> Result> {
+ assert!(self.version >= Self::VERSION);
+ rmp_serde::encode::to_vec(self).or_err(InternalError, "failed to encode cache meta")
+ }
+ fn deserialize(buf: &[u8]) -> Result {
+ rmp_serde::decode::from_slice(buf)
+ .or_err(InternalError, "failed to decode cache meta v2")
+ }
+ }
+
+ #[test]
+ fn test_internal_meta_serde_v2_extend_fields_variance() {
// ext V2 to base v2
- let meta = InternalMetaV2::default();
+ let meta = InternalMetaV2BaseWithVariance::default();
let binary = meta.serialize().unwrap();
let meta2 = InternalMetaV2Base::deserialize(&binary).unwrap();
assert_eq!(meta2.version, 2);
@@ -305,11 +348,62 @@ mod internal_meta {
stale_if_error_sec: 0,
};
let binary = meta.serialize().unwrap();
+ let meta2 = InternalMetaV2BaseWithVariance::deserialize(&binary).unwrap();
+ assert_eq!(meta2.version, 2);
+ assert_eq!(meta.fresh_until, meta2.fresh_until);
+ assert_eq!(meta.created, meta2.created);
+ assert_eq!(meta.updated, meta2.updated);
+ }
+
+ #[test]
+ fn test_internal_meta_serde_v2_extend_fields_epoch_override() {
+ let now = SystemTime::now();
+
+ // ext V2 (with epoch_override = None) to V2 with variance (without epoch_override field)
+ let meta = InternalMetaV2 {
+ fresh_until: now,
+ created: now,
+ updated: now,
+ epoch_override: None, // None means it will be skipped during serialization
+ ..Default::default()
+ };
+ let binary = meta.serialize().unwrap();
+ let meta2 = InternalMetaV2BaseWithVariance::deserialize(&binary).unwrap();
+ assert_eq!(meta2.version, 2);
+ assert_eq!(meta.fresh_until, meta2.fresh_until);
+ assert_eq!(meta.created, meta2.created);
+ assert_eq!(meta.updated, meta2.updated);
+ assert!(meta2.variance.is_none());
+
+ // V2 base with variance (without epoch_override) to ext V2 (with epoch_override)
+ let mut meta = InternalMetaV2BaseWithVariance {
+ version: InternalMetaV2::VERSION,
+ fresh_until: now,
+ created: now,
+ updated: now,
+ stale_while_revalidate_sec: 0,
+ stale_if_error_sec: 0,
+ variance: None,
+ };
+ let binary = meta.serialize().unwrap();
let meta2 = InternalMetaV2::deserialize(&binary).unwrap();
assert_eq!(meta2.version, 2);
assert_eq!(meta.fresh_until, meta2.fresh_until);
assert_eq!(meta.created, meta2.created);
assert_eq!(meta.updated, meta2.updated);
+ assert!(meta2.variance.is_none());
+ assert!(meta2.epoch_override.is_none());
+
+ // try with variance set
+ meta.variance = Some(*b"variance_testing");
+ let binary = meta.serialize().unwrap();
+ let meta2 = InternalMetaV2::deserialize(&binary).unwrap();
+ assert_eq!(meta2.version, 2);
+ assert_eq!(meta.fresh_until, meta2.fresh_until);
+ assert_eq!(meta.created, meta2.created);
+ assert_eq!(meta.updated, meta2.updated);
+ assert_eq!(meta.variance, meta2.variance);
+ assert!(meta2.epoch_override.is_none());
}
}
}
@@ -364,6 +458,32 @@ impl CacheMeta {
self.0.internal.updated
}
+ /// The reference point for cache age. This represents the "starting point" for `fresh_until`.
+ ///
+ /// This defaults to the `updated` timestamp but is overridden by the `epoch_override` field
+ /// if set.
+ pub fn epoch(&self) -> SystemTime {
+ self.0.internal.epoch_override.unwrap_or(self.updated())
+ }
+
+ /// Get the epoch override for this asset
+ pub fn epoch_override(&self) -> Option {
+ self.0.internal.epoch_override
+ }
+
+ /// Set the epoch override for this asset
+ ///
+ /// When set, this will be used as the reference point for calculating age and freshness
+ /// instead of the updated time.
+ pub fn set_epoch_override(&mut self, epoch: SystemTime) {
+ self.0.internal.epoch_override = Some(epoch);
+ }
+
+ /// Remove the epoch override for this asset
+ pub fn remove_epoch_override(&mut self) {
+ self.0.internal.epoch_override = None;
+ }
+
/// Is the asset still valid
pub fn is_fresh(&self, time: SystemTime) -> bool {
// NOTE: HTTP cache time resolution is second
@@ -372,15 +492,17 @@ impl CacheMeta {
/// How long (in seconds) the asset should be fresh since its admission/revalidation
///
- /// This is essentially the max-age value (or its equivalence)
+ /// This is essentially the max-age value (or its equivalence).
+ /// If an epoch override is set, it will be used as the reference point instead of the updated time.
pub fn fresh_sec(&self) -> u64 {
// swallow `duration_since` error, assets that are always stale have earlier `fresh_until` than `created`
// practically speaking we can always treat these as 0 ttl
// XXX: return Error if `fresh_until` is much earlier than expected?
+ let reference = self.epoch();
self.0
.internal
.fresh_until
- .duration_since(self.0.internal.updated)
+ .duration_since(reference)
.map_or(0, |duration| duration.as_secs())
}
@@ -390,9 +512,12 @@ impl CacheMeta {
}
/// How old the asset is since its admission/revalidation
+ ///
+ /// If an epoch override is set, it will be used as the reference point instead of the updated time.
pub fn age(&self) -> Duration {
+ let reference = self.epoch();
SystemTime::now()
- .duration_since(self.updated())
+ .duration_since(reference)
.unwrap_or_default()
}
@@ -499,6 +624,7 @@ impl CacheMeta {
pub fn serialize(&self) -> Result<(Vec, Vec)> {
let internal = self.0.internal.serialize()?;
let header = header_serialize(&self.0.header)?;
+ log::debug!("header to serialize: {:?}", &self.0.header);
Ok((internal, header))
}
@@ -616,3 +742,93 @@ pub fn set_compression_dict_path(path: &str) -> bool {
pub fn set_compression_dict_content(content: Cow<'static, [u8]>) -> bool {
COMPRESSION_DICT_CONTENT.set(content).is_ok()
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::time::Duration;
+
+ #[test]
+ fn test_cache_meta_age_without_override() {
+ let now = SystemTime::now();
+ let header = ResponseHeader::build_no_case(200, None).unwrap();
+ let meta = CacheMeta::new(now + Duration::from_secs(300), now, 0, 0, header);
+
+ // Without epoch_override, age() should use updated() as reference
+ std::thread::sleep(Duration::from_millis(100));
+ let age = meta.age();
+ assert!(age.as_secs() < 1, "age should be close to 0");
+
+ // epoch() should return updated() when no override is set
+ assert_eq!(meta.epoch(), meta.updated());
+ }
+
+ #[test]
+ fn test_cache_meta_age_with_epoch_override_past() {
+ let now = SystemTime::now();
+ let header = ResponseHeader::build(200, None).unwrap();
+ let mut meta = CacheMeta::new(now + Duration::from_secs(300), now, 0, 0, header);
+
+ // Set epoch_override to 10 seconds in the past
+ let epoch_override = now - Duration::from_secs(10);
+ meta.set_epoch_override(epoch_override);
+
+ // age() should now use epoch_override as the reference
+ let age = meta.age();
+ assert!(age.as_secs() >= 10);
+ assert!(age.as_secs() < 12);
+
+ // epoch() should return the override
+ assert_eq!(meta.epoch(), epoch_override);
+ assert_eq!(meta.epoch_override(), Some(epoch_override));
+ }
+
+ #[test]
+ fn test_cache_meta_age_with_epoch_override_future() {
+ let now = SystemTime::now();
+ let header = ResponseHeader::build(200, None).unwrap();
+ let mut meta = CacheMeta::new(now + Duration::from_secs(100), now, 0, 0, header);
+
+ // Set epoch_override to a future time
+ let future_epoch = now + Duration::from_secs(10);
+ meta.set_epoch_override(future_epoch);
+
+ let age_with_epoch = meta.age();
+ // age should be 0 since epoch_override is in the future
+ assert_eq!(age_with_epoch, Duration::ZERO);
+ }
+
+ #[test]
+ fn test_cache_meta_fresh_sec() {
+ let header = ResponseHeader::build(StatusCode::OK, None).unwrap();
+ let mut meta = CacheMeta::new(
+ SystemTime::now() + Duration::from_secs(100),
+ SystemTime::now() - Duration::from_secs(100),
+ 0,
+ 0,
+ header,
+ );
+
+ meta.0.internal.updated = SystemTime::UNIX_EPOCH + Duration::from_secs(1000);
+ meta.0.internal.fresh_until = SystemTime::UNIX_EPOCH + Duration::from_secs(1100);
+
+ // Without epoch_override, fresh_sec should use updated as reference
+ let fresh_sec_without_override = meta.fresh_sec();
+ assert_eq!(fresh_sec_without_override, 100); // 1100 - 1000 = 100 seconds
+
+ // With epoch_override set to a later time (1050), fresh_sec should be calculated from that reference
+ let epoch_override = SystemTime::UNIX_EPOCH + Duration::from_secs(1050);
+ meta.set_epoch_override(epoch_override);
+ assert_eq!(meta.epoch_override(), Some(epoch_override));
+ assert_eq!(meta.epoch(), epoch_override);
+
+ let fresh_sec_with_override = meta.fresh_sec();
+ // fresh_until - epoch_override = 1100 - 1050 = 50 seconds
+ assert_eq!(fresh_sec_with_override, 50);
+
+ meta.remove_epoch_override();
+ assert_eq!(meta.epoch_override(), None);
+ assert_eq!(meta.epoch(), meta.updated());
+ assert_eq!(meta.fresh_sec(), 100); // back to normal calculation
+ }
+}
diff --git a/pingora-cache/src/predictor.rs b/pingora-cache/src/predictor.rs
index 58f1315f..8c2f5a8f 100644
--- a/pingora-cache/src/predictor.rs
+++ b/pingora-cache/src/predictor.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-cache/src/put.rs b/pingora-cache/src/put.rs
index 4c82a482..fbbbb70e 100644
--- a/pingora-cache/src/put.rs
+++ b/pingora-cache/src/put.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -34,6 +34,9 @@ pub trait CachePut {
/// Return the [CacheMetaDefaults]
fn cache_defaults() -> &'static CacheMetaDefaults;
+
+ /// Put interesting things in the span given the parsed response header.
+ fn trace_header(&mut self, _response: &ResponseHeader) {}
}
use parse_response::ResponseParse;
@@ -81,11 +84,12 @@ impl CachePutCtx {
}
async fn put_header(&mut self, meta: CacheMeta) -> Result<()> {
- let trace = self.trace.child("cache put header", |o| o.start()).handle();
+ let mut trace = self.trace.child("cache put header", |o| o.start());
let miss_handler = self
.storage
- .get_miss_handler(&self.key, &meta, &trace)
+ .get_miss_handler(&self.key, &meta, &trace.handle())
.await?;
+ trace::tag_span_with_meta(&mut trace, &meta);
self.miss_handler = Some(miss_handler);
self.meta = Some(meta);
Ok(())
@@ -121,7 +125,9 @@ impl CachePutCtx {
let cache_key = self.key.to_compact();
let meta = self.meta.as_ref().unwrap();
let evicted = match finish {
- MissFinishType::Appended(delta) => eviction.increment_weight(cache_key, delta),
+ MissFinishType::Appended(delta, max_size) => {
+ eviction.increment_weight(&cache_key, delta, max_size)
+ }
MissFinishType::Created(size) => {
eviction.admit(cache_key, size, meta.0.internal.fresh_until)
}
@@ -144,29 +150,48 @@ impl CachePutCtx {
Ok(())
}
+ fn trace_header(&mut self, header: &ResponseHeader) {
+ self.trace.set_tag(|| {
+ Tag::new(
+ "cache-control",
+ header
+ .headers
+ .get_all(http::header::CACHE_CONTROL)
+ .into_iter()
+ .map(|v| String::from_utf8_lossy(v.as_bytes()).to_string())
+ .collect::>()
+ .join(","),
+ )
+ });
+ }
+
async fn do_cache_put(&mut self, data: &[u8]) -> Result> {
let tasks = self.parser.inject_data(data)?;
for task in tasks {
match task {
- HttpTask::Header(header, _eos) => match self.cache_put.cacheable(*header) {
- RespCacheable::Cacheable(meta) => {
- if let Some(max_file_size_tracker) = &self.max_file_size_tracker {
- let content_length_hdr = meta.headers().get(header::CONTENT_LENGTH);
- if let Some(content_length) =
- header_value_content_length(content_length_hdr)
- {
- if content_length > max_file_size_tracker.max_file_size_bytes() {
- return Ok(Some(NoCacheReason::ResponseTooLarge));
+ HttpTask::Header(header, _eos) => {
+ self.trace_header(&header);
+ match self.cache_put.cacheable(*header) {
+ RespCacheable::Cacheable(meta) => {
+ if let Some(max_file_size_tracker) = &self.max_file_size_tracker {
+ let content_length_hdr = meta.headers().get(header::CONTENT_LENGTH);
+ if let Some(content_length) =
+ header_value_content_length(content_length_hdr)
+ {
+ if content_length > max_file_size_tracker.max_file_size_bytes()
+ {
+ return Ok(Some(NoCacheReason::ResponseTooLarge));
+ }
}
}
- }
- self.put_header(meta).await?;
- }
- RespCacheable::Uncacheable(reason) => {
- return Ok(Some(reason));
+ self.put_header(meta).await?;
+ }
+ RespCacheable::Uncacheable(reason) => {
+ return Ok(Some(reason));
+ }
}
- },
+ }
HttpTask::Body(data, eos) => {
if let Some(data) = data {
self.put_body(data, eos).await?;
@@ -369,6 +394,7 @@ mod test {
mod parse_response {
use super::*;
+ use bstr::ByteSlice;
use bytes::BytesMut;
use httparse::Status;
use pingora_error::{
@@ -475,7 +501,7 @@ mod parse_response {
self.state = ParseState::Invalid(e);
return Error::e_because(
InvalidHTTPHeader,
- format!("buf: {:?}", String::from_utf8_lossy(&self.buf)),
+ format!("buf: {:?}", self.buf.as_bstr()),
e,
);
}
diff --git a/pingora-cache/src/storage.rs b/pingora-cache/src/storage.rs
index 6a870b43..5df1526d 100644
--- a/pingora-cache/src/storage.rs
+++ b/pingora-cache/src/storage.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -117,18 +117,39 @@ pub trait HandleHit {
trace: &SpanHandle,
) -> Result<()>;
- /// Whether this storage allow seeking to a certain range of body
+ /// Whether this storage allows seeking to a certain range of body for single ranges.
fn can_seek(&self) -> bool {
false
}
- /// Try to seek to a certain range of the body
+ /// Whether this storage allows seeking to a certain range of body for multipart ranges.
+ ///
+ /// By default uses the `can_seek` implementation.
+ fn can_seek_multipart(&self) -> bool {
+ self.can_seek()
+ }
+
+ /// Try to seek to a certain range of the body for single ranges.
///
/// `end: None` means to read to the end of the body.
fn seek(&mut self, _start: usize, _end: Option) -> Result<()> {
// to prevent impl can_seek() without impl seek
todo!("seek() needs to be implemented")
}
+
+ /// Try to seek to a certain range of the body for multipart ranges.
+ ///
+ /// Works in an identical manner to `seek()`.
+ ///
+ /// `end: None` means to read to the end of the body.
+ ///
+ /// By default uses the `seek` implementation, but hit handlers may customize the
+ /// implementation specifically to anticipate multipart requests.
+ fn seek_multipart(&mut self, start: usize, end: Option) -> Result<()> {
+ // to prevent impl can_seek() without impl seek
+ self.seek(start, end)
+ }
+
// TODO: fn is_stream_hit()
/// Should we count this hit handler instance as an access in the eviction manager.
@@ -157,12 +178,14 @@ pub trait HandleHit {
}
/// Hit Handler
-pub type HitHandler = Box<(dyn HandleHit + Sync + Send)>;
+pub type HitHandler = Box;
/// MissFinishType
pub enum MissFinishType {
+ /// A new asset was created with the given size.
Created(usize),
- Appended(usize),
+ /// Appended size to existing asset, with an optional max size param.
+ Appended(usize, Option),
}
/// Cache miss handling trait
@@ -197,7 +220,7 @@ pub trait HandleMiss {
}
/// Miss Handler
-pub type MissHandler = Box<(dyn HandleMiss + Sync + Send)>;
+pub type MissHandler = Box;
pub mod streaming_write {
/// Portable u64 (sized) write id convenience type for use with streaming writes.
diff --git a/pingora-cache/src/trace.rs b/pingora-cache/src/trace.rs
index 90d4f1c3..f27929a2 100644
--- a/pingora-cache/src/trace.rs
+++ b/pingora-cache/src/trace.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -33,6 +33,28 @@ pub(crate) struct CacheTraceCTX {
pub hit_span: Span,
}
+pub fn tag_span_with_meta(span: &mut Span, meta: &CacheMeta) {
+ fn ts2epoch(ts: SystemTime) -> f64 {
+ ts.duration_since(SystemTime::UNIX_EPOCH)
+ .unwrap_or_default() // should never overflow but be safe here
+ .as_secs_f64()
+ }
+ let internal = &meta.0.internal;
+ span.set_tags(|| {
+ [
+ Tag::new("created", ts2epoch(internal.created)),
+ Tag::new("fresh_until", ts2epoch(internal.fresh_until)),
+ Tag::new("updated", ts2epoch(internal.updated)),
+ Tag::new("stale_if_error_sec", internal.stale_if_error_sec as i64),
+ Tag::new(
+ "stale_while_revalidate_sec",
+ internal.stale_while_revalidate_sec as i64,
+ ),
+ Tag::new("variance", internal.variance.is_some()),
+ ]
+ });
+}
+
impl CacheTraceCTX {
pub fn new() -> Self {
CacheTraceCTX {
@@ -82,33 +104,11 @@ impl CacheTraceCTX {
self.hit_span.set_finish_time(SystemTime::now);
}
- fn log_meta(span: &mut Span, meta: &CacheMeta) {
- fn ts2epoch(ts: SystemTime) -> f64 {
- ts.duration_since(SystemTime::UNIX_EPOCH)
- .unwrap_or_default() // should never overflow but be safe here
- .as_secs_f64()
- }
- let internal = &meta.0.internal;
- span.set_tags(|| {
- [
- Tag::new("created", ts2epoch(internal.created)),
- Tag::new("fresh_until", ts2epoch(internal.fresh_until)),
- Tag::new("updated", ts2epoch(internal.updated)),
- Tag::new("stale_if_error_sec", internal.stale_if_error_sec as i64),
- Tag::new(
- "stale_while_revalidate_sec",
- internal.stale_while_revalidate_sec as i64,
- ),
- Tag::new("variance", internal.variance.is_some()),
- ]
- });
- }
-
pub fn log_meta_in_hit_span(&mut self, meta: &CacheMeta) {
- CacheTraceCTX::log_meta(&mut self.hit_span, meta);
+ tag_span_with_meta(&mut self.hit_span, meta);
}
pub fn log_meta_in_miss_span(&mut self, meta: &CacheMeta) {
- CacheTraceCTX::log_meta(&mut self.miss_span, meta);
+ tag_span_with_meta(&mut self.miss_span, meta);
}
}
diff --git a/pingora-core/Cargo.toml b/pingora-core/Cargo.toml
index e5ed2834..5822932d 100644
--- a/pingora-core/Cargo.toml
+++ b/pingora-core/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "pingora-core"
-version = "0.6.0"
+version = "0.8.0"
authors = ["Yuchen Wu "]
license = "Apache-2.0"
edition = "2021"
@@ -19,16 +19,18 @@ name = "pingora_core"
path = "src/lib.rs"
[dependencies]
-pingora-runtime = { version = "0.6.0", path = "../pingora-runtime" }
-pingora-openssl = { version = "0.6.0", path = "../pingora-openssl", optional = true }
-pingora-boringssl = { version = "0.6.0", path = "../pingora-boringssl", optional = true }
-pingora-pool = { version = "0.6.0", path = "../pingora-pool" }
-pingora-error = { version = "0.6.0", path = "../pingora-error" }
-pingora-timeout = { version = "0.6.0", path = "../pingora-timeout" }
-pingora-http = { version = "0.6.0", path = "../pingora-http" }
-pingora-rustls = { version = "0.6.0", path = "../pingora-rustls", optional = true }
-pingora-s2n = { version = "0.6.0", path = "../pingora-s2n", optional = true }
+pingora-runtime = { version = "0.8.0", path = "../pingora-runtime" }
+pingora-openssl = { version = "0.8.0", path = "../pingora-openssl", optional = true }
+pingora-boringssl = { version = "0.8.0", path = "../pingora-boringssl", optional = true }
+pingora-pool = { version = "0.8.0", path = "../pingora-pool" }
+pingora-error = { version = "0.8.0", path = "../pingora-error" }
+pingora-timeout = { version = "0.8.0", path = "../pingora-timeout" }
+pingora-http = { version = "0.8.0", path = "../pingora-http" }
+pingora-rustls = { version = "0.8.0", path = "../pingora-rustls", optional = true }
+pingora-s2n = { version = "0.8.0", path = "../pingora-s2n", optional = true }
+bstr = { workspace = true }
tokio = { workspace = true, features = ["net", "rt-multi-thread", "signal"] }
+tokio-stream = { workspace = true }
futures = "0.3"
async-trait = { workspace = true }
httparse = { workspace = true }
@@ -37,10 +39,10 @@ http = { workspace = true }
log = { workspace = true }
h2 = { workspace = true }
derivative.workspace = true
-clap = { version = "3.2.25", features = ["derive"] }
+clap = { version = "4.5", features = ["derive"] }
once_cell = { workspace = true }
serde = { version = "1.0", features = ["derive"] }
-serde_yaml = "0.8"
+serde_yaml = "0.9"
strum = "0.26.2"
strum_macros = "0.26.2"
libc = "0.2.70"
@@ -69,7 +71,8 @@ zstd = "0"
httpdate = "1"
x509-parser = { version = "0.16.0", optional = true }
ouroboros = { version = "0.18.4", optional = true }
-lru = { version = "0.16.0", optional = true }
+lru = { workspace = true, optional = true }
+daggy = "0.8"
proxy-protocol = {git = "https://github.com/arxignis/proxy-protocol.git"}
[target.'cfg(unix)'.dependencies]
daemonize = "0.5.0"
@@ -79,14 +82,15 @@ nix = "~0.24.3"
windows-sys = { version = "0.59.0", features = ["Win32_Networking_WinSock"] }
[dev-dependencies]
-h2 = { workspace = true, features=["unstable"]}
+h2 = { workspace = true, features = ["unstable"] }
tokio-stream = { version = "0.1", features = ["full"] }
-env_logger = "0.9"
+env_logger = "0.11"
reqwest = { version = "0.11", features = [
"rustls-tls",
], default-features = false }
hyper = "0.14"
rstest = "0.23.0"
+rustls = "0.23"
[target.'cfg(unix)'.dev-dependencies]
hyperlocal = "0.8"
@@ -102,3 +106,4 @@ patched_http1 = ["pingora-http/patched_http1"]
openssl_derived = ["any_tls"]
any_tls = []
sentry = ["dep:sentry"]
+connection_filter = []
diff --git a/pingora-core/examples/bootstrap_as_a_service.rs b/pingora-core/examples/bootstrap_as_a_service.rs
new file mode 100644
index 00000000..c49ad271
--- /dev/null
+++ b/pingora-core/examples/bootstrap_as_a_service.rs
@@ -0,0 +1,102 @@
+// Copyright 2026 Cloudflare, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Example demonstrating how to start a server using [`Server::bootstrap_as_a_service`]
+//! instead of calling [`Server::bootstrap`] directly.
+//!
+//! # Why `bootstrap_as_a_service`?
+//!
+//! [`Server::bootstrap`] runs the bootstrap phase synchronously before any services start.
+//! This means the calling thread blocks during socket FD acquisition and Sentry initialization.
+//!
+//! [`Server::bootstrap_as_a_service`] instead schedules bootstrap as a dependency-aware init
+//! service. This allows other services to declare a dependency on the bootstrap handle and
+//! ensures they only start after bootstrap completes — while keeping setup fully asynchronous
+//! and composable with the rest of the service graph.
+//!
+//! Use `bootstrap_as_a_service` when:
+//! - You want to integrate bootstrap into the service dependency graph
+//! - You want services to wait for bootstrap without blocking the main thread
+//! - You are building more complex startup sequences (e.g. multiple ordered init steps)
+//!
+//! # Running the example
+//!
+//! ```bash
+//! cargo run --example bootstrap_as_a_service --package pingora-core
+//! ```
+//!
+//! # Expected behaviour
+//!
+//! Bootstrap runs as a service before `MyService` starts. `MyService` declares a dependency
+//! on the bootstrap handle, so it will not be started until bootstrap has completed.
+
+use async_trait::async_trait;
+use log::info;
+use pingora_core::server::configuration::Opt;
+#[cfg(unix)]
+use pingora_core::server::ListenFds;
+use pingora_core::server::{Server, ShutdownWatch};
+use pingora_core::services::Service;
+
+/// A simple application service that requires bootstrap to be complete before it starts.
+pub struct MyService;
+
+#[async_trait]
+impl Service for MyService {
+ async fn start_service(
+ &mut self,
+ #[cfg(unix)] _fds: Option<ListenFds>,
+ mut shutdown: ShutdownWatch,
+ _listeners_per_fd: usize,
+ ) {
+ info!("MyService: bootstrap is complete, starting up");
+
+ // Keep running until a shutdown signal is received.
+ shutdown.changed().await.ok();
+
+ info!("MyService: shutting down");
+ }
+
+ fn name(&self) -> &str {
+ "my_service"
+ }
+
+ fn threads(&self) -> Option<usize> {
+ Some(1)
+ }
+}
+
+fn main() {
+ env_logger::Builder::from_default_env()
+ .filter_level(log::LevelFilter::Info)
+ .init();
+
+ let opt = Opt::parse_args();
+ let mut server = Server::new(Some(opt)).unwrap();
+
+ // Schedule bootstrap as a service instead of calling server.bootstrap() directly.
+ // The returned handle can be used to declare dependencies so that other services
+ // only start after bootstrap has finished.
+ let bootstrap_handle = server.bootstrap_as_a_service();
+
+ // Register our application service and get its handle.
+ let service_handle = server.add_service(MyService);
+
+ // MyService will not start until the bootstrap service has signaled that it is ready.
+ service_handle.add_dependency(&bootstrap_handle);
+
+ info!("Starting server — bootstrap will run as a service before MyService starts");
+
+ server.run_forever();
+}
diff --git a/pingora-core/examples/client_cert.rs b/pingora-core/examples/client_cert.rs
new file mode 100644
index 00000000..cbac46a1
--- /dev/null
+++ b/pingora-core/examples/client_cert.rs
@@ -0,0 +1,227 @@
+// Copyright 2026 Cloudflare, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![cfg_attr(not(feature = "openssl"), allow(unused))]
+
+use std::any::Any;
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use clap::Parser;
+use http::header::{CONTENT_LENGTH, CONTENT_TYPE};
+use http::{Response, StatusCode};
+use pingora_core::apps::http_app::ServeHttp;
+use pingora_core::listeners::tls::TlsSettings;
+use pingora_core::listeners::TlsAccept;
+use pingora_core::protocols::http::ServerSession;
+use pingora_core::protocols::tls::TlsRef;
+use pingora_core::server::configuration::Opt;
+use pingora_core::server::Server;
+use pingora_core::services::listening::Service;
+use pingora_core::Result;
+#[cfg(feature = "openssl")]
+use pingora_openssl::{
+ nid::Nid,
+ ssl::{NameType, SslFiletype, SslVerifyMode},
+ x509::{GeneralName, X509Name},
+};
+
+// Custom structure to hold TLS information
+struct MyTlsInfo {
+ // SNI (Server Name Indication) from the TLS handshake
+ sni: Option<String>,
+ // SANs (Subject Alternative Names) from client certificate
+ sans: Vec<String>,
+ // Common Name (CN) from client certificate
+ common_name: Option<String>,
+}
+
+struct MyApp;
+
+#[async_trait]
+impl ServeHttp for MyApp {
+ async fn response(&self, session: &mut ServerSession) -> http::Response<Vec<u8>> {
+ static EMPTY_VEC: Vec<String> = vec![];
+
+ // Extract TLS info from the session's digest extensions
+ let my_tls_info = session
+ .digest()
+ .and_then(|digest| digest.ssl_digest.as_ref())
+ .and_then(|ssl_digest| ssl_digest.extension.get::<MyTlsInfo>());
+ let sni = my_tls_info
+ .and_then(|my_tls_info| my_tls_info.sni.as_deref())
+ .unwrap_or("");
+ let sans = my_tls_info
+ .map(|my_tls_info| &my_tls_info.sans)
+ .unwrap_or(&EMPTY_VEC);
+ let common_name = my_tls_info
+ .and_then(|my_tls_info| my_tls_info.common_name.as_deref())
+ .unwrap_or("");
+
+ // Create response message
+ let mut message = String::new();
+ message += &format!("Your SNI was: {sni}\n");
+ message += &format!("Your SANs were: {sans:?}\n");
+ message += &format!("Client Common Name (CN): {}\n", common_name);
+ let message = message.into_bytes();
+
+ Response::builder()
+ .status(StatusCode::OK)
+ .header(CONTENT_TYPE, "text/plain")
+ .header(CONTENT_LENGTH, message.len())
+ .body(message)
+ .unwrap()
+ }
+}
+
+struct MyTlsCallbacks;
+
+#[async_trait]
+impl TlsAccept for MyTlsCallbacks {
+ #[cfg(feature = "openssl")]
+ async fn handshake_complete_callback(
+ &self,
+ tls_ref: &TlsRef,
+ ) -> Option<Arc<dyn Any + Send + Sync>> {
+ // Here you can inspect the TLS connection and return an extension if needed.
+
+ // Extract SNI (Server Name Indication)
+ let sni = tls_ref
+ .servername(NameType::HOST_NAME)
+ .map(ToOwned::to_owned);
+
+ // Extract SAN (Subject Alternative Names) from the client certificate
+ let sans = tls_ref
+ .peer_certificate()
+ .and_then(|cert| cert.subject_alt_names())
+ .map_or(vec![], |sans| {
+ sans.into_iter()
+ .filter_map(|san| san_to_string(&san))
+ .collect::<Vec<String>>()
+ });
+
+ // Extract Common Name (CN) from the client certificate
+ let common_name = tls_ref.peer_certificate().and_then(|cert| {
+ let cn = cert.subject_name().entries_by_nid(Nid::COMMONNAME).next()?;
+ Some(cn.data().as_utf8().ok()?.to_string())
+ });
+
+ let tls_info = MyTlsInfo {
+ sni,
+ sans,
+ common_name,
+ };
+ Some(Arc::new(tls_info))
+ }
+}
+
+// Convert GeneralName of SAN to String representation
+#[cfg(feature = "openssl")]
+fn san_to_string(san: &GeneralName) -> Option<String> {
+ if let Some(dnsname) = san.dnsname() {
+ return Some(dnsname.to_owned());
+ }
+ if let Some(uri) = san.uri() {
+ return Some(uri.to_owned());
+ }
+ if let Some(email) = san.email() {
+ return Some(email.to_owned());
+ }
+ if let Some(ip) = san.ipaddress() {
+ return bytes_to_ip_addr(ip).map(|addr| addr.to_string());
+ }
+ None
+}
+
+// Convert byte slice to IpAddr
+fn bytes_to_ip_addr(bytes: &[u8]) -> Option<IpAddr> {
+ match bytes.len() {
+ 4 => {
+ let addr = Ipv4Addr::new(bytes[0], bytes[1], bytes[2], bytes[3]);
+ Some(IpAddr::V4(addr))
+ }
+ 16 => {
+ let mut octets = [0u8; 16];
+ octets.copy_from_slice(bytes);
+ let addr = Ipv6Addr::from(octets);
+ Some(IpAddr::V6(addr))
+ }
+ _ => None,
+ }
+}
+
+// This example demonstrates an HTTP server that requires client certificates.
+// The server extracts the SNI (Server Name Indication) from the TLS handshake and
+// SANs (Subject Alternative Names) from the client certificate, then returns them
+// as part of the HTTP response.
+//
+// ## How to run
+//
+// cargo run -F openssl --example client_cert
+//
+// # In another terminal, run the following command to test the server:
+// cd pingora-core
+// curl -k -i \
+// --cert examples/keys/clients/cert-1.pem --key examples/keys/clients/key-1.pem \
+// --resolve myapp.example.com:6196:127.0.0.1 \
+// https://myapp.example.com:6196/
+// curl -k -i \
+// --cert examples/keys/clients/cert-2.pem --key examples/keys/clients/key-2.pem \
+// --resolve myapp.example.com:6196:127.0.0.1 \
+// https://myapp.example.com:6196/
+// curl -k -i \
+// --cert examples/keys/clients/invalid-cert.pem --key examples/keys/clients/invalid-key.pem \
+// --resolve myapp.example.com:6196:127.0.0.1 \
+// https://myapp.example.com:6196/
+#[cfg(feature = "openssl")]
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+ env_logger::init();
+
+ // read command line arguments
+ let opt = Opt::parse();
+ let mut my_server = Server::new(Some(opt))?;
+ my_server.bootstrap();
+
+ let mut my_app = Service::new("my app".to_owned(), MyApp);
+
+ // Paths to server certificate, private key, and client CA certificate
+ let manifest_dir = env!("CARGO_MANIFEST_DIR");
+ let server_cert_path = format!("{manifest_dir}/examples/keys/server/cert.pem");
+ let server_key_path = format!("{manifest_dir}/examples/keys/server/key.pem");
+ let client_ca_path = format!("{manifest_dir}/examples/keys/client-ca/cert.pem");
+
+ // Create TLS settings with callbacks
+ let callbacks = Box::new(MyTlsCallbacks);
+ let mut tls_settings = TlsSettings::with_callbacks(callbacks)?;
+ // Set server certificate and private key
+ tls_settings.set_certificate_chain_file(&server_cert_path)?;
+ tls_settings.set_private_key_file(server_key_path, SslFiletype::PEM)?;
+ // Require client certificate
+ tls_settings.set_verify(SslVerifyMode::PEER | SslVerifyMode::FAIL_IF_NO_PEER_CERT);
+ // Set CA for client certificate verification
+ tls_settings.set_ca_file(&client_ca_path)?;
+ // Optionally, set the list of acceptable client CAs sent to the client
+ tls_settings.set_client_ca_list(X509Name::load_client_ca_file(&client_ca_path)?);
+
+ my_app.add_tls_with_settings("0.0.0.0:6196", None, tls_settings);
+ my_server.add_service(my_app);
+
+ my_server.run_forever();
+}
+
+#[cfg(not(feature = "openssl"))]
+fn main() {
+ eprintln!("This example requires the 'openssl' feature to be enabled.");
+}
diff --git a/pingora-core/examples/keys/client-ca/cert.pem b/pingora-core/examples/keys/client-ca/cert.pem
new file mode 100644
index 00000000..2025cda3
--- /dev/null
+++ b/pingora-core/examples/keys/client-ca/cert.pem
@@ -0,0 +1,15 @@
+-----BEGIN CERTIFICATE-----
+MIICTjCCAfWgAwIBAgIULuUoq/di4EKmLyN0YwAkd6MQjv4wCgYIKoZIzj0EAwIw
+dTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh
+biBGcmFuY2lzY28xGDAWBgNVBAoMD0Nsb3VkZmxhcmUsIEluYzEfMB0GA1UEAwwW
+RXhhbXBsZSBDbGllbnQgUm9vdCBDQTAeFw0yNTExMTkwNDU5MjRaFw0zNTExMTcw
+NDU5MjRaMHUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYD
+VQQHDA1TYW4gRnJhbmNpc2NvMRgwFgYDVQQKDA9DbG91ZGZsYXJlLCBJbmMxHzAd
+BgNVBAMMFkV4YW1wbGUgQ2xpZW50IFJvb3QgQ0EwWTATBgcqhkjOPQIBBggqhkjO
+PQMBBwNCAARxcxOAR4zUDPilKpMLiBzNs+HxdW6ZBlHVA7/0VyJtSPw03IdlbtFs
+FhgcIa8uQ9nrppHlrzploTA7cg7YWUoso2MwYTAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUL6S83l9AGZmmwHh+64YlUtMQzZcwHwYD
+VR0jBBgwFoAUL6S83l9AGZmmwHh+64YlUtMQzZcwCgYIKoZIzj0EAwIDRwAwRAIg
+cohFQxG22J2YKw+DGAidU5u3mxtB/BALxIusqd+OfFUCIGmT2GHVxz1FwK2pJrM1
+FTWEcEbAw3r86iIVJBYP4qX6
+-----END CERTIFICATE-----
diff --git a/pingora-core/examples/keys/client-ca/key.pem b/pingora-core/examples/keys/client-ca/key.pem
new file mode 100644
index 00000000..a4c54f95
--- /dev/null
+++ b/pingora-core/examples/keys/client-ca/key.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIJOxEQowpYL5VLNf+qaCEBhic8e26UyR0ku65Sk6gjMIoAoGCCqGSM49
+AwEHoUQDQgAEcXMTgEeM1Az4pSqTC4gczbPh8XVumQZR1QO/9FcibUj8NNyHZW7R
+bBYYHCGvLkPZ66aR5a86ZaEwO3IO2FlKLA==
+-----END EC PRIVATE KEY-----
diff --git a/pingora-core/examples/keys/clients/cert-1.pem b/pingora-core/examples/keys/clients/cert-1.pem
new file mode 100644
index 00000000..7d6ce13f
--- /dev/null
+++ b/pingora-core/examples/keys/clients/cert-1.pem
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE-----
+MIICjjCCAjWgAwIBAgIUYUSqEzxm/oebfxxQmZEesZL2WFAwCgYIKoZIzj0EAwIw
+dTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh
+biBGcmFuY2lzY28xGDAWBgNVBAoMD0Nsb3VkZmxhcmUsIEluYzEfMB0GA1UEAwwW
+RXhhbXBsZSBDbGllbnQgUm9vdCBDQTAeFw0yNTExMTkwNTEyMThaFw0zNTExMTcw
+NTEyMThaMG8xCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYD
+VQQHDA1TYW4gRnJhbmNpc2NvMRgwFgYDVQQKDA9DbG91ZGZsYXJlLCBJbmMxGTAX
+BgNVBAMMEGV4YW1wbGUtY2xpZW50LTEwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
+AATDe6hBwpmE4Jt//sIWGWuBDYXHezVoFeoHsDzcWo6RwyHDfm7lvnACmqWAdRUV
+1GA7yfkzc1CaTqnvU8GjFdfXo4GoMIGlMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/
+BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDAGA1UdEQQpMCeGJXNwaWZmZTov
+L2V4YW1wbGUuY29tL2V4YW1wbGUtY2xpZW50LTEwHQYDVR0OBBYEFAjfTzgX+AVh
+M+BIaU0qTgINZWOdMB8GA1UdIwQYMBaAFC+kvN5fQBmZpsB4fuuGJVLTEM2XMAoG
+CCqGSM49BAMCA0cAMEQCIHyJDCvYKgxVthHcLjlEGW4Pj0Y7XnQUCJARa3jAUTd9
+AiB8tSXbo6J6Jhy6nasaxT1HAZwjgMVQwdo8O8UYOXXZpQ==
+-----END CERTIFICATE-----
diff --git a/pingora-core/examples/keys/clients/cert-2.pem b/pingora-core/examples/keys/clients/cert-2.pem
new file mode 100644
index 00000000..b209b933
--- /dev/null
+++ b/pingora-core/examples/keys/clients/cert-2.pem
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC0zCCAnmgAwIBAgIUVQlGCD9Zryvkh9G8GZXFBa2L9kQwCgYIKoZIzj0EAwIw
+dTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh
+biBGcmFuY2lzY28xGDAWBgNVBAoMD0Nsb3VkZmxhcmUsIEluYzEfMB0GA1UEAwwW
+RXhhbXBsZSBDbGllbnQgUm9vdCBDQTAeFw0yNTExMTkwODA5MDlaFw0zNTExMTcw
+ODA5MDlaMG8xCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYD
+VQQHDA1TYW4gRnJhbmNpc2NvMRgwFgYDVQQKDA9DbG91ZGZsYXJlLCBJbmMxGTAX
+BgNVBAMMEGV4YW1wbGUtY2xpZW50LTIwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
+AAS2J10rq5Rt4TjhqEjHED0UPdceuzHUcw8doLC4StBIxJIrFk9Ag0g5ti9vN4fG
+kK6J11GXk/pBmu3O3s48Gsfgo4HsMIHpMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/
+BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMHQGA1UdEQRtMGuGJXNwaWZmZTov
+L2V4YW1wbGUuY29tL2V4YW1wbGUtY2xpZW50LTKCFGNsaWVudC0yLmV4YW1wbGUu
+Y29thwR/AAABhxAAAAAAAAAAAAAAAAAAAAABgRRjbGllbnQtMkBleGFtcGxlLmNv
+bTAdBgNVHQ4EFgQUGHwnr7Ube1hqsodgcxJkfYuCKE8wHwYDVR0jBBgwFoAUL6S8
+3l9AGZmmwHh+64YlUtMQzZcwCgYIKoZIzj0EAwIDSAAwRQIgK4JL1OO2nB7MqvGW
+y2nbH4yYMu2jUkYhw9HFLUG2B6MCIQC4iDWKXp7R977LvuaaQaNcMmbGysrmfo8V
+wOmp1JGOtA==
+-----END CERTIFICATE-----
diff --git a/pingora-core/examples/keys/clients/invalid-cert.pem b/pingora-core/examples/keys/clients/invalid-cert.pem
new file mode 100644
index 00000000..27ae7c93
--- /dev/null
+++ b/pingora-core/examples/keys/clients/invalid-cert.pem
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE-----
+MIICjzCCAjWgAwIBAgIUHYIVFYFooGVi2bNlk5R6GsbDKqUwCgYIKoZIzj0EAwIw
+dTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh
+biBGcmFuY2lzY28xGDAWBgNVBAoMD0Nsb3VkZmxhcmUsIEluYzEfMB0GA1UEAwwW
+RXhhbXBsZSBDbGllbnQgUm9vdCBDQTAeFw0yNTExMTkwODEzNDJaFw0zNTExMTcw
+ODEzNDJaMG8xCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYD
+VQQHDA1TYW4gRnJhbmNpc2NvMRgwFgYDVQQKDA9DbG91ZGZsYXJlLCBJbmMxGTAX
+BgNVBAMMEGV4YW1wbGUtY2xpZW50LTMwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
+AATGKppMkUDsNvpzPPPiKmz53bbyIJPemIq5OdgJli8XZUFozxroJuFKhUuJOuFF
+Jns2pzLHewIDzFXgErPqPxA/o4GoMIGlMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/
+BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDAGA1UdEQQpMCeGJXNwaWZmZTov
+L2V4YW1wbGUuY29tL2V4YW1wbGUtY2xpZW50LTMwHQYDVR0OBBYEFDV/v0zsiC/t
+aomzxKa0jJ4SlmSzMB8GA1UdIwQYMBaAFK04aCtyumAb4PEMnh9OXLW7EIJSMAoG
+CCqGSM49BAMCA0gAMEUCIH/wxvS0ae8DF1QteE+2FDOd/G2WeBMjsS8A6VyebAru
+AiEAl2vjq0KePvM2X0jTZ/+RMJO33HOpYr0+PZw6FAa+aaw=
+-----END CERTIFICATE-----
diff --git a/pingora-core/examples/keys/clients/invalid-key.pem b/pingora-core/examples/keys/clients/invalid-key.pem
new file mode 100644
index 00000000..343688aa
--- /dev/null
+++ b/pingora-core/examples/keys/clients/invalid-key.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIFyLneOGHgjTBS8I2GB8kF0LHgDS/eTJBSDNS4PAkJ0JoAoGCCqGSM49
+AwEHoUQDQgAExiqaTJFA7Db6czzz4ips+d228iCT3piKuTnYCZYvF2VBaM8a6Cbh
+SoVLiTrhRSZ7Nqcyx3sCA8xV4BKz6j8QPw==
+-----END EC PRIVATE KEY-----
diff --git a/pingora-core/examples/keys/clients/key-1.pem b/pingora-core/examples/keys/clients/key-1.pem
new file mode 100644
index 00000000..e5a27feb
--- /dev/null
+++ b/pingora-core/examples/keys/clients/key-1.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIFNioASifzPy0Fcp+qmMoMUhFOJGLki20ygISqZb+HY1oAoGCCqGSM49
+AwEHoUQDQgAEw3uoQcKZhOCbf/7CFhlrgQ2Fx3s1aBXqB7A83FqOkcMhw35u5b5w
+ApqlgHUVFdRgO8n5M3NQmk6p71PBoxXX1w==
+-----END EC PRIVATE KEY-----
diff --git a/pingora-core/examples/keys/clients/key-2.pem b/pingora-core/examples/keys/clients/key-2.pem
new file mode 100644
index 00000000..8d4063c7
--- /dev/null
+++ b/pingora-core/examples/keys/clients/key-2.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEICd8DwjvpvE6nIKKKH2smrnLBM5zQyIkAKwBCiiRZGGsoAoGCCqGSM49
+AwEHoUQDQgAEtiddK6uUbeE44ahIxxA9FD3XHrsx1HMPHaCwuErQSMSSKxZPQINI
+ObYvbzeHxpCuiddRl5P6QZrtzt7OPBrH4A==
+-----END EC PRIVATE KEY-----
diff --git a/pingora-core/examples/keys/server/cert.pem b/pingora-core/examples/keys/server/cert.pem
new file mode 100644
index 00000000..4e927ce4
--- /dev/null
+++ b/pingora-core/examples/keys/server/cert.pem
@@ -0,0 +1,15 @@
+-----BEGIN CERTIFICATE-----
+MIICVzCCAf6gAwIBAgIUYGbx/r4kY40a+zNq7IW/1lsvzk0wCgYIKoZIzj0EAwIw
+bDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh
+biBGcmFuY2lzY28xGDAWBgNVBAoMD0Nsb3VkZmxhcmUsIEluYzEWMBQGA1UEAwwN
+b3BlbnJ1c3R5Lm9yZzAeFw0yNTExMTkwNDUxMzdaFw0zNTExMTcwNDUxMzdaMGwx
+CzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4g
+RnJhbmNpc2NvMRgwFgYDVQQKDA9DbG91ZGZsYXJlLCBJbmMxFjAUBgNVBAMMDW9w
+ZW5ydXN0eS5vcmcwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT9EuNEw3e3syHW
+SNnyJw7QVtOzDlILlt6F+jXT8UMBoMn4OnwC7AFlV8XzR9UpYSf1yq7Raps7c8TU
+W9YF6ee4o34wfDAdBgNVHQ4EFgQU6B2YXLmWaboIZsf9YOCePRQXrO4wHwYDVR0j
+BBgwFoAU6B2YXLmWaboIZsf9YOCePRQXrO4wDwYDVR0TAQH/BAUwAwEB/zApBgNV
+HREEIjAggg8qLm9wZW5ydXN0eS5vcmeCDW9wZW5ydXN0eS5vcmcwCgYIKoZIzj0E
+AwIDRwAwRAIgcSThJ5CWjuyWKfHbR+RuJ/9DtH1ag/47OolMQAvOczsCIDKVgPO/
+A69bTOk4sq0y92YBBbe3hF82KrsgTR3nlkKF
+-----END CERTIFICATE-----
diff --git a/pingora-core/examples/keys/server/key.pem b/pingora-core/examples/keys/server/key.pem
new file mode 100644
index 00000000..5781629a
--- /dev/null
+++ b/pingora-core/examples/keys/server/key.pem
@@ -0,0 +1,5 @@
+-----BEGIN PRIVATE KEY-----
+MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgTAnVhDuKvV5epzX4
+uuC8kEZL2vUPI49gUmS5kM+j5VWhRANCAAT9EuNEw3e3syHWSNnyJw7QVtOzDlIL
+lt6F+jXT8UMBoMn4OnwC7AFlV8XzR9UpYSf1yq7Raps7c8TUW9YF6ee4
+-----END PRIVATE KEY-----
diff --git a/pingora-core/examples/service_dependencies.rs b/pingora-core/examples/service_dependencies.rs
new file mode 100644
index 00000000..d5f5e392
--- /dev/null
+++ b/pingora-core/examples/service_dependencies.rs
@@ -0,0 +1,234 @@
+// Copyright 2026 Cloudflare, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Example demonstrating service dependency management.
+//!
+//! This example shows how services can declare dependencies on other services using
+//! a fluent API with [`ServiceHandle`] references, ensuring they start in the correct
+//! order and wait for dependencies to be ready.
+//!
+//! # Running the example
+//!
+//! ```bash
+//! cargo run --example service_dependencies --package pingora-core
+//! ```
+//!
+//! Expected output:
+//! - DatabaseService starts and initializes (takes 2 seconds)
+//! - CacheService starts and initializes (takes 1 second)
+//! - ApiService waits for both dependencies, then starts
+//!
+//! # Key Features Demonstrated
+//!
+//! - Fluent API for declaring dependencies via [`ServiceHandle::add_dependency()`]
+//! - Type-safe dependency declaration (no strings)
+//! - Multiple ways to implement services based on readiness needs:
+//! - **DatabaseService**: Custom readiness timing (uses `ServiceWithDependents`)
+//! - **CacheService**: Ready immediately (uses `Service`)
+//! - **ApiService**: Ready immediately (uses `Service`)
+//! - Automatic dependency ordering and validation
+//! - Prevention of typos in service names (compile-time safety)
+
+use async_trait::async_trait;
+use log::info;
+use pingora_core::server::configuration::Opt;
+#[cfg(unix)]
+use pingora_core::server::ListenFds;
+use pingora_core::server::{Server, ShutdownWatch};
+use pingora_core::services::{Service, ServiceWithDependents};
+// DatabaseService needs to control readiness timing
+use pingora_core::services::ServiceReadyNotifier;
+use std::sync::Arc;
+use tokio::sync::Mutex;
+use tokio::time::{sleep, Duration};
+
+/// A custom service that delays signaling ready until initialization is complete
+pub struct DatabaseService {
+ connection_string: Arc<Mutex<Option<String>>>,
+}
+
+impl DatabaseService {
+ fn new() -> Self {
+ Self {
+ connection_string: Arc::new(Mutex::new(None)),
+ }
+ }
+
+ fn get_connection_string(&self) -> Arc<Mutex<Option<String>>> {
+ self.connection_string.clone()
+ }
+}
+
+#[async_trait]
+impl ServiceWithDependents for DatabaseService {
+ async fn start_service(
+ &mut self,
+ #[cfg(unix)] _fds: Option<ListenFds>,
+ mut shutdown: ShutdownWatch,
+ _listeners_per_fd: usize,
+ ready_notifier: ServiceReadyNotifier,
+ ) {
+ info!("DatabaseService: Starting initialization...");
+
+ // Simulate database connection setup
+ sleep(Duration::from_secs(2)).await;
+
+ // Store the connection string
+ {
+ let mut conn = self.connection_string.lock().await;
+ *conn = Some("postgresql://localhost:5432/mydb".to_string());
+ }
+
+ info!("DatabaseService: Initialization complete, signaling ready");
+
+ // Signal that the service is ready
+ ready_notifier.notify_ready();
+
+ // Keep running until shutdown
+ shutdown.changed().await.ok();
+ info!("DatabaseService: Shutting down");
+ }
+
+ fn name(&self) -> &str {
+ "database"
+ }
+
+ fn threads(&self) -> Option<usize> {
+ Some(1)
+ }
+}
+
+/// A cache service that uses the simplified API
+/// Signals ready immediately (using default implementation)
+pub struct CacheService;
+
+#[async_trait]
+impl Service for CacheService {
+ // Uses default start_service implementation which signals ready immediately
+
+ async fn start_service(
+ &mut self,
+ #[cfg(unix)] _fds: Option<ListenFds>,
+ mut shutdown: ShutdownWatch,
+ _listeners_per_fd: usize,
+ ) {
+ info!("CacheService: Starting (ready immediately)...");
+
+ // Simulate cache warmup
+ sleep(Duration::from_secs(1)).await;
+ info!("CacheService: Warmup complete");
+
+ // Keep running until shutdown
+ shutdown.changed().await.ok();
+ info!("CacheService: Shutting down");
+ }
+
+ fn name(&self) -> &str {
+ "cache"
+ }
+
+ fn threads(&self) -> Option<usize> {
+ Some(1)
+ }
+}
+
+/// An API service that depends on both database and cache
+/// Uses the simplest API - signals ready immediately and just implements [Service]
+pub struct ApiService {
+ db_connection: Arc<Mutex<Option<String>>>,
+}
+
+impl ApiService {
+ fn new(db_connection: Arc<Mutex<Option<String>>>) -> Self {
+ Self { db_connection }
+ }
+}
+
+#[async_trait]
+impl Service for ApiService {
+ // Uses default start_service - signals ready immediately
+
+ async fn start_service(
+ &mut self,
+ #[cfg(unix)] _fds: Option<ListenFds>,
+ mut shutdown: ShutdownWatch,
+ _listeners_per_fd: usize,
+ ) {
+ info!("ApiService: Starting (dependencies should be ready)...");
+
+ // Verify database connection is available
+ {
+ let conn = self.db_connection.lock().await;
+ if let Some(conn_str) = &*conn {
+ info!("ApiService: Using database connection: {}", conn_str);
+ } else {
+ panic!("ApiService: Database connection not available!");
+ }
+ }
+
+ info!("ApiService: Ready to serve requests");
+
+ // Keep running until shutdown
+ shutdown.changed().await.ok();
+ info!("ApiService: Shutting down");
+ }
+
+ fn name(&self) -> &str {
+ "api"
+ }
+
+ fn threads(&self) -> Option<usize> {
+ Some(1)
+ }
+}
+
+fn main() {
+ env_logger::Builder::from_default_env()
+ .filter_level(log::LevelFilter::Info)
+ .init();
+
+ info!("Starting server with service dependencies...");
+
+ let opt = Opt::parse_args();
+ let mut server = Server::new(Some(opt)).unwrap();
+ server.bootstrap();
+
+ // Create the database service
+ let db_service = DatabaseService::new();
+ let db_connection = db_service.get_connection_string();
+
+ // Create services
+ let cache_service = CacheService;
+ let api_service = ApiService::new(db_connection);
+
+ // Add services and get their handles
+ let db_handle = server.add_service(db_service);
+ let cache_handle = server.add_service(cache_service);
+ let api_handle = server.add_service(api_service);
+
+ // Declare dependencies using the fluent API
+ // The API service will not start until both dependencies signal ready
+ api_handle.add_dependency(db_handle);
+ api_handle.add_dependency(&cache_handle);
+
+ info!("Services configured. Starting server...");
+ info!("Expected startup order:");
+ info!(" 1. database (will initialize for 2 seconds)");
+ info!(" 2. cache (will initialize for 1 second)");
+ info!(" 3. api (will wait for both, then start)");
+ info!("");
+ info!("Press Ctrl+C to shut down");
+
+ server.run_forever();
+}
diff --git a/pingora-core/src/apps/http_app.rs b/pingora-core/src/apps/http_app.rs
index d2c59513..f511012c 100644
--- a/pingora-core/src/apps/http_app.rs
+++ b/pingora-core/src/apps/http_app.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/apps/mod.rs b/pingora-core/src/apps/mod.rs
index 461084e4..d751fbcc 100644
--- a/pingora-core/src/apps/mod.rs
+++ b/pingora-core/src/apps/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -61,24 +61,57 @@ pub trait ServerApp {
#[derive(Default)]
/// HTTP Server options that control how the server handles some transport types.
pub struct HttpServerOptions {
- /// Use HTTP/2 for plaintext.
+ /// Allow HTTP/2 for plaintext.
pub h2c: bool,
+
+ /// Allow proxying CONNECT requests when handling HTTP traffic.
+ ///
+ /// When disabled, CONNECT requests are rejected with 405 by proxy services.
+ pub allow_connect_method_proxying: bool,
+
+ #[doc(hidden)]
+ pub force_custom: bool,
+
+ /// Maximum number of requests that this connection will handle. This is
+ /// equivalent to [Nginx's keepalive requests](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive_requests)
+ /// which says:
+ ///
+ /// > Closing connections periodically is necessary to free per-connection
+ /// > memory allocations. Therefore, using too high maximum number of
+ /// > requests could result in excessive memory usage and not recommended.
+ ///
+ /// Unlike nginx, the default behavior here is _no limit_.
+ pub keepalive_request_limit: Option,
}
#[derive(Debug, Clone)]
pub struct HttpPersistentSettings {
keepalive_timeout: Option,
+ keepalive_reuses_remaining: Option,
}
impl HttpPersistentSettings {
pub fn for_session(session: &ServerSession) -> Self {
HttpPersistentSettings {
keepalive_timeout: session.get_keepalive(),
+ keepalive_reuses_remaining: session.get_keepalive_reuses_remaining(),
}
}
- pub fn apply_to_session(&self, session: &mut ServerSession) {
- session.set_keepalive(self.keepalive_timeout);
+ pub fn apply_to_session(self, session: &mut ServerSession) {
+ let Self {
+ keepalive_timeout,
+ mut keepalive_reuses_remaining,
+ } = self;
+
+ // Reduce the number of times the connection for this session can be
+ // reused by one. A session with reuse count of zero won't be reused
+ if let Some(reuses) = keepalive_reuses_remaining.as_mut() {
+ *reuses = reuses.saturating_sub(1);
+ }
+
+ session.set_keepalive(keepalive_timeout);
+ session.set_keepalive_reuses_remaining(keepalive_reuses_remaining);
}
}
@@ -133,6 +166,15 @@ pub trait HttpServerApp {
}
async fn http_cleanup(&self) {}
+
+ #[doc(hidden)]
+ async fn process_custom_session(
+ self: Arc,
+ _stream: Stream,
+ _shutdown: &ShutdownWatch,
+ ) -> Option {
+ None
+ }
}
#[async_trait]
@@ -146,9 +188,13 @@ where
shutdown: &ShutdownWatch,
) -> Option {
let mut h2c = self.server_options().as_ref().map_or(false, |o| o.h2c);
+ let custom = self
+ .server_options()
+ .as_ref()
+ .map_or(false, |o| o.force_custom);
// try to read h2 preface
- if h2c {
+ if h2c && !custom {
let mut buf = [0u8; H2_PREFACE.len()];
let peeked = stream
.try_peek(&mut buf)
@@ -215,6 +261,8 @@ where
.await;
});
}
+ } else if custom || matches!(stream.selected_alpn_proto(), Some(ALPN::Custom(_))) {
+ return self.clone().process_custom_session(stream, shutdown).await;
} else {
// No ALPN or ALPN::H1 and h2c was not configured, fallback to HTTP/1.1
let mut session = ServerSession::new_http1(stream);
@@ -225,6 +273,10 @@ where
// default 60s
session.set_keepalive(Some(60));
}
+ session.set_keepalive_reuses_remaining(
+ self.server_options()
+ .and_then(|opts| opts.keepalive_request_limit),
+ );
let mut result = self.process_new_http(session, shutdown).await;
while let Some((stream, persistent_settings)) = result.map(|r| r.consume()) {
@@ -232,10 +284,6 @@ where
if let Some(persistent_settings) = persistent_settings {
persistent_settings.apply_to_session(&mut session);
}
- if *shutdown.borrow() {
- // stop downstream from reusing if this service is shutting down soon
- session.set_keepalive(None);
- }
result = self.process_new_http(session, shutdown).await;
}
diff --git a/pingora-core/src/apps/prometheus_http_app.rs b/pingora-core/src/apps/prometheus_http_app.rs
index 963d5a9e..ed8a217a 100644
--- a/pingora-core/src/apps/prometheus_http_app.rs
+++ b/pingora-core/src/apps/prometheus_http_app.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/connectors/http/custom/mod.rs b/pingora-core/src/connectors/http/custom/mod.rs
new file mode 100644
index 00000000..e1e8a11d
--- /dev/null
+++ b/pingora-core/src/connectors/http/custom/mod.rs
@@ -0,0 +1,80 @@
+// Copyright 2026 Cloudflare, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use async_trait::async_trait;
+use std::time::Duration;
+
+use pingora_error::Result;
+
+use crate::{
+ protocols::{http::custom::client::Session, Stream},
+ upstreams::peer::Peer,
+};
+
+// Either returns a Custom Session or the Stream for creating a new H1 session as a fallback.
+pub enum Connection {
+ Session(S),
+ Stream(Stream),
+}
+#[doc(hidden)]
+#[async_trait]
+pub trait Connector: Send + Sync + Unpin + 'static {
+ type Session: Session;
+
+ async fn get_http_session(
+ &self,
+ peer: &P,
+ ) -> Result<(Connection, bool)>;
+
+ async fn reused_http_session(
+ &self,
+ peer: &P,
+ ) -> Option;
+
+ async fn release_http_session(
+ &self,
+ mut session: Self::Session,
+ peer: &P,
+ idle_timeout: Option,
+ );
+}
+
+#[doc(hidden)]
+#[async_trait]
+impl Connector for () {
+ type Session = ();
+
+ async fn get_http_session(
+ &self,
+ _peer: &P,
+ ) -> Result<(Connection, bool)> {
+ unreachable!("connector: get_http_session")
+ }
+
+ async fn reused_http_session(
+ &self,
+ _peer: &P,
+ ) -> Option {
+ unreachable!("connector: reused_http_session")
+ }
+
+ async fn release_http_session(
+ &self,
+ _session: Self::Session,
+ _peer: &P,
+ _idle_timeout: Option,
+ ) {
+ unreachable!("connector: release_http_session")
+ }
+}
diff --git a/pingora-core/src/connectors/http/mod.rs b/pingora-core/src/connectors/http/mod.rs
index 45b14f44..2545cf7c 100644
--- a/pingora-core/src/connectors/http/mod.rs
+++ b/pingora-core/src/connectors/http/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,25 +14,47 @@
//! Connecting to HTTP servers
+use crate::connectors::http::custom::Connection;
use crate::connectors::ConnectorOptions;
+use crate::listeners::ALPN;
use crate::protocols::http::client::HttpSession;
+use crate::protocols::http::v1::client::HttpSession as Http1Session;
use crate::upstreams::peer::Peer;
use pingora_error::Result;
use std::time::Duration;
+pub mod custom;
pub mod v1;
pub mod v2;
-pub struct Connector {
+pub struct Connector
+where
+ C: custom::Connector,
+{
h1: v1::Connector,
h2: v2::Connector,
+ custom: C,
}
-impl Connector {
+impl Connector<()> {
pub fn new(options: Option) -> Self {
Connector {
h1: v1::Connector::new(options.clone()),
- h2: v2::Connector::new(options),
+ h2: v2::Connector::new(options.clone()),
+ custom: Default::default(),
+ }
+ }
+}
+
+impl Connector
+where
+ C: custom::Connector,
+{
+ pub fn new_custom(options: Option, custom: C) -> Self {
+ Connector {
+ h1: v1::Connector::new(options.clone()),
+ h2: v2::Connector::new(options.clone()),
+ custom,
}
}
@@ -42,14 +64,46 @@ impl Connector {
pub async fn get_http_session(
&self,
peer: &P,
- ) -> Result<(HttpSession, bool)> {
+ ) -> Result<(HttpSession, bool)> {
+ let peer_opts = peer.get_peer_options();
+
+ // Switch to custom protocol as early as possible
+ if peer_opts.is_some_and(|o| matches!(o.alpn, ALPN::Custom(_))) {
+ // We create the Connector before TLS, so we need to make sure that the server also supports the same custom protocol.
+ // We will first check for sessions that we can reuse, if not we will create a new one based on the negotiated protocol
+
+ // Step 1: Look for reused Custom Session
+ if let Some(session) = self.custom.reused_http_session(peer).await {
+ return Ok((HttpSession::Custom(session), true));
+ }
+ // Step 2: Check reuse pool for reused H1 session
+ if let Some(h1) = self.h1.reused_http_session(peer).await {
+ return Ok((HttpSession::H1(h1), true));
+ }
+ // Step 3: Try and create a new Custom session
+ let (connection, reused) = self.custom.get_http_session(peer).await?;
+ // We create the Connector before TLS, so we need to make sure that the server also supports the same custom protocol
+ match connection {
+ Connection::Session(s) => {
+ return Ok((HttpSession::Custom(s), reused));
+ }
+ // Negotiated ALPN is not custom, create a new H1 session
+ Connection::Stream(s) => {
+ return Ok((
+ HttpSession::H1(Http1Session::new_with_options(s, peer)),
+ false,
+ ));
+ }
+ }
+ }
+
// NOTE: maybe TODO: we do not yet enforce that only TLS traffic can use h2, which is the
// de facto requirement for h2, because non TLS traffic lack the negotiation mechanism.
// We assume no peer option == no ALPN == h1 only
let h1_only = peer
.get_peer_options()
- .map_or(true, |o| o.alpn.get_max_http_version() == 1);
+ .is_none_or(|o| o.alpn.get_max_http_version() == 1);
if h1_only {
let (h1, reused) = self.h1.get_http_session(peer).await?;
Ok((HttpSession::H1(h1), reused))
@@ -78,13 +132,18 @@ impl Connector {
pub async fn release_http_session(
&self,
- session: HttpSession,
+ session: HttpSession,
peer: &P,
idle_timeout: Option,
) {
match session {
HttpSession::H1(h1) => self.h1.release_http_session(h1, peer, idle_timeout).await,
HttpSession::H2(h2) => self.h2.release_http_session(h2, peer, idle_timeout),
+ HttpSession::Custom(c) => {
+ self.custom
+ .release_http_session(c, peer, idle_timeout)
+ .await;
+ }
}
}
@@ -98,9 +157,21 @@ impl Connector {
#[cfg(feature = "any_tls")]
mod tests {
use super::*;
+ use crate::connectors::TransportConnector;
+ use crate::listeners::tls::TlsSettings;
+ use crate::listeners::{Listeners, TransportStack, ALPN};
use crate::protocols::http::v1::client::HttpSession as Http1Session;
+ use crate::protocols::tls::CustomALPN;
use crate::upstreams::peer::HttpPeer;
+ use crate::upstreams::peer::PeerOptions;
+ use async_trait::async_trait;
use pingora_http::RequestHeader;
+ use std::sync::Arc;
+ use std::sync::Mutex;
+ use tokio::io::AsyncWriteExt;
+ use tokio::net::TcpListener;
+ use tokio::task::JoinHandle;
+ use tokio::time::sleep;
async fn get_http(http: &mut Http1Session, expected_status: u16) {
let mut req = Box::new(RequestHeader::build("GET", b"/", None).unwrap());
@@ -123,6 +194,7 @@ mod tests {
match &h2 {
HttpSession::H1(_) => panic!("expect h2"),
HttpSession::H2(h2_stream) => assert!(!h2_stream.ping_timedout()),
+ HttpSession::Custom(_) => panic!("expect h2"),
}
connector.release_http_session(h2, &peer, None).await;
@@ -133,6 +205,7 @@ mod tests {
match &h2 {
HttpSession::H1(_) => panic!("expect h2"),
HttpSession::H2(h2_stream) => assert!(!h2_stream.ping_timedout()),
+ HttpSession::Custom(_) => panic!("expect h2"),
}
}
@@ -148,6 +221,7 @@ mod tests {
get_http(http, 200).await;
}
HttpSession::H2(_) => panic!("expect h1"),
+ HttpSession::Custom(_) => panic!("expect h1"),
}
connector.release_http_session(h1, &peer, None).await;
@@ -157,6 +231,7 @@ mod tests {
match &mut h1 {
HttpSession::H1(_) => {}
HttpSession::H2(_) => panic!("expect h1"),
+ HttpSession::Custom(_) => panic!("expect h1"),
}
}
@@ -178,6 +253,7 @@ mod tests {
get_http(http, 200).await;
}
HttpSession::H2(_) => panic!("expect h1"),
+ HttpSession::Custom(_) => panic!("expect h1"),
}
connector.release_http_session(h1, &peer, None).await;
@@ -190,6 +266,7 @@ mod tests {
match &mut h1 {
HttpSession::H1(_) => {}
HttpSession::H2(_) => panic!("expect h1"),
+ HttpSession::Custom(_) => panic!("expect h1"),
}
}
@@ -207,6 +284,7 @@ mod tests {
get_http(http, 200).await;
}
HttpSession::H2(_) => panic!("expect h1"),
+ HttpSession::Custom(_) => panic!("expect h1"),
}
connector.release_http_session(h1, &peer, None).await;
@@ -217,6 +295,314 @@ mod tests {
match &mut h1 {
HttpSession::H1(_) => {}
HttpSession::H2(_) => panic!("expect h1"),
+ HttpSession::Custom(_) => panic!("expect h1"),
+ }
+ }
+ // Track the flow of calls when using a custom protocol. For this we need to create a Mock Connector
+ struct MockConnector {
+ transport: TransportConnector,
+ reusable: Arc>, // Mock for tracking reusable sessions
+ }
+
+ #[async_trait]
+ impl custom::Connector for MockConnector {
+ type Session = ();
+
+ async fn get_http_session(
+ &self,
+ peer: &P,
+ ) -> Result<(Connection, bool)> {
+ let (stream, _) = self.transport.get_stream(peer).await?;
+
+ match stream.selected_alpn_proto() {
+ Some(ALPN::Custom(_)) => Ok((custom::Connection::Session(()), false)),
+ _ => Ok(((custom::Connection::Stream(stream)), false)),
+ }
+ }
+
+ async fn reused_http_session(
+ &self,
+ _peer: &P,
+ ) -> Option {
+ let mut flag = self.reusable.lock().unwrap();
+ if *flag {
+ *flag = false;
+ Some(())
+ } else {
+ None
+ }
+ }
+
+ async fn release_http_session(
+ &self,
+ _session: Self::Session,
+ _peer: &P,
+ _idle_timeout: Option,
+ ) {
+ let mut flag = self.reusable.lock().unwrap();
+ *flag = true;
}
}
+
+ // Finds an available TCP port on localhost for test server setup.
+ async fn get_available_port() -> u16 {
+ TcpListener::bind("127.0.0.1:0")
+ .await
+ .unwrap()
+ .local_addr()
+ .unwrap()
+ .port()
+ }
+ // Creates a test connector for integration/unit tests.
+ // For rustls, only ConnectorOptions are used here; the actual dangerous verifier is patched in the TLS connector.
+ fn create_test_connector() -> Connector {
+ #[cfg(feature = "rustls")]
+ let custom_transport = {
+ let options = ConnectorOptions::new(1);
+ TransportConnector::new(Some(options))
+ };
+ #[cfg(not(feature = "rustls"))]
+ let custom_transport = TransportConnector::new(None);
+ Connector {
+ h1: v1::Connector::new(None),
+ h2: v2::Connector::new(None),
+ custom: MockConnector {
+ transport: custom_transport,
+ reusable: Arc::new(Mutex::new(false)),
+ },
+ }
+ }
+
+ // Creates a test peer that uses a custom ALPN protocol and disables cert/hostname verification for tests.
+ fn create_peer_with_custom_proto(port: u16, proto: &[u8]) -> HttpPeer {
+ let mut peer = HttpPeer::new(("127.0.0.1", port), true, "localhost".into());
+ let mut options = PeerOptions::new();
+ options.alpn = ALPN::Custom(CustomALPN::new(proto.to_vec()));
+ // Disable cert verification for this test (self-signed or invalid certs are OK)
+ options.verify_cert = false;
+ options.verify_hostname = false;
+ peer.options = options;
+ peer
+ }
+ async fn build_custom_tls_listener(port: u16, custom_alpn: CustomALPN) -> TransportStack {
+ let cert_path = format!("{}/tests/keys/server.crt", env!("CARGO_MANIFEST_DIR"));
+ let key_path = format!("{}/tests/keys/key.pem", env!("CARGO_MANIFEST_DIR"));
+ let addr = format!("127.0.0.1:{}", port);
+ let mut listeners = Listeners::new();
+ let mut tls_settings = TlsSettings::intermediate(&cert_path, &key_path).unwrap();
+
+ tls_settings.set_alpn(ALPN::Custom(custom_alpn));
+ listeners.add_tls_with_settings(&addr, None, tls_settings);
+ listeners
+ .build(
+ #[cfg(unix)]
+ None,
+ )
+ .await
+ .unwrap()
+ .pop()
+ .unwrap()
+ }
+
+ // Spawn a simple TLS Server
+ fn spawn_test_tls_server(listener: TransportStack) -> JoinHandle<()> {
+ tokio::spawn(async move {
+ loop {
+ let stream = match listener.accept().await {
+ Ok(stream) => stream,
+ Err(_) => break, // Exit if listener is closed
+ };
+ let mut stream = stream.handshake().await.unwrap();
+
+ let _ = stream.write_all(b"CUSTOM").await; // Ignore write errors
+ }
+ })
+ }
+
+ // Both server and client are using the same custom protocol
+ #[tokio::test]
+ async fn test_custom_client_custom_upstream() {
+ let port = get_available_port().await;
+ let custom_protocol = b"custom".to_vec();
+
+ let listener =
+ build_custom_tls_listener(port, CustomALPN::new(custom_protocol.clone())).await;
+ let server_handle = spawn_test_tls_server(listener);
+ // Wait for server to start up
+ sleep(Duration::from_millis(100)).await;
+
+ let connector = create_test_connector();
+ let peer = create_peer_with_custom_proto(port, &custom_protocol);
+
+ // Check that the agreed ALPN is custom and matches the expected value
+ if let Ok((stream, reused)) = connector.custom.transport.get_stream(&peer).await {
+ assert!(!reused);
+ match stream.selected_alpn_proto() {
+ Some(ALPN::Custom(protocol)) => {
+ assert_eq!(
+ protocol.protocol(),
+ custom_protocol.as_slice(),
+ "Negotiated custom ALPN does not match expected value"
+ );
+ }
+ other => panic!("Expected custom ALPN, got {:?}", other),
+ }
+ } else {
+ panic!("Should be able to create a stream");
+ }
+
+ let (custom, reused) = connector.get_http_session(&peer).await.unwrap();
+ assert!(!reused);
+ match custom {
+ HttpSession::H1(_) => panic!("expect custom"),
+ HttpSession::H2(_) => panic!("expect custom"),
+ HttpSession::Custom(_) => {}
+ }
+ connector.release_http_session(custom, &peer, None).await;
+
+ // Assert it returns a reused custom session this time
+ let (custom, reused) = connector.get_http_session(&peer).await.unwrap();
+ assert!(reused);
+ match custom {
+ HttpSession::H1(_) => panic!("expect custom"),
+ HttpSession::H2(_) => panic!("expect custom"),
+ HttpSession::Custom(_) => {}
+ }
+
+ // Kill the server task
+ server_handle.abort();
+ sleep(Duration::from_millis(100)).await;
+ }
+
+ // Both client and server are using custom protocols, but different ones - we should create H1 sessions as fallback.
+ // For RusTLS if there is no agreed protocol, the handshake directly fails, so this won't work
+ // TODO: If no ALPN is matched, rustls should return None instead of failing the handshake.
+ #[cfg(not(feature = "rustls"))]
+ #[tokio::test]
+ async fn test_incompatible_custom_client_custom_upstream() {
+ let port = get_available_port().await;
+ let custom_protocol = b"custom".to_vec();
+
+ let listener =
+ build_custom_tls_listener(port, CustomALPN::new(b"different_custom".to_vec())).await;
+ let server_handle = spawn_test_tls_server(listener);
+ // Wait for server to start up
+ sleep(Duration::from_millis(100)).await;
+
+ let connector = create_test_connector();
+ let peer = create_peer_with_custom_proto(port, &custom_protocol);
+
+ // Verify that there is no agreed ALPN
+ if let Ok((stream, reused)) = connector.custom.transport.get_stream(&peer).await {
+ assert!(!reused);
+ assert!(stream.selected_alpn_proto().is_none());
+ } else {
+ panic!("Should be able to create a stream");
+ }
+
+ let (h1, reused) = connector.get_http_session(&peer).await.unwrap();
+ assert!(!reused);
+ match h1 {
+ HttpSession::H1(_) => {}
+ HttpSession::H2(_) => panic!("expect h1"),
+ HttpSession::Custom(_) => panic!("expect h1"),
+ }
+ // Not testing session reuse logic here as we haven't implemented it. Next test will test this.
+
+ // Kill the server task
+ server_handle.abort();
+ sleep(Duration::from_millis(100)).await;
+ }
+
+ // Client thinks server is custom but server is not Custom. Should fallback to H1
+ #[tokio::test]
+ async fn test_custom_client_non_custom_upstream() {
+ let custom_proto = b"custom".to_vec();
+ let connector = create_test_connector();
+ // Upstream supports H1 and H2
+ let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
+ // Client sets upstream ALPN as custom protocol
+ peer.options.alpn = ALPN::Custom(CustomALPN::new(custom_proto));
+
+ // Verify that there is no agreed ALPN
+ if let Ok((stream, reused)) = connector.custom.transport.get_stream(&peer).await {
+ assert!(!reused);
+ assert!(stream.selected_alpn_proto().is_none());
+ } else {
+ panic!("Should be able to create a stream");
+ }
+
+ let (mut h1, reused) = connector.get_http_session(&peer).await.unwrap();
+ // Assert it returns a new H1 session
+ assert!(!reused);
+ match &mut h1 {
+ HttpSession::H1(http) => {
+ get_http(http, 200).await;
+ }
+ HttpSession::H2(_) => panic!("expect h1"),
+ HttpSession::Custom(_) => panic!("expect h1"),
+ }
+ connector.release_http_session(h1, &peer, None).await;
+
+ // Assert it returns a reused h1 session this time
+ let (mut h1, reused) = connector.get_http_session(&peer).await.unwrap();
+ assert!(reused);
+ match &mut h1 {
+ HttpSession::H1(_) => {}
+ HttpSession::H2(_) => panic!("expect h1"),
+ HttpSession::Custom(_) => panic!("expect h1"),
+ }
+ }
+}
+
+// Used for disabling certificate/hostname verification in rustls for tests and custom ALPN/self-signed scenarios.
+#[cfg(all(test, feature = "rustls"))]
+pub mod rustls_no_verify {
+ use rustls::client::danger::{ServerCertVerified, ServerCertVerifier};
+ use rustls::pki_types::{CertificateDer, ServerName};
+ use rustls::Error as TLSError;
+ use std::sync::Arc;
+ #[derive(Debug)]
+ pub struct NoCertificateVerification;
+
+ impl ServerCertVerifier for NoCertificateVerification {
+ fn verify_server_cert(
+ &self,
+ _end_entity: &CertificateDer,
+ _intermediates: &[CertificateDer],
+ _server_name: &ServerName,
+ _scts: &[u8],
+ _now: rustls::pki_types::UnixTime,
+ ) -> Result {
+ Ok(ServerCertVerified::assertion())
+ }
+
+ fn verify_tls12_signature(
+ &self,
+ _message: &[u8],
+ _cert: &CertificateDer,
+ _dss: &rustls::DigitallySignedStruct,
+ ) -> Result {
+ Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
+ }
+
+ fn verify_tls13_signature(
+ &self,
+ _message: &[u8],
+ _cert: &CertificateDer,
+ _dss: &rustls::DigitallySignedStruct,
+ ) -> Result {
+ Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
+ }
+
+ fn supported_verify_schemes(&self) -> Vec {
+ vec![rustls::SignatureScheme::ECDSA_NISTP256_SHA256]
+ }
+ }
+
+ pub fn apply_no_verify(config: &mut rustls::ClientConfig) {
+ config
+ .dangerous()
+ .set_certificate_verifier(Arc::new(NoCertificateVerification));
+ }
}
diff --git a/pingora-core/src/connectors/http/v1.rs b/pingora-core/src/connectors/http/v1.rs
index 36026a40..62ecfcb6 100644
--- a/pingora-core/src/connectors/http/v1.rs
+++ b/pingora-core/src/connectors/http/v1.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -35,7 +35,7 @@ impl Connector {
peer: &P,
) -> Result<(HttpSession, bool)> {
let (stream, reused) = self.transport.get_stream(peer).await?;
- let http = HttpSession::new(stream);
+ let http = HttpSession::new_with_options(stream, peer);
Ok((http, reused))
}
@@ -43,10 +43,9 @@ impl Connector {
&self,
peer: &P,
) -> Option {
- self.transport
- .reused_stream(peer)
- .await
- .map(HttpSession::new)
+ let stream = self.transport.reused_stream(peer).await?;
+ let http = HttpSession::new_with_options(stream, peer);
+ Some(http)
}
pub async fn release_http_session(
@@ -68,7 +67,9 @@ mod tests {
use super::*;
use crate::protocols::l4::socket::SocketAddr;
use crate::upstreams::peer::HttpPeer;
+ use crate::upstreams::peer::Peer;
use pingora_http::RequestHeader;
+ use std::fmt::{Display, Formatter, Result as FmtResult};
async fn get_http(http: &mut HttpSession, expected_status: u16) {
let mut req = Box::new(RequestHeader::build("GET", b"/", None).unwrap());
@@ -102,6 +103,63 @@ mod tests {
assert!(reused);
}
+ #[cfg(unix)]
+ #[tokio::test]
+ async fn test_reuse_rejects_fd_mismatch() {
+ use std::os::unix::prelude::AsRawFd;
+
+ #[derive(Clone)]
+ struct MismatchPeer {
+ reuse_hash: u64,
+ address: SocketAddr,
+ }
+
+ impl Display for MismatchPeer {
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ write!(f, "{:?}", self.address)
+ }
+ }
+
+ impl Peer for MismatchPeer {
+ fn address(&self) -> &SocketAddr {
+ &self.address
+ }
+
+ fn tls(&self) -> bool {
+ false
+ }
+
+ fn sni(&self) -> &str {
+ ""
+ }
+
+ fn reuse_hash(&self) -> u64 {
+ self.reuse_hash
+ }
+
+ fn matches_fd(&self, _fd: V) -> bool {
+ false
+ }
+ }
+
+ let connector = Connector::new(None);
+ let peer = HttpPeer::new(("1.1.1.1", 80), false, "".into());
+ let (mut http, reused) = connector.get_http_session(&peer).await.unwrap();
+ assert!(!reused);
+ get_http(&mut http, 301).await;
+ connector.release_http_session(http, &peer, None).await;
+
+ let mismatch_peer = MismatchPeer {
+ reuse_hash: peer.reuse_hash(),
+ address: peer.address().clone(),
+ };
+
+ assert!(connector
+ .reused_http_session(&mismatch_peer)
+ .await
+ .is_none());
+ }
+
#[tokio::test]
#[cfg(feature = "any_tls")]
async fn test_connect_tls() {
diff --git a/pingora-core/src/connectors/http/v2.rs b/pingora-core/src/connectors/http/v2.rs
index 92cc31d5..c18914c0 100644
--- a/pingora-core/src/connectors/http/v2.rs
+++ b/pingora-core/src/connectors/http/v2.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,6 +14,7 @@
use super::HttpSession;
use crate::connectors::{ConnectorOptions, TransportConnector};
+use crate::protocols::http::custom::client::Session;
use crate::protocols::http::v1::client::HttpSession as Http1Session;
use crate::protocols::http::v2::client::{drive_connection, Http2Session};
use crate::protocols::{Digest, Stream, UniqueIDType};
@@ -62,7 +63,7 @@ pub(crate) struct ConnectionRefInner {
}
#[derive(Clone)]
-pub(crate) struct ConnectionRef(Arc);
+pub struct ConnectionRef(Arc);
impl ConnectionRef {
pub fn new(
@@ -162,7 +163,7 @@ impl ConnectionRef {
}
}
-struct InUsePool {
+pub struct InUsePool {
// TODO: use pingora hashmap to shard the lock contention
pools: RwLock>>,
}
@@ -174,7 +175,7 @@ impl InUsePool {
}
}
- fn insert(&self, reuse_hash: u64, conn: ConnectionRef) {
+ pub fn insert(&self, reuse_hash: u64, conn: ConnectionRef) {
{
let pools = self.pools.read();
if let Some(pool) = pools.get(&reuse_hash) {
@@ -192,14 +193,14 @@ impl InUsePool {
// retrieve a h2 conn ref to create a new stream
// the caller should return the conn ref to this pool if there are still
// capacity left for more streams
- fn get(&self, reuse_hash: u64) -> Option {
+ pub fn get(&self, reuse_hash: u64) -> Option {
let pools = self.pools.read();
pools.get(&reuse_hash)?.get_any().map(|v| v.1)
}
// release a h2_stream, this functional will cause an ConnectionRef to be returned (if exist)
// the caller should update the ref and then decide where to put it (in use pool or idle)
- fn release(&self, reuse_hash: u64, id: UniqueIDType) -> Option {
+ pub fn release(&self, reuse_hash: u64, id: UniqueIDType) -> Option {
let pools = self.pools.read();
if let Some(pool) = pools.get(&reuse_hash) {
pool.remove(id)
@@ -235,13 +236,25 @@ impl Connector {
}
}
+ pub fn transport(&self) -> &TransportConnector {
+ &self.transport
+ }
+
+ pub fn idle_pool(&self) -> &Arc> {
+ &self.idle_pool
+ }
+
+ pub fn in_use_pool(&self) -> &InUsePool {
+ &self.in_use_pool
+ }
+
/// Create a new Http2 connection to the given server
///
/// Either an Http2 or Http1 session can be returned depending on the server's preference.
- pub async fn new_http_session(
+ pub async fn new_http_session(
&self,
peer: &P,
- ) -> Result {
+ ) -> Result> {
let stream = self.transport.new_stream(peer).await?;
// check alpn
@@ -249,7 +262,9 @@ impl Connector {
Some(ALPN::H2) => { /* continue */ }
Some(_) => {
// H2 not supported
- return Ok(HttpSession::H1(Http1Session::new(stream)));
+ return Ok(HttpSession::H1(Http1Session::new_with_options(
+ stream, peer,
+ )));
}
None => {
// if tls but no ALPN, default to h1
@@ -257,9 +272,11 @@ impl Connector {
if peer.tls()
|| peer
.get_peer_options()
- .map_or(true, |o| o.alpn.get_min_http_version() == 1)
+ .is_none_or(|o| o.alpn.get_min_http_version() == 1)
{
- return Ok(HttpSession::H1(Http1Session::new(stream)));
+ return Ok(HttpSession::H1(Http1Session::new_with_options(
+ stream, peer,
+ )));
}
// else: min http version=H2 over plaintext, there is no ALPN anyways, we trust
// the caller that the server speaks h2c
@@ -302,8 +319,28 @@ impl Connector {
let maybe_conn = self
.in_use_pool
.get(reuse_hash)
+ // filter out closed, InUsePool does not have notify closed eviction like the idle pool
+ // and it's possible we get an in use connection that is closed and not yet released
+ .filter(|c| !c.is_closed())
.or_else(|| self.idle_pool.get(&reuse_hash));
if let Some(conn) = maybe_conn {
+ #[cfg(unix)]
+ if !peer.matches_fd(conn.id()) {
+ return Ok(None);
+ }
+ #[cfg(windows)]
+ {
+ use std::os::windows::io::{AsRawSocket, RawSocket};
+ struct WrappedRawSocket(RawSocket);
+ impl AsRawSocket for WrappedRawSocket {
+ fn as_raw_socket(&self) -> RawSocket {
+ self.0
+ }
+ }
+ if !peer.matches_sock(WrappedRawSocket(conn.id() as RawSocket)) {
+ return Ok(None);
+ }
+ }
let h2_stream = conn.spawn_stream().await?;
if conn.more_streams_allowed() {
self.in_use_pool.insert(reuse_hash, conn);
@@ -353,14 +390,12 @@ impl Connector {
};
let closed = conn.0.closed.clone();
let (notify_evicted, watch_use) = self.idle_pool.put(&meta, conn);
- if let Some(to) = idle_timeout {
- let pool = self.idle_pool.clone(); //clone the arc
- let rt = pingora_runtime::current_handle();
- rt.spawn(async move {
- pool.idle_timeout(&meta, to, notify_evicted, closed, watch_use)
- .await;
- });
- }
+ let pool = self.idle_pool.clone(); //clone the arc
+ let rt = pingora_runtime::current_handle();
+ rt.spawn(async move {
+ pool.idle_timeout(&meta, idle_timeout, notify_evicted, closed, watch_use)
+ .await;
+ });
} else {
self.in_use_pool.insert(reuse_hash, conn);
drop(locked);
@@ -388,7 +423,7 @@ impl Connector {
// 8 Mbytes = 80 Mbytes X 100ms, which should be enough for most links.
const H2_WINDOW_SIZE: u32 = 1 << 23;
-pub(crate) async fn handshake(
+pub async fn handshake(
stream: Stream,
max_streams: usize,
h2_ping_interval: Option,
@@ -457,6 +492,7 @@ pub(crate) async fn handshake(
))
}
+// TODO(slava): add custom unit tests
#[cfg(test)]
mod tests {
use super::*;
@@ -468,10 +504,14 @@ mod tests {
let connector = Connector::new(None);
let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
peer.options.set_http_version(2, 2);
- let h2 = connector.new_http_session(&peer).await.unwrap();
+ let h2 = connector
+ .new_http_session::(&peer)
+ .await
+ .unwrap();
match h2 {
HttpSession::H1(_) => panic!("expect h2"),
HttpSession::H2(h2_stream) => assert!(!h2_stream.ping_timedout()),
+ HttpSession::Custom(_) => panic!("expect h2"),
}
}
@@ -482,10 +522,14 @@ mod tests {
let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
// a hack to force h1, new_http_session() in the future might validate this setting
peer.options.set_http_version(1, 1);
- let h2 = connector.new_http_session(&peer).await.unwrap();
+ let h2 = connector
+ .new_http_session::(&peer)
+ .await
+ .unwrap();
match h2 {
HttpSession::H1(_) => {}
HttpSession::H2(_) => panic!("expect h1"),
+ HttpSession::Custom(_) => panic!("expect h1"),
}
}
@@ -494,10 +538,14 @@ mod tests {
let connector = Connector::new(None);
let mut peer = HttpPeer::new(("1.1.1.1", 80), false, "".into());
peer.options.set_http_version(2, 1);
- let h2 = connector.new_http_session(&peer).await.unwrap();
+ let h2 = connector
+ .new_http_session::(&peer)
+ .await
+ .unwrap();
match h2 {
HttpSession::H1(_) => {}
HttpSession::H2(_) => panic!("expect h1"),
+ HttpSession::Custom(_) => panic!("expect h1"),
}
}
@@ -508,10 +556,14 @@ mod tests {
let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
peer.options.set_http_version(2, 2);
peer.options.max_h2_streams = 1;
- let h2 = connector.new_http_session(&peer).await.unwrap();
+ let h2 = connector
+ .new_http_session::(&peer)
+ .await
+ .unwrap();
let h2_1 = match h2 {
HttpSession::H1(_) => panic!("expect h2"),
HttpSession::H2(h2_stream) => h2_stream,
+ HttpSession::Custom(_) => panic!("expect h2"),
};
let id = h2_1.conn.id();
@@ -540,10 +592,14 @@ mod tests {
let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
peer.options.set_http_version(2, 2);
peer.options.max_h2_streams = 3;
- let h2 = connector.new_http_session(&peer).await.unwrap();
+ let h2 = connector
+ .new_http_session::(&peer)
+ .await
+ .unwrap();
let h2_1 = match h2 {
HttpSession::H1(_) => panic!("expect h2"),
HttpSession::H2(h2_stream) => h2_stream,
+ HttpSession::Custom(_) => panic!("expect h2"),
};
let id = h2_1.conn.id();
@@ -573,4 +629,75 @@ mod tests {
let h2_5 = connector.reused_http_session(&peer).await.unwrap().unwrap();
assert_eq!(id, h2_5.conn.id());
}
+
+ #[cfg(all(feature = "any_tls", unix))]
+ #[tokio::test]
+ async fn test_h2_reuse_rejects_fd_mismatch() {
+ use crate::protocols::l4::socket::SocketAddr;
+ use crate::upstreams::peer::Peer;
+ use std::fmt::{Display, Formatter, Result as FmtResult};
+ use std::os::unix::prelude::AsRawFd;
+
+ #[derive(Clone)]
+ struct MismatchPeer {
+ reuse_hash: u64,
+ address: SocketAddr,
+ }
+
+ impl Display for MismatchPeer {
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ write!(f, "{:?}", self.address)
+ }
+ }
+
+ impl Peer for MismatchPeer {
+ fn address(&self) -> &SocketAddr {
+ &self.address
+ }
+
+ fn tls(&self) -> bool {
+ true
+ }
+
+ fn sni(&self) -> &str {
+ ""
+ }
+
+ fn reuse_hash(&self) -> u64 {
+ self.reuse_hash
+ }
+
+ fn matches_fd<V: AsRawFd>(&self, _fd: V) -> bool {
+ false
+ }
+ }
+
+ let connector = Connector::new(None);
+ let mut peer = HttpPeer::new(("1.1.1.1", 443), true, "one.one.one.one".into());
+ peer.options.set_http_version(2, 2);
+ peer.options.max_h2_streams = 1;
+
+ let h2 = connector
+ .new_http_session::<_>(&peer)
+ .await
+ .unwrap();
+ let h2_stream = match h2 {
+ HttpSession::H1(_) => panic!("expect h2"),
+ HttpSession::H2(h2_stream) => h2_stream,
+ HttpSession::Custom(_) => panic!("expect h2"),
+ };
+
+ connector.release_http_session(h2_stream, &peer, None);
+
+ let mismatch_peer = MismatchPeer {
+ reuse_hash: peer.reuse_hash(),
+ address: peer.address().clone(),
+ };
+
+ assert!(connector
+ .reused_http_session(&mismatch_peer)
+ .await
+ .unwrap()
+ .is_none());
+ }
}
diff --git a/pingora-core/src/connectors/l4.rs b/pingora-core/src/connectors/l4.rs
index dc442644..bd7439d4 100644
--- a/pingora-core/src/connectors/l4.rs
+++ b/pingora-core/src/connectors/l4.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -280,7 +280,7 @@ async fn proxy_connect<P: Peer + Send + Sync>(peer: &P) -> Result<(Stream, Digest)> {
);
let req_header = raw_connect::generate_connect_header(&proxy.host, proxy.port, &mut headers)?;
- let fut = raw_connect::connect(stream, &req_header);
+ let fut = raw_connect::connect(stream, &req_header, peer);
let (mut stream, digest) = match peer.connection_timeout() {
Some(t) => pingora_timeout::timeout(t, fut)
.await
@@ -314,8 +314,6 @@ mod tests {
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::io::AsyncWriteExt;
- #[cfg(unix)]
- use tokio::net::UnixListener;
use tokio::time::sleep;
/// Some of the tests below are flaky when making new connections to mock
@@ -465,31 +463,20 @@ mod tests {
}
#[cfg(unix)]
- const MOCK_UDS_PATH: &str = "/tmp/test_unix_connect_proxy.sock";
-
- // one-off mock server
- #[cfg(unix)]
- async fn mock_connect_server() {
- let _ = std::fs::remove_file(MOCK_UDS_PATH);
- let listener = UnixListener::bind(MOCK_UDS_PATH).unwrap();
- if let Ok((mut stream, _addr)) = listener.accept().await {
- stream.write_all(b"HTTP/1.1 200 OK\r\n\r\n").await.unwrap();
- // wait a bit so that the client can read
- tokio::time::sleep(std::time::Duration::from_millis(100)).await;
- }
- let _ = std::fs::remove_file(MOCK_UDS_PATH);
- }
-
#[tokio::test(flavor = "multi_thread")]
async fn test_connect_proxy_work() {
- tokio::spawn(async {
- mock_connect_server().await;
- });
- // wait for the server to start
- tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+ use crate::connectors::test_utils;
+
+ let socket_path = test_utils::unique_uds_path("connect_proxy_work");
+ let (ready_rx, shutdown_tx, server_handle) =
+ test_utils::spawn_mock_uds_server(socket_path.clone(), b"HTTP/1.1 200 OK\r\n\r\n");
+
+ // Wait for the server to be ready
+ ready_rx.await.unwrap();
+
let mut peer = HttpPeer::new("1.1.1.1:80".to_string(), false, "".to_string());
let mut path = PathBuf::new();
- path.push(MOCK_UDS_PATH);
+ path.push(&socket_path);
peer.proxy = Some(Proxy {
next_hop: path.into(),
host: "1.1.1.1".into(),
@@ -498,35 +485,27 @@ mod tests {
});
let new_session = connect(&peer, None).await;
assert!(new_session.is_ok());
- }
-
- #[cfg(unix)]
- const MOCK_BAD_UDS_PATH: &str = "/tmp/test_unix_bad_connect_proxy.sock";
- // one-off mock bad proxy
- // closes connection upon accepting
- #[cfg(unix)]
- async fn mock_connect_bad_server() {
- let _ = std::fs::remove_file(MOCK_BAD_UDS_PATH);
- let listener = UnixListener::bind(MOCK_BAD_UDS_PATH).unwrap();
- if let Ok((mut stream, _addr)) = listener.accept().await {
- stream.shutdown().await.unwrap();
- tokio::time::sleep(std::time::Duration::from_millis(100)).await;
- }
- let _ = std::fs::remove_file(MOCK_BAD_UDS_PATH);
+ // Clean up
+ let _ = shutdown_tx.send(());
+ server_handle.await.unwrap();
}
#[cfg(unix)]
#[tokio::test(flavor = "multi_thread")]
async fn test_connect_proxy_conn_closed() {
- tokio::spawn(async {
- mock_connect_bad_server().await;
- });
- // wait for the server to start
- tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+ use crate::connectors::test_utils;
+
+ let socket_path = test_utils::unique_uds_path("connect_proxy_conn_closed");
+ let (ready_rx, shutdown_tx, server_handle) =
+ test_utils::spawn_mock_uds_server_close_immediate(socket_path.clone());
+
+ // Wait for the server to be ready
+ ready_rx.await.unwrap();
+
let mut peer = HttpPeer::new("1.1.1.1:80".to_string(), false, "".to_string());
let mut path = PathBuf::new();
- path.push(MOCK_BAD_UDS_PATH);
+ path.push(&socket_path);
peer.proxy = Some(Proxy {
next_hop: path.into(),
host: "1.1.1.1".into(),
@@ -537,6 +516,10 @@ mod tests {
let err = new_session.unwrap_err();
assert_eq!(err.etype(), &ConnectionClosed);
assert!(!err.retry());
+
+ // Clean up
+ let _ = shutdown_tx.send(());
+ server_handle.await.unwrap();
}
#[cfg(target_os = "linux")]
diff --git a/pingora-core/src/connectors/mod.rs b/pingora-core/src/connectors/mod.rs
index 1e6c08dc..3e3c1c46 100644
--- a/pingora-core/src/connectors/mod.rs
+++ b/pingora-core/src/connectors/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -399,6 +399,86 @@ fn test_reusable_stream(stream: &mut Stream) -> bool {
}
}
+/// Test utilities for creating mock acceptors.
+#[cfg(all(test, unix))]
+pub(crate) mod test_utils {
+ use tokio::io::AsyncWriteExt;
+ use tokio::net::UnixListener;
+
+ /// Generates a unique socket path for testing to avoid conflicts when running in parallel
+ pub fn unique_uds_path(test_name: &str) -> String {
+ format!(
+ "/tmp/test_{test_name}_{:?}_{}.sock",
+ std::thread::current().id(),
+ std::process::id()
+ )
+ }
+
+ /// A mock UDS server that accepts one connection, sends data, and waits for shutdown signal
+ ///
+ /// Returns: (ready_rx, shutdown_tx, server_handle)
+ /// - ready_rx: Wait on this to know when server is ready to accept connections
+ /// - shutdown_tx: Send on this to tell server to shut down
+ /// - server_handle: Join handle for the server task
+ pub fn spawn_mock_uds_server(
+ socket_path: String,
+ response: &'static [u8],
+ ) -> (
+ tokio::sync::oneshot::Receiver<()>,
+ tokio::sync::oneshot::Sender<()>,
+ tokio::task::JoinHandle<()>,
+ ) {
+ let (ready_tx, ready_rx) = tokio::sync::oneshot::channel();
+ let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel();
+
+ let server_handle = tokio::spawn(async move {
+ let _ = std::fs::remove_file(&socket_path);
+ let listener = UnixListener::bind(&socket_path).unwrap();
+ // Signal that the server is ready to accept connections
+ let _ = ready_tx.send(());
+
+ if let Ok((mut stream, _addr)) = listener.accept().await {
+ let _ = stream.write_all(response).await;
+ // Keep the connection open until the test tells us to shutdown
+ let _ = shutdown_rx.await;
+ }
+ let _ = std::fs::remove_file(&socket_path);
+ });
+
+ (ready_rx, shutdown_tx, server_handle)
+ }
+
+ /// A mock UDS server that immediately closes connections (for testing error handling)
+ ///
+ /// Returns: (ready_rx, shutdown_tx, server_handle)
+ pub fn spawn_mock_uds_server_close_immediate(
+ socket_path: String,
+ ) -> (
+ tokio::sync::oneshot::Receiver<()>,
+ tokio::sync::oneshot::Sender<()>,
+ tokio::task::JoinHandle<()>,
+ ) {
+ let (ready_tx, ready_rx) = tokio::sync::oneshot::channel();
+ let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel();
+
+ let server_handle = tokio::spawn(async move {
+ let _ = std::fs::remove_file(&socket_path);
+ let listener = UnixListener::bind(&socket_path).unwrap();
+ // Signal that the server is ready to accept connections
+ let _ = ready_tx.send(());
+
+ if let Ok((mut stream, _addr)) = listener.accept().await {
+ let _ = stream.shutdown().await;
+ // Wait for shutdown signal before cleaning up
+ let _ = shutdown_rx.await;
+ }
+ let _ = std::fs::remove_file(&socket_path);
+ });
+
+ (ready_rx, shutdown_tx, server_handle)
+ }
+}
+
#[cfg(test)]
#[cfg(feature = "any_tls")]
mod tests {
@@ -407,9 +487,6 @@ mod tests {
use super::*;
use crate::upstreams::peer::BasicPeer;
- use tokio::io::AsyncWriteExt;
- #[cfg(unix)]
- use tokio::net::UnixListener;
// 192.0.2.1 is effectively a black hole
const BLACK_HOLE: &str = "192.0.2.1:79";
@@ -440,38 +517,34 @@ mod tests {
assert!(reused);
}
- #[cfg(unix)]
- const MOCK_UDS_PATH: &str = "/tmp/test_unix_transport_connector.sock";
-
- // one-off mock server
- #[cfg(unix)]
- async fn mock_connect_server() {
- let _ = std::fs::remove_file(MOCK_UDS_PATH);
- let listener = UnixListener::bind(MOCK_UDS_PATH).unwrap();
- if let Ok((mut stream, _addr)) = listener.accept().await {
- stream.write_all(b"it works!").await.unwrap();
- // wait a bit so that the client can read
- tokio::time::sleep(std::time::Duration::from_millis(100)).await;
- }
- let _ = std::fs::remove_file(MOCK_UDS_PATH);
- }
#[tokio::test(flavor = "multi_thread")]
+ #[cfg(unix)]
async fn test_connect_uds() {
- tokio::spawn(async {
- mock_connect_server().await;
- });
+ let socket_path = test_utils::unique_uds_path("transport_connector");
+ let (ready_rx, shutdown_tx, server_handle) =
+ test_utils::spawn_mock_uds_server(socket_path.clone(), b"it works!");
+
+ // Wait for the server to be ready before connecting
+ ready_rx.await.unwrap();
+
// create a new service at /tmp
let connector = TransportConnector::new(None);
- let peer = BasicPeer::new_uds(MOCK_UDS_PATH).unwrap();
+ let peer = BasicPeer::new_uds(&socket_path).unwrap();
// make a new connection to mock uds
let mut stream = connector.new_stream(&peer).await.unwrap();
let mut buf = [0; 9];
let _ = stream.read(&mut buf).await.unwrap();
assert_eq!(&buf, b"it works!");
- connector.release_stream(stream, peer.reuse_hash(), None);
- let (_, reused) = connector.get_stream(&peer).await.unwrap();
+ // Test connection reuse by releasing and getting the stream back
+ connector.release_stream(stream, peer.reuse_hash(), None);
+ let (stream, reused) = connector.get_stream(&peer).await.unwrap();
assert!(reused);
+
+ // Clean up: drop the stream, tell server to shutdown, and wait for it
+ drop(stream);
+ let _ = shutdown_tx.send(());
+ server_handle.await.unwrap();
}
 async fn do_test_conn_timeout(conf: Option<ConnectorOptions>) {
diff --git a/pingora-core/src/connectors/offload.rs b/pingora-core/src/connectors/offload.rs
index 06fc0895..fe2d1c72 100644
--- a/pingora-core/src/connectors/offload.rs
+++ b/pingora-core/src/connectors/offload.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/connectors/tls/boringssl_openssl/mod.rs b/pingora-core/src/connectors/tls/boringssl_openssl/mod.rs
index f9b8c3f1..9bb3a5a6 100644
--- a/pingora-core/src/connectors/tls/boringssl_openssl/mod.rs
+++ b/pingora-core/src/connectors/tls/boringssl_openssl/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -198,7 +198,7 @@ where
}
// second_keyshare is default true
- if !peer.get_peer_options().map_or(true, |o| o.second_keyshare) {
+ if !peer.get_peer_options().is_none_or(|o| o.second_keyshare) {
ssl_use_second_key_share(&mut ssl_conf, false);
}
@@ -246,7 +246,11 @@ where
}
clear_error_stack();
- let connect_future = handshake(ssl_conf, peer.sni(), stream);
+
+ let complete_hook = peer
+ .get_peer_options()
+ .and_then(|o| o.upstream_tls_handshake_complete_hook.clone());
+ let connect_future = handshake(ssl_conf, peer.sni(), stream, complete_hook);
match peer.connection_timeout() {
Some(t) => match pingora_timeout::timeout(t, connect_future).await {
diff --git a/pingora-core/src/connectors/tls/mod.rs b/pingora-core/src/connectors/tls/mod.rs
index 4c41dfa5..c49be80b 100644
--- a/pingora-core/src/connectors/tls/mod.rs
+++ b/pingora-core/src/connectors/tls/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/connectors/tls/rustls/mod.rs b/pingora-core/src/connectors/tls/rustls/mod.rs
index 530d50cb..ff375929 100644
--- a/pingora-core/src/connectors/tls/rustls/mod.rs
+++ b/pingora-core/src/connectors/tls/rustls/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -22,8 +22,14 @@ use pingora_error::{
};
use pingora_rustls::{
load_ca_file_into_store, load_certs_and_key_files, load_platform_certs_incl_env_into_store,
- version, CertificateDer, ClientConfig as RusTlsClientConfig, PrivateKeyDer, RootCertStore,
- TlsConnector as RusTlsConnector,
+ version, CertificateDer, CertificateError, ClientConfig as RusTlsClientConfig,
+ DigitallySignedStruct, KeyLogFile, PrivateKeyDer, RootCertStore, RusTlsError, ServerName,
+ SignatureScheme, TlsConnector as RusTlsConnector, UnixTime, WebPkiServerVerifier,
+};
+
+// Uses custom certificate verification from rustls's 'danger' module.
+use pingora_rustls::{
+ HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier as RusTlsServerCertVerifier,
};
use crate::protocols::tls::{client::handshake, TlsStream};
@@ -75,7 +81,6 @@ impl TlsConnector {
if let Some((cert, key)) = conf.cert_key_file.as_ref() {
certs_key = load_certs_and_key_files(cert, key)?;
}
- // TODO: support SSLKEYLOGFILE
} else {
load_platform_certs_incl_env_into_store(&mut ca_certs)?;
}
@@ -88,7 +93,7 @@ impl TlsConnector {
RusTlsClientConfig::builder_with_protocol_versions(&[&version::TLS12, &version::TLS13])
.with_root_certificates(ca_certs.clone());
- let config = match certs_key {
+ let mut config = match certs_key {
Some((certs, key)) => {
match builder.with_client_auth_cert(certs.clone(), key.clone_key()) {
Ok(config) => config,
@@ -102,6 +107,13 @@ impl TlsConnector {
None => builder.with_no_client_auth(),
};
+ // Enable SSLKEYLOGFILE support for debugging TLS traffic
+ if let Some(options) = options.as_ref() {
+ if options.debug_ssl_keylog {
+ config.key_log = Arc::new(KeyLogFile::new());
+ }
+ }
+
Ok(Connector {
ctx: Arc::new(TlsConnector {
config: Arc::new(config),
@@ -155,10 +167,12 @@ where
.with_root_certificates(Arc::clone(&tls_ctx.ca_certs));
debug!("added root ca certificates");
- let updated_config = builder.with_client_auth_cert(certs, private_key).or_err(
+ let mut updated_config = builder.with_client_auth_cert(certs, private_key).or_err(
InvalidCert,
"Failed to use peer cert/key to update Rustls config",
)?;
+ // Preserve keylog setting from original config
+ updated_config.key_log = Arc::clone(&config.key_log);
Some(updated_config)
}
};
@@ -174,30 +188,64 @@ where
}
}
+ let mut domain = peer.sni().to_string();
+
+ if let Some(updated_config) = updated_config_opt.as_mut() {
+ let verification_mode = if peer.sni().is_empty() {
+ updated_config.enable_sni = false;
+ /* NOTE: technically we can still verify who signs the cert but turn it off to be
+ consistent with nginx's behavior */
+ Some(VerificationMode::SkipAll) // disable verification if sni does not exist
+ } else if !peer.verify_cert() {
+ Some(VerificationMode::SkipAll)
+ } else if !peer.verify_hostname() {
+ Some(VerificationMode::SkipHostname)
+ } else {
+ // if sni had underscores in leftmost label replace and add
+ if let Some(sni_s) = replace_leftmost_underscore(peer.sni()) {
+ domain = sni_s;
+ }
+ None
+ // to use the custom verifier for the full verify:
+ // Some(VerificationMode::Full)
+ };
+
+ // Builds the custom_verifier when verification_mode is set.
+ if let Some(mode) = verification_mode {
+ let delegate = WebPkiServerVerifier::builder(Arc::clone(&tls_ctx.ca_certs))
+ .build()
+ .or_err(InvalidCert, "Failed to build WebPkiServerVerifier")?;
+
+ let custom_verifier = Arc::new(CustomServerCertVerifier::new(delegate, mode));
+
+ updated_config
+ .dangerous()
+ .set_certificate_verifier(custom_verifier);
+ }
+ }
+
// TODO: curve setup from peer
// - second key share from peer, currently only used in boringssl with PQ features
+ // Patch config for dangerous verifier if needed, but only in test builds.
+ #[cfg(test)]
+ if !peer.verify_cert() || !peer.verify_hostname() {
+ use crate::connectors::http::rustls_no_verify::apply_no_verify;
+ if let Some(cfg) = updated_config_opt.as_mut() {
+ apply_no_verify(cfg);
+ } else {
+ let mut tmp = RusTlsClientConfig::clone(config);
+ apply_no_verify(&mut tmp);
+ updated_config_opt = Some(tmp);
+ }
+ }
+
let tls_conn = if let Some(cfg) = updated_config_opt {
RusTlsConnector::from(Arc::new(cfg))
} else {
RusTlsConnector::from(Arc::clone(config))
};
- // TODO: for consistent behavior between TLS providers some additions are required
- // - allowing to disable verification
- // - the validation/replace logic would need adjustments to match the boringssl/openssl behavior
- // implementing a custom certificate_verifier could be used to achieve matching behavior
- //let d_conf = config.dangerous();
- //d_conf.set_certificate_verifier(...);
-
- let mut domain = peer.sni().to_string();
- if peer.verify_cert() && peer.verify_hostname() {
- // TODO: streamline logic with replacing first underscore within TLS implementations
- if let Some(sni_s) = replace_leftmost_underscore(peer.sni()) {
- domain = sni_s;
- }
- }
-
let connect_future = handshake(&tls_conn, &domain, stream);
match peer.connection_timeout() {
@@ -211,3 +259,95 @@ where
None => connect_future.await,
}
}
+
+#[allow(dead_code)]
+#[derive(Debug)]
+pub enum VerificationMode {
+ SkipHostname,
+ SkipAll,
+ Full,
+ // Note: "Full" Included for completeness, making this verifier self-contained
+ // and explicit about all possible verification modes, not just exceptions.
+}
+
+#[derive(Debug)]
+pub struct CustomServerCertVerifier {
+ delegate: Arc<WebPkiServerVerifier>,
+ verification_mode: VerificationMode,
+}
+
+impl CustomServerCertVerifier {
+ pub fn new(delegate: Arc<WebPkiServerVerifier>, verification_mode: VerificationMode) -> Self {
+ Self {
+ delegate,
+ verification_mode,
+ }
+ }
+}
+
+// CustomServerCertVerifier delegates TLS signature verification and allows 3 VerificationMode:
+// Full: delegates all verification to the original WebPkiServerVerifier
+// SkipHostname: same as "Full" but ignores "NotValidForName" certificate errors
+// SkipAll: all certificate verification checks are skipped.
+impl RusTlsServerCertVerifier for CustomServerCertVerifier {
+ fn verify_server_cert(
+ &self,
+ _end_entity: &CertificateDer<'_>,
+ _intermediates: &[CertificateDer<'_>],
+ _server_name: &ServerName<'_>,
+ _ocsp: &[u8],
+ _now: UnixTime,
+ ) -> Result<ServerCertVerified, RusTlsError> {
+ match self.verification_mode {
+ VerificationMode::Full => self.delegate.verify_server_cert(
+ _end_entity,
+ _intermediates,
+ _server_name,
+ _ocsp,
+ _now,
+ ),
+ VerificationMode::SkipHostname => {
+ match self.delegate.verify_server_cert(
+ _end_entity,
+ _intermediates,
+ _server_name,
+ _ocsp,
+ _now,
+ ) {
+ Ok(scv) => Ok(scv),
+ Err(RusTlsError::InvalidCertificate(cert_error)) => {
+ if let CertificateError::NotValidForNameContext { .. } = cert_error {
+ Ok(ServerCertVerified::assertion())
+ } else {
+ Err(RusTlsError::InvalidCertificate(cert_error))
+ }
+ }
+ Err(e) => Err(e),
+ }
+ }
+ VerificationMode::SkipAll => Ok(ServerCertVerified::assertion()),
+ }
+ }
+
+ fn verify_tls12_signature(
+ &self,
+ message: &[u8],
+ cert: &CertificateDer<'_>,
+ dss: &DigitallySignedStruct,
+ ) -> Result<HandshakeSignatureValid, RusTlsError> {
+ self.delegate.verify_tls12_signature(message, cert, dss)
+ }
+
+ fn verify_tls13_signature(
+ &self,
+ message: &[u8],
+ cert: &CertificateDer<'_>,
+ dss: &DigitallySignedStruct,
+ ) -> Result<HandshakeSignatureValid, RusTlsError> {
+ self.delegate.verify_tls13_signature(message, cert, dss)
+ }
+
+ fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
+ self.delegate.supported_verify_schemes()
+ }
+}
diff --git a/pingora-core/src/connectors/tls/s2n/mod.rs b/pingora-core/src/connectors/tls/s2n/mod.rs
index 36f931d2..fbfdd7e7 100644
--- a/pingora-core/src/connectors/tls/s2n/mod.rs
+++ b/pingora-core/src/connectors/tls/s2n/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/lib.rs b/pingora-core/src/lib.rs
index 544a8669..a4450632 100644
--- a/pingora-core/src/lib.rs
+++ b/pingora-core/src/lib.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -35,7 +35,57 @@
//! If looking to build a (reverse) proxy, see [`pingora-proxy`](https://docs.rs/pingora-proxy) crate.
//!
//! # Optional features
-//! `boringssl`: Switch the internal TLS library from OpenSSL to BoringSSL.
+//!
+//! ## TLS backends (mutually exclusive)
+//! - `openssl`: Use OpenSSL as the TLS library (default if no TLS feature is specified)
+//! - `boringssl`: Use BoringSSL as the TLS library (FIPS compatible)
+//! - `rustls`: Use Rustls as the TLS library
+//!
+//! ## Additional features
+//! - `connection_filter`: Enable early TCP connection filtering before TLS handshake.
+//! This allows implementing custom logic to accept/reject connections based on peer address
+//! with zero overhead when disabled.
+//! - `sentry`: Enable Sentry error reporting integration
+//! - `patched_http1`: Enable patched HTTP/1 parser
+//!
+//! # Connection Filtering
+//!
+//! With the `connection_filter` feature enabled, you can implement early connection filtering
+//! at the TCP level, before any TLS handshake or HTTP processing occurs. This is useful for:
+//! - IP-based access control
+//! - Rate limiting at the connection level
+//! - Geographic restrictions
+//! - DDoS mitigation
+//!
+//! ## Example
+//!
+//! ```rust,ignore
+//! # #[cfg(feature = "connection_filter")]
+//! # {
+//! use async_trait::async_trait;
+//! use pingora_core::listeners::ConnectionFilter;
+//! use std::net::SocketAddr;
+//! use std::sync::Arc;
+//!
+//! #[derive(Debug)]
+//! struct MyFilter;
+//!
+//! #[async_trait]
+//! impl ConnectionFilter for MyFilter {
+//! async fn should_accept(&self, addr: &SocketAddr) -> bool {
+//! // Custom logic to filter connections
+//! !is_blocked_ip(addr.ip())
+//! }
+//! }
+//!
+//! // Apply the filter to a service
+//! let mut service = my_service();
+//! service.set_connection_filter(Arc::new(MyFilter));
+//! # }
+//! ```
+//!
+//! When the `connection_filter` feature is disabled, the filter API remains available
+//! but becomes a no-op, ensuring zero overhead for users who don't need this functionality.
// This enables the feature that labels modules that are only available with
// certain pingora features
diff --git a/pingora-core/src/listeners/connection_filter.rs b/pingora-core/src/listeners/connection_filter.rs
new file mode 100644
index 00000000..10ae642f
--- /dev/null
+++ b/pingora-core/src/listeners/connection_filter.rs
@@ -0,0 +1,147 @@
+// Copyright 2026 Cloudflare, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Connection filtering trait for early connection filtering
+//!
+//! This module provides the [`ConnectionFilter`] trait which allows filtering
+//! incoming connections at the TCP level, before the TLS handshake occurs.
+//!
+//! # Feature Flag
+//!
+//! This functionality requires the `connection_filter` feature to be enabled:
+//! ```toml
+//! [dependencies]
+//! pingora-core = { version = "0.5", features = ["connection_filter"] }
+//! ```
+//!
+//! When the feature is disabled, a no-op implementation is provided for API compatibility.
+
+use async_trait::async_trait;
+use std::fmt::Debug;
+use std::net::SocketAddr;
+
+/// A trait for filtering incoming connections at the TCP level.
+///
+/// Implementations of this trait can inspect the peer address of incoming
+/// connections and decide whether to accept or reject them before any
+/// further processing (including TLS handshake) occurs.
+///
+/// # Example
+///
+/// ```rust,no_run
+/// use async_trait::async_trait;
+/// use pingora_core::listeners::ConnectionFilter;
+/// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+///
+/// #[derive(Debug)]
+/// struct BlocklistFilter {
+/// blocked_ips: Vec<IpAddr>,
+/// }
+///
+/// #[async_trait]
+/// impl ConnectionFilter for BlocklistFilter {
+/// async fn should_accept(&self, addr: Option<&SocketAddr>) -> bool {
+/// addr.map(|a| !self.blocked_ips.contains(&a.ip())).unwrap_or(true)
+/// }
+/// }
+/// ```
+///
+/// # Performance Considerations
+///
+/// This filter is called for every incoming connection, so implementations
+/// should be efficient. Consider caching or pre-computing data structures
+/// for IP filtering rather than doing expensive operations per connection.
+#[async_trait]
+pub trait ConnectionFilter: Debug + Send + Sync {
+ /// Determines whether an incoming connection should be accepted.
+ ///
+ /// This method is called after a TCP connection is accepted but before
+ /// any further processing (including TLS handshake).
+ ///
+ /// # Arguments
+ ///
+ /// * `addr` - The socket address of the incoming connection
+ ///
+ /// # Returns
+ ///
+ /// * `true` - Accept the connection and continue processing
+ /// * `false` - Drop the connection immediately
+ ///
+ /// # Example
+ ///
+ /// ```rust,no_run
+ /// async fn should_accept(&self, addr: Option<&SocketAddr>) -> bool {
+ /// // Accept only connections from private IPv4 ranges
+ /// match addr.map(|a| a.ip()) {
+ /// Some(IpAddr::V4(ip)) => ip.is_private(),
+ /// _ => true,
+ /// }
+ /// }
+ /// ```
+ async fn should_accept(&self, _addr: Option<&SocketAddr>) -> bool {
+ true
+ }
+}
+
+/// Default implementation that accepts all connections.
+///
+/// This filter accepts all incoming connections without any filtering.
+/// It's used as the default when no custom filter is specified.
+#[derive(Debug, Clone)]
+pub struct AcceptAllFilter;
+
+#[async_trait]
+impl ConnectionFilter for AcceptAllFilter {
+ // Uses default implementation
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::net::{IpAddr, Ipv4Addr};
+
+ #[derive(Debug, Clone)]
+ struct BlockListFilter {
+ blocked_ips: Vec<IpAddr>,
+ }
+
+ #[async_trait]
+ impl ConnectionFilter for BlockListFilter {
+ async fn should_accept(&self, addr_opt: Option<&SocketAddr>) -> bool {
+ addr_opt
+ .map(|addr| !self.blocked_ips.contains(&addr.ip()))
+ .unwrap_or(true)
+ }
+ }
+
+ #[tokio::test]
+ async fn test_accept_all_filter() {
+ let filter = AcceptAllFilter;
+ let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+ assert!(filter.should_accept(Some(&addr)).await);
+ }
+
+ #[tokio::test]
+ async fn test_blocklist_filter() {
+ let filter = BlockListFilter {
+ blocked_ips: vec![IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1))],
+ };
+
+ let blocked_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
+ let allowed_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 2)), 8080);
+
+ assert!(!filter.should_accept(Some(&blocked_addr)).await);
+ assert!(filter.should_accept(Some(&allowed_addr)).await);
+ }
+}
diff --git a/pingora-core/src/listeners/l4.rs b/pingora-core/src/listeners/l4.rs
index 4dc07bce..1fee7437 100644
--- a/pingora-core/src/listeners/l4.rs
+++ b/pingora-core/src/listeners/l4.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#[cfg(feature = "connection_filter")]
+use log::debug;
use log::warn;
use pingora_error::{
ErrorType::{AcceptError, BindError},
@@ -29,9 +31,16 @@ use std::time::Duration;
use std::{fs::Permissions, sync::Arc};
use tokio::net::TcpSocket;
+#[cfg(feature = "connection_filter")]
+use super::connection_filter::ConnectionFilter;
+#[cfg(feature = "connection_filter")]
+use crate::listeners::AcceptAllFilter;
+
use crate::protocols::l4::ext::{set_dscp, set_tcp_fastopen_backlog};
use crate::protocols::l4::listener::Listener;
pub use crate::protocols::l4::stream::Stream;
+#[cfg(feature = "connection_filter")]
+use crate::protocols::GetSocketDigest;
use crate::protocols::TcpKeepalive;
#[cfg(unix)]
use crate::server::ListenFds;
@@ -271,16 +280,24 @@ async fn bind(addr: &ServerAddress) -> Result<Listener> {
pub struct ListenerEndpoint {
listen_addr: ServerAddress,
 listener: Arc<Listener>,
+ #[cfg(feature = "connection_filter")]
+ connection_filter: Arc<dyn ConnectionFilter>,
}
#[derive(Default)]
pub struct ListenerEndpointBuilder {
 listen_addr: Option<ServerAddress>,
+ #[cfg(feature = "connection_filter")]
+ connection_filter: Option<Arc<dyn ConnectionFilter>>,
}
impl ListenerEndpointBuilder {
pub fn new() -> ListenerEndpointBuilder {
- Self { listen_addr: None }
+ Self {
+ listen_addr: None,
+ #[cfg(feature = "connection_filter")]
+ connection_filter: None,
+ }
}
pub fn listen_addr(&mut self, addr: ServerAddress) -> &mut Self {
@@ -288,6 +305,12 @@ impl ListenerEndpointBuilder {
self
}
+ #[cfg(feature = "connection_filter")]
+ pub fn connection_filter(&mut self, filter: Arc<dyn ConnectionFilter>) -> &mut Self {
+ self.connection_filter = Some(filter);
+ self
+ }
+
#[cfg(unix)]
 pub async fn listen(self, fds: Option<ListenFds>) -> Result<ListenerEndpoint> {
let listen_addr = self
@@ -313,9 +336,16 @@ impl ListenerEndpointBuilder {
bind(&listen_addr).await?
};
+ #[cfg(feature = "connection_filter")]
+ let connection_filter = self
+ .connection_filter
+ .unwrap_or_else(|| Arc::new(AcceptAllFilter));
+
Ok(ListenerEndpoint {
listen_addr,
listener: Arc::new(listener),
+ #[cfg(feature = "connection_filter")]
+ connection_filter,
})
}
@@ -324,11 +354,19 @@ impl ListenerEndpointBuilder {
let listen_addr = self
.listen_addr
.expect("Tried to listen with no addr specified");
+
let listener = bind(&listen_addr).await?;
+ #[cfg(feature = "connection_filter")]
+ let connection_filter = self
+ .connection_filter
+ .unwrap_or_else(|| Arc::new(AcceptAllFilter));
+
Ok(ListenerEndpoint {
listen_addr,
listener: Arc::new(listener),
+ #[cfg(feature = "connection_filter")]
+ connection_filter,
})
}
}
@@ -361,13 +399,50 @@ impl ListenerEndpoint {
}
pub async fn accept(&self) -> Result {
- let mut stream = self
- .listener
- .accept()
- .await
- .or_err(AcceptError, "Fail to accept()")?;
- self.apply_stream_settings(&mut stream)?;
- Ok(stream)
+ #[cfg(feature = "connection_filter")]
+ {
+ loop {
+ let mut stream = self
+ .listener
+ .accept()
+ .await
+ .or_err(AcceptError, "Fail to accept()")?;
+
+ // Performance: the nested if-let borrows the socket digest and peer address in place, avoiding a clone/allocation per accepted connection
+ let should_accept = if let Some(digest) = stream.get_socket_digest() {
+ if let Some(peer_addr) = digest.peer_addr() {
+ self.connection_filter
+ .should_accept(peer_addr.as_inet())
+ .await
+ } else {
+ // No peer address available - accept by default
+ true
+ }
+ } else {
+ // No socket digest available - accept by default
+ true
+ };
+
+ if !should_accept {
+ debug!("Connection rejected by filter");
+ drop(stream);
+ continue;
+ }
+
+ self.apply_stream_settings(&mut stream)?;
+ return Ok(stream);
+ }
+ }
+ #[cfg(not(feature = "connection_filter"))]
+ {
+ let mut stream = self
+ .listener
+ .accept()
+ .await
+ .or_err(AcceptError, "Fail to accept()")?;
+ self.apply_stream_settings(&mut stream)?;
+ Ok(stream)
+ }
}
}
@@ -507,4 +582,146 @@ mod test {
// Verify the first listener still works
assert_eq!(listener1.as_str(), addr);
}
+
+ #[cfg(feature = "connection_filter")]
+ #[tokio::test]
+ async fn test_connection_filter_accept() {
+ use crate::listeners::ConnectionFilter;
+ use async_trait::async_trait;
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ #[derive(Debug)]
+ struct CountingFilter {
+ accept_count: Arc,
+ reject_count: Arc,
+ }
+
+ #[async_trait]
+ impl ConnectionFilter for CountingFilter {
+ async fn should_accept(&self, _addr: Option<&SocketAddr>) -> bool {
+ let count = self.accept_count.fetch_add(1, Ordering::SeqCst);
+ if count % 2 == 0 {
+ true
+ } else {
+ self.reject_count.fetch_add(1, Ordering::SeqCst);
+ false
+ }
+ }
+ }
+
+ let addr = "127.0.0.1:7300";
+ let accept_count = Arc::new(AtomicUsize::new(0));
+ let reject_count = Arc::new(AtomicUsize::new(0));
+
+ let filter = Arc::new(CountingFilter {
+ accept_count: accept_count.clone(),
+ reject_count: reject_count.clone(),
+ });
+
+ let mut builder = ListenerEndpoint::builder();
+ builder
+ .listen_addr(ServerAddress::Tcp(addr.into(), None))
+ .connection_filter(filter);
+
+ #[cfg(unix)]
+ let listener = builder.listen(None).await.unwrap();
+ #[cfg(windows)]
+ let listener = builder.listen().await.unwrap();
+
+ let listener_clone = listener.clone();
+ tokio::spawn(async move {
+ let _stream1 = listener_clone.accept().await.unwrap();
+ let _stream2 = listener_clone.accept().await.unwrap();
+ });
+
+ tokio::time::sleep(Duration::from_millis(10)).await;
+
+ let _conn1 = tokio::net::TcpStream::connect(addr).await.unwrap();
+ let _conn2 = tokio::net::TcpStream::connect(addr).await.unwrap();
+ let _conn3 = tokio::net::TcpStream::connect(addr).await.unwrap();
+
+ tokio::time::sleep(Duration::from_millis(50)).await;
+
+ assert_eq!(accept_count.load(Ordering::SeqCst), 3);
+ assert_eq!(reject_count.load(Ordering::SeqCst), 1);
+ }
+
+ #[cfg(feature = "connection_filter")]
+ #[tokio::test]
+ async fn test_connection_filter_blocks_all() {
+ use crate::listeners::ConnectionFilter;
+ use async_trait::async_trait;
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ #[derive(Debug)]
+ struct RejectAllFilter {
+ reject_count: Arc,
+ }
+
+ #[async_trait]
+ impl ConnectionFilter for RejectAllFilter {
+ async fn should_accept(&self, _addr: Option<&SocketAddr>) -> bool {
+ self.reject_count.fetch_add(1, Ordering::SeqCst);
+ false
+ }
+ }
+
+ let addr = "127.0.0.1:7301";
+ let reject_count = Arc::new(AtomicUsize::new(0));
+
+ let mut builder = ListenerEndpoint::builder();
+ builder
+ .listen_addr(ServerAddress::Tcp(addr.into(), None))
+ .connection_filter(Arc::new(RejectAllFilter {
+ reject_count: reject_count.clone(),
+ }));
+
+ #[cfg(unix)]
+ let listener = builder.listen(None).await.unwrap();
+ #[cfg(windows)]
+ let listener = builder.listen().await.unwrap();
+
+ let listener_clone = listener.clone();
+ let _accept_handle = tokio::spawn(async move {
+ // This will never return since all connections are rejected
+ let _ = listener_clone.accept().await;
+ });
+
+ tokio::time::sleep(Duration::from_millis(50)).await;
+
+ let mut handles = vec![];
+ for _ in 0..3 {
+ let handle = tokio::spawn(async move {
+ if let Ok(stream) = tokio::net::TcpStream::connect(addr).await {
+ drop(stream);
+ }
+ });
+ handles.push(handle);
+ }
+
+ for handle in handles {
+ let _ = handle.await;
+ }
+
+ // Wait for rejections to be counted with timeout
+ let start = tokio::time::Instant::now();
+ let timeout = Duration::from_secs(2);
+
+ loop {
+ let rejected = reject_count.load(Ordering::SeqCst);
+ if rejected >= 3 {
+ assert_eq!(rejected, 3, "Should reject exactly 3 connections");
+ break;
+ }
+
+ if start.elapsed() > timeout {
+ panic!(
+ "Timeout waiting for rejections, got {} expected 3",
+ rejected
+ );
+ }
+
+ tokio::time::sleep(Duration::from_millis(10)).await;
+ }
+ }
}
diff --git a/pingora-core/src/listeners/mod.rs b/pingora-core/src/listeners/mod.rs
index b8a45bf9..abc65ea1 100644
--- a/pingora-core/src/listeners/mod.rs
+++ b/pingora-core/src/listeners/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -13,31 +13,80 @@
// limitations under the License.
//! The listening endpoints (TCP and TLS) and their configurations.
+//!
+//! This module provides the infrastructure for setting up network listeners
+//! that accept incoming connections. It supports TCP, Unix domain sockets,
+//! and TLS endpoints.
+//!
+//! # Connection Filtering
+//!
+//! With the `connection_filter` feature enabled, this module also provides
+//! early connection filtering capabilities through the [`ConnectionFilter`] trait.
+//! This allows dropping unwanted connections at the TCP level before any
+//! expensive operations like TLS handshakes.
+//!
+//! ## Example with Connection Filtering
+//!
+//! ```rust,no_run
+//! # #[cfg(feature = "connection_filter")]
+//! # {
+//! use pingora_core::listeners::{Listeners, ConnectionFilter};
+//! use std::sync::Arc;
+//!
+//! // Create a custom filter
+//! let filter = Arc::new(MyCustomFilter::new());
+//!
+//! // Apply to listeners
+//! let mut listeners = Listeners::new();
+//! listeners.set_connection_filter(filter);
+//! listeners.add_tcp("0.0.0.0:8080");
+//! # }
+//! ```
mod l4;
+#[cfg(feature = "connection_filter")]
+pub mod connection_filter;
+
+#[cfg(feature = "connection_filter")]
+pub use connection_filter::{AcceptAllFilter, ConnectionFilter};
+
+#[cfg(not(feature = "connection_filter"))]
+#[derive(Debug, Clone)]
+pub struct AcceptAllFilter;
+
+#[cfg(not(feature = "connection_filter"))]
+pub trait ConnectionFilter: std::fmt::Debug + Send + Sync {
+ fn should_accept(&self, _addr: &std::net::SocketAddr) -> bool {
+ true
+ }
+}
+
+#[cfg(not(feature = "connection_filter"))]
+impl ConnectionFilter for AcceptAllFilter {
+ fn should_accept(&self, _addr: &std::net::SocketAddr) -> bool {
+ true
+ }
+}
#[cfg(feature = "any_tls")]
pub mod tls;
#[cfg(not(feature = "any_tls"))]
pub use crate::tls::listeners as tls;
-use crate::protocols::{
- l4::socket::SocketAddr,
- proxy_protocol,
- tls::TlsRef,
- Stream,
-};
+use crate::protocols::{l4::socket::SocketAddr, proxy_protocol, tls::TlsRef, Stream};
use log::{debug, warn};
-use pingora_error::{OrErr, ErrorType::*};
+use pingora_error::{ErrorType::*, OrErr};
/// Callback function type for ClientHello extraction
/// This allows external code (like moat) to generate fingerprints from ClientHello
-pub type ClientHelloCallback = Option)>;
+pub type ClientHelloCallback =
+ Option)>;
/// Global callback for ClientHello extraction
/// This is set by moat to generate fingerprints
-static CLIENT_HELLO_CALLBACK: std::sync::OnceLock> = std::sync::OnceLock::new();
+static CLIENT_HELLO_CALLBACK: std::sync::OnceLock> =
+ std::sync::OnceLock::new();
/// Set the ClientHello callback function
/// This is called by moat to register fingerprint generation
@@ -61,7 +110,10 @@ pub fn set_client_hello_callback(callback: ClientHelloCallback) {
}
/// Call the ClientHello callback if registered
-fn call_client_hello_callback(hello: &crate::protocols::tls::client_hello::ClientHello, peer_addr: Option) {
+fn call_client_hello_callback(
+ hello: &crate::protocols::tls::client_hello::ClientHello,
+ peer_addr: Option,
+) {
if let Some(cb_guard) = CLIENT_HELLO_CALLBACK.get() {
if let Ok(cb) = cb_guard.lock() {
if let Some(callback) = *cb {
@@ -82,7 +134,7 @@ use crate::server::ListenFds;
use async_trait::async_trait;
use pingora_error::Result;
-use std::{fs::Permissions, sync::Arc};
+use std::{any::Any, fs::Permissions, sync::Arc};
use l4::{ListenerEndpoint, Stream as L4Stream};
use tls::{Acceptor, TlsSettings};
@@ -102,6 +154,19 @@ pub trait TlsAccept {
async fn certificate_callback(&self, _ssl: &mut TlsRef) -> () {
// does nothing by default
}
+
+ /// This function is called after the TLS handshake is complete.
+ ///
+ /// Any value returned from this function (other than `None`) will be stored in the
+ /// `extension` field of `SslDigest`. This allows you to attach custom application-specific
+ /// data to the TLS connection, which will be accessible from the HTTP layer via the
+ /// `SslDigest` attached to the session digest.
+ async fn handshake_complete_callback(
+ &self,
+ _ssl: &TlsRef,
+ ) -> Option> {
+ None
+ }
}
pub type TlsAcceptCallbacks = Box;
@@ -109,6 +174,8 @@ pub type TlsAcceptCallbacks = Box;
struct TransportStackBuilder {
l4: ServerAddress,
tls: Option,
+ #[cfg(feature = "connection_filter")]
+ connection_filter: Option>,
}
impl TransportStackBuilder {
@@ -120,6 +187,11 @@ impl TransportStackBuilder {
builder.listen_addr(self.l4.clone());
+ #[cfg(feature = "connection_filter")]
+ if let Some(filter) = &self.connection_filter {
+ builder.connection_filter(filter.clone());
+ }
+
#[cfg(unix)]
let l4 = builder.listen(upgrade_listeners).await?;
@@ -196,10 +268,14 @@ impl UninitializedStream {
Err(e) => {
// Check if this is a connection error that should abort the handshake
match e.kind() {
- std::io::ErrorKind::ConnectionReset | std::io::ErrorKind::ConnectionAborted => {
+ std::io::ErrorKind::ConnectionReset
+ | std::io::ErrorKind::ConnectionAborted => {
debug!("Connection closed during ClientHello extraction: {:?}", e);
// Return error to abort the connection instead of proceeding to TLS handshake
- return Err(e).or_err(AcceptError, "Connection closed during ClientHello extraction");
+ return Err(e).or_err(
+ AcceptError,
+ "Connection closed during ClientHello extraction",
+ );
}
_ => {
debug!("Non-fatal error extracting ClientHello: {:?}", e);
@@ -237,10 +313,14 @@ impl UninitializedStream {
// Process the extracted ClientHello if available
if let Some(hello) = extracted_hello {
// Get peer address if available
- let peer_addr = wrapper.get_socket_digest()
+ let peer_addr = wrapper
+ .get_socket_digest()
.and_then(|d| d.peer_addr().cloned());
- debug!("Extracted ClientHello: SNI={:?}, ALPN={:?}, Peer={:?}", hello.sni, hello.alpn, peer_addr);
+ debug!(
+ "Extracted ClientHello: SNI={:?}, ALPN={:?}, Peer={:?}",
+ hello.sni, hello.alpn, peer_addr
+ );
// Call the callback to generate fingerprint (registered by moat)
call_client_hello_callback(&hello, peer_addr);
@@ -276,7 +356,8 @@ impl UninitializedStream {
return Ok(());
}
- let peer_addr = self.l4
+ let peer_addr = self
+ .l4
.get_socket_digest()
.and_then(|d| d.transport_peer_addr().cloned());
let peer_str = peer_addr
@@ -296,10 +377,7 @@ impl UninitializedStream {
proxy_addr, client_addr
);
} else {
- debug!(
- "PROXY protocol detected downstream client {}",
- client_addr
- );
+ debug!("PROXY protocol detected downstream client {}", client_addr);
}
}
} else if proxy_protocol::header_has_source_addr(&header) {
@@ -327,14 +405,19 @@ impl UninitializedStream {
/// The struct to hold one more multiple listening endpoints
pub struct Listeners {
stacks: Vec,
+ #[cfg(feature = "connection_filter")]
+ connection_filter: Option>,
}
impl Listeners {
/// Create a new [`Listeners`] with no listening endpoints.
pub fn new() -> Self {
- Listeners { stacks: vec![] }
+ Listeners {
+ stacks: vec![],
+ #[cfg(feature = "connection_filter")]
+ connection_filter: None,
+ }
}
-
/// Create a new [`Listeners`] with a TCP server endpoint from the given string.
pub fn tcp(addr: &str) -> Self {
let mut listeners = Self::new();
@@ -399,9 +482,28 @@ impl Listeners {
self.add_endpoint(addr, None);
}
+ /// Set a connection filter for all endpoints in this listener collection
+ #[cfg(feature = "connection_filter")]
+ pub fn set_connection_filter(&mut self, filter: Arc) {
+ log::debug!("Setting connection filter on Listeners");
+
+ // Store the filter for future endpoints
+ self.connection_filter = Some(filter.clone());
+
+ // Apply to existing stacks
+ for stack in &mut self.stacks {
+ stack.connection_filter = Some(filter.clone());
+ }
+ }
+
/// Add the given [`ServerAddress`] to `self` with the given [`TlsSettings`] if provided
pub fn add_endpoint(&mut self, l4: ServerAddress, tls: Option) {
- self.stacks.push(TransportStackBuilder { l4, tls })
+ self.stacks.push(TransportStackBuilder {
+ l4,
+ tls,
+ #[cfg(feature = "connection_filter")]
+ connection_filter: self.connection_filter.clone(),
+ })
}
pub(crate) async fn build(
@@ -432,6 +534,8 @@ impl Listeners {
#[cfg(test)]
mod test {
use super::*;
+ #[cfg(feature = "connection_filter")]
+ use std::sync::atomic::{AtomicUsize, Ordering};
#[cfg(feature = "any_tls")]
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
@@ -509,4 +613,53 @@ mod test {
let res = client.get(format!("https://{addr}")).send().await.unwrap();
assert_eq!(res.status(), reqwest::StatusCode::OK);
}
+
+ #[cfg(feature = "connection_filter")]
+ #[test]
+ fn test_connection_filter_inheritance() {
+ #[derive(Debug, Clone)]
+ struct TestFilter {
+ counter: Arc,
+ }
+
+ #[async_trait]
+ impl ConnectionFilter for TestFilter {
+ async fn should_accept(&self, _addr: Option<&std::net::SocketAddr>) -> bool {
+ self.counter.fetch_add(1, Ordering::SeqCst);
+ true
+ }
+ }
+
+ let mut listeners = Listeners::new();
+
+ // Add an endpoint before setting filter
+ listeners.add_tcp("127.0.0.1:7104");
+
+ // Set the connection filter
+ let filter = Arc::new(TestFilter {
+ counter: Arc::new(AtomicUsize::new(0)),
+ });
+ listeners.set_connection_filter(filter.clone());
+
+ // Add endpoints after setting filter
+ listeners.add_tcp("127.0.0.1:7105");
+ #[cfg(feature = "any_tls")]
+ {
+ // Only test TLS if the feature is enabled
+ if let Ok(tls_settings) = TlsSettings::intermediate(
+ &format!("{}/tests/keys/server.crt", env!("CARGO_MANIFEST_DIR")),
+ &format!("{}/tests/keys/key.pem", env!("CARGO_MANIFEST_DIR")),
+ ) {
+ listeners.add_tls_with_settings("127.0.0.1:7106", None, tls_settings);
+ }
+ }
+
+ // Verify all stacks have the filter (only when feature is enabled)
+ for stack in &listeners.stacks {
+ assert!(
+ stack.connection_filter.is_some(),
+ "All stacks should have the connection filter set"
+ );
+ }
+ }
}
diff --git a/pingora-core/src/listeners/tls/boringssl_openssl/mod.rs b/pingora-core/src/listeners/tls/boringssl_openssl/mod.rs
index ef1eeafb..a1e757da 100644
--- a/pingora-core/src/listeners/tls/boringssl_openssl/mod.rs
+++ b/pingora-core/src/listeners/tls/boringssl_openssl/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,8 +16,10 @@ use log::debug;
use pingora_error::{ErrorType, OrErr, Result};
use std::ops::{Deref, DerefMut};
+use crate::listeners::tls::boringssl_openssl::alpn::valid_alpn;
pub use crate::protocols::tls::ALPN;
use crate::protocols::{GetSocketDigest, IO};
+use crate::tls::ssl::AlpnError;
use crate::tls::ssl::{SslAcceptor, SslAcceptorBuilder, SslFiletype, SslMethod};
use crate::{
listeners::TlsAcceptCallbacks,
@@ -26,7 +28,6 @@ use crate::{
SslStream,
},
};
-
pub const TLS_CONF_ERR: ErrorType = ErrorType::Custom("TLSConfigError");
pub(crate) struct Acceptor {
@@ -113,6 +114,18 @@ impl TlsSettings {
.set_alpn_select_callback(alpn::prefer_h2),
ALPN::H1 => self.accept_builder.set_alpn_select_callback(alpn::h1_only),
ALPN::H2 => self.accept_builder.set_alpn_select_callback(alpn::h2_only),
+ ALPN::Custom(custom) => {
+ self.accept_builder
+ .set_alpn_select_callback(move |_, alpn_in| {
+ if !valid_alpn(alpn_in) {
+ return Err(AlpnError::NOACK);
+ }
+ match alpn::select_protocol(alpn_in, custom.protocol()) {
+ Some(p) => Ok(p),
+ None => Err(AlpnError::NOACK),
+ }
+ });
+ }
}
}
@@ -138,7 +151,9 @@ impl Acceptor {
/// Perform TLS handshake with ClientHello extraction
/// This wraps the stream with ClientHelloWrapper before TLS handshake
#[cfg(unix)]
- pub async fn tls_handshake_with_client_hello(
+ pub async fn tls_handshake_with_client_hello<
+ S: IO + GetSocketDigest + std::os::unix::io::AsRawFd + 'static,
+ >(
&self,
stream: S,
) -> Result>> {
@@ -150,10 +165,14 @@ impl Acceptor {
// Extract ClientHello before TLS handshake (sync version blocks until data is available)
if let Ok(Some(hello)) = wrapper.extract_client_hello() {
// Get peer address if available
- let peer_addr = wrapper.get_socket_digest()
+ let peer_addr = wrapper
+ .get_socket_digest()
.and_then(|d| d.peer_addr().cloned());
- debug!("Extracted ClientHello: SNI={:?}, ALPN={:?}, Peer={:?}", hello.sni, hello.alpn, peer_addr);
+ debug!(
+ "Extracted ClientHello: SNI={:?}, ALPN={:?}, Peer={:?}",
+ hello.sni, hello.alpn, peer_addr
+ );
// Generate fingerprint from raw ClientHello bytes
// This will be handled by moat's tls_client_hello module
@@ -173,7 +192,7 @@ mod alpn {
use super::*;
use crate::tls::ssl::{select_next_proto, AlpnError, SslRef};
- fn valid_alpn(alpn_in: &[u8]) -> bool {
+ pub(super) fn valid_alpn(alpn_in: &[u8]) -> bool {
if alpn_in.is_empty() {
return false;
}
@@ -181,6 +200,27 @@ mod alpn {
true
}
+ /// Finds the first protocol in the client-offered ALPN list that matches the given protocol.
+ ///
+ /// This is a helper for ALPN negotiation. It iterates over the client's protocol list
+ /// (in wire format) and returns the first entry that matches `proto`.
+ /// The returned reference always points into `client_protocols`, so lifetimes are correct.
+ pub(super) fn select_protocol<'a>(
+ client_protocols: &'a [u8],
+ proto: &[u8],
+ ) -> Option<&'a [u8]> {
+ let mut bytes = client_protocols;
+ while !bytes.is_empty() {
+ let len = bytes[0] as usize;
+ bytes = &bytes[1..];
+ if len == proto.len() && &bytes[..len] == proto {
+ return Some(&bytes[..len]);
+ }
+ bytes = &bytes[len..];
+ }
+ None
+ }
+
// A standard implementation provided by the SSL lib is used below
pub fn prefer_h2<'a>(_ssl: &mut SslRef, alpn_in: &'a [u8]) -> Result<&'a [u8], AlpnError> {
diff --git a/pingora-core/src/listeners/tls/mod.rs b/pingora-core/src/listeners/tls/mod.rs
index 887293b3..c345073e 100644
--- a/pingora-core/src/listeners/tls/mod.rs
+++ b/pingora-core/src/listeners/tls/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/listeners/tls/rustls/mod.rs b/pingora-core/src/listeners/tls/rustls/mod.rs
index 40babeb6..0ca94d51 100644
--- a/pingora-core/src/listeners/tls/rustls/mod.rs
+++ b/pingora-core/src/listeners/tls/rustls/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@ use log::debug;
use pingora_error::ErrorType::InternalError;
use pingora_error::{Error, OrErr, Result};
use pingora_rustls::load_certs_and_key_files;
+use pingora_rustls::ClientCertVerifier;
use pingora_rustls::ServerConfig;
use pingora_rustls::{version, TlsAcceptor as RusTlsAcceptor};
@@ -30,6 +31,7 @@ pub struct TlsSettings {
alpn_protocols: Option>>,
cert_path: String,
key_path: String,
+ client_cert_verifier: Option>,
}
pub struct Acceptor {
@@ -54,15 +56,19 @@ impl TlsSettings {
)
};
- // TODO - Add support for client auth & custom CA support
- let mut config =
- ServerConfig::builder_with_protocol_versions(&[&version::TLS12, &version::TLS13])
- .with_no_client_auth()
- .with_single_cert(certs, key)
- .explain_err(InternalError, |e| {
- format!("Failed to create server listener config: {e}")
- })
- .unwrap();
+ let builder =
+ ServerConfig::builder_with_protocol_versions(&[&version::TLS12, &version::TLS13]);
+ let builder = if let Some(verifier) = self.client_cert_verifier {
+ builder.with_client_cert_verifier(verifier)
+ } else {
+ builder.with_no_client_auth()
+ };
+ let mut config = builder
+ .with_single_cert(certs, key)
+ .explain_err(InternalError, |e| {
+ format!("Failed to create server listener config: {e}")
+ })
+ .unwrap();
if let Some(alpn_protocols) = self.alpn_protocols {
config.alpn_protocols = alpn_protocols;
@@ -80,10 +86,15 @@ impl TlsSettings {
self.set_alpn(ALPN::H2H1);
}
- fn set_alpn(&mut self, alpn: ALPN) {
+ pub fn set_alpn(&mut self, alpn: ALPN) {
self.alpn_protocols = Some(alpn.to_wire_protocols());
}
+ /// Configure mTLS by providing a rustls client certificate verifier.
+ pub fn set_client_cert_verifier(&mut self, verifier: Arc) {
+ self.client_cert_verifier = Some(verifier);
+ }
+
pub fn intermediate(cert_path: &str, key_path: &str) -> Result
where
Self: Sized,
@@ -92,6 +103,7 @@ impl TlsSettings {
alpn_protocols: None,
cert_path: cert_path.to_string(),
key_path: key_path.to_string(),
+ client_cert_verifier: None,
})
}
diff --git a/pingora-core/src/listeners/tls/s2n/mod.rs b/pingora-core/src/listeners/tls/s2n/mod.rs
index 2598e829..ed689445 100644
--- a/pingora-core/src/listeners/tls/s2n/mod.rs
+++ b/pingora-core/src/listeners/tls/s2n/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/modules/http/compression.rs b/pingora-core/src/modules/http/compression.rs
index 1906bd66..fa64d3c1 100644
--- a/pingora-core/src/modules/http/compression.rs
+++ b/pingora-core/src/modules/http/compression.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/modules/http/grpc_web.rs b/pingora-core/src/modules/http/grpc_web.rs
index b248e233..fd1d4ad2 100644
--- a/pingora-core/src/modules/http/grpc_web.rs
+++ b/pingora-core/src/modules/http/grpc_web.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/modules/http/mod.rs b/pingora-core/src/modules/http/mod.rs
index d220e6b0..04084258 100644
--- a/pingora-core/src/modules/http/mod.rs
+++ b/pingora-core/src/modules/http/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/modules/mod.rs b/pingora-core/src/modules/mod.rs
index 359b9ef4..c4a1c4a6 100644
--- a/pingora-core/src/modules/mod.rs
+++ b/pingora-core/src/modules/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/protocols/client_hello_wrapper.rs b/pingora-core/src/protocols/client_hello_wrapper.rs
index e61d80a9..46efbde6 100644
--- a/pingora-core/src/protocols/client_hello_wrapper.rs
+++ b/pingora-core/src/protocols/client_hello_wrapper.rs
@@ -168,10 +168,11 @@ impl ClientHelloWrapper {
Err(e) => {
wrapper.hello_extracted = true;
match e.kind() {
- io::ErrorKind::ConnectionReset | io::ErrorKind::ConnectionAborted => {
+ io::ErrorKind::ConnectionReset
+ | io::ErrorKind::ConnectionAborted => {
Poll::Ready(Err(e))
}
- _ => Poll::Ready(Ok(None))
+ _ => Poll::Ready(Ok(None)),
}
}
}
@@ -180,10 +181,9 @@ impl ClientHelloWrapper {
wrapper.hello_extracted = true;
match e.kind() {
io::ErrorKind::WouldBlock => Poll::Pending,
- io::ErrorKind::ConnectionReset | io::ErrorKind::ConnectionAborted => {
- Poll::Ready(Err(e))
- }
- _ => Poll::Ready(Ok(None))
+ io::ErrorKind::ConnectionReset
+ | io::ErrorKind::ConnectionAborted => Poll::Ready(Err(e)),
+ _ => Poll::Ready(Ok(None)),
}
}
Poll::Pending => Poll::Pending,
@@ -373,4 +373,3 @@ mod tests {
assert_eq!(inner.into_inner(), data);
}
}
-
diff --git a/pingora-core/src/protocols/digest.rs b/pingora-core/src/protocols/digest.rs
index 64fe15e9..405c6698 100644
--- a/pingora-core/src/protocols/digest.rs
+++ b/pingora-core/src/protocols/digest.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/protocols/http/body_buffer.rs b/pingora-core/src/protocols/http/body_buffer.rs
index f3c46df9..a122df20 100644
--- a/pingora-core/src/protocols/http/body_buffer.rs
+++ b/pingora-core/src/protocols/http/body_buffer.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@ use bytes::{Bytes, BytesMut};
/// A buffer with size limit. When the total amount of data written to the buffer is below the limit
/// all the data will be held in the buffer. Otherwise, the buffer will report to be truncated.
-pub(crate) struct FixedBuffer {
+pub struct FixedBuffer {
buffer: BytesMut,
capacity: usize,
truncated: bool,
diff --git a/pingora-core/src/protocols/http/bridge/grpc_web.rs b/pingora-core/src/protocols/http/bridge/grpc_web.rs
index 63d19727..8a091d27 100644
--- a/pingora-core/src/protocols/http/bridge/grpc_web.rs
+++ b/pingora-core/src/protocols/http/bridge/grpc_web.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/protocols/http/bridge/mod.rs b/pingora-core/src/protocols/http/bridge/mod.rs
index fa1f58ca..6d295d0b 100644
--- a/pingora-core/src/protocols/http/bridge/mod.rs
+++ b/pingora-core/src/protocols/http/bridge/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/protocols/http/client.rs b/pingora-core/src/protocols/http/client.rs
index 2d1278d9..54fc367f 100644
--- a/pingora-core/src/protocols/http/client.rs
+++ b/pingora-core/src/protocols/http/client.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,21 +17,23 @@ use pingora_error::Result;
use pingora_http::{RequestHeader, ResponseHeader};
use std::time::Duration;
-use super::v1::client::HttpSession as Http1Session;
use super::v2::client::Http2Session;
+use super::{custom::client::Session, v1::client::HttpSession as Http1Session};
use crate::protocols::{Digest, SocketAddr, Stream};
/// A type for Http client session. It can be either an Http1 connection or an Http2 stream.
-pub enum HttpSession {
+pub enum HttpSession {
H1(Http1Session),
H2(Http2Session),
+ Custom(S),
}
-impl HttpSession {
+impl HttpSession {
pub fn as_http1(&self) -> Option<&Http1Session> {
match self {
Self::H1(s) => Some(s),
Self::H2(_) => None,
+ Self::Custom(_) => None,
}
}
@@ -39,8 +41,26 @@ impl HttpSession {
match self {
Self::H1(_) => None,
Self::H2(s) => Some(s),
+ Self::Custom(_) => None,
}
}
+
+ pub fn as_custom(&self) -> Option<&S> {
+ match self {
+ Self::H1(_) => None,
+ Self::H2(_) => None,
+ Self::Custom(c) => Some(c),
+ }
+ }
+
+ pub fn as_custom_mut(&mut self) -> Option<&mut S> {
+ match self {
+ Self::H1(_) => None,
+ Self::H2(_) => None,
+ Self::Custom(c) => Some(c),
+ }
+ }
+
/// Write the request header to the server
/// After the request header is sent. The caller can either start reading the response or
/// sending request body if any.
@@ -51,6 +71,7 @@ impl HttpSession {
Ok(())
}
HttpSession::H2(h2) => h2.write_request_header(req, false),
+ HttpSession::Custom(c) => c.write_request_header(req, false).await,
}
}
@@ -63,6 +84,7 @@ impl HttpSession {
Ok(())
}
HttpSession::H2(h2) => h2.write_request_body(data, end).await,
+ HttpSession::Custom(c) => c.write_request_body(data, end).await,
}
}
@@ -74,6 +96,7 @@ impl HttpSession {
Ok(())
}
HttpSession::H2(h2) => h2.finish_request_body(),
+ HttpSession::Custom(c) => c.finish_request_body().await,
}
}
@@ -84,6 +107,7 @@ impl HttpSession {
match self {
HttpSession::H1(h1) => h1.read_timeout = timeout,
HttpSession::H2(h2) => h2.read_timeout = timeout,
+ HttpSession::Custom(c) => c.set_read_timeout(timeout),
}
}
@@ -94,6 +118,7 @@ impl HttpSession {
match self {
HttpSession::H1(h1) => h1.write_timeout = timeout,
HttpSession::H2(h2) => h2.write_timeout = timeout,
+ HttpSession::Custom(c) => c.set_write_timeout(timeout),
}
}
@@ -107,6 +132,7 @@ impl HttpSession {
Ok(())
}
HttpSession::H2(h2) => h2.read_response_header().await,
+ HttpSession::Custom(c) => c.read_response_header().await,
}
}
@@ -117,6 +143,7 @@ impl HttpSession {
match self {
HttpSession::H1(h1) => h1.read_body_bytes().await,
HttpSession::H2(h2) => h2.read_response_body().await,
+ HttpSession::Custom(c) => c.read_response_body().await,
}
}
@@ -125,6 +152,7 @@ impl HttpSession {
match self {
HttpSession::H1(h1) => h1.is_body_done(),
HttpSession::H2(h2) => h2.response_finished(),
+ HttpSession::Custom(c) => c.response_finished(),
}
}
@@ -135,6 +163,7 @@ impl HttpSession {
match self {
Self::H1(s) => s.shutdown().await,
Self::H2(s) => s.shutdown(),
+ Self::Custom(c) => c.shutdown(0, "shutdown").await,
}
}
@@ -145,6 +174,7 @@ impl HttpSession {
match self {
Self::H1(s) => s.resp_header(),
Self::H2(s) => s.response_header(),
+ Self::Custom(c) => c.response_header(),
}
}
@@ -156,6 +186,7 @@ impl HttpSession {
match self {
Self::H1(s) => Some(s.digest()),
Self::H2(s) => s.digest(),
+ Self::Custom(c) => c.digest(),
}
}
@@ -166,6 +197,7 @@ impl HttpSession {
match self {
Self::H1(s) => Some(s.digest_mut()),
Self::H2(s) => s.digest_mut(),
+ Self::Custom(s) => s.digest_mut(),
}
}
@@ -174,6 +206,7 @@ impl HttpSession {
match self {
Self::H1(s) => s.server_addr(),
Self::H2(s) => s.server_addr(),
+ Self::Custom(s) => s.server_addr(),
}
}
@@ -182,6 +215,7 @@ impl HttpSession {
match self {
Self::H1(s) => s.client_addr(),
Self::H2(s) => s.client_addr(),
+ Self::Custom(s) => s.client_addr(),
}
}
@@ -191,6 +225,7 @@ impl HttpSession {
match self {
Self::H1(s) => Some(s.stream()),
Self::H2(_) => None,
+ Self::Custom(_) => None,
}
}
}
diff --git a/pingora-core/src/protocols/http/compression/brotli.rs b/pingora-core/src/protocols/http/compression/brotli.rs
index c4bb36a5..fa8a3bae 100644
--- a/pingora-core/src/protocols/http/compression/brotli.rs
+++ b/pingora-core/src/protocols/http/compression/brotli.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/protocols/http/compression/gzip.rs b/pingora-core/src/protocols/http/compression/gzip.rs
index 46678df6..97f7b636 100644
--- a/pingora-core/src/protocols/http/compression/gzip.rs
+++ b/pingora-core/src/protocols/http/compression/gzip.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/protocols/http/compression/mod.rs b/pingora-core/src/protocols/http/compression/mod.rs
index 2f86efce..9e84ab3c 100644
--- a/pingora-core/src/protocols/http/compression/mod.rs
+++ b/pingora-core/src/protocols/http/compression/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -333,6 +333,8 @@ pub enum Algorithm {
Gzip,
Brotli,
Zstd,
+ Dcb,
+ Dcz,
// TODO: Identity,
// TODO: Deflate
Other, // anything unknown
@@ -344,6 +346,8 @@ impl Algorithm {
Algorithm::Gzip => "gzip",
Algorithm::Brotli => "br",
Algorithm::Zstd => "zstd",
+ Algorithm::Dcb => "dcb",
+ Algorithm::Dcz => "dcz",
Algorithm::Any => "*",
Algorithm::Other => "other",
}
@@ -390,6 +394,10 @@ impl From<&str> for Algorithm {
Algorithm::Brotli
} else if coding == UniCase::ascii("zstd") {
Algorithm::Zstd
+ } else if coding == UniCase::ascii("dcb") {
+ Algorithm::Dcb
+ } else if coding == UniCase::ascii("dcz") {
+ Algorithm::Dcz
} else if s.is_empty() {
Algorithm::Any
} else {
@@ -614,6 +622,36 @@ fn test_decide_action() {
let mut header = ResponseHeader::build(200, None).unwrap();
header.insert_header("content-encoding", "gzip").unwrap();
assert_eq!(decide_action(&header, &[Brotli, Gzip]), Noop);
+
+ // dcb passthrough: client accepts dcb, response has dcb
+ let mut header = ResponseHeader::build(200, None).unwrap();
+ header.insert_header("content-encoding", "dcb").unwrap();
+ assert_eq!(decide_action(&header, &[Dcb, Brotli]), Noop);
+
+ // dcz passthrough: client accepts dcz, response has dcz
+ let mut header = ResponseHeader::build(200, None).unwrap();
+ header.insert_header("content-encoding", "dcz").unwrap();
+ assert_eq!(decide_action(&header, &[Dcz, Zstd]), Noop);
+
+ // Client wants dcz but response has brotli, decompress brotli
+ let mut header = ResponseHeader::build(200, None).unwrap();
+ header.insert_header("content-encoding", "br").unwrap();
+ assert_eq!(decide_action(&header, &[Dcz]), Decompress(Brotli));
+
+ // Client wants dcz but response has zstd, decompress zstd
+ let mut header = ResponseHeader::build(200, None).unwrap();
+ header.insert_header("content-encoding", "zstd").unwrap();
+ assert_eq!(decide_action(&header, &[Dcz]), Decompress(Zstd));
+
+ // Client wants dcb but response has gzip, decompress gzip
+ let mut header = ResponseHeader::build(200, None).unwrap();
+ header.insert_header("content-encoding", "gzip").unwrap();
+ assert_eq!(decide_action(&header, &[Dcb]), Decompress(Gzip));
+
+ // Client wants dcb but response has brotli, decompress brotli
+ let mut header = ResponseHeader::build(200, None).unwrap();
+ header.insert_header("content-encoding", "br").unwrap();
+ assert_eq!(decide_action(&header, &[Dcb]), Decompress(Brotli));
}
use once_cell::sync::Lazy;
diff --git a/pingora-core/src/protocols/http/compression/zstd.rs b/pingora-core/src/protocols/http/compression/zstd.rs
index b8a45b41..39465918 100644
--- a/pingora-core/src/protocols/http/compression/zstd.rs
+++ b/pingora-core/src/protocols/http/compression/zstd.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/protocols/http/conditional_filter.rs b/pingora-core/src/protocols/http/conditional_filter.rs
index 49daebc9..10aee2f2 100644
--- a/pingora-core/src/protocols/http/conditional_filter.rs
+++ b/pingora-core/src/protocols/http/conditional_filter.rs
@@ -1,4 +1,4 @@
-// Copyright 2025 Cloudflare, Inc.
+// Copyright 2026 Cloudflare, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/pingora-core/src/protocols/http/custom/client.rs b/pingora-core/src/protocols/http/custom/client.rs
new file mode 100644
index 00000000..994ddf04
--- /dev/null
+++ b/pingora-core/src/protocols/http/custom/client.rs
@@ -0,0 +1,176 @@
+// Copyright 2026 Cloudflare, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::time::Duration;
+
+use async_trait::async_trait;
+use bytes::Bytes;
+use futures::Stream;
+use http::HeaderMap;
+use pingora_error::Result;
+use pingora_http::{RequestHeader, ResponseHeader};
+
+use crate::protocols::{l4::socket::SocketAddr, Digest, UniqueIDType};
+
+use super::{BodyWrite, CustomMessageWrite};
+
+#[doc(hidden)]
+#[async_trait]
+pub trait Session: Send + Sync + Unpin + 'static {
+ async fn write_request_header(&mut self, req: Box<RequestHeader>, end: bool) -> Result<()>;
+
+ async fn write_request_body(&mut self, data: Bytes, end: bool) -> Result<()>;
+
+ async fn finish_request_body(&mut self) -> Result<()>;
+
+ fn set_read_timeout(&mut self, timeout: Option<Duration>);
+
+ fn set_write_timeout(&mut self, timeout: Option<Duration>);
+
+ async fn read_response_header(&mut self) -> Result<()>;
+
+ async fn read_response_body(&mut self) -> Result<Option<Bytes>>;
+
+ fn response_finished(&self) -> bool;
+
+ async fn shutdown(&mut self, code: u32, ctx: &str);
+
+ fn response_header(&self) -> Option<&ResponseHeader>;
+
+ fn was_upgraded(&self) -> bool;
+
+ fn digest(&self) -> Option<&Digest>;
+
+ fn digest_mut(&mut self) -> Option<&mut Digest>;
+
+ fn server_addr(&self) -> Option<&SocketAddr>;
+
+ fn client_addr(&self) -> Option<&SocketAddr>;
+
+ async fn read_trailers(&mut self) -> Result<Option<HeaderMap>>;
+
+ fn fd(&self) -> UniqueIDType;
+
+ async fn check_response_end_or_error(&mut self, headers: bool) -> Result<bool>;
+
+ fn take_request_body_writer(&mut self) -> Option<Box<dyn BodyWrite>>;
+
+ async fn finish_custom(&mut self) -> Result<()>;
+
+ fn take_custom_message_reader(
+ &mut self,
+ ) -> Option<Box<dyn Stream<Item = Result<Bytes>> + Unpin + Send + Sync + 'static>>;
+
+ async fn drain_custom_messages(&mut self) -> Result<()>;
+
+ fn take_custom_message_writer(&mut self) -> Option<Box<dyn CustomMessageWrite>>;
+}
+
+#[doc(hidden)]
+#[async_trait]
+impl Session for () {
+ async fn write_request_header(&mut self, _req: Box<RequestHeader>, _end: bool) -> Result<()> {
+ unreachable!("client session: write_request_header")
+ }
+
+ async fn write_request_body(&mut self, _data: Bytes, _end: bool) -> Result<()> {
+ unreachable!("client session: write_request_body")
+ }
+
+ async fn finish_request_body(&mut self) -> Result<()> {
+ unreachable!("client session: finish_request_body")
+ }
+
+ fn set_read_timeout(&mut self, _timeout: Option<Duration>) {
+ unreachable!("client session: set_read_timeout")
+ }
+
+ fn set_write_timeout(&mut self, _timeout: Option<Duration>) {
+ unreachable!("client session: set_write_timeout")
+ }
+
+ async fn read_response_header(&mut self) -> Result<()> {
+ unreachable!("client session: read_response_header")
+ }
+
+ async fn read_response_body(&mut self) -> Result<Option<Bytes>> {
+ unreachable!("client session: read_response_body")
+ }
+
+ fn response_finished(&self) -> bool {
+ unreachable!("client session: response_finished")
+ }
+
+ async fn shutdown(&mut self, _code: u32, _ctx: &str) {
+ unreachable!("client session: shutdown")
+ }
+
+ fn response_header(&self) -> Option<&ResponseHeader> {
+ unreachable!("client session: response_header")
+ }
+
+ fn was_upgraded(&self) -> bool {
+ unreachable!("client session: was upgraded")
+ }
+
+ fn digest(&self) -> Option<&Digest> {
+ unreachable!("client session: digest")
+ }
+
+ fn digest_mut(&mut self) -> Option<&mut Digest> {
+ unreachable!("client session: digest_mut")
+ }
+
+ fn server_addr(&self) -> Option<&SocketAddr> {
+ unreachable!("client session: server_addr")
+ }
+
+ fn client_addr(&self) -> Option<&SocketAddr> {
+ unreachable!("client session: client_addr")
+ }
+
+ async fn finish_custom(&mut self) -> Result<()> {
+ unreachable!("client session: finish_custom")
+ }
+
+ async fn read_trailers(&mut self) -> Result<Option<HeaderMap>> {
+ unreachable!("client session: read_trailers")
+ }
+
+ fn fd(&self) -> UniqueIDType {
+ unreachable!("client session: fd")
+ }
+
+ async fn check_response_end_or_error(&mut self, _headers: bool) -> Result<bool> {
+ unreachable!("client session: check_response_end_or_error")
+ }
+
+ fn take_custom_message_reader(
+ &mut self,
+ ) -> Option<Box<dyn Stream<Item = Result<Bytes>> + Unpin + Send + Sync + 'static>> {
+ unreachable!("client session: get_custom_message_reader")
+ }
+
+ async fn drain_custom_messages(&mut self) -> Result<()> {
+ unreachable!("client session: drain_custom_messages")
+ }
+
+ fn take_custom_message_writer(&mut self) -> Option<Box<dyn CustomMessageWrite>> {
+ unreachable!("client session: get_custom_message_writer")
+ }
+
+ fn take_request_body_writer(&mut self) -> Option<Box<dyn BodyWrite>> {
+ unreachable!("client session: take_request_body_writer")
+ }
+}
diff --git a/pingora-core/src/protocols/http/custom/mod.rs b/pingora-core/src/protocols/http/custom/mod.rs
new file mode 100644
index 00000000..cac4a755
--- /dev/null
+++ b/pingora-core/src/protocols/http/custom/mod.rs
@@ -0,0 +1,90 @@
+// Copyright 2026 Cloudflare, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::time::Duration;
+
+use async_trait::async_trait;
+use bytes::Bytes;
+use futures::Stream;
+use log::debug;
+use pingora_error::Result;
+use tokio_stream::StreamExt;
+
+pub mod client;
+pub mod server;
+
+pub const CUSTOM_MESSAGE_QUEUE_SIZE: usize = 128;
+
+ pub fn is_informational_except_101<T: PartialOrd<u16> + PartialEq<u16>>(code: T) -> bool {
+ // excluding `101 Switching Protocols`, because it's not followed by any other
+ // response and it's a final
+ // The WebSocket Protocol https://datatracker.ietf.org/doc/html/rfc6455
+ code > 99 && code < 200 && code != 101
+}
+
+#[async_trait]
+pub trait CustomMessageWrite: Send + Sync + Unpin + 'static {
+ fn set_write_timeout(&mut self, timeout: Option<Duration>);
+ async fn write_custom_message(&mut self, msg: Bytes) -> Result<()>;
+ async fn finish_custom(&mut self) -> Result<()>;
+}
+
+#[doc(hidden)]
+#[async_trait]
+impl CustomMessageWrite for () {
+ fn set_write_timeout(&mut self, _timeout: Option<Duration>) {}
+
+ async fn write_custom_message(&mut self, msg: Bytes) -> Result<()> {
+ debug!("write_custom_message: {:?}", msg);
+ Ok(())
+ }
+
+ async fn finish_custom(&mut self) -> Result<()> {
+ debug!("finish_custom");
+ Ok(())
+ }
+}
+
+#[async_trait]
+pub trait BodyWrite: Send + Sync + Unpin + 'static {
+ async fn write_all_buf(&mut self, data: &mut Bytes) -> Result<()>;
+ async fn finish(&mut self) -> Result<()>;
+ async fn cleanup(&mut self) -> Result<()>;
+ fn upgrade_body_writer(&mut self);
+}
+
+pub async fn drain_custom_messages(
+ reader: Option<Box<dyn Stream<Item = Result<Bytes>> + Unpin + Send + Sync + 'static>>,
+) -> Result<()> {
+ let Some(mut reader) = reader else {
+ return Ok(());
+ };
+
+ while let Some(res) = reader.next().await {
+ let msg = res?;
+ debug!("consume_custom_messages: {msg:?}");
+ }
+
+ Ok(())
+}
+
+#[macro_export]
+macro_rules! custom_session {
+ ($base_obj:ident . $($method_tokens:tt)+) => {
+ if let Some(custom_session) = $base_obj.as_custom_mut() {
+ #[allow(clippy::semicolon_if_nothing_returned)]
+ custom_session.$($method_tokens)+;
+ }
+ };
+}
diff --git a/pingora-core/src/protocols/http/custom/server.rs b/pingora-core/src/protocols/http/custom/server.rs
new file mode 100644
index 00000000..fc9e4c48
--- /dev/null
+++ b/pingora-core/src/protocols/http/custom/server.rs
@@ -0,0 +1,299 @@
+// Copyright 2026 Cloudflare, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::time::Duration;
+
+use async_trait::async_trait;
+use bytes::Bytes;
+use futures::Stream;
+use http::HeaderMap;
+use pingora_error::Result;
+use pingora_http::{RequestHeader, ResponseHeader};
+
+use crate::protocols::{http::HttpTask, l4::socket::SocketAddr, Digest};
+
+use super::CustomMessageWrite;
+
+#[doc(hidden)]
+#[async_trait]
+pub trait Session: Send + Sync + Unpin + 'static {
+ fn req_header(&self) -> &RequestHeader;
+
+ fn req_header_mut(&mut self) -> &mut RequestHeader;
+
+ async fn read_body_bytes(&mut self) -> Result<Option<Bytes>>;
+
+ async fn drain_request_body(&mut self) -> Result<()>;
+
+ async fn write_response_header(&mut self, resp: Box<ResponseHeader>, end: bool) -> Result<()>;
+
+ async fn write_response_header_ref(&mut self, resp: &ResponseHeader, end: bool) -> Result<()>;
+
+ async fn write_body(&mut self, data: Bytes, end: bool) -> Result<()>;
+
+ async fn write_trailers(&mut self, trailers: HeaderMap) -> Result<()>;
+
+ async fn response_duplex_vec(&mut self, tasks: Vec<HttpTask>) -> Result<bool>;
+
+ fn set_read_timeout(&mut self, timeout: Option<Duration>);
+
+ fn get_read_timeout(&self) -> Option<Duration>;
+
+ fn set_write_timeout(&mut self, timeout: Option<Duration>);
+
+ fn get_write_timeout(&self) -> Option<Duration>;
+
+ fn set_total_drain_timeout(&mut self, timeout: Option<Duration>);
+
+ fn get_total_drain_timeout(&self) -> Option<Duration>;
+
+ fn request_summary(&self) -> String;
+
+ fn response_written(&self) -> Option<&ResponseHeader>;
+
+ async fn shutdown(&mut self, code: u32, ctx: &str);
+
+ fn is_body_done(&mut self) -> bool;
+
+ async fn finish(&mut self) -> Result<()>;
+
+ fn is_body_empty(&mut self) -> bool;
+
+ async fn read_body_or_idle(&mut self, no_body_expected: bool) -> Result<Option<Bytes>>;
+
+ fn body_bytes_sent(&self) -> usize;
+
+ fn body_bytes_read(&self) -> usize;
+
+ fn digest(&self) -> Option<&Digest>;
+
+ fn digest_mut(&mut self) -> Option<&mut Digest>;
+
+ fn client_addr(&self) -> Option<&SocketAddr>;
+
+ fn server_addr(&self) -> Option<&SocketAddr>;
+
+ fn pseudo_raw_h1_request_header(&self) -> Bytes;
+
+ fn enable_retry_buffering(&mut self);
+
+ fn retry_buffer_truncated(&self) -> bool;
+
+ fn get_retry_buffer(&self) -> Option<Bytes>;
+
+ async fn finish_custom(&mut self) -> Result<()>;
+
+ fn take_custom_message_reader(
+ &mut self,
+ ) -> Option<Box<dyn Stream<Item = Result<Bytes>> + Unpin + Send + Sync + 'static>>;
+
+ fn restore_custom_message_reader(
+ &mut self,
+ reader: Box<dyn Stream<Item = Result<Bytes>> + Unpin + Send + Sync + 'static>,
+ ) -> Result<()>;
+
+ fn take_custom_message_writer(&mut self) -> Option<Box<dyn CustomMessageWrite>>;
+
+ fn restore_custom_message_writer(&mut self, writer: Box<dyn CustomMessageWrite>) -> Result<()>;
+
+ /// Whether this request is for upgrade (e.g., websocket).
+ ///
+ /// Returns `true` if the request has HTTP/1.1 version and contains an Upgrade header.
+ fn is_upgrade_req(&self) -> bool {
+ false
+ }
+
+ /// Whether this session was fully upgraded (completed Upgrade handshake).
+ ///
+ /// Returns `true` if the request was an upgrade request and a 101 response was sent.
+ fn was_upgraded(&self) -> bool {
+ false
+ }
+}
+
+#[doc(hidden)]
+#[async_trait]
+impl Session for () {
+ fn req_header(&self) -> &RequestHeader {
+ unreachable!("server session: req_header")
+ }
+
+ fn req_header_mut(&mut self) -> &mut RequestHeader {
+ unreachable!("server session: req_header_mut")
+ }
+
+ async fn read_body_bytes(&mut self) -> Result<Option<Bytes>> {
+ unreachable!("server session: read_body_bytes")
+ }
+
+ async fn drain_request_body(&mut self) -> Result<()> {
+ unreachable!("server session: drain_request_body")
+ }
+
+ async fn write_response_header(
+ &mut self,
+ _resp: Box<ResponseHeader>,
+ _end: bool,
+ ) -> Result<()> {
+ unreachable!("server session: write_response_header")
+ }
+
+ async fn write_response_header_ref(
+ &mut self,
+ _resp: &ResponseHeader,
+ _end: bool,
+ ) -> Result<()> {
+ unreachable!("server session: write_response_header_ref")
+ }
+
+ async fn write_body(&mut self, _data: Bytes, _end: bool) -> Result<()> {
+ unreachable!("server session: write_body")
+ }
+
+ async fn write_trailers(&mut self, _trailers: HeaderMap) -> Result<()> {
+ unreachable!("server session: write_trailers")
+ }
+
+ async fn response_duplex_vec(&mut self, _tasks: Vec<HttpTask>) -> Result<bool> {
+ unreachable!("server session: response_duplex_vec")
+ }
+
+ fn set_read_timeout(&mut self, _timeout: Option<Duration>) {
+ unreachable!("server session: set_read_timeout")
+ }
+
+ fn get_read_timeout(&self) -> Option<Duration> {
+ unreachable!("server_session: get_read_timeout")
+ }
+
+ fn set_write_timeout(&mut self, _timeout: Option<Duration>) {
+ unreachable!("server session: set_write_timeout")
+ }
+
+ fn get_write_timeout(&self) -> Option<Duration> {
+ unreachable!("server_session: get_write_timeout")
+ }
+
+ fn set_total_drain_timeout(&mut self, _timeout: Option<Duration>) {
+ unreachable!("server session: set_total_drain_timeout")
+ }
+
+ fn get_total_drain_timeout(&self) -> Option<Duration> {
+ unreachable!("server_session: get_total_drain_timeout")
+ }
+
+ fn request_summary(&self) -> String {
+ unreachable!("server session: request_summary")
+ }
+
+ fn response_written(&self) -> Option<&ResponseHeader> {
+ unreachable!("server session: response_written")
+ }
+
+ async fn shutdown(&mut self, _code: u32, _ctx: &str) {
+ unreachable!("server session: shutdown")
+ }
+
+ fn is_body_done(&mut self) -> bool {
+ unreachable!("server session: is_body_done")
+ }
+
+ async fn finish(&mut self) -> Result<()> {
+ unreachable!("server session: finish")
+ }
+
+ fn is_body_empty(&mut self) -> bool {
+ unreachable!("server session: is_body_empty")
+ }
+
+ async fn read_body_or_idle(&mut self, _no_body_expected: bool) -> Result