diff --git a/.gitignore b/.gitignore index eb5a316..643cc50 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ target +tmp.discovery diff --git a/Cargo.lock b/Cargo.lock index 364b443..2b3f986 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,32 +2,33 @@ name = "linkerd-tcp" version = "0.0.2" dependencies = [ - "bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "clap 2.24.1 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.11.0-a.0 (git+https://github.com/hyperium/hyper?rev=ca22eae)", - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "pretty_env_logger 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.24.2 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.11.0-a.0 (git+https://github.com/hyperium/hyper?rev=09fe9e6)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ordermap 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "pretty_env_logger 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", "rustls 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 0.9.15 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 0.9.15 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_yaml 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tacho 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.8 
(registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_yaml 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tacho 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-timer 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "aho-corasick" -version = "0.5.3" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -41,7 +42,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -65,7 +66,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bytes" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -74,12 +75,12 @@ dependencies = [ [[package]] name = "cfg-if" -version = "0.1.0" +version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "clap" -version = "2.24.1" +version = "2.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -87,9 +88,9 @@ dependencies = [ "bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", "strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "term_size 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-segmentation 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-segmentation 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "vec_map 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -104,16 +105,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "env_logger" -version = "0.3.5" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "futures" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -121,13 +122,13 @@ name = "futures-cpupool" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.14 
(registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "gcc" -version = "0.3.46" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -135,43 +136,43 @@ name = "hdrsample" version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "num 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "httparse" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "hyper" version = "0.11.0-a.0" -source = "git+https://github.com/hyperium/hyper?rev=ca22eae#ca22eae5ac291f948299fb10bcdc505497ada8ba" +source = "git+https://github.com/hyperium/hyper?rev=09fe9e6#09fe9e6a80261f1455d84746dff8f6a537686e8f" dependencies = [ "base64 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.37 
(registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-proto 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "idna" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-bidi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-bidi 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-normalization 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -180,7 +181,7 @@ name = "iovec" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -215,7 +216,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.22" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -223,42 +224,47 @@ name = "linked-hash-map" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
+[[package]] +name = "linked-hash-map" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "log" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "matches" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "memchr" -version = "0.1.11" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "mime" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "mio" -version = "0.6.7" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -281,43 +287,43 @@ name = "net2" version = "0.2.29" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num" -version = "0.1.37" +version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num-bigint 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", - "num-complex 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "num-bigint 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", + "num-complex 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", "num-integer 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", "num-iter 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", - "num-rational 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "num-rational 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num-bigint" -version = "0.1.37" +version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "num-integer 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", ] 
[[package]] name = "num-complex" -version = "0.1.37" +version = "0.1.38" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -326,7 +332,7 @@ name = "num-integer" version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -335,31 +341,31 @@ version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "num-integer 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num-rational" -version = "0.1.36" +version = "0.1.38" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num-bigint 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "num-bigint 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", "num-integer 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num-traits" -version = "0.1.37" +version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "num_cpus" -version = "1.4.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - 
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -367,14 +373,19 @@ name = "ordermap" version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "percent-encoding" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "pretty_env_logger" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -387,60 +398,60 @@ name = "rand" version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rayon" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rayon-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon-core 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rayon-core" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "deque 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + 
"libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "redox_syscall" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "regex" -version = "0.1.80" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "regex-syntax" -version = "0.3.9" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "ring" -version = "0.9.5" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.46 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.50 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", - "rayon 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.23 
(registry+https://github.com/rust-lang/crates.io-index)", + "rayon 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -463,8 +474,8 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "base64 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.9.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "webpki 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -482,45 +493,47 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "serde" -version = "0.9.15" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "serde_codegen_internals" -version = "0.14.2" +name = "serde_derive" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive_internals 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)", "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "serde_derive" -version = "0.9.15" +name = "serde_derive_internals" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_codegen_internals 0.14.2 (registry+https://github.com/rust-lang/crates.io-index)", "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", + "synom 0.11.3 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_json" -version = "0.9.10" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "dtoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 0.9.15 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_yaml" -version = "0.6.2" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 0.9.15 (registry+https://github.com/rust-lang/crates.io-index)", + "linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", "yaml-rust 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -559,13 +572,13 @@ dependencies = [ [[package]] name = "tacho" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "hdrsample 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "ordermap 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "twox-hash 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -579,25 +592,26 @@ version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "thread-id" -version = "2.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "thread_local" -version = "0.2.7" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "thread-id 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -606,34 +620,34 @@ version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-core" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.13 
(registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "mio 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-io" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -641,15 +655,15 @@ name = "tokio-proto" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.15 
(registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -658,7 +672,7 @@ name = "tokio-service" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -666,18 +680,10 @@ name = "tokio-timer" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "twox-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "unicase" version = "2.0.0" @@ -688,10 +694,10 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.2.5" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -701,7 +707,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "unicode-segmentation" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -714,6 +720,14 @@ name = "unicode-xid" version = "0.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "unreachable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "untrusted" version = "0.5.0" @@ -721,21 +735,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "url" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "idna 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "idna 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "utf8-ranges" -version = "0.1.3" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "vec_map" -version = "0.7.0" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "void" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -743,7 +763,7 @@ name = "webpki" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "ring 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.9.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.5.0 
(registry+https://github.com/rust-lang/crates.io-index)", ] @@ -776,95 +796,98 @@ dependencies = [ ] [metadata] -"checksum aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ca972c2ea5f742bfce5687b9aef75506a764f61d37f8f649047846a9686ddb66" +"checksum aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "500909c4f87a9e52355b26626d890833e9e1d53ac566db76c36faa984b889699" "checksum ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "23ac7c30002a5accbf7e8987d0632fa6de155b7c3d39d0067317a391e00a2ef6" "checksum atty 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d912da0db7fa85514874458ca3651fe2cddace8d0b0505571dbdcd41ab490159" "checksum base64 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "30e93c03064e7590d0466209155251b90c22e37fab1daf2771582598b5827557" "checksum bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1370e9fc2a6ae53aea8b7a5110edbd08836ed87c88736dfabccade1c2b44bff4" "checksum byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c40977b0ee6b9885c9013cd41d9feffdd22deb3bb4dc3a71d901cc7a77de18c8" -"checksum bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f9edb851115d67d1f18680f9326901768a91d37875b87015518357c6ce22b553" -"checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c" -"checksum clap 2.24.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b7541069be0b8aec41030802abe8b5cdef0490070afaa55418adea93b1e431e0" +"checksum bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8b24f16593f445422331a5eed46b72f7f171f910fead4f2ea8f17e727e9c5c14" +"checksum cfg-if 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d0c47d456a36ebf0536a6705c83c1cbbcb9255fbc1d905a6ded104f479268a29" +"checksum clap 2.24.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "6b8f69e518f967224e628896b54e41ff6acfb4dcfefc5076325c36525dac900f" "checksum deque 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a694dae478589798d752c7125542f8a5ae8b6e59476172baf2eed67357bdfa27" "checksum dtoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "80c8b71fd71146990a9742fc06dcbbde19161a267e0ad4e572c35162f4578c90" -"checksum env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "15abd780e45b3ea4f76b4e9a26ff4843258dd8a3eed2775a0e7368c2e7936c2f" -"checksum futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "55f0008e13fc853f79ea8fc86e931486860d4c4c156cdffb59fa5f7fa833660a" +"checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b" +"checksum futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "4b63a4792d4f8f686defe3b39b92127fea6344de5d38202b2ee5a11bbbf29d6a" "checksum futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a283c84501e92cade5ea673a2a7ca44f71f209ccdd302a3e0896f50083d2c5ff" -"checksum gcc 0.3.46 (registry+https://github.com/rust-lang/crates.io-index)" = "181e3cebba1d663bd92eb90e2da787e10597e027eb00de8d742b260a7850948f" +"checksum gcc 0.3.50 (registry+https://github.com/rust-lang/crates.io-index)" = "5f837c392f2ea61cb1576eac188653df828c861b7137d74ea4a5caa89621f9e6" "checksum hdrsample 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4f78c6b8a49cb8334c462af348efd6e8a92fdfb7b5cc4cb298d9b9e3ee9df719" -"checksum httparse 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77f756bed9ee3a83ce98774f4155b42a31b787029013f3a7d83eca714e500e21" -"checksum hyper 0.11.0-a.0 (git+https://github.com/hyperium/hyper?rev=ca22eae)" = "" -"checksum idna 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"6ac85ec3f80c8e4e99d9325521337e14ec7555c458a14e377d189659a427f375" +"checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07" +"checksum hyper 0.11.0-a.0 (git+https://github.com/hyperium/hyper?rev=09fe9e6)" = "" +"checksum idna 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2233d4940b1f19f0418c158509cd7396b8d70a5db5705ce410914dc8fa603b37" "checksum iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29d062ee61fccdf25be172e70f34c9f6efc597e1fb8f6526e8437b2046ab26be" "checksum itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eb2f404fbc66fd9aac13e998248505e7ecb2ad8e44ab6388684c5fb11c6c251c" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3b37545ab726dd833ec6420aaba8231c5b320814b9029ad585555d2a03e94fbf" "checksum lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce12306c4739d86ee97c23139f3a34ddf0387bbf181bc7929d287025a8c3ef6b" -"checksum libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)" = "babb8281da88cba992fa1f4ddec7d63ed96280a1a53ec9b919fd37b53d71e502" +"checksum libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)" = "e7eb6b826bfc1fdea7935d46556250d1799b7fe2d9f7951071f4291710665e3e" "checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd" -"checksum log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "5141eca02775a762cc6cd564d8d2c50f67c0ea3a372cbf1c51592b3e029e10ad" -"checksum matches 0.1.4 
(registry+https://github.com/rust-lang/crates.io-index)" = "efd7622e3022e1a6eaa602c4cea8912254e5582c9c692e9167714182244801b1" -"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20" -"checksum mime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9d69889cdc6336ed56b174514ce876c4c3dc564cc23dd872e7bca589bb2a36c8" -"checksum mio 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "6d19442734abd7d780b981c590c325680d933e99795fe1f693f0686c9ed48022" +"checksum linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7860ec297f7008ff7a1e3382d7f7e1dcd69efc94751a2284bafc3d013c2aa939" +"checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b" +"checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376" +"checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4" +"checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" +"checksum mio 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9e965267d4d58496fc4f740e9861118367f13570cadf66316ed2c3f2f14d87c7" "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" "checksum net2 0.2.29 (registry+https://github.com/rust-lang/crates.io-index)" = "bc01404e7568680f1259aa5729539f221cb1e6d047a0d9053cab4be8a73b5d67" -"checksum num 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "98b15ba84e910ea7a1973bccd3df7b31ae282bf9d8bd2897779950c9b8303d40" -"checksum num-bigint 0.1.37 
(registry+https://github.com/rust-lang/crates.io-index)" = "ba6d838b16e56da1b6c383d065ff1ec3c7d7797f65a3e8f6ba7092fd87820bac" -"checksum num-complex 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "148eb324ca772230853418731ffdf13531738b50f89b30692a01fcdcb0a64677" +"checksum num 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "2c3a3dc9f30bf824141521b30c908a859ab190b76e20435fcd89f35eb6583887" +"checksum num-bigint 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "6361748d02e5291c72a422dc8ed4d8464a80cb1e618971f6fffe6d52d97e3286" +"checksum num-complex 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)" = "412dfc143c56579aa6a22c574e38ddbf724522f1280ae2b257498cccff3fb6af" "checksum num-integer 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)" = "ef1a4bf6f9174aa5783a9b4cc892cacd11aebad6c69ad027a0b65c6ca5f8aa37" "checksum num-iter 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)" = "f7d1891bd7b936f12349b7d1403761c8a0b85a18b148e9da4429d5d102c1a41e" -"checksum num-rational 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)" = "c2dc5ea04020a8f18318ae485c751f8cfa1c0e69dcf465c29ddaaa64a313cc44" -"checksum num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "e1cbfa3781f3fe73dc05321bed52a06d2d491eaa764c52335cf4399f046ece99" -"checksum num_cpus 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca313f1862c7ec3e0dfe8ace9fa91b1d9cb5c84ace3d00f5ec4216238e93c167" +"checksum num-rational 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)" = "33c881e104a26e1accc09449374c095ff2312c8e0c27fab7bbefe16eac7c776d" +"checksum num-traits 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "1708c0628602a98b52fad936cf3edb9a107af06e52e49fdf0707e884456a6af6" +"checksum num_cpus 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6e416ba127a4bb3ff398cb19546a8d0414f73352efe2857f4060d36f5fe5983a" "checksum ordermap 
0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "c036a53e6bb62d7eee2edf7e087df56fd84c7bbae6a0bd93c2b9f54bddf62e03" -"checksum pretty_env_logger 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dd72d1ee7f294e96b4f2e3b67fec6c8e1cd7025477cd7a3655db362b930c53c5" +"checksum percent-encoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de154f638187706bde41d9b4738748933d64e6b37bdbffc0b47a97d16a6ae356" +"checksum pretty_env_logger 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8a97d1fde8be5bdb2c315277042a39a89b8ca5640c9d8e1a900cc9d906ee5af2" "checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" "checksum rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "022e0636ec2519ddae48154b028864bdce4eaf7d35226ab8e65c611be97b189d" -"checksum rayon 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8c83adcb08e5b922e804fe1918142b422602ef11f2fd670b0b52218cb5984a20" -"checksum rayon-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "767d91bacddf07d442fe39257bf04fd95897d1c47c545d009f6beb03efd038f8" -"checksum redox_syscall 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "29dbdfd4b9df8ab31dec47c6087b7b13cbf4a776f335e4de8efba8288dda075b" -"checksum regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4fd4ace6a8cf7860714a2c2280d6c1f7e6a413486c13298bbc86fd3da019402f" -"checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957" -"checksum ring 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)" = "873ec7c2b7c9bf58024eb8f1bbc40a6499cd23c1adc59532f4af9e355f1de0f3" +"checksum rayon 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a77c51c07654ddd93f6cb543c7a849863b03abc7e82591afda6dc8ad4ac3ac4a" +"checksum rayon-core 
1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bd1e76f8ee0322fbbeb0c43a07e1757fcf8ff06bb0ff92da017625882ddc04dd" +"checksum redox_syscall 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "3041aeb6000db123d2c9c751433f526e1f404b23213bd733167ab770c3989b4d" +"checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b" +"checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" +"checksum ring 0.9.7 (registry+https://github.com/rust-lang/crates.io-index)" = "24293de46bac74c9b9c05b40ff8496bbc8b9ae242a9b89f754e1154a43bc7c4c" "checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" "checksum rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "c5f5376ea5e30ce23c03eb77cbe4962b988deead10910c372b226388b594c084" "checksum rustls 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4779e2e35a2704f3981f3981e2bc983aa94b3c009f544f005f2d935856c6f269" "checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d" "checksum semver 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)" = "d4f410fedcf71af0345d7607d246e7ad15faaadd49d240ee3b24e5dc21a820ac" -"checksum serde 0.9.15 (registry+https://github.com/rust-lang/crates.io-index)" = "34b623917345a631dc9608d5194cc206b3fe6c3554cd1c75b937e55e285254af" -"checksum serde_codegen_internals 0.14.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bc888bd283bd2420b16ad0d860e35ad8acb21941180a83a189bb2046f9d00400" -"checksum serde_derive 0.9.15 (registry+https://github.com/rust-lang/crates.io-index)" = "978fd866f4d4872084a81ccc35e275158351d3b9fe620074e7d7504b816b74ba" -"checksum 
serde_json 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ad8bcf487be7d2e15d3d543f04312de991d631cfe1b43ea0ade69e6a8a5b16a1" -"checksum serde_yaml 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f8bd3f24ad8c7bcd34a6d70ba676dc11302b96f4f166aa5f947762e01098844d" +"checksum serde 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "c2f530d36fb84ec48fb7146936881f026cdbf4892028835fd9398475f82c1bb4" +"checksum serde_derive 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "10552fad5500771f3902d0c5ba187c5881942b811b7ba0d8fbbfbf84d80806d3" +"checksum serde_derive_internals 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "37aee4e0da52d801acfbc0cc219eb1eda7142112339726e427926a6f6ee65d3a" +"checksum serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "48b04779552e92037212c3615370f6bd57a40ebba7f20e554ff9f55e41a69a7b" +"checksum serde_yaml 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49d983aa39d2884a4b422bb11bb38f4f48fa05186e17469bc31e47d01e381111" "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" "checksum smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4c8cbcd6df1e117c2210e13ab5109635ad68a929fcbb8964dc965b76cb5ee013" "checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694" "checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad" "checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" -"checksum tacho 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ecff04cf5c1a7c6da4eafa1f1d5fd4ab0a7774fa69ecd3e7525c5c33cf704aa8" +"checksum tacho 0.4.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "02f45c7446bcfce7e979dcbd5e761ae37977320f1678a98d6f6d3c6ff05fb69f" "checksum take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b157868d8ac1f56b64604539990685fa7611d8fa9e5476cf0c02cf34d32917c5" "checksum term_size 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2b6b55df3198cc93372e85dd2ed817f0e38ce8cc0f22eb32391bfad9c4bf209" -"checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03" -"checksum thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "8576dbbfcaef9641452d5cf0df9b0e7eeab7694956dd33bb61515fb8f18cfdd5" +"checksum thread-id 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8df7875b676fddfadffd96deea3b1124e5ede707d4884248931077518cf1f773" +"checksum thread_local 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c85048c6260d17cf486ceae3282d9fb6b90be220bf5b28c400f5485ffc29f0c7" "checksum time 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "ffd7ccbf969a892bf83f1e441126968a07a3941c24ff522a26af9f9f4585d1a3" -"checksum tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "febd81b3e2ef615c6c8077347b33f3f3deec3d708ecd08194c9707b7a1eccfc9" -"checksum tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "48f55df1341bb92281f229a6030bc2abffde2c7a44c6d6b802b7687dd8be0775" +"checksum tokio-core 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "6a20ba4738d283cac7495ca36e045c80c2a8df3e05dd0909b17a06646af5a7ed" +"checksum tokio-io 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c2c3ce9739f7387a0fa65b5421e81feae92e04d603f008898f4257790ce8c2db" "checksum tokio-proto 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fbb47ae81353c63c487030659494b295f6cb6576242f907f203473b191b0389" "checksum tokio-service 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" "checksum tokio-timer 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "86f33def658c14724fc13ec6289b3875a8152ee8ae767a5b1ccbded363b03db8" -"checksum twox-hash 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "475352206e7a290c5fccc27624a163e8d0d115f7bb60ca18a64fc9ce056d7435" "checksum unicase 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2e01da42520092d0cd2d6ac3ae69eb21a22ad43ff195676b86f8c37f487d6b80" -"checksum unicode-bidi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d3a078ebdd62c0e71a709c3d53d2af693fe09fe93fbff8344aebe289b78f9032" +"checksum unicode-bidi 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a6a2c4e3710edd365cd7e78383153ed739fa31af19f9172f72d3575060f5a43a" "checksum unicode-normalization 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "e28fa37426fceeb5cf8f41ee273faa7c82c47dc8fba5853402841e665fcd86ff" -"checksum unicode-segmentation 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18127285758f0e2c6cf325bb3f3d138a12fee27de4f23e146cd6a179f26c2cf3" +"checksum unicode-segmentation 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a8083c594e02b8ae1654ae26f0ade5158b119bd88ad0e8227a5d8fcd72407946" "checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f" "checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" +"checksum unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f2ae5ddb18e1c92664717616dd9549dde73f539f01bd7b77c2edb2446bdff91" "checksum untrusted 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"6b65243989ef6aacd9c0d6bd2b822765c3361d8ed352185a6f3a41f3a718c673" -"checksum url 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f5ba8a749fb4479b043733416c244fa9d1d3af3d7c23804944651c8a448cb87e" -"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f" -"checksum vec_map 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8cdc8b93bd0198ed872357fb2e667f7125646b1762f16d60b2c96350d361897" +"checksum url 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a69a2e36a5e5ed3f3063c8c64a3b028c4d50d689fa6c862abd7cfe65f882595c" +"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122" +"checksum vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "887b5b631c2ad01628bbbaa7dd4c869f80d3186688f8d0b6f58774fbe324988c" +"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" "checksum webpki 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dd7bf9a0f93259c4e827b8d0d31b729971150a1f14d5217dfe9ad0045b53d678" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" diff --git a/Cargo.toml b/Cargo.toml index a634a29..aa3f36a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "linkerd-tcp" description = "A native TCP proxy for the linkerd service mesh" -version = "0.0.2" +version = "0.1.0" authors = [ "Oliver Gould ", "Steve Jenson ", @@ -19,16 +19,18 @@ bytes = "0.4" clap = "2.24" futures = "0.1" # We use not-yet-released tokio integration on master: -hyper = 
{ git = "https://github.com/hyperium/hyper", rev = "ca22eae" } +hyper = { git = "https://github.com/hyperium/hyper", rev = "09fe9e6" } log = "0.3" -rand = "0.3" +ordermap = "0.2.10" pretty_env_logger = "0.1" +rand = "0.3" rustls = "0.8" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" serde_yaml = "0.7" -tacho = "0.3" +# tacho = { path = "../tacho" } +tacho = "0.4" tokio-core = "0.1" tokio-io = "0.1" tokio-service = "0.1" diff --git a/README.md b/README.md index 330e050..cbd0593 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ Status: _beta_ ## Quickstart ## 1. Install [Rust and Cargo][install-rust]. -2. Configure and run [namerd][namerd]. +2. Run [namerd][namerd]. `./namerd.sh` fetches, configures, and runs namerd using a local-fs-backed discovery (in ./tmp.discovery). 3. From this repository, run: `cargo run -- example.yml` We :heart: pull requests! See [CONTRIBUTING.md](CONTRIBUTING.md) for info on @@ -52,34 +52,79 @@ ARGS: ### Example configuration ### ```yaml -proxies: + +# Administrative control endpoints are exposed on a dedicated HTTP server. Endpoints +# include: +# - /metrics -- produces a snapshot of metrics formatted for prometheus. +# - /shutdown -- POSTing to this endpoint initiates graceful shutdown. +# - /abort -- POSTing to this terminates the process immediately. +admin: + port: 9989 + + # By default, the admin server listens only on localhost. We can force it to bind + # on all interfaces by overriding the IP. + ip: 0.0.0.0 + + # Metrics are snapshot at a fixed interval of 10s. + metricsIntervalSecs: 10 + +# A process exposes one or more 'routers'. Routers connect server traffic to +# load balancers. +routers: + + # Each router has a 'label' for reporting purposes. - label: default + servers: - # Listen on two ports, one using a self-signed TLS certificate. 
- - kind: io.l5d.tcp - addr: 0.0.0.0:7474 - - kind: io.l5d.tls - addr: 0.0.0.0:7575 - defaultIdentity: - privateKey: private.pem - certs: - - cert.pem - - ../eg-ca/ca/intermediate/certs/ca-chain.cert.pem - - # Lookup /svc/google in namerd. - namerd: - url: http://127.0.0.1:4180 - path: /svc/google - - # Require that the downstream connection be TLS'd, with a `subjectAltName` including - # the DNS name _www.google.com_ using either our local CA or the host's default - # openssl certificate. + + # Each router has one or more 'servers' listening for incoming connections. + # By default, routers listen on localhost. You need to specify a port. + - port: 7474 + dstName: /svc/default + # You can limit the amount of time that a server will wait to obtain a + # connection from the router. + connectTimeoutMs: 500 + + # By default each server listens on 'localhost' to avoid exposing an open + # relay by default. Servers may be configured to listen on a specific local + # address or all local addresses (0.0.0.0). + - port: 7575 + ip: 0.0.0.0 + # Note that each server may route to a different destination through a + # single router: + dstName: /svc/google + # Servers may be configured to perform a TLS handshake. + tls: + defaultIdentity: + privateKey: private.pem + certs: + - cert.pem + - ../eg-ca/ca/intermediate/certs/ca-chain.cert.pem + + # Each router is configured to resolve names. + # Currently, only namerd's HTTP interface is supported: + interpreter: + kind: io.l5d.namerd.http + baseUrl: http://localhost:4180 + namespace: default + periodSecs: 20 + + # Clients may also be configured to perform a TLS handshake. 
client: - tls: - dnsName: "www.google.com" - trustCerts: - - ../eg-ca/ca/intermediate/certs/ca-chain.cert.pem - - /usr/local/etc/openssl/cert.pem + kind: io.l5d.static + # We can also apply linkerd-style per-client configuration: + configs: + - prefix: /svc/google + connectTimeoutMs: 400 + # Require that the downstream connection be TLS'd, with a + # `subjectAltName` including the DNS name _www.google.com_ + # using either our local CA or the host's default openssl + # certificate. + tls: + dnsName: "www.google.com" + trustCerts: + - ../eg-ca/ca/intermediate/certs/ca-chain.cert.pem + - /usr/local/etc/openssl/cert.pem ``` ### Logging ### @@ -89,7 +134,7 @@ debugging, set `RUST_LOG=trace`. ## Docker ## -To build the linkerd/linkerd-tcp docker image, run: +To build the linkerd/linkerd-tcp docker image, run: ```bash ./dockerize latest diff --git a/example.yml b/example.yml index 8b319b1..880a5dc 100644 --- a/example.yml +++ b/example.yml @@ -1,30 +1,18 @@ admin: - addr: 0.0.0.0:9989 + port: 9989 metricsIntervalSecs: 10 -proxies: +routers: - label: default servers: - - kind: io.l5d.tcp - addr: 0.0.0.0:7474 - # - kind: io.l5d.tls - # addr: 0.0.0.0:7575 - # identities: - # localhost: - # privateKey: ../eg-ca/localhost.tls/private.pem - # certs: - # - ../eg-ca/localhost.tls/cert.pem - # - ../eg-ca/localhost.tls/ca-chain.cert.pem + - port: 7474 + dstName: /svc/default + connectTimeoutMs: 500 + connectionLifetimeSecs: 60 - namerd: - url: http://127.0.0.1:4180 - path: /svc/default - intervalSecs: 5 - - # client: - # tls: - # dnsName: "www.google.com" - # trustCerts: - # - ../eg-ca/www.google.com.tls/ca-chain.cert.pem - # #- /usr/local/etc/openssl/cert.pem + interpreter: + kind: io.l5d.namerd.http + baseUrl: http://localhost:4180 + namespace: default + periodSecs: 20 diff --git a/namerd.sh b/namerd.sh new file mode 100755 index 0000000..2ebadbf --- /dev/null +++ b/namerd.sh @@ -0,0 +1,51 @@ +#!/bin/sh + +set -e + +version="1.0.2" +bin="target/namerd-${version}-exec" 
+sha="338428a49cbe5f395c01a62e06b23fa492a7a9f89a510ae227b46c915b07569e" +url="https://github.com/linkerd/linkerd/releases/download/${version}/namerd-${version}-exec" + +validbin() { + checksum=$(openssl dgst -sha256 $bin | awk '{ print $2 }') + [ "$checksum" = $sha ] +} + +if [ -f "$bin" ] && ! validbin ; then + echo "bad $bin" >&2 + mv "$bin" "${bin}.bad" +fi + +if [ ! -f "$bin" ]; then + echo "downloading $bin" >&2 + curl -L --silent --fail -o "$bin" "$url" + chmod 755 "$bin" +fi + +if ! validbin ; then + echo "bad $bin. delete $bin and run $0 again." >&2 + exit 1 +fi + +mkdir -p ./tmp.discovery +if [ ! -f ./tmp.discovery/default ]; then + echo "127.1 9991" > ./tmp.discovery/default +fi + +"$bin" -- - < /#/io.l5d.fs; + +interfaces: + - kind: io.l5d.httpController +EOF diff --git a/router.md b/router.md new file mode 100644 index 0000000..73cd14a --- /dev/null +++ b/router.md @@ -0,0 +1,107 @@ +# Rust Stream Balancer Design + +## Prototype + +The initial implementation is basically a prototype. It proves the concept, but it has +severe deficiencies that cause performance (and probably correctness) problems. +Specifically, it implements its own polling... poorly. + +At startup, the configuration is parsed. For each **proxy**, the namerd and serving +configurations are split and connectd by an async channel so that namerd updates are +processed outside of the serving thread. All of the namerd watchers are collected to be +run together with the admin server. Once all of the proxy configurations are processed, +the application is run. + +The admin thread is started, initiating all namerd polling and starting the admin server. + +Simultaneously, all of the proxies are run in the main thread. For each of these, a +**connector** is created to determine how all downstream connections are established for +the proxy. A **balancer** is created with the connector and a stream of namerd updates. 
An +**acceptor** is created for each listening interface, which manifests as a stream of +connections. The balancer is made shareable across servers by creating an +async channel and each server's connections are streamed into a sink clone. The balancer +is driven to process all of these connections. + +The balancer implements a Sink that manages _all_ I/O and connection management. Each +time `Balancer::start_send` or `Balancer::poll_complete` is called, the following work is +done: +- _all_ connection streams are checked for I/O and data is transferred; +- closed connections are reaped; +- service discovery is checked for updates; +- new connections are established; +- stats are recorded; + +## Lessons/Problems + +### Inflexible + +This model doesn't really reflect that of linkerd. We have no mechanism to _route_ +connections. All connections are simply forwarded. We cannot, for instance, route based on +client credentials or SNI destination. + +### Inefficient + +Currently, each balancer is effectively a scheduler, and a pretty poor one at that. I/O +processing should be far more granular and we shouldn't update load balancer endpoints in +the I/O path (unless absolutely necessary). + +### Timeouts + +We need several types of timeouts that are not currently implemented: +- Connection timeout: time from incoming connection to outbound established. +- Stream lifetime: maximum time a stream may stay open. +- Idle timeout: maximum time a connection may stay open without transmitting data. + +## Proposal + +linkerd-tcp should become a _stream router_. In the same way that linkerd routes requests, +linkerd-tcp should route connections. The following is a rough, evolving sketch of how +linkerd-tcp should be refactored to accommodate this: + +The linkerd-tcp configuration should support one or more **routers**. Each router is +configured with one or more **servers**. 
A server, which may or may not terminate TLS, +produces a stream of incoming connections comprising an envelope--a source identity (an +address, but maybe more) and a destination name--and a bidirectional data stream. The +server may choose the destination by static configuration or as some function of the +connection (e.g. client credentials, SNI, etc). Each connection envelope may be annotated +with a standard set of metadata including, for example, an optional connect deadline, +stream deadline, etc. + +The streams of all incoming connections for a router are merged into a single stream of +enveloped connections. This stream is forwarded to a **binder**. A binder is responsible +for maintaining a cache of balancers by destination name. When a balancer does not exist +in the cache, a new namerd lookup is initiated and its result stream (and value) is cached +so that future connections may resolve quickly. The binder obtains a **balancer** for each +destination name that maintains a list of endpoints and their load (in terms of +connections, throughput, etc). + +If the inbound connection has not expired (i.e. due to a timeout), it is dispatched to the +balancer for processing. The balancer maintains a reactor handle and initiates I/O and +balancer state management on the reactor. + +``` + ------ ------ +| srv0 | ... 
| srvN | + ------ | ------ + | + | (Envelope, IoStream) + V + ------------------- ------------- +| binder |----| interpreter | + ------------------- ------------- + | + V + ---------- +| balancer | + ---------- + | + V + ---------- +| endpoint | + ---------- + | + V + -------- +| duplex | + -------- +``` diff --git a/src/admin.rs b/src/admin.rs new file mode 100644 index 0000000..c2c4a63 --- /dev/null +++ b/src/admin.rs @@ -0,0 +1,85 @@ +use super::app::Closer; +use futures::{Future, future}; +use hyper::{self, Get, Post, StatusCode}; +use hyper::header::ContentLength; +use hyper::server::{Service, Request, Response}; +use std::boxed::Box; +use std::cell::RefCell; +use std::process; +use std::rc::Rc; +use std::time::{Duration, Instant}; +use tokio_core::reactor::Handle; +use tokio_timer::Timer; + +#[derive(Clone)] +pub struct Admin { + prometheus: Rc>, + closer: Rc>>, + grace: Duration, + reactor: Handle, + timer: Timer, +} + +type RspFuture = Box>; + +impl Admin { + pub fn new(prometheus: Rc>, + closer: Closer, + grace: Duration, + reactor: Handle, + timer: Timer) + -> Admin { + Admin { + closer: Rc::new(RefCell::new(Some(closer))), + prometheus, + grace, + reactor, + timer, + } + } + + fn metrics(&self) -> RspFuture { + let body = self.prometheus.borrow(); + let rsp = Response::new() + .with_status(StatusCode::Ok) + .with_header(ContentLength(body.len() as u64)) + .with_body(body.clone()); + future::ok(rsp).boxed() + } + + /// Tell the serving thread to stop what it's doing. + // TODO offer a `force` param? 
+ fn shutdown(&self) -> RspFuture { + let mut closer = self.closer.borrow_mut(); + if let Some(c) = closer.take() { + info!("shutting down via admin API"); + let _ = c.send(Instant::now() + self.grace); + } + let rsp = Response::new().with_status(StatusCode::Ok); + future::ok(rsp).boxed() + } + + fn abort(&self) -> RspFuture { + process::exit(1); + } + + fn not_found(&self) -> RspFuture { + let rsp = Response::new().with_status(StatusCode::NotFound); + future::ok(rsp).boxed() + } +} + +impl Service for Admin { + type Request = Request; + type Response = Response; + type Error = hyper::Error; + type Future = RspFuture; + fn call(&self, req: Request) -> RspFuture { + match (req.method(), req.path()) { + (&Get, "/metrics") => self.metrics(), + (&Post, "/shutdown") => self.shutdown(), + (&Post, "/abort") => self.abort(), + _ => self.not_found(), + } + } +} diff --git a/src/app.rs b/src/app.rs new file mode 100644 index 0000000..158c65f --- /dev/null +++ b/src/app.rs @@ -0,0 +1,322 @@ +//! Provides all of the utilities needed to load a configuration and run a process. + +use super::{ConfigError, admin, resolver, router, server}; +use super::balancer::BalancerFactory; +use super::connector::ConnectorFactoryConfig; +use super::resolver::NamerdConfig; +use futures::{Future, Stream}; +use futures::sync::oneshot; +use hyper::server::Http; +use serde_json; +use serde_yaml; +use std::cell::RefCell; +use std::collections::VecDeque; +use std::net; +use std::rc::Rc; +use std::time::{Duration, Instant}; +use tacho; +use tokio_core::net::TcpListener; +use tokio_core::reactor::{Core, Handle}; +use tokio_timer::Timer; + +const DEFAULT_ADMIN_PORT: u16 = 9989; +const DEFAULT_BUFFER_SIZE_BYTES: usize = 16 * 1024; +const DEFAULT_GRACE_SECS: u64 = 10; +const DEFAULT_METRICS_INTERVAL_SECS: u64 = 60; + +/// Signals a receiver to shutdown by the provided deadline. +pub type Closer = oneshot::Sender; + +/// Signals that the receiver should release its resources by the provided deadline. 
+pub type Closed = oneshot::Receiver; + +/// Creates a thread-safe shutdown latch. +pub fn closer() -> (Closer, Closed) { + oneshot::channel() +} + +/// Holds the configuration for a linkerd-tcp instance. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct AppConfig { + /// Configures the processes's admin server. + pub admin: Option, + + /// Configures one or more routers. + pub routers: Vec, + + /// Configures the shared buffer used for transferring data. + pub buffer_size_bytes: Option, +} + +impl ::std::str::FromStr for AppConfig { + type Err = ConfigError; + + /// Parses a JSON- or YAML-formatted configuration file. + fn from_str(txt: &str) -> Result { + let txt = txt.trim_left(); + if txt.starts_with('{') { + serde_json::from_str(txt).map_err(|e| format!("json error: {}", e).into()) + } else { + serde_yaml::from_str(txt).map_err(|e| format!("yaml error: {}", e).into()) + } + } +} + +impl AppConfig { + /// Build an App from a configuration. + pub fn into_app(mut self) -> Result { + // Create a shared transfer buffer to be used for all stream proxying. + let buf = { + let sz = self.buffer_size_bytes.unwrap_or(DEFAULT_BUFFER_SIZE_BYTES); + Rc::new(RefCell::new(vec![0 as u8; sz])) + }; + + let (metrics, reporter) = tacho::new(); + let metrics = metrics.prefixed("l5d"); + + // Load all router configurations. + // + // Separate resolver tasks are created to be executed in the admin thread's + // reactor so that service discovery lookups are performed out of the serving + // thread. + let mut routers = VecDeque::with_capacity(self.routers.len()); + let mut resolvers = VecDeque::with_capacity(self.routers.len()); + for config in self.routers.drain(..) 
{ + let mut r = config.into_router(buf.clone(), &metrics)?; + let e = r.resolver_executor + .take() + .expect("router missing resolver executor"); + routers.push_back(r); + resolvers.push_back(e); + } + + // Read the admin server configuration and bundle it an AdminRunner. + let admin = { + let addr = { + let ip = self.admin + .as_ref() + .and_then(|a| a.ip) + .unwrap_or_else(localhost_addr); + let port = self.admin + .as_ref() + .and_then(|a| a.port) + .unwrap_or(DEFAULT_ADMIN_PORT); + net::SocketAddr::new(ip, port) + }; + let grace = { + let s = self.admin + .as_ref() + .and_then(|admin| admin.grace_secs) + .unwrap_or(DEFAULT_GRACE_SECS); + Duration::from_secs(s) + }; + let metrics_interval = { + let s = self.admin + .as_ref() + .and_then(|admin| admin.metrics_interval_secs) + .unwrap_or(DEFAULT_METRICS_INTERVAL_SECS); + Duration::from_secs(s) + }; + AdminRunner { + addr, + reporter, + resolvers, + grace, + metrics_interval, + } + }; + + Ok(App { + routers: routers, + admin: admin, + }) + } +} + + +fn localhost_addr() -> net::IpAddr { + net::IpAddr::V4(net::Ipv4Addr::new(127, 0, 0, 1)) +} + +/// Holds configuraed tasks to be spawned. +pub struct App { + /// Executes configured routers. + pub routers: VecDeque, + /// Executes the admin server. + pub admin: AdminRunner, +} + +/// Holds the configuration for a single stream router. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct RouterConfig { + /// A descriptive name for this router. For stats reporting. + pub label: String, + + /// The configuration for one or more servers. + pub servers: Vec, + + /// Determines how outbound connections are initiated. + /// + /// By default, connections are clear TCP. + pub client: Option, + + /// Interprets request destinations into a stream of address pool updates. + pub interpreter: InterpreterConfig, +} + +impl RouterConfig { + /// Consumes and validates this configuration to produce a router initializer. 
+ fn into_router(mut self, + buf: Rc>>, + metrics: &tacho::Scope) + -> Result { + let metrics = metrics.clone().labeled("rt", self.label); + + // Each router has its own resolver/executor pair. The resolver is used by the + // router. The resolver executor is used to drive execution in another thread. + let (resolver, resolver_exec) = match self.interpreter { + InterpreterConfig::NamerdHttp(config) => { + let namerd = config.into_namerd(&metrics)?; + resolver::new(namerd) + } + }; + + let balancer = { + let metrics = metrics.clone().prefixed("balancer"); + let client = self.client.unwrap_or_default().mk_connector_factory()?; + BalancerFactory::new(client, &metrics) + }; + let router = router::new(resolver, balancer, &metrics); + + let mut servers = VecDeque::with_capacity(self.servers.len()); + for config in self.servers.drain(..) { + let server = config.mk_server(router.clone(), buf.clone(), &metrics)?; + servers.push_back(server); + } + + Ok(RouterSpawner { + servers: servers, + resolver_executor: Some(resolver_exec), + }) + } +} + +/// Spawns a router by spawning all of its serving interfaces. +pub struct RouterSpawner { + servers: VecDeque, + resolver_executor: Option, +} + +impl RouterSpawner { + /// Spawns a router by spawning all of its serving interfaces. + /// + /// Returns successfully if all servers have been bound and spawned correctly. + pub fn spawn(mut self, reactor: &Handle, timer: &Timer) -> Result<(), ConfigError> { + while let Some(unbound) = self.servers.pop_front() { + info!("routing on {} to {}", + unbound.listen_addr(), + unbound.dst_name()); + let bound = unbound.bind(reactor, timer).expect("failed to bind server"); + reactor.spawn(bound.map_err(|_| {})); + } + Ok(()) + } +} + +/// Configures an interpreter. +/// +/// Currently, only the io.l5d.namerd.http interpreter is supported. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields, tag = "kind")] +pub enum InterpreterConfig { + /// Polls namerd for updates. 
+ #[serde(rename = "io.l5d.namerd.http")] + NamerdHttp(NamerdConfig), +} + +/// Configures the admin server. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct AdminConfig { + /// The port on which the admin server listens. + pub port: Option, + + /// The IP address on which the admin server listens. + pub ip: Option, + + /// The interval at which metrics should be snapshot (and reset) for export. + pub metrics_interval_secs: Option, + + /// The amount of time to wait for connections to complete between the /admin/shutdown + /// endpoint being triggered and the process exiting. + pub grace_secs: Option, +} + +/// Spawns resolvers before running . +pub struct AdminRunner { + addr: net::SocketAddr, + reporter: tacho::Reporter, + resolvers: VecDeque, + grace: Duration, + metrics_interval: Duration, +} + +impl AdminRunner { + /// Runs the admin server on the provided reactor. + /// + /// When the _shutdown_ endpoint is triggered, a shutdown deadline is sent on + /// `closer`. 
+ pub fn run(self, closer: Closer, reactor: &mut Core, timer: &Timer) -> Result<(), ConfigError> { + let AdminRunner { + addr, + grace, + metrics_interval, + mut reporter, + mut resolvers, + } = self; + + let handle = reactor.handle(); + while let Some(resolver) = resolvers.pop_front() { + handle.spawn(resolver.execute(&handle, timer)); + } + + let prom_export = Rc::new(RefCell::new(String::with_capacity(8 * 1024))); + let reporting = { + let prom_export = prom_export.clone(); + timer + .interval(metrics_interval) + .map_err(|_| {}) + .for_each(move |_| { + let report = reporter.take(); + let mut prom_export = prom_export.borrow_mut(); + prom_export.clear(); + tacho::prometheus::write(&mut *prom_export, &report) + .expect("error foramtting metrics for prometheus"); + Ok(()) + }) + }; + handle.spawn(reporting); + + let serving = { + let listener = { + info!("admin listening on http://{}.", addr); + TcpListener::bind(&addr, &handle).expect("unable to listen") + }; + + let server = + admin::Admin::new(prom_export, closer, grace, handle.clone(), timer.clone()); + let http = Http::new(); + listener + .incoming() + .for_each(move |(tcp, src)| { + http.bind_connection(&handle, tcp, src, server.clone()); + Ok(()) + }) + }; + reactor.run(serving).unwrap(); + + Ok(()) + } +} diff --git a/src/app/admin_http.rs b/src/app/admin_http.rs deleted file mode 100644 index 21b7faf..0000000 --- a/src/app/admin_http.rs +++ /dev/null @@ -1,53 +0,0 @@ -use futures::{Future, future}; -use hyper::{self, Get, Post, StatusCode}; -use hyper::header::ContentLength; -use hyper::server::{Service, Request, Response}; -use std::boxed::Box; -use std::cell::RefCell; -use std::process; -use std::rc::Rc; - -pub struct Server { - prometheus: Rc>, -} - -impl Server { - pub fn new(prom: Rc>) -> Server { - Server { prometheus: prom } - } - fn get_metrics_body(&self) -> future::FutureResult { - let prom = self.prometheus.borrow(); - future::ok(prom.clone()) - } -} - -impl Service for Server { - type Request 
= Request; - type Response = Response; - type Error = hyper::Error; - type Future = Box>; - fn call(&self, req: Request) -> Self::Future { - match (req.method(), req.path()) { - (&Get, "/metrics") => { - self.get_metrics_body() - .then(|body| match body { - Ok(body) => { - let rsp = Response::new() - .with_status(StatusCode::Ok) - .with_header(ContentLength(body.len() as u64)) - .with_body(body); - future::ok(rsp) - } - Err(_) => { - future::ok(Response::new().with_status(StatusCode::InternalServerError)) - } - }) - .boxed() - } - (&Post, "/shutdown") => { - process::exit(0); - } - _ => future::ok(Response::new().with_status(StatusCode::NotFound)).boxed(), - } - } -} diff --git a/src/app/config.rs b/src/app/config.rs deleted file mode 100644 index 5e517dc..0000000 --- a/src/app/config.rs +++ /dev/null @@ -1,130 +0,0 @@ - - -use lb::WithAddr; -use serde_json; -use serde_yaml; -use std::{io, net}; -use std::collections::HashMap; - -pub fn from_str(mut txt: &str) -> io::Result { - txt = txt.trim_left(); - if txt.starts_with('{') { - serde_json::from_str(txt).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) - } else { - serde_yaml::from_str(txt).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) - } -} - -#[derive(Serialize, Deserialize, Debug)] -#[serde(deny_unknown_fields, rename_all = "camelCase")] -pub struct AppConfig { - pub admin: Option, - pub metrics_interval_secs: Option, - pub proxies: Vec, - pub buffer_size: Option, -} - -#[derive(Serialize, Deserialize, Debug)] -#[serde(deny_unknown_fields, rename_all = "camelCase")] -pub struct AdminConfig { - pub addr: Option, - pub metrics_interval_secs: Option, -} - -#[derive(Serialize, Deserialize, Debug)] -#[serde(deny_unknown_fields, rename_all = "camelCase")] -pub struct ProxyConfig { - pub label: String, - pub servers: Vec, - pub namerd: NamerdConfig, - pub client: Option, - pub max_waiters: Option, -} - -#[derive(Serialize, Deserialize, Debug)] -#[serde(deny_unknown_fields, tag = "kind")] -pub 
enum ServerConfig { - #[serde(rename = "io.l5d.tcp")] - Tcp { addr: net::SocketAddr }, - - // TODO support cypher suites - // TODO support client auth - // TODO supoprt persistence? - #[serde(rename = "io.l5d.tls", rename_all = "camelCase")] - Tls { - addr: net::SocketAddr, - alpn_protocols: Option>, - default_identity: Option, - identities: Option>, - }, -} - -impl WithAddr for ServerConfig { - fn addr(&self) -> net::SocketAddr { - match *self { - ServerConfig::Tcp { ref addr } | - ServerConfig::Tls { ref addr, .. } => *addr, - } - } -} - -#[derive(Serialize, Deserialize, Debug)] -#[serde(deny_unknown_fields, rename_all = "camelCase")] -pub struct TlsServerIdentity { - pub certs: Vec, - pub private_key: String, -} - -#[derive(Serialize, Deserialize, Debug)] -#[serde(deny_unknown_fields, rename_all = "camelCase")] -pub struct NamerdConfig { - pub url: String, - pub path: String, - pub namespace: Option, - pub interval_secs: Option, -} - -#[derive(Serialize, Deserialize, Debug)] -#[serde(deny_unknown_fields, rename_all = "camelCase")] -pub struct ClientConfig { - pub tls: Option, -} - -#[derive(Serialize, Deserialize, Debug)] -#[serde(deny_unknown_fields, rename_all = "camelCase")] -pub struct TlsClientConfig { - pub dns_name: String, - pub trust_certs: Option>, -} - -#[test] -fn parse_simple_yaml() { - let yaml = " -bufferSize: 1234 -proxies: - - label: default - servers: - - kind: io.l5d.tcp - addr: 0.0.0.0:4321 - - kind: io.l5d.tcp - addr: 0.0.0.0:4322 - namerd: - url: http://127.0.0.1:4180 - path: /svc/default - intervalSecs: 5 -"; - let app = from_str(yaml).unwrap(); - assert!(app.buffer_size == Some(1234)); - assert!(app.proxies.len() == 1); -} - -#[test] -fn parse_simple_json() { - let json = "{\"bufferSize\":1234, \"proxies\": [{\"label\": \"default\",\ - \"servers\": [{\"kind\":\"io.l5d.tcp\", \"addr\":\"0.0.0.0:4321\"},\ - {\"kind\":\"io.l5d.tcp\", \"addr\":\"0.0.0.0:4322\"}],\ - \"namerd\": {\"url\":\"http://127.0.0.1:4180\", 
\"path\":\"/svc/default\"}}]}"; - let app = from_str(json).unwrap(); - assert!(app.buffer_size == Some(1234)); - assert!(app.proxies.len() == 1); -} diff --git a/src/app/mod.rs b/src/app/mod.rs deleted file mode 100644 index 692b6ae..0000000 --- a/src/app/mod.rs +++ /dev/null @@ -1,345 +0,0 @@ -use futures::{Async, Future, Poll, Sink, Stream}; -use futures::sync::mpsc; -use hyper::Client; -use hyper::server::Http; -use rustls; -use rustls::ResolvesServerCert; -use std::boxed::Box; -use std::cell::RefCell; -use std::collections::{VecDeque, HashMap}; -use std::fs::File; -use std::io::{self, BufReader}; -use std::net; -use std::rc::Rc; -use std::time::Duration; -use tacho; -use tokio_core::net::TcpListener; -use tokio_core::reactor::{Core, Handle}; -use tokio_timer::Timer as TokioTimer; - -mod admin_http; -mod sni; -pub mod config; - -use self::config::*; -use self::sni::Sni; -use WeightedAddr; -use lb::{Balancer, Acceptor, Connector, PlainAcceptor, PlainConnector, SecureAcceptor, - SecureConnector}; -use namerd; - -const DEFAULT_BUFFER_SIZE: usize = 8 * 1024; -const DEFAULT_MAX_WAITERS: usize = 8; -const DEFAULT_NAMERD_SECONDS: u64 = 60; -const DEFAULT_METRICS_SECONDS: u64 = 10; - -fn default_admin_addr() -> net::SocketAddr { - "0.0.0.0:9989".parse().unwrap() -} - -/// Creates two reactor-aware runners from a configuration. -/// -/// Each runner takes a Handle and produces a `Future`, which should be passed to `run` -/// which completes when the thread should stop running. -pub fn configure(app: AppConfig) -> (Admin, Proxies) { - let transfer_buf = { - let sz = app.buffer_size.unwrap_or(DEFAULT_BUFFER_SIZE); - Rc::new(RefCell::new(vec![0;sz])) - }; - - let (metrics, reporter) = tacho::new(); - - let mut namerds = VecDeque::new(); - let mut proxies = VecDeque::new(); - let mut proxy_configs = app.proxies; - for _ in 0..proxy_configs.len() { - let ProxyConfig { label, namerd, servers, client, max_waiters, .. 
} = proxy_configs.pop() - .unwrap(); - let (addrs_tx, addrs_rx) = mpsc::channel(1); - namerds.push_back(Namerd { - config: namerd, - sender: addrs_tx, - metrics: metrics.clone(), - }); - proxies.push_back(Proxy { - client: client, - server: ProxyServer { - label: label, - addrs: Box::new(addrs_rx.fuse()), - servers: servers, - buf: transfer_buf.clone(), - max_waiters: max_waiters.unwrap_or(DEFAULT_MAX_WAITERS), - metrics: metrics.clone(), - }, - }); - } - - let addr = app.admin - .as_ref() - .and_then(|a| a.addr) - .unwrap_or_else(default_admin_addr); - let interval_s = app.admin - .as_ref() - .and_then(|a| a.metrics_interval_secs) - .unwrap_or(DEFAULT_METRICS_SECONDS); - let admin = Admin { - addr: addr, - metrics_interval: Duration::from_secs(interval_s), - namerds: namerds, - metrics: reporter, - }; - let proxies = Proxies { proxies: proxies }; - (admin, proxies) -} - -pub trait Loader: Sized { - type Run: Future; - fn load(self, handle: Handle) -> io::Result; -} -pub trait Runner: Sized { - fn run(self) -> io::Result<()>; -} - -impl Runner for L { - fn run(self) -> io::Result<()> { - let mut core = Core::new()?; - let fut = self.load(core.handle())?; - core.run(fut) - } -} - -pub struct Admin { - addr: net::SocketAddr, - metrics_interval: Duration, - namerds: VecDeque, - metrics: tacho::Reporter, -} -impl Loader for Admin { - type Run = Running; - fn load(self, handle: Handle) -> io::Result { - let mut running = Running::new(); - { - let mut namerds = self.namerds; - for _ in 0..namerds.len() { - let f = namerds.pop_front().unwrap().load(handle.clone())?; - running.register(f.map_err(|_| io::ErrorKind::Other.into())); - } - } - let metrics_export = Rc::new(RefCell::new(String::new())); - { - let metrics_export = metrics_export.clone(); - let mut metrics = self.metrics; - let reporting = TokioTimer::default() - .interval(self.metrics_interval) - .map_err(|_| {}) - .for_each(move |_| { - let metrics_export = metrics_export.clone(); - let report = metrics.take(); 
- let mut export = metrics_export.borrow_mut(); - *export = tacho::prometheus::format(&report); - Ok(()) - }) - .map(|_| {}) - .map_err(|_| io::ErrorKind::Other.into()); - running.register(reporting); - } - { - // TODO make this addr configurable. - let listener = { - println!("Listening on http://{}.", self.addr); - TcpListener::bind(&self.addr, &handle).expect("unable to listen") - }; - - let http = Http::new(); - let srv = listener.incoming().for_each(move |(socket, addr)| { - let server = admin_http::Server::new(metrics_export.clone()); - http.bind_connection(&handle, socket, addr, server); - Ok(()) - }); - running.register(srv); - } - Ok(running) - } -} - - -struct Namerd { - config: NamerdConfig, - sender: mpsc::Sender>, - metrics: tacho::Scope, -} -impl Loader for Namerd { - type Run = Box>; - fn load(self, handle: Handle) -> io::Result { - let path = self.config.path; - let url = self.config.url; - let interval_secs = self.config.interval_secs.unwrap_or(DEFAULT_NAMERD_SECONDS); - let interval = Duration::from_secs(interval_secs); - let ns = self.config.namespace.clone().unwrap_or_else(|| "default".into()); - info!("Updating {} in {} from {} every {}s", - path, - ns, - url, - interval_secs); - let addrs = { - let client = Client::new(&handle); - namerd::resolve(&url, client, interval, &ns, &path, self.metrics) - }; - let driver = { - let sink = self.sender.sink_map_err(|_| error!("sink error")); - addrs.forward(sink).map_err(|_| io::ErrorKind::Other.into()).map(|_| {}) - }; - Ok(Box::new(driver)) - } -} - -pub struct Proxies { - proxies: VecDeque, -} -impl Loader for Proxies { - type Run = Running; - fn load(self, handle: Handle) -> io::Result { - let mut running = Running::new(); - let mut proxies = self.proxies; - for _ in 0..proxies.len() { - let p = proxies.pop_front().unwrap(); - let f = p.load(handle.clone())?; - running.register(f); - } - Ok(running) - } -} - -struct Proxy { - client: Option, - server: ProxyServer, -} -impl Loader for Proxy { - type 
Run = Running; - fn load(self, handle: Handle) -> io::Result { - match self.client.and_then(|c| c.tls) { - None => { - let conn = PlainConnector::new(handle.clone()); - self.server.load(&handle, conn) - } - Some(ref c) => { - let mut tls = rustls::ClientConfig::new(); - if let Some(ref certs) = c.trust_certs { - for p in certs { - let f = File::open(p).expect("cannot open certificate file"); - tls.root_store - .add_pem_file(&mut BufReader::new(f)) - .expect("certificate error"); - } - }; - let conn = SecureConnector::new(c.dns_name.clone(), tls, handle.clone()); - self.server.load(&handle, conn) - } - } - } -} - -struct ProxyServer { - label: String, - servers: Vec, - addrs: Box, Error = ()>>, - buf: Rc>>, - max_waiters: usize, - metrics: tacho::Scope, -} -impl ProxyServer { - fn load(self, handle: &Handle, conn: C) -> io::Result - where C: Connector + 'static - { - let addrs = self.addrs.map_err(|_| io::ErrorKind::Other.into()); - let metrics = self.metrics.clone().labeled("proxy".into(), self.label.into()); - let bal = Balancer::new(addrs, conn, self.buf.clone(), metrics.clone()) - .into_shared(self.max_waiters, handle.clone()); - - // TODO scope/tag stats for servers. - - let mut running = Running::new(); - for s in &self.servers { - let handle = handle.clone(); - let bal = bal.clone(); - match *s { - ServerConfig::Tcp { ref addr } => { - let metrics = metrics.clone().labeled("srv".into(), format!("{}", addr)); - let acceptor = PlainAcceptor::new(handle, metrics); - let f = acceptor.accept(addr).forward(bal).map(|_| {}); - running.register(f); - } - ServerConfig::Tls { ref addr, - ref alpn_protocols, - ref default_identity, - ref identities, - .. 
} => { - let mut tls = rustls::ServerConfig::new(); - tls.cert_resolver = load_cert_resolver(identities, default_identity); - if let Some(ref protos) = *alpn_protocols { - tls.set_protocols(protos); - } - - let metrics = metrics.clone().labeled("srv".into(), format!("{}", addr)); - let acceptor = SecureAcceptor::new(handle, tls, metrics); - let f = acceptor.accept(addr).forward(bal).map(|_| {}); - running.register(f); - } - } - } - Ok(running) - } -} - -fn load_cert_resolver(ids: &Option>, - def: &Option) - -> Box { - let mut is_empty = def.is_some(); - if let Some(ref ids) = *ids { - is_empty = is_empty && ids.is_empty(); - } - if is_empty { - panic!("No TLS server identities specified"); - } - - Box::new(Sni::new(ids, def)) -} - -/// Tracks a list of `F`-typed `Future`s until are complete. -pub struct Running(VecDeque>>); -impl Running { - fn new() -> Running { - Running(VecDeque::new()) - } - - fn register(&mut self, f: F) - where F: Future + 'static - { - self.0.push_back(Box::new(f)) - } -} -impl Future for Running { - type Item = (); - type Error = io::Error; - fn poll(&mut self) -> Poll<(), io::Error> { - let sz = self.0.len(); - trace!("polling {} running", sz); - for i in 0..sz { - let mut f = self.0.pop_front().unwrap(); - trace!("polling runner {}", i); - if f.poll()? 
== Async::NotReady { - trace!("runner {} not ready", i); - self.0.push_back(f); - } else { - trace!("runner {} finished", i); - } - } - if self.0.is_empty() { - trace!("runner finished"); - Ok(Async::Ready(())) - } else { - trace!("runner not finished"); - Ok(Async::NotReady) - } - } -} diff --git a/src/balancer/dispatcher.rs b/src/balancer/dispatcher.rs new file mode 100644 index 0000000..685ce2c --- /dev/null +++ b/src/balancer/dispatcher.rs @@ -0,0 +1,379 @@ +use super::{Endpoints, EndpointMap, Waiter}; +use super::endpoint::{self, Endpoint}; +use super::super::Path; +use super::super::connection::Connection; +use super::super::connector::Connector; +use super::super::resolver::Resolve; +use futures::{Future, Stream, Sink, Poll, Async, AsyncSink, StartSend}; +use rand::{self, Rng}; +use std::collections::VecDeque; +use std::io; +use std::time::{Duration, Instant}; +use tacho; +use tokio_core::reactor::Handle; +use tokio_timer::Timer; + +pub fn new(reactor: Handle, + timer: Timer, + dst_name: Path, + connector: Connector, + resolve: Resolve, + endpoints: Endpoints, + metrics: &tacho::Scope) + -> Dispatcher { + Dispatcher { + reactor, + timer, + dst_name, + endpoints, + resolve, + max_waiters: connector.max_waiters(), + min_connections: connector.min_connections(), + fail_limit: connector.failure_limit(), + fail_penalty: connector.failure_penalty(), + connector, + connecting: VecDeque::default(), + connected: VecDeque::default(), + waiters: VecDeque::default(), + metrics: Metrics::new(metrics), + } +} + +/// Accepts connection requests +pub struct Dispatcher { + reactor: Handle, + timer: Timer, + dst_name: Path, + connector: Connector, + resolve: Resolve, + endpoints: Endpoints, + connecting: VecDeque>, + min_connections: usize, + connected: VecDeque>, + waiters: VecDeque, + max_waiters: usize, + fail_limit: usize, + fail_penalty: Duration, + metrics: Metrics, +} + +impl Dispatcher { + /// Selects an endpoint using the power of two choices. 
+ /// + /// Two endpoints are chosen randomly and return the lesser-loaded endpoint. + /// If no endpoints are available, `None` is retruned. + fn select_endpoint(available: &EndpointMap) -> Option<&Endpoint> { + match available.len() { + 0 => None, + 1 => { + // One endpoint, use it. + available.get_index(0).map(|(_, ep)| ep) + } + sz => { + let mut rng = rand::thread_rng(); + + // Pick 2 candidate indices. + let (i0, i1) = if sz == 2 { + if rng.gen::() { (0, 1) } else { (1, 0) } + } else { + // 3 or more endpoints: choose two distinct endpoints at random. + let i0 = rng.gen_range(0, sz); + let mut i1 = rng.gen_range(0, sz); + while i0 == i1 { + i1 = rng.gen_range(0, sz); + } + (i0, i1) + }; + + // Determine the the scores of each endpoint + let (addr0, ep0) = available.get_index(i0).unwrap(); + let (load0, weight0) = (ep0.load(), ep0.weight()); + let score0 = (load0 + 1) as f64 * (1.0 - weight0); + + let (addr1, ep1) = available.get_index(i1).unwrap(); + let (load1, weight1) = (ep1.load(), ep1.weight()); + let score1 = (load1 + 1) as f64 * (1.0 - weight1); + + if score0 <= score1 { + trace!("dst: {} {}*{} (not {} {}*{})", + addr0, + load0, + weight0, + addr1, + load1, + weight1); + Some(ep0) + } else { + trace!("dst: {} {}*{} (not {} {}*{})", + addr1, + load1, + weight1, + addr0, + load0, + weight0); + Some(ep1) + } + } + } + } + + fn dispatch_to_next_waiter(&mut self, + conn: endpoint::Connection) + -> Result<(), endpoint::Connection> { + match self.waiters.pop_front() { + None => Err(conn), + Some(waiter) => { + match waiter.send(conn) { + Ok(()) => Ok(()), + Err(conn) => self.dispatch_to_next_waiter(conn), + } + } + } + } + + fn dispatch_connected_to_waiters(&mut self) { + debug!("dispatching {} connections to {} waiters", + self.connected.len(), + self.waiters.len()); + while let Some(conn) = self.connected.pop_front() { + if let Err(conn) = self.dispatch_to_next_waiter(conn) { + self.connected.push_front(conn); + return; + } + } + } + + fn connect(&mut self) 
{ + let available = self.endpoints.available(); + if available.is_empty() { + trace!("no available endpoints"); + return; + } + + let needed = { + let needed = self.min_connections + self.waiters.len(); + let pending = self.connecting.len() + self.connected.len(); + if needed < pending { + 0 + } else { + needed - pending + } + }; + debug!("initiating {} connections", needed); + + for _ in 0..needed { + match Dispatcher::select_endpoint(available) { + None => { + trace!("no endpoints ready"); + self.metrics.unavailable.incr(1); + return; + } + Some(ep) => { + self.metrics.attempts.incr(1); + let mut conn = { + let sock = self.connector + .connect(&ep.peer_addr(), &self.reactor, &self.timer); + let c = ep.connect(sock, &self.metrics.connection_duration); + self.metrics.connect_latency.time(c) + }; + match conn.poll() { + Err(e) => { + debug!("connection failed: {}", e); + self.metrics.failure(&e); + } + Ok(Async::NotReady) => { + trace!("connection pending"); + self.metrics.pending.incr(1); + self.connecting.push_back(conn); + } + Ok(Async::Ready(conn)) => { + debug!("connected"); + self.metrics.connects.incr(1); + self.metrics.pending.decr(1); + self.metrics.open.incr(1); + self.connected.push_back(conn); + } + } + } + } + } + } + + fn poll_resolve(&mut self) { + loop { + match self.resolve.poll() { + Err(e) => { + error!("{}: resolver error: {:?}", self.dst_name, e); + } + Ok(Async::Ready(Some(Err(e)))) => { + error!("{}: resolver error: {:?}", self.dst_name, e); + } + Ok(Async::NotReady) => break, + Ok(Async::Ready(None)) => { + info!("resolution complete! 
no further updates will be received"); + break; + } + Ok(Async::Ready(Some(Ok(addrs)))) => { + self.endpoints.update_resolved(&self.dst_name, &addrs); + } + } + } + debug!("balancer updated: available={} failed={}, retired={}", + self.endpoints.available().len(), + self.endpoints.failed().len(), + self.endpoints.retired().len()); + } + + fn poll_connecting(&mut self) { + debug!("polling {} pending connections", self.connecting.len()); + for _ in 0..self.connecting.len() { + let mut conn = self.connecting.pop_front().unwrap(); + match conn.poll() { + Err(e) => { + debug!("connection failed: {}", e); + self.metrics.pending.decr(1); + self.metrics.failure(&e); + } + Ok(Async::NotReady) => { + trace!("connection pending"); + self.connecting.push_back(conn); + } + Ok(Async::Ready(conn)) => { + debug!("connected"); + self.metrics.connects.incr(1); + self.metrics.pending.decr(1); + self.metrics.open.incr(1); + self.connected.push_back(conn) + } + } + } + } + + fn record(&self, t0: Instant) { + { + let mut open = 0; + let mut pending = 0; + { + let available = self.endpoints.available(); + self.metrics.available.set(available.len()); + for ep in available.values() { + let state = ep.state(); + open += state.open_conns; + pending += state.pending_conns; + } + } + { + let failed = self.endpoints.failed(); + self.metrics.failed.set(failed.len()); + for &(_, ref ep) in failed.values() { + let state = ep.state(); + open += state.open_conns; + pending += state.pending_conns; + } + } + { + let retired = self.endpoints.retired(); + self.metrics.retired.set(retired.len()); + for ep in retired.values() { + let state = ep.state(); + open += state.open_conns; + pending += state.pending_conns; + } + } + self.metrics.open.set(open); + self.metrics.pending.set(pending); + } + self.metrics.waiters.set(self.waiters.len()); + self.metrics.poll_time.record_since(t0); + } + + fn assess_failure(&mut self) { + self.endpoints + .update_failed(self.fail_limit, self.fail_penalty); + } + + fn 
poll(&mut self) { + let t0 = Instant::now(); + self.poll_connecting(); + // We drive resolution from this task so that updates can trigger dispatch i.e. if + // waiters are waiting for endpoints to be added. + self.poll_resolve(); + self.connect(); + self.dispatch_connected_to_waiters(); + self.assess_failure(); + self.record(t0); + } +} + +/// Buffers up to `max_waiters` concurrent connection requests, along with corresponding connection attempts. +impl Sink for Dispatcher { + type SinkItem = Waiter; + type SinkError = io::Error; + + fn start_send(&mut self, waiter: Waiter) -> StartSend { + if self.waiters.len() == self.max_waiters { + return Ok(AsyncSink::NotReady(waiter)); + } + self.waiters.push_back(waiter); + self.poll(); + Ok(AsyncSink::Ready) + } + + fn poll_complete(&mut self) -> Poll<(), io::Error> { + if self.connecting.is_empty() && self.waiters.is_empty() { + return Ok(Async::Ready(())); + } + self.poll(); + Ok(Async::NotReady) + } +} + +struct Metrics { + available: tacho::Gauge, + failed: tacho::Gauge, + retired: tacho::Gauge, + pending: tacho::Gauge, + open: tacho::Gauge, + waiters: tacho::Gauge, + poll_time: tacho::Timer, + attempts: tacho::Counter, + unavailable: tacho::Counter, + connects: tacho::Counter, + timeouts: tacho::Counter, + refused: tacho::Counter, + failures: tacho::Counter, + connect_latency: tacho::Timer, + connection_duration: tacho::Timer, +} + +impl Metrics { + fn new(base: &tacho::Scope) -> Metrics { + let ep = base.clone().prefixed("endpoint"); + let conn = base.clone().prefixed("connection"); + Metrics { + available: ep.gauge("available"), + failed: ep.gauge("failed"), + retired: ep.gauge("retired"), + pending: conn.gauge("pending"), + open: conn.gauge("open"), + waiters: base.gauge("waiters"), + poll_time: base.timer_us("poll_time_us"), + unavailable: base.counter("unavailable"), + attempts: conn.counter("attempts"), + connects: conn.counter("connects"), + timeouts: conn.clone().labeled("cause", "timeout").counter("failure"), 
+ refused: conn.clone().labeled("cause", "refused").counter("failure"), + failures: conn.clone().labeled("cause", "other").counter("failure"), + connect_latency: conn.timer_us("latency_us"), + connection_duration: conn.timer_ms("duration_ms"), + } + } + + fn failure(&self, err: &io::Error) { + match err.kind() { + io::ErrorKind::TimedOut => self.timeouts.incr(1), + io::ErrorKind::ConnectionRefused => self.refused.incr(1), + _ => self.failures.incr(1), + } + } +} diff --git a/src/balancer/endpoint.rs b/src/balancer/endpoint.rs new file mode 100644 index 0000000..64ed544 --- /dev/null +++ b/src/balancer/endpoint.rs @@ -0,0 +1,145 @@ +use super::super::Path; +use super::super::connection::{Connection as _Connection, ctx}; +use super::super::connector; +use futures::{Future, Poll}; +use std::{io, net}; +use std::cell::{Ref, RefCell}; +use std::rc::Rc; +use std::time::Instant; +use tacho; + +pub type Connection = _Connection; + +pub fn new(dst_name: Path, peer_addr: net::SocketAddr, weight: f64) -> Endpoint { + Endpoint { + dst_name, + peer_addr, + weight, + state: Rc::new(RefCell::new(State::default())), + } +} + +#[derive(Default)] +pub struct State { + pub pending_conns: usize, + pub open_conns: usize, + pub consecutive_failures: usize, + pub rx_bytes: usize, + pub tx_bytes: usize, +} + +impl State { + pub fn load(&self) -> usize { + self.open_conns + self.pending_conns + } + pub fn is_idle(&self) -> bool { + self.open_conns == 0 + } +} + +/// Represents a single concrete traffic destination +pub struct Endpoint { + dst_name: Path, + peer_addr: net::SocketAddr, + weight: f64, + state: Rc>, +} + +impl Endpoint { + pub fn peer_addr(&self) -> net::SocketAddr { + self.peer_addr + } + + pub fn state(&self) -> Ref { + self.state.borrow() + } + + // TODO we should be able to use throughput/bandwidth as well. 
+ pub fn load(&self) -> usize { + self.state.borrow().load() + } + + pub fn set_weight(&mut self, w: f64) { + assert!(0.0 <= w && w <= 1.0); + self.weight = w; + } + + pub fn weight(&self) -> f64 { + self.weight + } + + pub fn connect(&self, sock: connector::Connecting, duration: &tacho::Timer) -> Connecting { + let conn = { + let peer_addr = self.peer_addr; + let dst_name = self.dst_name.clone(); + let state = self.state.clone(); + let duration = duration.clone(); + debug!("{}: connecting", peer_addr); + sock.then(move |res| match res { + Err(e) => { + error!("{}: connection failed: {}", peer_addr, e); + let mut s = state.borrow_mut(); + s.consecutive_failures += 1; + s.pending_conns -= 1; + Err(e) + } + Ok(sock) => { + debug!("{}: connected", peer_addr); + { + let mut s = state.borrow_mut(); + s.consecutive_failures = 0; + s.pending_conns -= 1; + s.open_conns += 1; + } + let ctx = Ctx { + state, + duration, + start: Instant::now(), + }; + Ok(Connection::new(dst_name, sock, ctx)) + } + }) + }; + + let mut state = self.state.borrow_mut(); + state.pending_conns += 1; + Connecting(Box::new(conn)) + } + + pub fn is_idle(&self) -> bool { + self.state.borrow().is_idle() + } +} + +pub struct Connecting(Box + 'static>); +impl Future for Connecting { + type Item = Connection; + type Error = io::Error; + fn poll(&mut self) -> Poll { + self.0.poll() + } +} + +pub struct Ctx { + state: Rc>, + duration: tacho::Timer, + start: Instant, +} +impl ctx::Ctx for Ctx { + fn read(&mut self, sz: usize) { + let mut state = self.state.borrow_mut(); + state.rx_bytes += sz; + } + + fn wrote(&mut self, sz: usize) { + let mut state = self.state.borrow_mut(); + state.tx_bytes += sz; + } +} +impl Drop for Ctx { + fn drop(&mut self) { + let mut state = self.state.borrow_mut(); + state.open_conns -= 1; + self.duration.record_since(self.start) + } +} diff --git a/src/balancer/factory.rs b/src/balancer/factory.rs new file mode 100644 index 0000000..4854433 --- /dev/null +++ 
b/src/balancer/factory.rs @@ -0,0 +1,35 @@ +use super::Balancer; +use super::super::{ConfigError, Path}; +use super::super::connector::ConnectorFactory; +use super::super::resolver::Resolve; +use std::cell::RefCell; +use std::rc::Rc; +use tacho; +use tokio_core::reactor::Handle; +use tokio_timer::Timer; + +#[derive(Clone)] +pub struct BalancerFactory { + connector_factory: Rc>, + metrics: tacho::Scope, +} + +impl BalancerFactory { + pub fn new(cf: ConnectorFactory, metrics: &tacho::Scope) -> BalancerFactory { + BalancerFactory { + connector_factory: Rc::new(RefCell::new(cf)), + metrics: metrics.clone(), + } + } + + pub fn mk_balancer(&self, + reactor: &Handle, + timer: &Timer, + dst_name: &Path, + resolve: Resolve) + -> Result { + let connector = self.connector_factory.borrow().mk_connector(dst_name)?; + let metrics = self.metrics.clone().labeled("dst", dst_name); + Ok(super::new(reactor, timer, dst_name, connector, resolve, &metrics)) + } +} diff --git a/src/balancer/mod.rs b/src/balancer/mod.rs new file mode 100644 index 0000000..39872ad --- /dev/null +++ b/src/balancer/mod.rs @@ -0,0 +1,254 @@ +use super::Path; +use super::connector::Connector; +use super::resolver::Resolve; +use futures::{Async, Future, Poll, Sink, Stream, unsync}; +use ordermap::OrderMap; +use std::{cmp, io, net}; +use std::collections::VecDeque; +use std::time::{Duration, Instant}; +use tacho; +use tokio_core::reactor::Handle; +use tokio_timer::Timer; + +mod dispatcher; +mod endpoint; +mod factory; + +pub use self::endpoint::{Connection as EndpointConnection, Ctx as EndpointCtx}; +use self::endpoint::Endpoint; +pub use self::factory::BalancerFactory; + +type Waiter = unsync::oneshot::Sender; + +/// A weighted concrete destination address. 
+#[derive(Clone, Debug)] +pub struct WeightedAddr { + pub addr: ::std::net::SocketAddr, + pub weight: f64, +} + +impl WeightedAddr { + pub fn new(addr: net::SocketAddr, weight: f64) -> WeightedAddr { + WeightedAddr { addr, weight } + } +} + +pub fn new(reactor: &Handle, + timer: &Timer, + dst: &Path, + connector: Connector, + resolve: Resolve, + metrics: &tacho::Scope) + -> Balancer { + let (tx, rx) = unsync::mpsc::unbounded(); + let dispatcher = dispatcher::new(reactor.clone(), + timer.clone(), + dst.clone(), + connector, + resolve, + Endpoints::default(), + metrics); + let dispatch = rx.forward(dispatcher.sink_map_err(|_| {})); + reactor.spawn(dispatch.map(|_| {})); + Balancer(tx) +} + +#[derive(Clone)] +pub struct Balancer(unsync::mpsc::UnboundedSender); +impl Balancer { + /// Obtains a connection to the destination. + pub fn connect(&self) -> Connect { + let (tx, rx) = unsync::oneshot::channel(); + let result = unsync::mpsc::UnboundedSender::send(&self.0, tx) + .map_err(|_| io::Error::new(io::ErrorKind::Other, "lost dispatcher")) + .map(|_| rx); + Connect(Some(result)) + } +} + +pub struct Connect(Option>>); +impl Future for Connect { + type Item = endpoint::Connection; + type Error = io::Error; + fn poll(&mut self) -> Poll { + let mut recv = self.0 + .take() + .expect("connect must not be polled after completion")?; + match recv.poll() { + Err(_) => Err(io::Error::new(io::ErrorKind::Interrupted, "canceled")), + Ok(Async::Ready(conn)) => Ok(Async::Ready(conn)), + Ok(Async::NotReady) => { + self.0 = Some(Ok(recv)); + Ok(Async::NotReady) + } + } + } +} + +pub type EndpointMap = OrderMap; +pub type FailedMap = OrderMap; + +#[derive(Default)] +pub struct Endpoints { + //minimum_connections: usize, + /// Endpoints considered available for new connections. + available: EndpointMap, + + /// Endpoints that are still active but considered unavailable for new connections. 
+ retired: EndpointMap, + + failed: FailedMap, +} + +impl Endpoints { + pub fn available(&self) -> &EndpointMap { + &self.available + } + + pub fn failed(&self) -> &FailedMap { + &self.failed + } + + pub fn retired(&self) -> &EndpointMap { + &self.retired + } + + pub fn update_failed(&mut self, max_failures: usize, penalty: Duration) { + let mut available = VecDeque::with_capacity(self.failed.len()); + let mut failed = VecDeque::with_capacity(self.failed.len()); + + for (_, ep) in self.available.drain(..) { + if ep.state().consecutive_failures < max_failures { + available.push_back(ep); + } else { + failed.push_back((Instant::now(), ep)); + } + } + + for (_, (start, ep)) in self.failed.drain(..) { + if start + penalty <= Instant::now() { + available.push_back(ep); + } else { + failed.push_back((start, ep)); + } + } + + if available.is_empty() { + while let Some((_, ep)) = failed.pop_front() { + self.available.insert(ep.peer_addr(), ep); + } + } else { + while let Some(ep) = available.pop_front() { + self.available.insert(ep.peer_addr(), ep); + } + while let Some((since, ep)) = failed.pop_front() { + self.failed.insert(ep.peer_addr(), (since, ep)); + } + } + } + + // TODO: we need to do some sort of probation deal to manage endpoints that are + // retired. + pub fn update_resolved(&mut self, dst_name: &Path, resolved: &[WeightedAddr]) { + let mut temp = { + let sz = cmp::max(self.available.len(), self.retired.len()); + VecDeque::with_capacity(sz) + }; + let dsts = Endpoints::dsts_by_addr(resolved); + self.check_retired(&dsts, &mut temp); + self.check_available(&dsts, &mut temp); + self.check_failed(&dsts); + self.update_available_from_new(dst_name, dsts); + } + + /// Checks active endpoints. + fn check_available(&mut self, + dsts: &OrderMap, + temp: &mut VecDeque) { + for (addr, ep) in self.available.drain(..) 
{ + if dsts.contains_key(&addr) { + temp.push_back(ep); + } else if ep.is_idle() { + drop(ep); + } else { + self.retired.insert(addr, ep); + } + } + + for _ in 0..temp.len() { + let ep = temp.pop_front().unwrap(); + self.available.insert(ep.peer_addr(), ep); + } + } + + /// Checks retired endpoints. + /// + /// Endpoints are either salvaged backed into the active pool, maintained as + /// retired if still active, or dropped if inactive. + fn check_retired(&mut self, + dsts: &OrderMap, + temp: &mut VecDeque) { + for (addr, ep) in self.retired.drain(..) { + if dsts.contains_key(&addr) { + self.available.insert(addr, ep); + } else if ep.is_idle() { + drop(ep); + } else { + temp.push_back(ep); + } + } + + for _ in 0..temp.len() { + let ep = temp.pop_front().unwrap(); + self.retired.insert(ep.peer_addr(), ep); + } + } + + /// Checks failed endpoints. + fn check_failed(&mut self, dsts: &OrderMap) { + let mut temp = VecDeque::with_capacity(self.failed.len()); + for (addr, (since, ep)) in self.failed.drain(..) { + if dsts.contains_key(&addr) { + temp.push_back((since, ep)); + } else if ep.is_idle() { + drop(ep); + } else { + self.retired.insert(addr, ep); + } + } + + for _ in 0..temp.len() { + let (instant, ep) = temp.pop_front().unwrap(); + self.failed.insert(ep.peer_addr(), (instant, ep)); + } + } + + fn update_available_from_new(&mut self, + dst_name: &Path, + mut dsts: OrderMap) { + // Add new endpoints or update the base weights of existing endpoints. + //let metrics = self.endpoint_metrics.clone(); + for (addr, weight) in dsts.drain(..) 
{ + if let Some(&mut (_, ref mut ep)) = self.failed.get_mut(&addr) { + ep.set_weight(weight); + continue; + } + + if let Some(mut ep) = self.available.get_mut(&addr) { + ep.set_weight(weight); + continue; + } + + self.available + .insert(addr, endpoint::new(dst_name.clone(), addr, weight)); + } + } + + fn dsts_by_addr(dsts: &[WeightedAddr]) -> OrderMap { + let mut by_addr = OrderMap::with_capacity(dsts.len()); + for &WeightedAddr { addr, weight } in dsts { + by_addr.insert(addr, weight); + } + by_addr + } +} diff --git a/src/connection/ctx.rs b/src/connection/ctx.rs new file mode 100644 index 0000000..f859c46 --- /dev/null +++ b/src/connection/ctx.rs @@ -0,0 +1,19 @@ +/// A connection context +pub trait Ctx: Drop { + fn read(&mut self, sz: usize); + fn wrote(&mut self, sz: usize); +} + +#[allow(dead_code)] +pub fn null() -> Null { + Null() +} +#[allow(dead_code)] +pub struct Null(); +impl Ctx for Null { + fn read(&mut self, _sz: usize) {} + fn wrote(&mut self, _sz: usize) {} +} +impl Drop for Null { + fn drop(&mut self) {} +} diff --git a/src/connection/duplex.rs b/src/connection/duplex.rs new file mode 100644 index 0000000..616dc5c --- /dev/null +++ b/src/connection/duplex.rs @@ -0,0 +1,98 @@ +use super::Connection; +use super::Ctx; +use super::half_duplex::{self, HalfDuplex}; +use futures::{Async, Future, Poll}; +use std::cell::RefCell; +use std::io; +use std::net; +use std::rc::Rc; + +pub struct Summary { + pub to_dst_bytes: usize, + pub to_src_bytes: usize, +} + +pub fn new(src: Connection, dst: Connection, buf: Rc>>) -> Duplex + where S: Ctx, + D: Ctx +{ + let src_addr = src.peer_addr(); + let dst_addr = dst.peer_addr(); + let src = Rc::new(RefCell::new(src)); + let dst = Rc::new(RefCell::new(dst)); + Duplex { + dst_addr, + to_dst: Some(half_duplex::new(src.clone(), dst.clone(), buf.clone())), + to_dst_bytes: 0, + + src_addr, + to_src: Some(half_duplex::new(dst.clone(), src.clone(), buf)), + to_src_bytes: 0, + } +} + +/// Joins src and dst transfers into a 
single Future. +pub struct Duplex { + dst_addr: net::SocketAddr, + src_addr: net::SocketAddr, + to_dst: Option>, + to_src: Option>, + to_dst_bytes: usize, + to_src_bytes: usize, +} + +impl Future for Duplex { + type Item = Summary; + type Error = io::Error; + fn poll(&mut self) -> Poll { + if let Some(mut to_dst) = self.to_dst.take() { + trace!("polling dstward from {} to {}", + self.src_addr, + self.dst_addr); + match to_dst.poll()? { + Async::Ready(sz) => { + trace!("dstward complete from {} to {}", + self.src_addr, + self.dst_addr); + self.to_dst_bytes = sz; + } + Async::NotReady => { + trace!("dstward not ready"); + self.to_dst = Some(to_dst); + } + } + } + + if let Some(mut to_src) = self.to_src.take() { + trace!("polling srcward from {} to {}", + self.dst_addr, + self.src_addr); + match to_src.poll()? { + Async::Ready(sz) => { + trace!("srcward complete from {} to {}", + self.dst_addr, + self.src_addr); + self.to_src_bytes = sz; + } + Async::NotReady => { + trace!("srcward not ready"); + self.to_src = Some(to_src); + } + } + } + + if self.to_dst.is_none() && self.to_src.is_none() { + trace!("complete"); + // self.tx_bytes_stat.add(self.tx_bytes); + // self.rx_bytes_stat.add(self.rx_bytes) + let summary = Summary { + to_dst_bytes: self.to_dst_bytes, + to_src_bytes: self.to_src_bytes, + }; + Ok(Async::Ready(summary)) + } else { + trace!("not ready"); + Ok(Async::NotReady) + } + } +} diff --git a/src/connection/half_duplex.rs b/src/connection/half_duplex.rs new file mode 100644 index 0000000..1ced508 --- /dev/null +++ b/src/connection/half_duplex.rs @@ -0,0 +1,137 @@ +use super::Connection; +use super::Ctx; +use futures::{Async, Future, Poll}; +use std::cell::RefCell; +use std::io::{self, Read, Write}; +use std::net::Shutdown; +use std::rc::Rc; +//use tacho; +use tokio_io::AsyncWrite; + +pub fn new(reader: Rc>>, + writer: Rc>>, + buf: Rc>>) + -> HalfDuplex + where R: Ctx, + W: Ctx +{ + HalfDuplex { + reader, + writer, + buf, + pending: None, + bytes_total: 0, + 
should_shutdown: false, + // bytes_total_count: metrics.counter("bytes_total".into()), + // allocs_count: metrics.counter("allocs_count".into()), + } +} + +/// A future representing reading all data from one side of a proxy connection and writing +/// it to another. +/// +/// In the typical case, nothing allocations are required. If the write side exhibits +/// backpressure, however, a buffer is allocated to +pub struct HalfDuplex { + reader: Rc>>, + writer: Rc>>, + + // Holds transient data when copying between the reader and writer. + buf: Rc>>, + + // Holds data that can't be fully written. + pending: Option>, + + // The number of bytes we've written so far. + bytes_total: usize, + + // Indicates that that the reader has returned 0 and the writer should be shut down. + should_shutdown: bool, + + // bytes_total_count: tacho::Counter, + // allocs_count: tacho::Counter, +} + +impl Future for HalfDuplex + where R: Ctx, + W: Ctx +{ + type Item = usize; + type Error = io::Error; + + /// Reads from from the `reader` into a shared buffer before writing to `writer`. + /// + /// If all data cannot be written, the unwritten data is stored in a newly-allocated + /// buffer. This pending data is flushed before any more data is read. + fn poll(&mut self) -> Poll { + trace!("poll"); + let mut writer = self.writer.borrow_mut(); + let mut reader = self.reader.borrow_mut(); + + // Because writer.socket.shutdown may return WouldBlock, we may already be + // shutting down and need to resume graceful shutdown. + if self.should_shutdown { + try_nb!(writer.socket.shutdown()); + writer.socket.tcp_shutdown(Shutdown::Write)?; + return Ok(Async::Ready(self.bytes_total)); + } + + // If we've read more than we were able to write previously, then write all of it + // until the write would block. 
+ if let Some(mut pending) = self.pending.take() { + trace!("writing {} pending bytes", pending.len()); + while !pending.is_empty() { + match writer.socket.write(&pending) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.pending = Some(pending); + return Ok(Async::NotReady); + } + Err(e) => return Err(e), + Ok(wsz) => { + // Drop the portion of the buffer that we've already written. + // There may or may not be more pending data remaining. + pending.drain(0..wsz); + self.bytes_total += wsz; + writer.ctx.wrote(wsz); + } + } + } + } + + // Read and write data until one of the endpoints is not ready. All data is read + // into a thread-global transfer buffer and then written from this buffer. If all + // data cannot be written, it is copied into a newly-allocated local buffer to be + // flushed later. + loop { + assert!(self.pending.is_none()); + + let mut rbuf = self.buf.borrow_mut(); + let rsz = try_nb!(reader.socket.read(&mut rbuf)); + reader.ctx.read(rsz); + if rsz == 0 { + self.should_shutdown = true; + try_nb!(writer.socket.shutdown()); + writer.socket.tcp_shutdown(Shutdown::Write)?; + return Ok(Async::Ready(self.bytes_total)); + } + + let mut wbuf = &rbuf[..rsz]; + while !wbuf.is_empty() { + match writer.socket.write(wbuf) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + let mut p = vec![0; wbuf.len()]; + p.copy_from_slice(wbuf); + self.pending = Some(p); + return Ok(Async::NotReady); + } + Err(e) => return Err(e), + Ok(wsz) => { + self.bytes_total += wsz; + writer.ctx.wrote(wsz); + wbuf = &wbuf[wsz..]; + } + } + } + } + } +} diff --git a/src/connection/mod.rs b/src/connection/mod.rs new file mode 100644 index 0000000..b403a33 --- /dev/null +++ b/src/connection/mod.rs @@ -0,0 +1,101 @@ +use super::Path; +use std::{fmt, net}; +use std::cell::RefCell; +use std::rc::Rc; + +pub mod ctx; +mod duplex; +mod half_duplex; +pub mod secure; +pub mod socket; + +pub use self::ctx::Ctx; +pub use self::duplex::Duplex; +pub use 
self::socket::Socket; + +pub struct ConnectionCtx { + local_addr: net::SocketAddr, + peer_addr: net::SocketAddr, + dst_name: Path, + ctx: C, +} + +impl fmt::Debug for ConnectionCtx + where C: fmt::Debug +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("ConnectionCtx") + .field("local_addr", &self.local_addr) + .field("peer_addr", &self.peer_addr) + .field("dst_name", &self.dst_name) + .field("ctx", &self.ctx) + .finish() + } +} + +impl ConnectionCtx + where C: Ctx +{ + pub fn new(local_addr: net::SocketAddr, + peer_addr: net::SocketAddr, + dst_name: Path, + ctx: C) + -> ConnectionCtx { + ConnectionCtx { + local_addr, + peer_addr, + dst_name, + ctx, + } + } + + pub fn dst_name(&self) -> &Path { + &self.dst_name + } + + pub fn ctx(&self) -> &C { + &self.ctx + } +} + +impl Ctx for ConnectionCtx + where C: Ctx +{ + fn read(&mut self, sz: usize) { + self.ctx.read(sz); + } + + fn wrote(&mut self, sz: usize) { + self.ctx.wrote(sz); + } +} +impl Drop for ConnectionCtx { + fn drop(&mut self) {} +} + +/// A src or dst connection. 
+pub struct Connection { + pub ctx: ConnectionCtx, + pub socket: Socket, +} +impl Connection { + pub fn new(dst: Path, socket: Socket, ctx: C) -> Connection { + let ctx = ConnectionCtx::new(socket.local_addr(), socket.peer_addr(), dst, ctx); + Connection { socket, ctx } + } + + pub fn peer_addr(&self) -> net::SocketAddr { + self.ctx.peer_addr + } + + pub fn local_addr(&self) -> net::SocketAddr { + self.ctx.local_addr + } + + pub fn into_duplex(self, + other: Connection, + buf: Rc>>) + -> Duplex { + duplex::new(self, other, buf) + } +} diff --git a/src/connection/secure.rs b/src/connection/secure.rs new file mode 100644 index 0000000..90f6ba5 --- /dev/null +++ b/src/connection/secure.rs @@ -0,0 +1,315 @@ +use futures::{Async, Future, Poll}; +use rustls::{Session, ClientConfig, ServerConfig, ClientSession, ServerSession}; +use std::fmt; +use std::io::{self, Read, Write}; +use std::net::{Shutdown, SocketAddr}; +use std::sync::Arc; +use tokio_core::net::TcpStream; +use tokio_io::AsyncWrite; + +pub fn client_handshake(tcp: TcpStream, config: &Arc, name: &str) -> ClientHandshake { + let ss = SecureStream::new(tcp, ClientSession::new(config, name)); + ClientHandshake(Some(ss)) +} + +pub fn server_handshake(tcp: TcpStream, config: &Arc) -> ServerHandshake { + let ss = SecureStream::new(tcp, ServerSession::new(config)); + ServerHandshake(Some(ss)) +} + +/// Securely transmits data. +pub struct SecureStream { + peer: SocketAddr, + local: SocketAddr, + /// The external encrypted side of the socket. + tcp: TcpStream, + /// The internal decrypted side of the socket. 
+ session: I, +} + +impl fmt::Debug for SecureStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SecureStream") + .field("peer", &self.peer) + .field("local", &self.local) + .finish() + } +} + +impl SecureStream + where S: Session +{ + fn new(tcp: TcpStream, session: S) -> SecureStream { + SecureStream { + peer: tcp.peer_addr().unwrap(), + local: tcp.local_addr().unwrap(), + tcp, + session, + } + } + + pub fn peer_addr(&self) -> SocketAddr { + self.peer + } + + pub fn local_addr(&self) -> SocketAddr { + self.local + } + + pub fn tcp_shutdown(&mut self, how: Shutdown) -> io::Result<()> { + trace!("tcp_shutdown: {:?}", self); + self.tcp.shutdown(how) + } + + fn read_tcp_to_session(&mut self) -> Option> { + if !self.session.wants_read() { + trace!("read_tcp_to_session: no read needed: {}", self.peer); + return None; + } + + trace!("read_tcp_to_session: read_tls: {}", self.peer); + match self.session.read_tls(&mut self.tcp) { + Err(e) => { + if e.kind() == io::ErrorKind::WouldBlock { + trace!("read_tcp_to_session: read_tls: {}: {}", self.peer, e); + None + } else { + error!("read_tcp_to_session: read_tls: {}: {}", self.peer, e); + Some(Err(e)) + } + } + Ok(sz) => { + trace!("read_tcp_to_session: read_tls: {} {}B", self.peer, sz); + if sz == 0 { + Some(Ok(sz)) + } else { + trace!("read_tcp_to_session: process_new_packets: {}", self.peer); + match self.session.process_new_packets() { + Ok(_) => Some(Ok(sz)), + Err(e) => { + trace!("read_tcp_to_session: process_new_packets error: {:?}", self); + Some(Err(io::Error::new(io::ErrorKind::Other, e))) + } + } + } + } + } + } + + fn write_session_to_tcp(&mut self) -> io::Result { + trace!("write_session_to_tcp: write_tls: {}", self.peer); + let sz = self.session.write_tls(&mut self.tcp)?; + trace!("write_session_to_tcp: write_tls: {}: {}B", self.peer, sz); + Ok(sz) + } +} + +impl Read for SecureStream + where S: Session +{ + fn read(&mut self, buf: &mut [u8]) -> io::Result { + trace!("read: {}", 
self.peer); + let read_ok = match self.read_tcp_to_session() { + None => false, + Some(Ok(_)) => true, + Some(Err(e)) => { + trace!("read: {}: {:?}", self.peer, e.kind()); + return Err(e); + } + }; + + let sz = self.session.read(buf)?; + trace!("read: {}: {}B", self.peer, sz); + if !read_ok && sz == 0 { + Err(io::ErrorKind::WouldBlock.into()) + } else { + Ok(sz) + } + } +} + +impl Write for SecureStream + where S: Session +{ + fn write(&mut self, buf: &[u8]) -> io::Result { + trace!("write: {}", self.peer); + let sz = self.session.write(buf)?; + trace!("write: {}: {}B", self.peer, sz); + + { + let mut write_ok = true; + while self.session.wants_write() && write_ok { + write_ok = match self.write_session_to_tcp() { + Ok(sz) => sz > 0, + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => false, + e @ Err(_) => return e, + }; + } + } + + Ok(sz) + } + + fn flush(&mut self) -> io::Result<()> { + trace!("flush: {:?}", self); + self.session.flush()?; + self.tcp.flush() + } +} + +impl AsyncWrite for SecureStream + where S: Session +{ + fn shutdown(&mut self) -> Poll<(), io::Error> { + self.session.send_close_notify(); + self.session.write_tls(&mut self.tcp)?; + self.tcp.flush()?; + Ok(Async::Ready(())) + } +} + +/// A future that completes when a server's TLS handshake is complete. +#[derive(Debug)] +pub struct ServerHandshake(Option>); +impl Future for ServerHandshake { + type Item = SecureStream; + type Error = io::Error; + fn poll(&mut self) -> Poll { + trace!("{:?}.poll()", self); + let mut ss = self.0 + .take() + .expect("poll must not be called after completion"); + + // Read and write the handshake. 
+ { + let mut wrote = true; + while ss.session.is_handshaking() && wrote { + if let Some(Err(e)) = ss.read_tcp_to_session() { + trace!("server handshake: {}: error: {}", ss.peer, e); + return Err(e); + }; + trace!("server handshake: write_session_to_tcp: {}", ss.peer); + wrote = + ss.session.wants_write() && + match ss.write_session_to_tcp() { + Ok(sz) => { + trace!("server handshake: write_session_to_tcp: {}: wrote {}", + ss.peer, + sz); + sz > 0 + } + Err(e) => { + trace!("server handshake: write_session_to_tcp: {}: {}", ss.peer, e); + if e.kind() != io::ErrorKind::WouldBlock { + return Err(e); + } + false + } + } + } + } + + // If the remote hasn't read everything yet, resume later. + if ss.session.is_handshaking() { + trace!("server handshake: {}: not complete", ss.peer); + self.0 = Some(ss); + return Ok(Async::NotReady); + } + + // Finally, acknowledge the handshake is complete. + if ss.session.wants_write() { + trace!("server handshake: write_session_to_tcp: {}: final", ss.peer); + match ss.write_session_to_tcp() { + Ok(sz) => { + trace!("server handshake: write_session_to_tcp: {}: final: wrote {}B", + ss.peer, + sz); + } + Err(e) => { + trace!("server handshake: write_session_to_tcp: {}: final: {}", + ss.peer, + e); + if e.kind() != io::ErrorKind::WouldBlock { + return Err(e); + } + } + } + } + + trace!("server handshake: {}: complete", ss.peer); + Ok(Async::Ready(ss)) + } +} + +/// A future that completes when a client's TLS handshake is complete. +#[derive(Debug)] +pub struct ClientHandshake(Option>); + +impl Future for ClientHandshake { + type Item = SecureStream; + type Error = io::Error; + fn poll(&mut self) -> Poll { + trace!("{:?}.poll()", self); + let mut ss = self.0 + .take() + .expect("poll must not be called after completion"); + + // Read and write the handshake. 
+ { + let mut read_ok = true; + let mut write_ok = true; + while ss.session.is_handshaking() && (read_ok || write_ok) { + trace!("client handshake: read_tcp_to_session: {}", ss.peer); + read_ok = match ss.read_tcp_to_session() { + None => { + trace!("client handshake: read_tcp_to_session: {}: not ready", + ss.peer); + false + } + Some(Ok(sz)) => { + trace!("client handshake: read_tcp_to_session: {}: {}B", + ss.peer, + sz); + sz > 0 + } + Some(Err(e)) => { + trace!("client handshake: read_tcp_to_session: {}: error: {}", + ss.peer, + e); + return Err(e); + } + }; + + trace!("client handshake: write_session_to_tcp: {}", ss.peer); + write_ok = ss.session.wants_write() && + match ss.write_session_to_tcp() { + Ok(sz) => { + trace!("client handshake: write_session_to_tcp: {}: wrote {}", + ss.peer_addr(), + sz); + sz > 0 + } + Err(e) => { + trace!("client handshake: write_session_to_tcp: {}: {}", + ss.peer_addr(), + e); + if e.kind() != io::ErrorKind::WouldBlock { + return Err(e); + } + false + } + }; + } + } + + // If the remote hasn't read everything yet, resume later. 
+ if ss.session.is_handshaking() { + trace!("handshake: {}: not complete", ss.peer_addr()); + self.0 = Some(ss); + return Ok(Async::NotReady); + } + + trace!("handshake: {}: complete", ss.peer_addr()); + Ok(Async::Ready(ss)) + } +} diff --git a/src/connection/socket.rs b/src/connection/socket.rs new file mode 100644 index 0000000..c10a1eb --- /dev/null +++ b/src/connection/socket.rs @@ -0,0 +1,140 @@ +use super::secure::SecureStream; +use futures::Poll; +use rustls::{ClientSession, ServerSession}; +use std::fmt; +use std::io::{self, Read, Write}; +use std::net::{Shutdown, SocketAddr}; +use tokio_core::net::TcpStream; +use tokio_io::AsyncWrite; + +pub fn plain(tcp: TcpStream) -> Socket { + Socket { + local_addr: tcp.local_addr().expect("tcp stream has no local address"), + peer_addr: tcp.peer_addr().expect("tcp stream has no peer address"), + kind: Kind::Plain(tcp), + } +} + +pub fn secure_client(tls: SecureStream) -> Socket { + Socket { + local_addr: tls.local_addr(), + peer_addr: tls.peer_addr(), + kind: Kind::SecureClient(Box::new(tls)), + } +} + +pub fn secure_server(tls: SecureStream) -> Socket { + Socket { + local_addr: tls.local_addr(), + peer_addr: tls.peer_addr(), + kind: Kind::SecureServer(Box::new(tls)), + } +} + +/// Hides the implementation details of socket I/O. +/// +/// Plaintext and encrypted (client and server) streams have different type signatures. +/// Exposing these types to the rest of the application is painful, so `Socket` provides +/// an opaque container for the various types of sockets supported by this proxy. +pub struct Socket { + local_addr: SocketAddr, + peer_addr: SocketAddr, + kind: Kind, +} + +// Since the rustls types are much larger than the plain type, they are boxed. Because +// clippy says so. 
+enum Kind { + Plain(TcpStream), + SecureClient(Box>), + SecureServer(Box>), +} + +impl fmt::Debug for Socket { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.kind { + Kind::Plain(_) => { + f.debug_struct("Plain") + .field("peer", &self.peer_addr) + .field("local", &self.local_addr) + .finish() + } + Kind::SecureClient(_) => { + f.debug_struct("SecureClient") + .field("peer", &self.peer_addr) + .field("local", &self.local_addr) + .finish() + } + Kind::SecureServer(_) => { + f.debug_struct("SecureServer") + .field("peer", &self.peer_addr) + .field("local", &self.local_addr) + .finish() + } + } + } +} + +impl Socket { + pub fn tcp_shutdown(&mut self, how: Shutdown) -> io::Result<()> { + trace!("{:?}.tcp_shutdown({:?})", self, how); + match self.kind { + Kind::Plain(ref mut stream) => TcpStream::shutdown(stream, how), + Kind::SecureClient(ref mut stream) => stream.tcp_shutdown(how), + Kind::SecureServer(ref mut stream) => stream.tcp_shutdown(how), + } + } + + pub fn local_addr(&self) -> SocketAddr { + self.local_addr + } + + pub fn peer_addr(&self) -> SocketAddr { + self.peer_addr + } +} + +/// Reads the socket without blocking. +impl Read for Socket { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + trace!("{:?}.read({})", self, buf.len()); + match self.kind { + Kind::Plain(ref mut stream) => stream.read(buf), + Kind::SecureClient(ref mut stream) => stream.read(buf), + Kind::SecureServer(ref mut stream) => stream.read(buf), + } + } +} + +/// Writes to the socket without blocking. 
+impl Write for Socket { + fn write(&mut self, buf: &[u8]) -> io::Result { + trace!("{:?}.write({})", self, buf.len()); + match self.kind { + Kind::Plain(ref mut stream) => stream.write(buf), + Kind::SecureClient(ref mut stream) => stream.write(buf), + Kind::SecureServer(ref mut stream) => stream.write(buf), + } + } + + fn flush(&mut self) -> io::Result<()> { + trace!("{:?}.flush()", self); + match self.kind { + Kind::Plain(ref mut stream) => stream.flush(), + Kind::SecureClient(ref mut stream) => stream.flush(), + Kind::SecureServer(ref mut stream) => stream.flush(), + } + } +} + +/// Closes the write-side of a stream. +impl AsyncWrite for Socket { + fn shutdown(&mut self) -> Poll<(), io::Error> { + trace!("{:?}.shutdown()", self); + match self.kind { + Kind::Plain(ref mut stream) => AsyncWrite::shutdown(stream), + Kind::SecureClient(ref mut stream) => stream.shutdown(), + Kind::SecureServer(ref mut stream) => stream.shutdown(), + } + } +} diff --git a/src/connector/config.rs b/src/connector/config.rs new file mode 100644 index 0000000..5098549 --- /dev/null +++ b/src/connector/config.rs @@ -0,0 +1,142 @@ +use super::{Connector, ConnectorFactory, Tls}; +use super::super::ConfigError; +use rustls; +use std::fs::File; +use std::io::BufReader; +use std::sync::Arc; +use std::time; + +const DEFAULT_MAX_WAITERS: usize = 1_000_000; +const DEFAULT_MAX_CONSECUTIVE_FAILURES: usize = 5; +const DEFAULT_FAILURE_PENALTY_SECS: u64 = 60; + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields, tag = "kind")] +pub enum ConnectorFactoryConfig { + #[serde(rename = "io.l5d.global")] + Global(ConnectorConfig), + + #[serde(rename = "io.l5d.static")] + Static { configs: Vec }, +} + +impl Default for ConnectorFactoryConfig { + fn default() -> ConnectorFactoryConfig { + ConnectorFactoryConfig::Global(ConnectorConfig::default()) + } +} + +impl ConnectorFactoryConfig { + pub fn mk_connector_factory(&self) -> Result { + match *self { + 
ConnectorFactoryConfig::Global(ref cfg) => { + if cfg.prefix.is_some() { + return Err("`prefix` not supported in io.l5d.global".into()); + } + let conn = cfg.mk_connector()?; + Ok(ConnectorFactory::new_global(conn)) + } + ConnectorFactoryConfig::Static { ref configs } => { + let mut pfx_configs = Vec::with_capacity(configs.len()); + for cfg in configs { + match cfg.prefix { + None => { + return Err("`prefix` required in io.l5d.static".into()); + } + Some(ref pfx) => { + pfx_configs.push((pfx.clone().into(), cfg.clone())); + } + } + } + Ok(ConnectorFactory::new_prefixed(pfx_configs)) + } + } + } +} + +#[derive(Clone, Default, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct ConnectorConfig { + pub prefix: Option, + pub tls: Option, + pub connect_timeout_ms: Option, + + pub max_waiters: Option, + pub min_connections: Option, + + pub fail_fast: Option, + + // TODO requeue_budget: Option +} + +#[derive(Clone, Default, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct FailFastConfig { + pub max_consecutive_failures: Option, + pub failure_penalty_secs: Option, +} + +impl ConnectorConfig { + pub fn mk_connector(&self) -> Result { + let tls = match self.tls { + None => None, + Some(ref tls) => Some(tls.mk_tls()?), + }; + let connect_timeout = self.connect_timeout_ms.map(time::Duration::from_millis); + let max_waiters = self.max_waiters.unwrap_or(DEFAULT_MAX_WAITERS); + let min_conns = self.min_connections.unwrap_or(0); + let max_fails = self.fail_fast + .as_ref() + .and_then(|c| c.max_consecutive_failures) + .unwrap_or(DEFAULT_MAX_CONSECUTIVE_FAILURES); + let fail_penalty = { + let s = self.fail_fast + .as_ref() + .and_then(|c| c.failure_penalty_secs) + .unwrap_or(DEFAULT_FAILURE_PENALTY_SECS); + time::Duration::from_secs(s) + }; + Ok(super::new(connect_timeout, + tls, + max_waiters, + min_conns, + max_fails, + fail_penalty)) + } + + pub fn update(&mut self, other: 
&ConnectorConfig) { + if let Some(ref otls) = other.tls { + self.tls = Some(otls.clone()); + } + if let Some(ct) = other.connect_timeout_ms { + self.connect_timeout_ms = Some(ct); + } + } +} + +#[derive(Clone, Default, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct TlsConnectorFactoryConfig { + pub dns_name: String, + pub trust_certs: Option>, +} + +impl TlsConnectorFactoryConfig { + pub fn mk_tls(&self) -> Result { + let mut config = rustls::ClientConfig::new(); + if let Some(ref certs) = self.trust_certs { + for p in certs { + let f = File::open(p).expect("cannot open certificate file"); + config + .root_store + .add_pem_file(&mut BufReader::new(f)) + .expect("certificate error"); + } + }; + let tls = Tls { + name: self.dns_name.clone(), + config: Arc::new(config), + }; + Ok(tls) + } +} diff --git a/src/connector/mod.rs b/src/connector/mod.rs new file mode 100644 index 0000000..f77af34 --- /dev/null +++ b/src/connector/mod.rs @@ -0,0 +1,149 @@ +use super::{ConfigError, Path}; +use super::connection::secure; +use super::connection::socket::{self, Socket}; +use futures::{Future, Poll}; +use rustls::ClientConfig as RustlsClientConfig; +use std::{io, net, time}; +use std::sync::Arc; +use tokio_core::net::TcpStream; +use tokio_core::reactor::Handle; +use tokio_timer::Timer; + +mod config; + +pub use self::config::{ConnectorFactoryConfig, ConnectorConfig, TlsConnectorFactoryConfig}; + +/// Builds a connector for each name. +pub struct ConnectorFactory(ConnectorFactoryInner); + +enum ConnectorFactoryInner { + /// Uses a single connector for all names. + StaticGlobal(Connector), + /// Builds a new connector for each name by applying all configurations with a + /// matching prefix. This is considered "static" because the set of configurations may + /// not be updated dynamically. 
+ StaticPrefixed(StaticPrefixConnectorFactory), +} + +impl ConnectorFactory { + pub fn new_global(conn: Connector) -> ConnectorFactory { + ConnectorFactory(ConnectorFactoryInner::StaticGlobal(conn)) + } + + pub fn new_prefixed(prefixed_configs: Vec<(Path, ConnectorConfig)>) -> ConnectorFactory { + let f = StaticPrefixConnectorFactory(prefixed_configs); + ConnectorFactory(ConnectorFactoryInner::StaticPrefixed(f)) + } + + pub fn mk_connector(&self, dst_name: &Path) -> Result { + match self.0 { + ConnectorFactoryInner::StaticGlobal(ref c) => Ok(c.clone()), + ConnectorFactoryInner::StaticPrefixed(ref f) => f.mk_connector(dst_name), + } + } +} + +struct StaticPrefixConnectorFactory(Vec<(Path, ConnectorConfig)>); +impl StaticPrefixConnectorFactory { + /// Builds a new connector by applying all configurations with a matching prefix. + fn mk_connector(&self, dst_name: &Path) -> Result { + let mut config = ConnectorConfig::default(); + for &(ref pfx, ref c) in &self.0 { + if pfx.starts_with(dst_name) { + config.update(c); + } + } + config.mk_connector() + } +} + +#[derive(Clone)] +pub struct Tls { + name: String, + config: Arc, +} + +impl Tls { + fn handshake(&self, tcp: TcpStream) -> secure::ClientHandshake { + secure::client_handshake(tcp, &self.config, &self.name) + } +} + +fn new(connect_timeout: Option, + tls: Option, + max_waiters: usize, + min_connections: usize, + fail_limit: usize, + fail_penalty: time::Duration) + -> Connector { + Connector { + connect_timeout, + tls, + max_waiters, + min_connections, + fail_limit, + fail_penalty, + } +} + +#[derive(Clone)] +pub struct Connector { + connect_timeout: Option, + tls: Option, + max_waiters: usize, + min_connections: usize, + fail_limit: usize, + fail_penalty: time::Duration, +} + +impl Connector { + pub fn max_waiters(&self) -> usize { + self.max_waiters + } + + pub fn min_connections(&self) -> usize { + self.min_connections + } + + pub fn failure_limit(&self) -> usize { + self.fail_limit + } + pub fn 
failure_penalty(&self) -> time::Duration { + self.fail_penalty + } + + fn timeout(&self, fut: F, timer: &Timer) -> Box> + where F: Future + 'static + { + match self.connect_timeout { + None => Box::new(fut), + Some(t) => Box::new(timer.timeout(fut, t).map_err(|e| e.into())), + } + } + + pub fn connect(&self, addr: &net::SocketAddr, reactor: &Handle, timer: &Timer) -> Connecting { + let tcp = TcpStream::connect(addr, reactor); + let socket: Box> = match self.tls { + None => { + let f = tcp.map(socket::plain); + Box::new(self.timeout(f, timer)) + } + Some(ref tls) => { + let tls = tls.clone(); + let f = tcp.and_then(move |tcp| tls.handshake(tcp)) + .map(socket::secure_client); + Box::new(self.timeout(f, timer)) + } + }; + Connecting(socket) + } +} + +pub struct Connecting(Box>); +impl Future for Connecting { + type Item = Socket; + type Error = io::Error; + fn poll(&mut self) -> Poll { + self.0.poll() + } +} diff --git a/src/driver.rs b/src/driver.rs deleted file mode 100644 index f3d8fcd..0000000 --- a/src/driver.rs +++ /dev/null @@ -1,96 +0,0 @@ -use futures::{Async, AsyncSink, Future, Poll, Sink, Stream}; - -/// This is similar to `futures::stream::Forwar ` but also calls -/// `poll_complete` on wakeups. This is important to keep connection -/// pool up to date when no new requests are coming in. -/// -// Borrowed from tk-pool. -pub struct Driver> { - stream: S, - sink: K, - ready: Option, -} - -impl Driver - where S: Stream, - K: Sink -{ - pub fn new(src: S, snk: K) -> Driver { - Driver { - stream: src, - sink: snk, - ready: None, - } - } - - fn send_ready(&mut self) -> Result { - trace!("send_ready"); - match self.ready.take() { - None => { - trace!("nothing sent; ready!"); - Ok(true) - } - Some(item) => { - trace!("sending"); - match self.sink.start_send(item)? 
{ - AsyncSink::Ready => { - trace!("ready!"); - Ok(true) - } - AsyncSink::NotReady(item) => { - trace!("not ready"); - self.ready = Some(item); - Ok(false) - } - } - } - } - } -} - -/// A Future that is complete when the stream has been fully flushed -/// into the sink. Ensures that the sink's `poll_complete` is called -/// aggressively to -impl Future for Driver - where S: Stream, - K: Sink -{ - type Item = (); - type Error = S::Error; - fn poll(&mut self) -> Poll<(), Self::Error> { - trace!("polling sink"); - match self.sink.poll_complete()? { - Async::Ready(_) => trace!("sink ready"), - Async::NotReady => trace!("sink not ready"), - } - loop { - if self.send_ready()? { - assert!(self.ready.is_none()); - trace!("polling sink"); - if let Async::Ready(_) = self.sink.poll_complete()? { - trace!("sink complete"); - return Ok(Async::Ready(())); - } - trace!("polling stream"); - match self.stream.poll()? { - Async::Ready(Some(item)) => { - trace!("stream ready"); - self.ready = Some(item); - // Continue trying to send. - } - Async::Ready(None) => { - trace!("stream done"); - return self.sink.poll_complete(); - } - Async::NotReady => { - trace!("stream not ready"); - return Ok(Async::NotReady); - } - } - } else { - trace!("polling sink"); - return self.sink.poll_complete(); - } - } - } -} diff --git a/src/lb/balancer.rs b/src/lb/balancer.rs deleted file mode 100644 index d7adb3a..0000000 --- a/src/lb/balancer.rs +++ /dev/null @@ -1,458 +0,0 @@ - - -use WeightedAddr; -use futures::{StartSend, AsyncSink, Async, Poll, Sink, Stream}; -use lb::{Connector, Endpoint, Shared, Src, WithAddr}; -use rand::{self, Rng}; -use std::{f32, io}; -use std::cell::RefCell; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::net::SocketAddr; -use std::rc::Rc; -use tacho::{self, Timing}; -use tokio_core::reactor::Handle; - -/// Distributes TCP connections across a pool of dsts. -/// -/// May only be accessed from a single thread. 
Use `Balancer::into_shared` for a -/// cloneable/shareable variant. -/// -/// ## Panics -/// -/// Panics if the `Stream` of address of resolutions ends. It must never complete. -/// -pub struct Balancer { - // Streams address updates (i.e. from service discovery). - addrs: A, - - // Initiates dst connections. - connector: C, - - // Holds transfer data between socks (because we don't yet employ a 0-copy strategy). - // This buffer is used for _all_ transfers in this balancer. - buffer: Rc>>, - - // Endpoints that are in service discovery or otherwise active, but without - // established connections.. - unready: VecDeque, - - // Endpoints that have established connections ready for dispatch. - ready: VecDeque, - - // We thank these endpoints for their service, but they have been deregistered and - // should initiate new connections. - retired: VecDeque, - - stats: Stats, -} - -impl Balancer - where A: Stream, Error = io::Error>, - C: Connector + 'static -{ - /// Creates a new balancer with the given address stream - pub fn new(addrs: A, - connector: C, - buf: Rc>>, - metrics: tacho::Scope) - -> Balancer { - Balancer { - addrs: addrs, - connector: connector, - buffer: buf, - unready: VecDeque::new(), - ready: VecDeque::new(), - retired: VecDeque::new(), - stats: Stats::new(metrics), - } - } - - /// Moves this balancer into one that may be shared across threads. - /// - /// The Balancer's handle is used to drive all balancer changes on a single thread, - /// while other threads may submit `Srcs` to be processed. - pub fn into_shared(self, max_waiters: usize, h: Handle) -> Shared - where A: 'static - { - Shared::new(self, max_waiters, h) - } - - /// Drop retired endpoints that have no pending connections. 
- fn evict_retirees(&mut self) -> io::Result<()> { - let sz = self.retired.len(); - trace!("checking {} retirees", sz); - for _ in 0..sz { - let mut ep = self.retired.pop_front().unwrap(); - ep.poll_connections()?; - if ep.is_active() { - trace!("still active {}", ep.addr()); - self.retired.push_back(ep); - } else { - trace!("evicting {}", ep.addr()); - drop(ep); - } - } - Ok(()) - } - - fn poll_ready(&mut self) -> io::Result<()> { - let sz = self.ready.len(); - trace!("checking {} ready", sz); - for _ in 0..sz { - let mut ep = self.ready.pop_front().unwrap(); - ep.poll_connections()?; - if ep.is_ready() { - trace!("ready {}", ep.addr()); - self.ready.push_back(ep); - } else { - trace!("not ready {}", ep.addr()); - self.unready.push_back(ep); - } - } - Ok(()) - } - - fn promote_unready(&mut self) -> io::Result<()> { - let sz = self.unready.len(); - trace!("checking {} unready", sz); - for _ in 0..sz { - let mut ep = self.unready.pop_front().unwrap(); - ep.poll_connections()?; - if ep.is_ready() { - trace!("ready {}", ep.addr()); - self.ready.push_back(ep); - } else { - trace!("not ready {}", ep.addr()); - self.unready.push_back(ep); - } - } - Ok(()) - } - - /// Checks if the addrs has updated. If it has, update `endpoints` new addresses and - /// weights. - /// - /// ## Panics - /// - /// If the addrs stream ends. - fn discover_and_retire(&mut self) -> io::Result<()> { - trace!("polling addr"); - if let Async::Ready(addrs) = self.addrs.poll()? { - trace!("addr update"); - let addrs = addrs.expect("addr stream must be infinite"); - let new = addr_weight_map(&addrs); - self.update_endpoints(&new); - } - Ok(()) - } - - /// Updates the endpoints with an address resolution update. 
- fn update_endpoints(&mut self, new_addrs: &HashMap) { - let mut ep_addrs = HashSet::new(); - - trace!("updating {} unready", self.unready.len()); - for _ in 0..self.unready.len() { - let mut ep = self.unready.pop_front().unwrap(); - let addr = ep.addr(); - match new_addrs.get(&addr) { - None => { - trace!("retiring {}", addr); - ep.retire(); - if ep.is_active() { - self.retired.push_back(ep) - } else { - trace!("evicting {}", addr); - drop(ep); - } - } - Some(&w) => { - trace!("updating {} *{}", addr, w); - ep.set_weight(w); - self.unready.push_back(ep); - ep_addrs.insert(addr); - } - } - } - - trace!("updating {} ready", self.ready.len()); - for _ in 0..self.ready.len() { - let mut ep = self.ready.pop_front().unwrap(); - let addr = ep.addr(); - match new_addrs.get(&addr) { - None => { - if ep.is_active() { - trace!("retiring {}", addr); - ep.retire(); - if ep.is_active() { - self.retired.push_back(ep); - } else { - trace!("evicting {}", addr); - drop(ep); - } - } else { - trace!("evicting {}", addr); - drop(ep); - } - } - Some(&w) => { - trace!("updating {} *{}", addr, w); - ep.set_weight(w); - self.ready.push_back(ep); - ep_addrs.insert(addr); - } - } - } - - // Check to see if we have re-added anything that has previously been marked as - // retired. - trace!("updating {} retired", self.retired.len()); - for _ in 0..self.retired.len() { - let mut ep = self.retired.pop_front().unwrap(); - let addr = ep.addr(); - match new_addrs.get(&addr) { - None => { - self.retired.push_back(ep); - } - Some(&w) => { - trace!("reviving {}", addr); - ep.unretire(); - ep.set_weight(w); - self.ready.push_back(ep); - ep_addrs.insert(addr); - } - } - } - - for (addr, weight) in new_addrs { - if !ep_addrs.contains(addr) { - trace!("adding {} *{}", addr, weight); - let metrics = self.stats.metrics.clone(); - self.connect(Endpoint::new(*addr, *weight, metrics)); - } - } - } - - /// Dispatches an `Src` to a dst `Endpoint`, if possible. 
- /// - /// Chooses two endpoints at random and uses the lesser-loaded of the two. - // TODO pluggable strategy - fn dispatch(&mut self, src: Src) -> StartSend { - trace!("dispatching {}", src.addr()); - // Choose an endpoint. - match self.ready.len() { - 0 => { - trace!("no endpoints ready"); - Ok(AsyncSink::NotReady(src)) - } - 1 => { - // One endpoint, use it. - let mut ep = self.ready.pop_front().unwrap(); - let tx = ep.transmit(src, self.buffer.clone()); - // Replace the connection preemptively. - self.connect(ep); - Ok(tx) - } - sz => { - // Pick 2 candidate indices. - let (i0, i1) = if sz == 2 { - // There are only two endpoints, so no need for an RNG. - (0, 1) - } else { - // 3 or more endpoints: choose two distinct endpoints at random. - let mut rng = rand::thread_rng(); - let i0 = rng.gen_range(0, sz); - let mut i1 = rng.gen_range(0, sz); - while i0 == i1 { - i1 = rng.gen_range(0, sz); - } - (i0, i1) - }; - // Determine the index of the lesser-loaded endpoint - let idx = { - let ep0 = &self.ready[i0]; - let ep1 = &self.ready[i1]; - if ep0.load() <= ep1.load() { - trace!("dst: {} *{} (not {} *{})", - ep0.addr(), - ep0.weight(), - ep1.addr(), - ep1.weight()); - i0 - } else { - trace!("dst: {} *{} (not {} *{})", - ep1.addr(), - ep1.weight(), - ep1.addr(), - ep0.weight()); - i1 - } - }; - - let tx = { - // Once we know the index of the endpoint we want to use, obtain a mutable - // reference to begin proxying. - let mut ep = self.ready.swap_remove_front(idx).unwrap(); - let tx = ep.transmit(src, self.buffer.clone()); - // Replace the connection preemptively. 
- self.connect(ep); - tx - }; - - Ok(tx) - } - } - } - - fn connect(&mut self, mut ep: Endpoint) { - ep.init_connection(&self.connector); - if ep.conns_established() > 0 { - self.ready.push_back(ep); - } else { - self.unready.push_back(ep); - } - } - - fn record_balanacer_stats(&mut self) { - self.stats.measure(&self.unready, &self.ready, &self.retired); - } -} - -fn addr_weight_map(new: &[WeightedAddr]) -> HashMap { - let mut s = HashMap::new(); - for wa in new { - s.insert(wa.0, wa.1); - } - s -} - -/// Receives `Src` sockets to be dismatched to an underlying endpoint. -/// -/// `start_send` returns `Async::Ready` if there is a dst endpoint available, and -/// `Async::NotReady` otherwise. -/// -/// `poll_complete` always returns `Async::NotReady`, since the load balancer may always -/// receive more srcs. -impl Sink for Balancer - where A: Stream, Error = io::Error>, - C: Connector + 'static -{ - type SinkItem = Src; - type SinkError = io::Error; - - /// Updates the list of endpoints before attempting to dispatch `src` to a - /// dst endpoint. 
- fn start_send(&mut self, src: Self::SinkItem) -> StartSend { - let src_addr = src.addr(); - let poll_t = tacho::Timing::start(); - trace!("start_send {}: unready={} ready={} retired={}", - src_addr, - self.unready.len(), - self.ready.len(), - self.retired.len()); - let ret = match self.dispatch(src) { - Err(e) => Err(e), - Ok(AsyncSink::Ready) => Ok(AsyncSink::Ready), - Ok(AsyncSink::NotReady(src)) => { - self.evict_retirees()?; - self.promote_unready()?; - self.discover_and_retire()?; - trace!("retrying {} unready={} ready={} retired={}", - src_addr, - self.unready.len(), - self.ready.len(), - self.retired.len()); - self.dispatch(src) - } - }; - - self.record_balanacer_stats(); - trace!("start_sent {}: {} unready={} ready={} retired={}", - src_addr, - match &ret { - &Ok(AsyncSink::Ready) => "sent", - &Ok(AsyncSink::NotReady(_)) => "not sent", - &Err(_) => "failed", - }, - self.unready.len(), - self.ready.len(), - self.retired.len()); - - self.stats.poll_time_us.add(poll_t.elapsed_us()); - ret - } - - /// Updates the list of endpoints as needed. 
- fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { - let poll_t = tacho::Timing::start(); - trace!("poll_complete unready={} ready={} retired={}", - self.unready.len(), - self.ready.len(), - self.retired.len()); - self.evict_retirees()?; - self.poll_ready()?; - self.promote_unready()?; - self.discover_and_retire()?; - self.record_balanacer_stats(); - trace!("poll_completed unready={} ready={} retired={}", - self.unready.len(), - self.ready.len(), - self.retired.len()); - self.stats.poll_time_us.add(poll_t.elapsed_us()); - Ok(Async::NotReady) - } -} - -struct Stats { - metrics: tacho::Scope, - conns_established: tacho::Gauge, - conns_active: tacho::Gauge, - conns_pending: tacho::Gauge, - endpoints_ready: tacho::Gauge, - endpoints_unready: tacho::Gauge, - endpoints_retired: tacho::Gauge, - poll_time_us: tacho::Stat, -} - -impl Stats { - fn new(m: tacho::Scope) -> Stats { - Stats { - conns_established: m.gauge("conns_established".into()), - conns_active: m.gauge("conns_active".into()), - conns_pending: m.gauge("conns_pending".into()), - endpoints_ready: m.gauge("endpoints_ready".into()), - endpoints_unready: m.gauge("endpoints_unready".into()), - endpoints_retired: m.gauge("endpoints_retired".into()), - poll_time_us: m.stat("poll_time_us".into()), - metrics: m, - } - } - - fn measure(&mut self, - unready: &VecDeque, - ready: &VecDeque, - retired: &VecDeque) { - let mut established = 0u64; - let mut active = 0u64; - let mut pending = 0u64; - for e in unready { - established += e.conns_established() as u64; - active += e.conns_active() as u64; - pending += e.conns_pending() as u64; - } - for e in ready { - established += e.conns_established() as u64; - active += e.conns_active() as u64; - pending += e.conns_pending() as u64; - } - for e in retired { - active += e.conns_active() as u64; - } - - self.conns_established.set(established); - self.conns_active.set(active); - self.conns_pending.set(pending); - self.endpoints_ready.set(ready.len() as u64); - 
self.endpoints_unready.set(unready.len() as u64); - self.endpoints_retired.set(retired.len() as u64); - } -} diff --git a/src/lb/duplex.rs b/src/lb/duplex.rs deleted file mode 100644 index 5e58c39..0000000 --- a/src/lb/duplex.rs +++ /dev/null @@ -1,103 +0,0 @@ -use futures::{Async, Future, Poll}; - -use lb::{ProxyStream, Socket, WithAddr}; -use std::cell::RefCell; -use std::io; -use std::net::SocketAddr; -use std::rc::Rc; -use tacho; - -/// Joins src and dst transfers into a single Future. -pub struct Duplex { - pub src_addr: SocketAddr, - pub dst_addr: SocketAddr, - tx: Option, - rx: Option, - - tx_bytes: u64, - tx_bytes_stat: tacho::Stat, - rx_bytes: u64, - rx_bytes_stat: tacho::Stat, -} - -impl Duplex { - pub fn new(src: Socket, - dst: Socket, - buf: Rc>>, - tx_metrics: tacho::Scope, - rx_metrics: tacho::Scope) - -> Duplex { - let src_addr = src.addr(); - let dst_addr = dst.addr(); - let src = Rc::new(RefCell::new(src)); - let dst = Rc::new(RefCell::new(dst)); - let tx_bytes_stat = tx_metrics.stat("bytes".into()); - let rx_byte_stat = rx_metrics.stat("bytes".into()); - let tx = ProxyStream::new(src.clone(), dst.clone(), buf.clone(), tx_metrics); - let rx = ProxyStream::new(dst, src, buf, rx_metrics); - Duplex { - src_addr: src_addr, - dst_addr: dst_addr, - tx: Some(tx), - rx: Some(rx), - - tx_bytes: 0, - tx_bytes_stat: tx_bytes_stat, - - rx_bytes: 0, - rx_bytes_stat: rx_byte_stat, - } - } -} - -impl Future for Duplex { - type Item = (); - type Error = io::Error; - fn poll(&mut self) -> Poll<(), io::Error> { - if let Some(mut tx) = self.tx.take() { - trace!("polling dstward from {} to {}", - self.src_addr, - self.dst_addr); - match tx.poll()? 
{ - Async::Ready(sz) => { - trace!("dstward complete from {} to {}", - self.src_addr, - self.dst_addr); - self.tx_bytes += sz; - } - Async::NotReady => { - trace!("dstward not ready"); - self.tx = Some(tx); - } - } - } - - if let Some(mut rx) = self.rx.take() { - trace!("polling srcward from {} to {}", - self.dst_addr, - self.src_addr); - match rx.poll()? { - Async::Ready(sz) => { - trace!("srcward complete from {} to {}", - self.dst_addr, - self.src_addr); - self.rx_bytes += sz; - } - Async::NotReady => { - trace!("srcward not ready"); - self.rx = Some(rx); - } - } - } - - if self.tx.is_none() && self.rx.is_none() { - trace!("complete"); - self.tx_bytes_stat.add(self.tx_bytes); - self.rx_bytes_stat.add(self.rx_bytes); - Ok(Async::Ready(())) - } else { - trace!("not ready"); - Ok(Async::NotReady) - } - } -} diff --git a/src/lb/endpoint.rs b/src/lb/endpoint.rs deleted file mode 100644 index 952a1bc..0000000 --- a/src/lb/endpoint.rs +++ /dev/null @@ -1,263 +0,0 @@ -use futures::{Async, AsyncSink, Future}; - -use lb::{Connector, Duplex, Src, Dst, WithAddr}; -use std::{f32, io}; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::net::SocketAddr; -use std::rc::Rc; -use std::time::Instant; -use tacho::{self, Timing}; - -struct Pending { - connect: Box>, - start_t: Instant, -} - -struct Established { - dst: Dst, - start_t: Instant, -} - -struct Active { - duplex: Duplex, - start_t: Instant, -} - -struct Stats { - failures: tacho::Counter, - successes: tacho::Counter, - connect_latency_us: tacho::Stat, - connection_ready_ms: tacho::Stat, - connection_active_ms: tacho::Stat, - tx_metrics: tacho::Scope, - rx_metrics: tacho::Scope, -} -impl Stats { - fn new(metrics: tacho::Scope) -> Stats { - let tx_metrics = metrics.clone().labeled("direction".into(), "tx".into()); - let rx_metrics = metrics.clone().labeled("direction".into(), "rx".into()); - Stats { - connect_latency_us: metrics.stat("connect_latency_us".into()), - connection_ready_ms: 
metrics.stat("connection_ready_ms".into()), - connection_active_ms: metrics.stat("connection_active_ms".into()), - failures: metrics.counter("failure_count".into()), - successes: metrics.counter("success_count".into()), - tx_metrics: tx_metrics, - rx_metrics: rx_metrics, - } - } -} - -/// A single possibly-available load balancing endpoint. -pub struct Endpoint { - addr: SocketAddr, - weight: f32, - retired: bool, - - /// Pending connection attempts. - pending: VecDeque, - - /// Connections that have been established but are not yet in - /// active use. - established: VecDeque, - - /// Active TCP streams. The stream completed - active: VecDeque, - - stats: Stats, -} - -impl Endpoint { - pub fn new(a: SocketAddr, w: f32, metrics: tacho::Scope) -> Endpoint { - Endpoint { - addr: a, - weight: w, - retired: false, - pending: VecDeque::new(), - established: VecDeque::new(), - active: VecDeque::new(), - stats: Stats::new(metrics), - } - } - - pub fn addr(&self) -> SocketAddr { - self.addr - } - - pub fn weight(&self) -> f32 { - self.weight - } - - pub fn set_weight(&mut self, w: f32) { - self.weight = w; - } - - pub fn is_retired(&self) -> bool { - self.retired - } - - pub fn retire(&mut self) { - for _ in 0..self.pending.len() { - drop(self.pending.pop_front()); - } - for _ in 0..self.established.len() { - drop(self.established.pop_front()); - } - self.retired = true; - } - - pub fn unretire(&mut self) { - self.retired = false; - } - - pub fn conns_pending(&self) -> usize { - self.pending.len() - } - - pub fn conns_established(&self) -> usize { - self.established.len() - } - - pub fn conns_active(&self) -> usize { - self.active.len() - } - - pub fn is_ready(&self) -> bool { - !self.established.is_empty() - } - - pub fn is_active(&self) -> bool { - !self.active.is_empty() - } - - /// Scores the endpoint. - /// - /// Uses the number of active connections, combined with the endpoint's weight, to - /// produce a load score. 
A lightly-loaded, heavily-weighted endpoint will receive a - /// score close to 0.0. An endpoint that cannot currently handle events produces a - /// score of `f32::INFINITY`. - /// - /// TODO: Should this be extracted into a configurable strategy? - pub fn load(&self) -> f32 { - if !self.is_ready() { - // If the endpoint is not ready to serve requests, it should not be - // considered. - f32::INFINITY - } else { - (1.0 + self.conns_active() as f32) / self.weight - } - } - - /// Initiate a new connection - pub fn init_connection(&mut self, c: &C) { - debug!("initiating connection to {}", self.addr); - self.pending.push_back(Pending { - start_t: tacho::Timing::start(), - connect: c.connect(&self.addr), - }); - } - - /// Checks the state of connections for this endpoint. - /// - /// When active streams have been completed, they are removed. When pending - /// connections have been established, they are stored to be dispatched. - pub fn poll_connections(&mut self) -> io::Result<()> { - trace!("{}: {} connections established", - self.addr, - self.established.len()); - self.poll_active(); - self.poll_pending(); - Ok(()) - } - - fn poll_active(&mut self) { - let sz = self.active.len(); - trace!("{}: checking {} active streams", self.addr, sz); - for i in 0..sz { - let mut active = self.active.pop_front().unwrap(); - trace!("{}: polling active stream {}/{}: {}", - self.addr, - i + 1, - sz, - active.duplex.src_addr); - match active.duplex.poll() { - Ok(Async::NotReady) => { - trace!("{}: still active from {}", - self.addr, - active.duplex.src_addr); - self.active.push_back(active); - } - Ok(Async::Ready(())) => { - trace!("{}: completed from {}", self.addr, active.duplex.src_addr); - self.stats.successes.incr(1); - self.stats.connection_active_ms.add(active.start_t.elapsed_ms()); - drop(active); - } - Err(e) => { - info!("{}: failed from {}: {}", - self.addr, - active.duplex.src_addr, - e); - self.stats.failures.incr(1); - 
self.stats.connection_active_ms.add(active.start_t.elapsed_ms()); - drop(active); - } - } - } - } - - fn poll_pending(&mut self) { - let sz = self.pending.len(); - trace!("{}: polling {} pending streams", self.addr, sz); - for _ in 0..sz { - let mut pending = self.pending.pop_front().unwrap(); - match pending.connect.poll() { - Ok(Async::NotReady) => { - self.pending.push_back(pending); - } - Ok(Async::Ready(dst)) => { - trace!("{}: connection established", self.addr); - self.stats.connect_latency_us.add(pending.start_t.elapsed_us()); - self.established.push_back(Established { - dst: dst, - start_t: tacho::Timing::start(), - }); - } - Err(e) => { - info!("{}: cannot establish connection: {}", self.addr, e); - } - } - } - } - - - /// Attempts to begin transmiting from the the `Src` to this endpoint. - /// - /// If no connections have been established, the Upstrea is returned in an - /// `Async::NotReady` so that the caller may try another endpoint. - pub fn transmit(&mut self, src: Src, buf: Rc>>) -> AsyncSink { - match self.established.pop_front() { - None => { - { - let Src(ref src) = src; - trace!("no connections to {} from {}", self.addr, src.addr()); - } - AsyncSink::NotReady(src) - } - Some(established) => { - let Src(src) = src; - let Dst(dst) = established.dst; - self.stats.connection_ready_ms.add(established.start_t.elapsed_ms()); - trace!("transmitting to {} from {}", self.addr, src.addr()); - let tx_metrics = self.stats.tx_metrics.clone(); - let rx_metrics = self.stats.rx_metrics.clone(); - self.active.push_front(Active { - duplex: Duplex::new(src, dst, buf, tx_metrics, rx_metrics), - start_t: tacho::Timing::start(), - }); - AsyncSink::Ready - } - } - } -} diff --git a/src/lb/mod.rs b/src/lb/mod.rs deleted file mode 100644 index 42e8d26..0000000 --- a/src/lb/mod.rs +++ /dev/null @@ -1,165 +0,0 @@ -//! A simple layer-4 load balancing library on tokio. -//! -//! Inspired by https://github.com/tailhook/tk-pool. -//! -//! 
TODO: if removed endpoints can't be considered for load balancing, they should be -//! removed from `endpoints. -//! -//! TODO: Srcs will have to be made a trait to accomodate additional serverside -//! context: specifically, ALPN. - -use futures::{Future, Stream}; -use rustls; -use std::io; -use std::net::{self, SocketAddr}; -use std::sync::Arc; -use tacho; -use tokio_core::net::{TcpListener, TcpStream}; -use tokio_core::reactor::Handle; - -mod balancer; -mod duplex; -mod endpoint; -mod proxy_stream; -mod shared; -mod socket; - - -pub use self::balancer::Balancer; -use self::duplex::Duplex; -pub use self::endpoint::Endpoint; -use self::proxy_stream::ProxyStream; -pub use self::shared::Shared; -use self::socket::Socket; - -pub struct Src(Socket); -pub struct Dst(Socket); - -pub trait WithAddr { - fn addr(&self) -> SocketAddr; -} - -impl WithAddr for Src { - fn addr(&self) -> SocketAddr { - self.0.addr() - } -} - -/// Binds on `addr` and produces `U`-typed src connections. -pub trait Acceptor { - fn accept(&self, addr: &SocketAddr) -> Box>; -} - -/// Establishes a `D`-typed connection to `addr`. -// TODO does the address type need to be abstracted to support additional (TLS) metadata? -pub trait Connector { - fn connect(&self, addr: &SocketAddr) -> Box>; -} - -pub struct PlainAcceptor { - handle: Handle, - connects: tacho::Counter, -} -impl PlainAcceptor { - pub fn new(h: Handle, m: tacho::Scope) -> PlainAcceptor { - PlainAcceptor { - handle: h, - connects: m.counter("connects".into()), - } - } -} -impl Acceptor for PlainAcceptor { - fn accept(&self, addr: &SocketAddr) -> Box> { - let mut connects = self.connects.clone(); - TcpListener::bind(addr, &self.handle) - .unwrap() - .incoming() - .map(move |(s, a)| { - connects.incr(1); - Src(Socket::plain(a, s)) - }) - .boxed() - } -} - -/// A `Connector` that builds `TcpStream`-typed connections on the provided `Handle`. 
-pub struct PlainConnector(Handle); -impl PlainConnector { - pub fn new(h: Handle) -> PlainConnector { - PlainConnector(h) - } -} -impl Connector for PlainConnector { - fn connect(&self, addr: &net::SocketAddr) -> Box> { - let addr = *addr; - let f = TcpStream::connect(&addr, &self.0).map(move |s| Dst(Socket::plain(addr, s))); - Box::new(f) - } -} - -pub struct SecureAcceptor { - handle: Handle, - config: Arc, - connects: tacho::Counter, - fails: tacho::Counter, -} -impl SecureAcceptor { - pub fn new(h: Handle, c: rustls::ServerConfig, m: tacho::Scope) -> SecureAcceptor { - SecureAcceptor { - handle: h, - config: Arc::new(c), - connects: m.counter("connects".into()), - fails: m.counter("handshake_failures".into()), - } - } -} -impl Acceptor for SecureAcceptor { - fn accept(&self, addr: &SocketAddr) -> Box> { - let tls = self.config.clone(); - let l = TcpListener::bind(addr, &self.handle).unwrap(); - - let mut connects = self.connects.clone(); - let mut fails = self.fails.clone(); - - // Lift handshake errors so those connections are ignored. 
- let sockets = l.incoming() - .and_then(move |(tcp, addr)| Socket::secure_server_handshake(addr, tcp, &tls)); - let srcs = sockets.then(Ok).filter_map(move |result| match result { - Err(_) => { - fails.incr(1); - None - } - Ok(s) => { - connects.incr(1); - Some(Src(s)) - } - }); - Box::new(srcs) - } -} - -pub struct SecureConnector { - name: String, - handle: Handle, - tls: Arc, -} -impl SecureConnector { - pub fn new(n: String, c: rustls::ClientConfig, h: Handle) -> SecureConnector { - SecureConnector { - name: n, - handle: h, - tls: Arc::new(c), - } - } -} -impl Connector for SecureConnector { - fn connect(&self, addr: &net::SocketAddr) -> Box> { - let tls = self.tls.clone(); - let name = self.name.clone(); - let addr = *addr; - let f = TcpStream::connect(&addr, &self.handle) - .and_then(move |tcp| Socket::secure_client_handshake(addr, tcp, &tls, &name)) - .map(Dst); - Box::new(f) - } -} diff --git a/src/lb/proxy_stream.rs b/src/lb/proxy_stream.rs deleted file mode 100644 index ef59ee4..0000000 --- a/src/lb/proxy_stream.rs +++ /dev/null @@ -1,154 +0,0 @@ -//! Inspired by tokio-socks5 example. - -use futures::{Async, Future, Poll}; - -use lb::Socket; -use std::cell::RefCell; -use std::io::{self, Read, Write}; -use std::net::Shutdown; -use std::rc::Rc; -use tacho; -use tokio_io::AsyncWrite; - -/// A future representing reading all data from one side of a proxy connection and writing -/// it to another. -/// -/// In the typical case, nothing allocations are required. If the write side exhibits -/// backpressure, however, a buffer is allocated to -pub struct ProxyStream { - reader: Rc>, - writer: Rc>, - - // Holds transient data when copying between the reader and writer. - buf: Rc>>, - - // Holds data that can't be fully written. - pending: Option>, - - // The number of bytes we've written so far. 
- bytes_total: u64, - - completed: bool, - - bytes_total_count: tacho::Counter, - allocs_count: tacho::Counter, -} - -impl ProxyStream { - pub fn new(r: Rc>, - w: Rc>, - b: Rc>>, - metrics: tacho::Scope) - -> ProxyStream { - ProxyStream { - reader: r, - writer: w, - buf: b, - pending: None, - bytes_total: 0, - completed: false, - bytes_total_count: metrics.counter("bytes_total".into()), - allocs_count: metrics.counter("allocs_count".into()), - } - } -} - -// Here we implement the `Future` trait for `Transfer` directly. This does not -// use any combinators, and shows how you might implement it in custom -// situations if needed. -impl Future for ProxyStream { - // Our future resolves to the number of bytes transferred, or an I/O error - // that happens during the connection, if any. - type Item = u64; - type Error = io::Error; - - /// Attempts to drive this future to completion. - /// - /// Reads from from the `reader` into a shared buffer, before writing to If a Flushes - /// all pending data before reading any more. - fn poll(&mut self) -> Poll { - trace!("poll"); - let mut writer = self.writer.borrow_mut(); - let mut reader = self.reader.borrow_mut(); - loop { - if self.completed { - try_nb!(writer.shutdown()); - writer.tcp_shutdown(Shutdown::Write)?; - trace!("completed"); - return Ok(self.bytes_total.into()); - } - - // Try to flush pending bytes to the writer. - if let Some(mut pending) = self.pending.take() { - let psz = pending.len(); - trace!("writing {} pending bytes", psz); - - let wsz = writer.write(&pending)?; - trace!("wrote {} bytes", wsz); - - { - let wsz = wsz as u64; - self.bytes_total += wsz; - self.bytes_total_count.incr(wsz); - } - if wsz < psz { - trace!("saving {} bytes", psz - wsz); - // If all of the pending bytes couldn't be complete, save the - // remainder for next time. 
- pending.drain(0..wsz); - self.pending = Some(pending); - return Ok(Async::NotReady); - } - } - assert!(self.pending.is_none()); - - // Read some data into our shared buffer. - let mut buf = self.buf.borrow_mut(); - let rsz = try_nb!(reader.read(&mut buf)); - if rsz == 0 { - // Nothing left to read, return the total number of bytes transferred. - trace!("completing: {}B", self.bytes_total); - self.completed = true; - try_nb!(writer.shutdown()); - writer.tcp_shutdown(Shutdown::Write)?; - trace!("completed: {}B", self.bytes_total); - return Ok(self.bytes_total.into()); - } - trace!("read {} bytes", rsz); - - // Attempt to write from the shared buffer. - match writer.write(&buf[..rsz]) { - Ok(wsz) => { - trace!("wrote {} bytes", wsz); - { - let wsz = wsz as u64; - self.bytes_total += wsz; - self.bytes_total_count.incr(wsz); - } - if wsz < rsz { - trace!("saving {} bytes", rsz - wsz); - // Allocate a temporary buffer to the unwritten remainder for next - // time. - self.allocs_count.incr(1); - let mut p = vec![0; (rsz - wsz)]; - p.copy_from_slice(&buf[wsz..rsz]); - self.pending = Some(p); - return Ok(Async::NotReady); - } - } - Err(ref e) if e.kind() == ::std::io::ErrorKind::WouldBlock => { - self.allocs_count.incr(1); - let p = buf.clone(); - self.pending = Some(p); - return Ok(Async::NotReady); - } - Err(e) => { - return Err(e.into()); - } - } - - // We shouldn't be looping if we couldn't write everything. - assert!(self.pending.is_none()); - } - } -} diff --git a/src/lb/shared.rs b/src/lb/shared.rs deleted file mode 100644 index 1b8a5d0..0000000 --- a/src/lb/shared.rs +++ /dev/null @@ -1,46 +0,0 @@ - - -use {Driver, WeightedAddr}; -use futures::{StartSend, Poll, Sink, Stream}; -use futures::sync::mpsc; -use lb::{Balancer, Connector, Src, WithAddr}; -use std::io; -use tokio_core::reactor::Handle; - -/// Allows a balancer to be shared acorss threads. -pub struct Shared(mpsc::Sender); - -impl Shared { - /// Spawn the `balancer` in the given `handle`. 
- pub fn new(balancer: Balancer, max_waiters: usize, handle: Handle) -> Shared - where A: Stream, Error = io::Error> + 'static, - C: Connector + 'static - { - let (tx, rx) = mpsc::channel(max_waiters); - let driver = Driver::new(rx.fuse(), balancer.sink_map_err(|_| {})); - handle.spawn(driver); - Shared(tx) - } -} - -impl Clone for Shared { - fn clone(&self) -> Self { - Shared(self.0.clone()) - } -} - -impl Sink for Shared { - type SinkItem = Src; - type SinkError = io::Error; - - fn start_send(&mut self, src: Src) -> StartSend { - debug!("start_send {}", src.addr()); - self.0.start_send(src).map_err(|e| io::Error::new(io::ErrorKind::Other, e)) - } - - fn poll_complete(&mut self) -> Poll<(), Self::SinkError> { - debug!("poll_complete"); - // This doesn't actually do anything, since balancers never complete. - self.0.poll_complete().map_err(|_| unreachable!()) - } -} diff --git a/src/lb/socket.rs b/src/lb/socket.rs deleted file mode 100644 index cc003a5..0000000 --- a/src/lb/socket.rs +++ /dev/null @@ -1,407 +0,0 @@ -use futures::{Async, Future, Poll}; - -use lb::WithAddr; -use rustls::{Session, ClientConfig, ServerConfig, ClientSession, ServerSession}; -use std::fmt; -use std::io::{self, Read, Write}; -use std::net::{Shutdown, SocketAddr}; -use std::sync::Arc; -use tokio_core::net::TcpStream; -use tokio_io::AsyncWrite; - -/// Hides the implementation details of socket I/O. -/// -/// Plaintext and encrypted (client and server) streams have different type signatures. -/// Exposing these types to the rest of the application is painful, so `Socket` provides -/// an opaque container for the various types of sockets supported by this proxy. -#[derive(Debug)] -pub struct Socket(Inner); - -// Since the rustls types are much larger than the plain types, they are boxed. because -// clippy says so. 
-enum Inner { - Plain(SocketAddr, TcpStream), - SecureClient(SocketAddr, Box>), - SecureServer(SocketAddr, Box>), -} - -impl fmt::Debug for Inner { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Inner::Plain(ref a, _) => f.debug_tuple("Plain").field(a).finish(), - Inner::SecureClient(ref a, _) => f.debug_tuple("SecureClient").field(a).finish(), - Inner::SecureServer(ref a, _) => f.debug_tuple("SecureServer").field(a).finish(), - } - } -} - -impl Socket { - pub fn plain(addr: SocketAddr, tcp: TcpStream) -> Socket { - Socket(Inner::Plain(addr, tcp)) - } - - pub fn secure_client_handshake(addr: SocketAddr, - tcp: TcpStream, - tls: &Arc, - name: &str) - -> SecureClientHandshake { - trace!("initializing client handshake"); - let s = SecureSocket::new(addr, tcp, ClientSession::new(tls, name)); - SecureClientHandshake(Some(s)) - } - - pub fn secure_server_handshake(addr: SocketAddr, - tcp: TcpStream, - tls: &Arc) - -> SecureServerHandshake { - trace!("initializing server handshake"); - let s = SecureSocket::new(addr, tcp, ServerSession::new(tls)); - SecureServerHandshake(Some(s)) - } - - pub fn tcp_shutdown(&mut self, how: Shutdown) -> io::Result<()> { - trace!("{:?}.tcp_shutdown({:?})", self, how); - match self.0 { - Inner::Plain(_, ref mut s) => TcpStream::shutdown(s, how), - Inner::SecureClient(_, ref mut s) => s.tcp_shutdown(how), - Inner::SecureServer(_, ref mut s) => s.tcp_shutdown(how), - } - } -} - -impl WithAddr for Socket { - fn addr(&self) -> SocketAddr { - match self.0 { - Inner::Plain(ref a, _) | - Inner::SecureClient(ref a, _) | - Inner::SecureServer(ref a, _) => *a, - } - } -} - -/// Reads the socket without blocking. 
-impl Read for Socket { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - trace!("{:?}.read({})", self, buf.len()); - match self.0 { - Inner::Plain(_, ref mut t) => t.read(buf), - Inner::SecureClient(_, ref mut c) => c.read(buf), - Inner::SecureServer(_, ref mut s) => s.read(buf), - } - } -} - -/// Writes to the socket without blocking. -impl Write for Socket { - fn write(&mut self, buf: &[u8]) -> io::Result { - trace!("{:?}.write({})", self, buf.len()); - match self.0 { - Inner::Plain(_, ref mut t) => t.write(buf), - Inner::SecureClient(_, ref mut c) => c.write(buf), - Inner::SecureServer(_, ref mut s) => s.write(buf), - } - } - - fn flush(&mut self) -> io::Result<()> { - trace!("{:?}.flush()", self); - match self.0 { - Inner::Plain(_, ref mut t) => t.flush(), - Inner::SecureClient(_, ref mut c) => c.flush(), - Inner::SecureServer(_, ref mut s) => s.flush(), - } - } -} - -/// Closes the write-side of a stream. -impl AsyncWrite for Socket { - fn shutdown(&mut self) -> Poll<(), io::Error> { - trace!("{:?}.shutdown()", self); - match self.0 { - Inner::Plain(_, ref mut t) => t.shutdown(), - Inner::SecureClient(_, ref mut c) => c.shutdown(), - Inner::SecureServer(_, ref mut s) => s.shutdown(), - } - } -} - -/// Securely transmits data. -struct SecureSocket { - addr: SocketAddr, - /// The external encrypted side of the socket. - tcp: TcpStream, - /// The internal decrypted side of the socket. 
- session: I, -} - -impl fmt::Debug for SecureSocket { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("SecureSocket").field(&self.addr).finish() - } -} - -impl SecureSocket - where S: Session -{ - pub fn new(a: SocketAddr, t: TcpStream, s: S) -> SecureSocket { - SecureSocket { - addr: a, - tcp: t, - session: s, - } - } - - pub fn tcp_shutdown(&mut self, how: Shutdown) -> io::Result<()> { - trace!("tcp_shutdown: {:?}", self); - self.tcp.shutdown(how) - } - - fn read_tcp_to_session(&mut self) -> Option> { - if !self.session.wants_read() { - trace!("read_tcp_to_session: no read needed: {}", self.addr); - return None; - } - - trace!("read_tcp_to_session: read_tls: {}", self.addr); - match self.session.read_tls(&mut self.tcp) { - Err(e) => { - if e.kind() == io::ErrorKind::WouldBlock { - trace!("read_tcp_to_session: read_tls: {}: {}", self.addr, e); - None - } else { - error!("read_tcp_to_session: read_tls: {}: {}", self.addr, e); - Some(Err(e)) - } - } - Ok(sz) => { - trace!("read_tcp_to_session: read_tls: {} {}B", self.addr, sz); - if sz == 0 { - Some(Ok(sz)) - } else { - trace!("read_tcp_to_session: process_new_packets: {}", self.addr); - match self.session.process_new_packets() { - Ok(_) => Some(Ok(sz)), - Err(e) => { - trace!("read_tcp_to_session: process_new_packets error: {:?}", self); - Some(Err(io::Error::new(io::ErrorKind::Other, e))) - } - } - } - } - } - } - - fn write_session_to_tcp(&mut self) -> io::Result { - trace!("write_session_to_tcp: write_tls: {}", self.addr); - let sz = self.session.write_tls(&mut self.tcp)?; - trace!("write_session_to_tcp: write_tls: {}: {}B", self.addr, sz); - Ok(sz) - } -} - -impl WithAddr for SecureSocket { - fn addr(&self) -> SocketAddr { - self.addr - } -} - -impl Read for SecureSocket - where S: Session -{ - fn read(&mut self, buf: &mut [u8]) -> io::Result { - trace!("read: {}", self.addr); - let read_ok = match self.read_tcp_to_session() { - None => false, - Some(Ok(_)) => true, - Some(Err(e)) => { 
- trace!("read: {}: {:?}", self.addr, e.kind()); - return Err(e); - } - }; - - let sz = self.session.read(buf)?; - trace!("read: {}: {}B", self.addr, sz); - if !read_ok && sz == 0 { - Err(io::ErrorKind::WouldBlock.into()) - } else { - Ok(sz) - } - } -} - -impl Write for SecureSocket - where S: Session -{ - fn write(&mut self, buf: &[u8]) -> io::Result { - trace!("write: {}", self.addr); - let sz = self.session.write(buf)?; - trace!("write: {}: {}B", self.addr, sz); - - { - let mut write_ok = true; - while self.session.wants_write() && write_ok { - write_ok = match self.write_session_to_tcp() { - Ok(sz) => sz > 0, - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => false, - e @ Err(_) => return e, - }; - } - } - - Ok(sz) - } - - fn flush(&mut self) -> io::Result<()> { - trace!("flush: {:?}", self); - self.session.flush()?; - self.tcp.flush() - } -} - -impl AsyncWrite for SecureSocket - where S: Session -{ - fn shutdown(&mut self) -> Poll<(), io::Error> { - self.session.send_close_notify(); - self.session.write_tls(&mut self.tcp)?; - self.tcp.flush()?; - Ok(Async::Ready(())) - } -} - -/// A future that completes when a server's TLS handshake is complete. -#[derive(Debug)] -pub struct SecureServerHandshake(Option>); -impl Future for SecureServerHandshake { - type Item = Socket; - type Error = io::Error; - fn poll(&mut self) -> Poll { - trace!("{:?}.poll()", self); - let mut ss = self.0.take().expect("poll must not be called after completion"); - - // Read and write the handshake. 
- { - let mut wrote = true; - while ss.session.is_handshaking() && wrote { - if let Some(Err(e)) = ss.read_tcp_to_session() { - trace!("server handshake: {}: error: {}", ss.addr, e); - return Err(e); - }; - trace!("server handshake: write_session_to_tcp: {}", ss.addr); - wrote = ss.session.wants_write() && - match ss.write_session_to_tcp() { - Ok(sz) => { - trace!("server handshake: write_session_to_tcp: {}: wrote {}", - ss.addr, - sz); - sz > 0 - } - Err(e) => { - trace!("server handshake: write_session_to_tcp: {}: {}", ss.addr, e); - if e.kind() != io::ErrorKind::WouldBlock { - return Err(e); - } - false - } - } - } - } - - // If the remote hasn't read everything yet, resume later. - if ss.session.is_handshaking() { - trace!("server handshake: {}: not complete", ss.addr); - self.0 = Some(ss); - return Ok(Async::NotReady); - } - - // Finally, acknowledge the handshake is complete. - if ss.session.wants_write() { - trace!("server handshake: write_session_to_tcp: {}: final", ss.addr); - match ss.write_session_to_tcp() { - Ok(sz) => { - trace!("server handshake: write_session_to_tcp: {}: final: wrote {}B", - ss.addr, - sz); - } - Err(e) => { - trace!("server handshake: write_session_to_tcp: {}: final: {}", - ss.addr, - e); - if e.kind() != io::ErrorKind::WouldBlock { - return Err(e); - } - } - } - } - - trace!("server handshake: {}: complete", ss.addr); - Ok(Socket(Inner::SecureServer(ss.addr, Box::new(ss))).into()) - } -} - -/// A future that completes when a client's TLS handshake is complete. -#[derive(Debug)] -pub struct SecureClientHandshake(Option>); -impl Future for SecureClientHandshake { - type Item = Socket; - type Error = io::Error; - fn poll(&mut self) -> Poll { - trace!("{:?}.poll()", self); - let mut ss = self.0.take().expect("poll must not be called after completion"); - - // Read and write the handshake. 
- { - let mut read_ok = true; - let mut write_ok = true; - while ss.session.is_handshaking() && (read_ok || write_ok) { - trace!("client handshake: read_tcp_to_session: {}", ss.addr); - read_ok = match ss.read_tcp_to_session() { - None => { - trace!("client handshake: read_tcp_to_session: {}: not ready", - ss.addr); - false - } - Some(Ok(sz)) => { - trace!("client handshake: read_tcp_to_session: {}: {}B", - ss.addr, - sz); - sz > 0 - } - Some(Err(e)) => { - trace!("client handshake: read_tcp_to_session: {}: error: {}", - ss.addr, - e); - return Err(e); - } - }; - - trace!("client handshake: write_session_to_tcp: {}", ss.addr); - write_ok = ss.session.wants_write() && - match ss.write_session_to_tcp() { - Ok(sz) => { - trace!("client handshake: write_session_to_tcp: {}: wrote {}", - ss.addr, - sz); - sz > 0 - } - Err(e) => { - trace!("client handshake: write_session_to_tcp: {}: {}", ss.addr, e); - if e.kind() != io::ErrorKind::WouldBlock { - return Err(e); - } - false - } - }; - } - } - - // If the remote hasn't read everything yet, resume later. - if ss.session.is_handshaking() { - trace!("handshake: {}: not complete", ss.addr); - self.0 = Some(ss); - return Ok(Async::NotReady); - } - - trace!("handshake: {}: complete", ss.addr); - Ok(Socket(Inner::SecureClient(ss.addr, Box::new(ss))).into()) - } -} diff --git a/src/lib.rs b/src/lib.rs index 4c39ce8..cb5bb76 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,15 +1,17 @@ -//! A simple layer-4 load balancing library on tokio. +//! linkerd-tcp: A load-balancing TCP/TLS stream routing proxy. +//! //! -//! Inspired by https://github.com/tailhook/tk-pool. -//! Copyright 2016 The tk-pool Developers //! //! Copyright 2017 Buoyant, Inc. 
+#![deny(missing_docs)] + extern crate bytes; #[macro_use] extern crate log; extern crate futures; extern crate hyper; +extern crate ordermap; extern crate rand; extern crate rustls; extern crate serde; @@ -24,15 +26,37 @@ extern crate tokio_io; extern crate tokio_timer; extern crate url; -use std::net; - -mod driver; +mod admin; pub mod app; -pub mod lb; -pub mod namerd; +mod balancer; +mod connection; +mod connector; +mod path; +mod resolver; +mod router; +mod server; -use driver::Driver; -pub use lb::Balancer; +use balancer::WeightedAddr; +use path::Path; +/// Describes a configuratin error. #[derive(Clone, Debug)] -pub struct WeightedAddr(pub net::SocketAddr, pub f32); +pub struct ConfigError(String); + +impl<'a> From<&'a str> for ConfigError { + fn from(msg: &'a str) -> ConfigError { + ConfigError(msg.into()) + } +} + +impl<'a> From for ConfigError { + fn from(msg: String) -> ConfigError { + ConfigError(msg) + } +} + +impl ::std::fmt::Display for ConfigError { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { + fmt.write_str(&self.0) + } +} diff --git a/src/main.rs b/src/main.rs index 8e7d800..ff4a238 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,55 +1,103 @@ #[macro_use] extern crate clap; +extern crate futures; +extern crate linkerd_tcp; #[macro_use] extern crate log; extern crate pretty_env_logger; -extern crate linkerd_tcp; +extern crate tokio_core; +extern crate tokio_timer; -use clap::{Arg, App}; -use linkerd_tcp::app::{self, Runner}; +use clap::{Arg, App as ClapApp}; +use linkerd_tcp::app::{self, AppConfig, App, AdminRunner, RouterSpawner}; +use std::collections::VecDeque; use std::fs; use std::io::Read; use std::thread; +use tokio_core::reactor::{Core, Handle}; +use tokio_timer::Timer; static CONFIG_PATH_ARG: &'static str = "PATH"; +/// Runs linkerd-tcp. +/// +/// Accepts a configuration file fn main() { // Configure the logger from the RUST_LOG environment variable. 
drop(pretty_env_logger::init()); // Load command-line options. - let opts = App::new(crate_name!()) + let opts = ClapApp::new(crate_name!()) .version(crate_version!()) .about(crate_description!()) .arg(Arg::with_name(CONFIG_PATH_ARG) - .required(true) - .index(1) - .help("Config file path.")) + .required(true) + .index(1) + .help("Config file path.")) .get_matches(); // Parse configuration file. - let config = { + let config: AppConfig = { let path = opts.value_of(CONFIG_PATH_ARG).unwrap(); let mut txt = String::new(); - fs::File::open(path) - .and_then(|mut f| f.read_to_string(&mut txt)) - .expect("failed to read config"); - app::config::from_str(&txt).expect("configuration error") + let res = match path { + "-" => ::std::io::stdin().read_to_string(&mut txt), + path => fs::File::open(path).and_then(|mut f| f.read_to_string(&mut txt)), + }; + match res { + Err(e) => panic!("error reading configuration from {}: {}", path, e), + Ok(_) => txt.parse().expect("failed to parse configuration"), + } }; + debug!("parsed config: {:?}", config); // Process the configuration, splitting it into two threads. These threads are // connected by synchronization primitives as needed, but no work is being done yet. // Next, we'll attach each of these to a reactor in an independent thread, driving // both admin and serving work. - let (admin, proxies) = app::configure(config); + let App { routers, admin } = config.into_app().expect("failed to load configuration"); + debug!("loaded app"); + + let (closer, closed) = app::closer(); + + // A single timer for the whole process. The default hashwheel timer has a granularity of 100ms. 
+ let timer = Timer::default(); + + // Create a background admin thread that runs an admin server and executes executes + // namerd resolutions + let admin_thread = spawn_admin(admin, closer, &timer); + run_routers(routers, closed, &timer); + admin_thread.join().expect("failed to join admin thread"); + debug!("stopped") +} - // Run admin in a new thread.z - let admin_thread = thread::Builder::new() +fn spawn_admin(admin: AdminRunner, closer: app::Closer, timer: &Timer) -> thread::JoinHandle<()> { + let timer = timer.clone(); + thread::Builder::new() .name("admin".into()) - .spawn(move || admin.run().expect("could not run admin")) - .expect("could not spawn admin thread"); + .spawn(move || { + debug!("running admin server"); + let mut core = Core::new().expect("failed to initialize admin reactor"); + admin + .run(closer, &mut core, &timer) + .expect("failed to run the admin server"); + }) + .expect("failed to spawn admin thread") +} + +fn run_routers(routers: VecDeque, closed: app::Closed, timer: &Timer) { + // Schedule all routers on the main thread. + let mut core = Core::new().expect("failed to initialize server reactor"); + spawn_routers(routers, &core.handle(), timer); + + // Run until the admin thread closes the application. + debug!("running until admin server closes"); + core.run(closed).expect("failed to run"); +} - proxies.run().expect("could not run proxies"); - admin_thread.join().expect("admin thread failed to join"); - info!("Closing.") +fn spawn_routers(mut routers: VecDeque, reactor: &Handle, timer: &Timer) { + while let Some(r) = routers.pop_front() { + debug!("spawning router"); + r.spawn(reactor, timer).expect("failed to spawn router"); + } } diff --git a/src/namerd.rs b/src/namerd.rs deleted file mode 100644 index 1890b15..0000000 --- a/src/namerd.rs +++ /dev/null @@ -1,177 +0,0 @@ -//! 
Namerd Endpointer - -use bytes::{Buf, BufMut, IntoBuf, Bytes, BytesMut}; -use futures::{Future, Stream, future}; -use hyper::{Body, Chunk, Client, Uri}; -use hyper::client::Connect; -use hyper::status::StatusCode; -use serde_json as json; -use std::{f32, net, time}; -use std::collections::HashMap; -use std::rc::Rc; -use tacho::{self, Timing}; -use tokio_timer::Timer; -use url::Url; - -#[derive(Debug)] -pub struct NamerdError(String); - -type AddrsFuture = Box>, Error = ()>>; -type AddrsStream = Box, Error = ()>>; - -#[derive(Clone)] -struct Stats { - request_latency_ms: tacho::Stat, - success_count: tacho::Counter, - failure_count: tacho::Counter, -} -impl Stats { - fn new(metrics: tacho::Scope) -> Stats { - let metrics = metrics.labeled("service".into(), "namerd".into()); - Stats { - request_latency_ms: metrics.stat("namerd_request_latency_ms".into()), - success_count: metrics.counter("namerd_success_count".into()), - failure_count: metrics.counter("namerd_failure_count".into()), - } - } -} - -/// Make a Resolver that periodically polls namerd to resolve a name -/// to a set of addresses. -/// -/// The returned stream never completes. 
-pub fn resolve(base_url: &str, - client: Client, - period: time::Duration, - namespace: &str, - target: &str, - metrics: tacho::Scope) - -> AddrsStream - where C: Connect -{ - let url = { - let base = format!("{}/api/1/resolve/{}", base_url, namespace); - let url = Url::parse_with_params(&base, &[("path", &target)]).expect("invalid namerd url"); - url.as_str().parse::().expect("Could not parse invalid URI") - }; - let stats = Stats::new(metrics); - let client = Rc::new(client); - let init = request(client.clone(), url.clone(), stats.clone()); - let updates = Timer::default() - .interval(period) - .then(move |_| request(client.clone(), url.clone(), stats.clone())); - Box::new(init.into_stream().chain(updates).filter_map(|opt| opt)) -} - - -fn request(client: Rc>, uri: Uri, stats: Stats) -> AddrsFuture { - debug!("Polling namerd at {}", uri.to_string()); - let mut stats = stats; - let rsp = future::lazy(|| Ok(tacho::Timing::start())).and_then(move |start_t| { - client.get(uri) - .then(|rsp| match rsp { - Ok(rsp) => { - match rsp.status() { - StatusCode::Ok => parse_body(rsp.body()), - status => { - info!("error: bad response: {}", status); - future::ok(None).boxed() - } - } - } - Err(e) => { - error!("failed to read response: {}", e); - future::ok(None).boxed() - } - }) - .then(move |rsp| { - stats.request_latency_ms.add(start_t.elapsed_ms()); - if rsp.as_ref().ok().and_then(|r| r.as_ref()).is_some() { - stats.success_count.incr(1); - } else { - stats.failure_count.incr(1); - } - rsp - }) - }); - Box::new(rsp) -} - - -fn parse_body(body: Body) -> AddrsFuture { - trace!("parsing namerd response"); - body.collect() - .then(|res| match res { - Ok(ref chunks) => Ok(parse_chunks(chunks)), - Err(e) => { - info!("error: {}", e); - Ok(None) - } - }) - .boxed() -} - -fn bytes_in(chunks: &[Chunk]) -> usize { - let mut sz = 0; - for c in chunks { - sz += (*c).len(); - } - sz -} - -fn to_buf(chunks: &[Chunk]) -> Bytes { - let mut buf = BytesMut::with_capacity(bytes_in(chunks)); 
- for c in chunks { - buf.put_slice(&*c) - } - buf.freeze() -} - -fn parse_chunks(chunks: &[Chunk]) -> Option> { - let r = to_buf(chunks).into_buf().reader(); - let result: json::Result = json::from_reader(r); - match result { - Ok(ref nrsp) if nrsp.kind == "bound" => Some(to_weighted_addrs(&nrsp.addrs)), - Ok(_) => Some(vec![]), - Err(e) => { - info!("error parsing response: {}", e); - None - } - } -} - -fn to_weighted_addrs(namerd_addrs: &[NamerdAddr]) -> Vec<::WeightedAddr> { - // We never intentionally clear the EndpointMap. - let mut weighted_addrs: Vec<::WeightedAddr> = Vec::new(); - for na in namerd_addrs { - let addr = net::SocketAddr::new(na.ip.parse().unwrap(), na.port); - let w = na.meta.endpoint_addr_weight.unwrap_or(1.0); - weighted_addrs.push(::WeightedAddr(addr, w)); - } - weighted_addrs -} - -#[derive(Debug, Deserialize)] -struct NamerdResponse { - #[serde(rename = "type")] - kind: String, - addrs: Vec, - meta: HashMap, -} - -#[derive(Debug, Deserialize)] -struct NamerdAddr { - ip: String, - port: u16, - meta: Meta, -} - -#[derive(Debug, Deserialize)] -struct Meta { - authority: Option, - - #[serde(rename = "nodeName")] - node_name: Option, - - endpoint_addr_weight: Option, -} diff --git a/src/path.rs b/src/path.rs new file mode 100644 index 0000000..71b3b53 --- /dev/null +++ b/src/path.rs @@ -0,0 +1,45 @@ +use std::fmt; + +#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] +pub struct Path(String); +impl Path { + pub fn as_str(&self) -> &str { + &self.0 + } + + pub fn is_empty(&self) -> bool { + self.len() == 1 + } + pub fn len(&self) -> usize { + self.0.len() + } + + pub fn starts_with(&self, other: &Path) -> bool { + let &Path(ref other) = other; + if self.0.len() > other.len() { + self.0.starts_with(other) && + (self.0.ends_with('/') || other[self.0.len()..].starts_with('/')) + } else if other.len() == self.0.len() { + self.0 == *other + } else { + false + } + } +} +impl fmt::Display for Path { + fn fmt(&self, f: &mut fmt::Formatter) 
-> fmt::Result { + f.write_str(&self.0) + } +} +impl From for Path { + fn from(s: String) -> Path { + assert!(s.starts_with('/')); + Path(s) + } +} +impl<'a> From<&'a str> for Path { + fn from(s: &'a str) -> Path { + assert!(s.starts_with('/')); + Path(s.into()) + } +} diff --git a/src/resolver/config.rs b/src/resolver/config.rs new file mode 100644 index 0000000..0791d7c --- /dev/null +++ b/src/resolver/config.rs @@ -0,0 +1,33 @@ +use super::namerd::Namerd; +use super::super::ConfigError; +use std::time::Duration; +use tacho; +use url::Url; + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct NamerdConfig { + pub base_url: String, + pub period_secs: u64, + pub namespace: String, +} + +impl NamerdConfig { + pub fn into_namerd(self, metrics: &tacho::Scope) -> Result { + if self.period_secs == 0 { + return Err("namerd period must be greater than 0".into()); + } + let period = Duration::from_secs(self.period_secs); + + if let Err(e) = Url::parse(&self.base_url) { + return Err(format!("invalid base_url {}: {}", self.base_url, e).into()); + } + + let metrics = metrics + .clone() + .prefixed("resolver") + .labeled("namespace".into(), self.namespace.clone()); + let namerd = Namerd::new(self.base_url, period, self.namespace, metrics); + Ok(namerd) + } +} diff --git a/src/resolver/mod.rs b/src/resolver/mod.rs new file mode 100644 index 0000000..0fdf296 --- /dev/null +++ b/src/resolver/mod.rs @@ -0,0 +1,108 @@ +use super::{WeightedAddr, Path}; +use futures::{Future, Stream, Poll}; +use futures::sync::mpsc; +use tokio_core::reactor::Handle; +use tokio_timer::{Timer, TimerError}; + +mod config; +mod namerd; +pub use self::config::NamerdConfig; +pub use self::namerd::{Namerd, Addrs}; + +#[derive(Debug)] +pub enum Error { + Hyper(::hyper::Error), + UnexpectedStatus(::hyper::StatusCode), + Serde(::serde_json::Error), + Timer(TimerError), + Rejected, + NotBound, +} + +impl From> for Error { + fn from(_e: 
mpsc::SendError) -> Error { + Error::Rejected + } +} + +pub type Result = ::std::result::Result; + +/// Creates a multithreaded resolver. +/// +/// The `Resolver` side is a client of the `Executor`. Namerd work is performed on +/// whatever thread the executor is spawned on. +pub fn new(namerd: Namerd) -> (Resolver, Executor) { + let (tx, rx) = mpsc::unbounded(); + let res = Resolver { requests: tx }; + let exe = Executor { + requests: rx, + namerd: namerd, + }; + (res, exe) +} + +/// Requests resolutions from an `Executor`. +/// +/// Resolution requests are sent on an channel along with a response channel. The executor +/// writes to the response channel as results are ready. +#[derive(Clone)] +pub struct Resolver { + requests: mpsc::UnboundedSender<(Path, mpsc::UnboundedSender>>)>, +} + +impl Resolver { + pub fn resolve(&mut self, path: Path) -> Resolve { + let addrs = { + let reqs = &self.requests; + let (tx, rx) = mpsc::unbounded(); + reqs.send((path, tx)) + .expect("failed to send resolution request"); + rx + }; + Resolve(addrs) + } +} + +pub struct Resolve(mpsc::UnboundedReceiver>>); + +impl Stream for Resolve { + type Item = Result>; + type Error = (); + fn poll(&mut self) -> Poll, Self::Error> { + self.0.poll() + } +} + +/// Serves resolutions from `Resolver`s. +pub struct Executor { + requests: mpsc::UnboundedReceiver<(Path, mpsc::UnboundedSender>>)>, + namerd: Namerd, +} + +impl Executor { + pub fn execute(self, handle: &Handle, timer: &Timer) -> Execute { + let handle = handle.clone(); + let namerd = self.namerd.with_client(&handle, timer); + let f = self.requests + .for_each(move |(path, rsp_tx)| { + // Stream namerd resolutions to the response channel. + let resolve = namerd.resolve(path.as_str()); + let respond = resolve.forward(rsp_tx).map_err(|_| {}).map(|_| {}); + // Do all of this work in another task so that we can receive + // additional requests. 
+ handle.spawn(respond); + Ok(()) + }); + Execute(Box::new(f)) + } +} + +// A stream of name resolutions. +pub struct Execute(Box>); +impl Future for Execute { + type Item = (); + type Error = (); + fn poll(&mut self) -> Poll { + self.0.poll() + } +} diff --git a/src/resolver/namerd.rs b/src/resolver/namerd.rs new file mode 100644 index 0000000..6f7727c --- /dev/null +++ b/src/resolver/namerd.rs @@ -0,0 +1,288 @@ +//! Namerd Endpointer + +// TODO In the future, we likely want to change this to use the split bind & addr APIs so +// balancers can be shared across logical names. In the meantime, it's sufficient to have +// a balancer per logical name. + +use super::{WeightedAddr, Result, Error}; +use bytes::{Buf, BufMut, IntoBuf, Bytes, BytesMut}; +use futures::{Async, Future, IntoFuture, Poll, Stream}; +use hyper::{Body, Chunk, Client, Uri}; +use hyper::client::{Connect as HyperConnect, HttpConnector}; +use hyper::status::StatusCode; +use serde_json as json; +use std::{net, time}; +use std::collections::HashMap; +use std::rc::Rc; +use tacho; +use tokio_core::reactor::Handle; +use tokio_timer::{Timer, Interval}; +use url::Url; + +type HttpConnectorFactory = Client; + +type AddrsFuture = Box, Error = Error>>; + +// pub struct Addrs(Box>, Error = ()>>); +// impl Stream for Addrs { +// type Item = Result>; +// type Error = (); +// fn poll(&mut self) -> Poll, Self::Error> { +// self.0.poll() +// } +// } + +#[derive(Clone)] +pub struct Namerd { + base_url: String, + period: time::Duration, + namespace: String, + stats: Stats, +} + +impl Namerd { + pub fn new(base_url: String, + period: time::Duration, + namespace: String, + metrics: tacho::Scope) + -> Namerd { + Namerd { + base_url: format!("{}/api/1/resolve/{}", base_url, namespace), + stats: Stats::new(metrics), + namespace, + period, + } + } +} + +impl Namerd { + pub fn with_client(self, handle: &Handle, timer: &Timer) -> WithClient { + WithClient { + namerd: self, + client: Rc::new(Client::new(handle)), + timer: 
timer.clone(), + } + } +} + +/// A name +pub struct WithClient { + namerd: Namerd, + client: Rc, + timer: Timer, +} +impl WithClient { + pub fn resolve(&self, target: &str) -> Addrs { + let uri = Url::parse_with_params(&self.namerd.base_url, &[("path", &target)]) + .expect("invalid namerd url") + .as_str() + .parse::() + .expect("Could not parse namerd URI"); + let init = request(self.client.clone(), uri.clone(), self.namerd.stats.clone()); + let interval = self.timer.interval(self.namerd.period); + Addrs { + client: self.client.clone(), + stats: self.namerd.stats.clone(), + state: Some(State::Pending(init, interval)), + uri, + } + } +} + +/// Streams +pub struct Addrs { + state: Option, + client: Rc, + uri: Uri, + stats: Stats, +} + +enum State { + Pending(AddrsFuture, Interval), + Waiting(Interval), +} + +impl Stream for Addrs { + type Item = Result>; + type Error = Error; + + fn poll(&mut self) -> Poll, Self::Error> { + loop { + match self.state.take().expect("polled after completion") { + State::Waiting(mut int) => { + match int.poll() { + Err(e) => { + self.state = Some(State::Waiting(int)); + return Err(Error::Timer(e)); + } + Ok(Async::NotReady) => { + self.state = Some(State::Waiting(int)); + return Ok(Async::NotReady); + } + Ok(Async::Ready(_)) => { + let fut = { + let c = self.client.clone(); + let u = self.uri.clone(); + let s = self.stats.clone(); + request(c, u, s) + }; + self.state = Some(State::Pending(fut, int)); + } + } + } + + State::Pending(mut fut, int) => { + match fut.poll() { + Err(e) => { + self.state = Some(State::Waiting(int)); + return Ok(Async::Ready(Some(Err(e)))); + } + Ok(Async::Ready(addrs)) => { + self.state = Some(State::Waiting(int)); + return Ok(Async::Ready(Some(Ok(addrs)))); + } + Ok(Async::NotReady) => { + self.state = Some(State::Pending(fut, int)); + return Ok(Async::NotReady); + } + } + } + } + } + } +} + +fn request(client: Rc>, uri: Uri, stats: Stats) -> AddrsFuture { + debug!("Polling namerd at {}", uri.to_string()); + 
let rsp = stats + .request_latency + .time(client.get(uri).then(handle_response)) + .then(move |rsp| { + if rsp.is_ok() { + stats.success_count.incr(1); + } else { + stats.failure_count.incr(1); + } + rsp + }); + Box::new(rsp) +} + +fn handle_response(result: ::hyper::Result<::hyper::client::Response>) -> AddrsFuture { + match result { + Ok(rsp) => { + match rsp.status() { + StatusCode::Ok => parse_body(rsp.body()), + status => { + info!("error: bad response: {}", status); + Box::new(Err(Error::UnexpectedStatus(status)).into_future()) + } + } + } + Err(e) => { + error!("failed to read response: {:?}", e); + Box::new(Err(Error::Hyper(e)).into_future()) + } + } +} + +fn parse_body(body: Body) -> AddrsFuture { + trace!("parsing namerd response"); + body.collect() + .then(|res| match res { + Ok(ref chunks) => parse_chunks(chunks), + Err(e) => { + info!("error: {}", e); + Err(Error::Hyper(e)) + } + }) + .boxed() +} + +fn bytes_in(chunks: &[Chunk]) -> usize { + let mut sz = 0; + for c in chunks { + sz += (*c).len(); + } + sz +} + +fn to_buf(chunks: &[Chunk]) -> Bytes { + let mut buf = BytesMut::with_capacity(bytes_in(chunks)); + for c in chunks { + buf.put_slice(&*c) + } + buf.freeze() +} + +fn parse_chunks(chunks: &[Chunk]) -> Result> { + let r = to_buf(chunks).into_buf().reader(); + let result: json::Result = json::from_reader(r); + match result { + Ok(ref nrsp) if nrsp.kind == "bound" => Ok(to_weighted_addrs(&nrsp.addrs)), + Ok(_) => Err(Error::NotBound), + Err(e) => { + info!("error parsing response: {}", e); + Err(Error::Serde(e)) + } + } +} + +fn to_weighted_addrs(namerd_addrs: &[NamerdAddr]) -> Vec { + // We never intentionally clear the EndpointMap. + let mut dsts: Vec = Vec::new(); + let mut sum = 0.0; + for na in namerd_addrs { + let addr = net::SocketAddr::new(na.ip.parse().unwrap(), na.port); + let w = na.meta.endpoint_addr_weight.unwrap_or(1.0); + sum += w; + dsts.push(WeightedAddr::new(addr, w)); + } + // Normalize weights on [0.0, 0.1]. 
+ for mut dst in &mut dsts { + dst.weight /= sum; + } + dsts +} + +#[derive(Debug, Deserialize)] +struct NamerdResponse { + #[serde(rename = "type")] + kind: String, + addrs: Vec, + meta: HashMap, +} + +#[derive(Debug, Deserialize)] +struct NamerdAddr { + ip: String, + port: u16, + meta: Meta, +} + +#[derive(Debug, Deserialize)] +struct Meta { + authority: Option, + + #[serde(rename = "nodeName")] + node_name: Option, + + endpoint_addr_weight: Option, +} + + +#[derive(Clone)] +pub struct Stats { + request_latency: tacho::Timer, + success_count: tacho::Counter, + failure_count: tacho::Counter, +} +impl Stats { + fn new(metrics: tacho::Scope) -> Stats { + Stats { + request_latency: metrics.timer_ms("request_latency_ms".into()), + success_count: metrics.counter("success_count".into()), + failure_count: metrics.counter("failure_count".into()), + } + } +} diff --git a/src/router.rs b/src/router.rs new file mode 100644 index 0000000..7498dd2 --- /dev/null +++ b/src/router.rs @@ -0,0 +1,106 @@ +use super::{ConfigError, Path}; +use super::balancer::{Balancer, BalancerFactory}; +use super::resolver::Resolver; +use futures::{Future, Poll, Async}; +use std::cell::RefCell; +use std::collections::HashMap; +use std::io; +use std::rc::Rc; +use tacho::{self, Timing}; +use tokio_core::reactor::Handle; +use tokio_timer::Timer; + +static ROUTE_CREATE_KEY: &'static str = "route_create"; +static ROUTE_ERROR_KEY: &'static str = "route_error"; +static ROUTE_FOUND_KEY: &'static str = "route_found"; +static ROUTE_TIME_US_KEY: &'static str = "route_time_us"; + +pub fn new(resolver: Resolver, factory: BalancerFactory, metrics: &tacho::Scope) -> Router { + let inner = InnerRouter { + resolver, + factory, + routes: HashMap::default(), + route_create: metrics.counter(ROUTE_CREATE_KEY), + route_error: metrics.counter(ROUTE_ERROR_KEY), + route_found: metrics.counter(ROUTE_FOUND_KEY), + route_time_us: metrics.stat(ROUTE_TIME_US_KEY), + }; + Router(Rc::new(RefCell::new(inner))) +} + + +/// Produces 
a `Balancer` for a +/// +/// The router maintains an internal cache of routes, by destination name. +#[derive(Clone)] +pub struct Router(Rc>); + +impl Router { + /// Obtains a balancer for an inbound connection. + pub fn route(&self, dst: &Path, rct: &Handle, tim: &Timer) -> Route { + self.0.borrow_mut().route(dst, rct, tim) + } +} + +struct InnerRouter { + routes: HashMap, + resolver: Resolver, + factory: BalancerFactory, + route_create: tacho::Counter, + route_error: tacho::Counter, + route_found: tacho::Counter, + route_time_us: tacho::Stat, +} + +impl InnerRouter { + fn route(&mut self, dst: &Path, reactor: &Handle, timer: &Timer) -> Route { + let t = tacho::Timing::start(); + let r = self.do_route(dst, reactor, timer); + self.route_time_us.add(t.elapsed_us()); + Route(Some(r)) + } + + fn do_route(&mut self, + dst: &Path, + reactor: &Handle, + timer: &Timer) + -> Result { + // Try to get a balancer from the cache. + if let Some(route) = self.routes.get(dst) { + self.route_found.incr(1); + return Ok(route.clone()); + } + + let resolve = self.resolver.resolve(dst.clone()); + match self.factory.mk_balancer(reactor, timer, dst, resolve) { + Err(e) => { + self.route_error.incr(1); + Err(e) + } + Ok(balancer) => { + self.route_create.incr(1); + self.routes.insert(dst.clone(), balancer.clone()); + Ok(balancer) + } + } + } +} + +/// Materializes a `Balancer`. 
+/// +/// +#[derive(Clone)] +pub struct Route(Option>); +impl Future for Route { + type Item = Balancer; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + match self.0 + .take() + .expect("route must not be polled more than once") { + Err(e) => Err(io::Error::new(io::ErrorKind::Other, format!("config error: {}", e))), + Ok(selector) => Ok(Async::Ready(selector)), + } + } +} diff --git a/src/server/config.rs b/src/server/config.rs new file mode 100644 index 0000000..f4c7264 --- /dev/null +++ b/src/server/config.rs @@ -0,0 +1,97 @@ +use super::{Unbound, sni}; +use super::super::ConfigError; +use super::super::router::Router; +use rustls; +use std::cell::RefCell; +use std::collections::HashMap; +use std::net; +use std::rc::Rc; +use std::sync::Arc; +use std::time::Duration; +use tacho; + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct ServerConfig { + port: u16, + ip: Option, + dst_name: Option, + tls: Option, + connect_timeout_ms: Option, + connection_lifetime_secs: Option, + max_concurrency: Option, + // TODO idle time +} + +impl ServerConfig { + pub fn mk_server(&self, + router: Router, + buf: Rc>>, + metrics: &tacho::Scope) + -> Result { + match *self { + ServerConfig { + port, + ref ip, + ref dst_name, + ref tls, + ref connect_timeout_ms, + ref connection_lifetime_secs, + ref max_concurrency, + } => { + if dst_name.is_none() { + return Err("`dst_name` required".into()); + } + let dst_name = dst_name.as_ref().unwrap().clone(); + let ip = ip.unwrap_or_else(|| net::IpAddr::V4(net::Ipv4Addr::new(127, 0, 0, 1))); + let addr = net::SocketAddr::new(ip, port); + let tls = match tls.as_ref() { + None => None, + Some(&TlsServerConfig { + ref alpn_protocols, + ref default_identity, + ref identities, + }) => { + let mut tls = rustls::ServerConfig::new(); + if let Some(protos) = alpn_protocols.as_ref() { + tls.set_protocols(protos); + } + let sni = sni::new(identities, default_identity)?; + 
tls.cert_resolver = Box::new(sni); + Some(super::UnboundTls { config: Arc::new(tls) }) + } + }; + let timeout = connect_timeout_ms.map(Duration::from_millis); + let lifetime = connection_lifetime_secs.map(Duration::from_secs); + let max_concurrency = max_concurrency.unwrap_or(super::DEFAULT_MAX_CONCURRENCY); + Ok(super::unbound(addr, + dst_name.into(), + router, + buf, + tls, + timeout, + lifetime, + max_concurrency, + metrics)) + } + } + } +} + +// TODO support cypher suites +// TODO support client validation +// TODO supoprt persistence? +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct TlsServerConfig { + pub alpn_protocols: Option>, + pub default_identity: Option, + pub identities: Option>, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields, rename_all = "camelCase")] +pub struct TlsServerIdentityConfig { + pub certs: Vec, + pub private_key: String, +} diff --git a/src/server/mod.rs b/src/server/mod.rs new file mode 100644 index 0000000..26e7063 --- /dev/null +++ b/src/server/mod.rs @@ -0,0 +1,353 @@ +//! TODO `dst_name` should be chosen dynamically. + +use super::Path; +use super::connection::{Connection, Socket, ctx, secure, socket}; +use super::router::Router; +use futures::{Async, Future, Poll, Stream, future}; +use rustls; +use std::{io, net}; +use std::cell::RefCell; +use std::rc::Rc; +use std::sync::Arc; +use std::time::Duration; +use tacho; +use tokio_core::net::{TcpListener, TcpStream}; +use tokio_core::reactor::Handle; +use tokio_timer::Timer; + +mod config; +mod sni; +pub use self::config::ServerConfig; + +const DEFAULT_MAX_CONCURRENCY: usize = 100000; + +/// Builds a server that is not yet bound on a port. 
+fn unbound(listen_addr: net::SocketAddr, + dst_name: Path, + router: Router, + buf: Rc>>, + tls: Option, + connect_timeout: Option, + connection_lifetime: Option, + max_concurrency: usize, + metrics: &tacho::Scope) + -> Unbound { + let metrics = metrics.clone().prefixed("srv"); + Unbound { + listen_addr, + dst_name, + router, + buf, + tls, + connect_timeout, + connection_lifetime, + max_concurrency, + metrics, + } +} + +pub struct Unbound { + listen_addr: net::SocketAddr, + dst_name: Path, + router: Router, + buf: Rc>>, + tls: Option, + metrics: tacho::Scope, + connect_timeout: Option, + connection_lifetime: Option, + max_concurrency: usize, +} +impl Unbound { + pub fn listen_addr(&self) -> net::SocketAddr { + self.listen_addr + } + + pub fn dst_name(&self) -> &Path { + &self.dst_name + } + + fn init_src_connection(dst_name: Path, + src_tcp: TcpStream, + metrics: &Metrics, + tls: &Option) + -> Box, Error = io::Error>> { + + let sock: Box> = match tls.as_ref() { + None => future::ok(socket::plain(src_tcp)).boxed(), + Some(tls) => { + // TODO we should be able to get metadata from a TLS handshake but we can't! 
+ let sock = tls.handshake_latency + .time(secure::server_handshake(src_tcp, &tls.config)) + .map(socket::secure_server); + Box::new(sock) + } + }; + + let metrics = metrics.per_conn.clone(); + let conn = sock.map(move |sock| { + let ctx = SrcCtx { + rx_bytes_total: 0, + tx_bytes_total: 0, + metrics, + }; + Connection::new(dst_name, sock, ctx) + }); + Box::new(conn) + } + + pub fn bind(self, reactor: &Handle, timer: &Timer) -> io::Result { + debug!("routing on {} to {}", self.listen_addr, self.dst_name); + let listen = TcpListener::bind(&self.listen_addr, reactor)?; + let bound_addr = listen.local_addr().unwrap(); + + let metrics = self.metrics.labeled("srv_addr", format!("{}", bound_addr)); + let tls = self.tls + .map(|tls| { + BoundTls { + config: tls.config, + handshake_latency: + metrics.clone().prefixed("tls").timer_us("handshake_us"), + } + }); + + let connect_metrics = metrics.clone().prefixed("connect"); + let stream_metrics = metrics.clone().prefixed("stream"); + let per_conn = ConnMetrics { + rx_bytes: stream_metrics.counter("rx_bytes"), + tx_bytes: stream_metrics.counter("tx_bytes"), + rx_bytes_per_conn: stream_metrics.stat("connection_rx_bytes"), + tx_bytes_per_conn: stream_metrics.stat("connection_tx_bytes"), + latency: connect_metrics.timer_us("latency_us"), + duration: stream_metrics.timer_ms("duration_ms"), + }; + let metrics = Metrics { + accepts: metrics.counter("accepts"), + closes: metrics.counter("closes"), + failures: metrics.counter("failures"), + active: metrics.gauge("active"), + waiters: metrics.gauge("waiters"), + connect_failures: FailureMetrics::new(&connect_metrics, "failure"), + stream_failures: FailureMetrics::new(&stream_metrics, "failure"), + per_conn, + }; + + // TODO determine dst_addr dynamically. 
+ let dst_name = self.dst_name; + let router = self.router; + let connect_timeout = self.connect_timeout; + let connection_lifetime = self.connection_lifetime; + let buf = self.buf; + + let reactor = reactor.clone(); + let timer = timer.clone(); + let serving = listen + .incoming() + .map(move |(src_tcp, src_addr)| { + trace!("received incoming connection from {}", src_addr); + metrics.accepts.incr(1); + let active = metrics.active.clone(); + active.incr(1); + let waiters = metrics.waiters.clone(); + waiters.incr(1); + + // Finish accepting the connection from the server. + // TODO determine dst_addr dynamically. + let src = Unbound::init_src_connection(dst_name.clone(), src_tcp, &metrics, &tls); + + // Obtain a balancing endpoint selector for the given destination. + let balancer = router.route(&dst_name, &reactor, &timer); + + // Once the incoming connection is ready and we have a balancer ready, obtain an + // outbound connection and begin streaming. We obtain an outbound connection after + // the incoming handshake is complete so that we don't waste outbound connections + // on failed inbound connections. + let connect = src.join(balancer) + .and_then(move |(src, b)| b.connect().map(move |dst| (src, dst))); + + // Enforce a connection timeout, measure successful connection + // latencies and failure counts. + let connect = { + // Measure the time until the connection is established, if it completes. + let c = timeout(metrics.per_conn.latency.time(connect), + connect_timeout, + &timer); + let fails = metrics.connect_failures.clone(); + c.then(move |res| match res { + Ok((src, dst)) => { + trace!("connection ready for {} to {}", + src_addr, + dst.peer_addr()); + waiters.decr(1); + Ok((src, dst)) + } + Err(e) => { + trace!("connection failed for {}: {}", src_addr, e); + waiters.decr(1); + fails.record(&e); + Err(e) + } + }) + }; + + // Copy data between the endpoints. 
+ let stream = { + let buf = buf.clone(); + let stream_fails = metrics.stream_failures.clone(); + let duration = metrics.per_conn.duration.clone(); + let lifetime = connection_lifetime; + let timer = timer.clone(); + connect.and_then(move |(src, dst)| { + // Enforce a timeout on total connection lifetime. + let dst_addr = dst.peer_addr(); + let duplex = src.into_duplex(dst, buf); + duration + .time(timeout(duplex, lifetime, &timer)) + .then(move |res| match res { + Ok(_) => { + trace!("stream succeeded for {} to {}", + src_addr, + dst_addr); + Ok(()) + } + Err(e) => { + trace!("stream failed for {} to {}: {}", + src_addr, + dst_addr, + e); + stream_fails.record(&e); + Err(e) + } + }) + }) + }; + + let closes = metrics.closes.clone(); + let failures = metrics.failures.clone(); + stream.then(move |ret| { + active.decr(1); + if ret.is_ok() { + closes.incr(1); + } else { + failures.incr(1); + } + Ok(()) + }) + }) + .buffer_unordered(self.max_concurrency); + + Ok(Bound(Box::new(serving))) + } +} + +pub struct Bound(Box + 'static>); +impl Future for Bound { + type Item = (); + type Error = io::Error; + fn poll(&mut self) -> Poll { + loop { + match self.0.poll() { + Ok(Async::NotReady) => return Ok(Async::NotReady), + Ok(Async::Ready(None)) => return Ok(Async::Ready(())), + Ok(Async::Ready(Some(_))) | + Err(_) => {} + } + } + } +} + +struct Metrics { + accepts: tacho::Counter, + closes: tacho::Counter, + failures: tacho::Counter, + active: tacho::Gauge, + waiters: tacho::Gauge, + per_conn: ConnMetrics, + connect_failures: FailureMetrics, + stream_failures: FailureMetrics, +} + +#[derive(Clone)] +struct FailureMetrics { + timeouts: tacho::Counter, + other: tacho::Counter, +} +impl FailureMetrics { + fn new(metrics: &tacho::Scope, key: &'static str) -> FailureMetrics { + FailureMetrics { + timeouts: metrics.clone().labeled("cause", "timeout").counter(key), + other: metrics.clone().labeled("cause", "other").counter(key), + } + } + + fn record(&self, e: &io::Error) { + if 
e.kind() == io::ErrorKind::TimedOut { + self.timeouts.incr(1); + } else { + self.other.incr(1); + } + } +} + +#[derive(Clone)] +struct ConnMetrics { + rx_bytes: tacho::Counter, + tx_bytes: tacho::Counter, + rx_bytes_per_conn: tacho::Stat, + tx_bytes_per_conn: tacho::Stat, + duration: tacho::Timer, + latency: tacho::Timer, +} + +fn timeout(fut: F, + timeout: Option, + timer: &Timer) + -> Box> + where F: Future + 'static +{ + match timeout { + None => Box::new(fut), + Some(duration) => { + let timer = timer.clone(); + let fut = future::lazy(move || timer.timeout(fut, duration)); + Box::new(fut) + } + } +} + +#[derive(Clone)] +pub struct UnboundTls { + config: Arc, +} + +#[derive(Clone)] +pub struct BoundTls { + config: Arc, + handshake_latency: tacho::Timer, +} + +pub struct SrcCtx { + rx_bytes_total: usize, + tx_bytes_total: usize, + metrics: ConnMetrics, +} +impl ctx::Ctx for SrcCtx { + fn read(&mut self, sz: usize) { + self.rx_bytes_total += sz; + self.metrics.rx_bytes.incr(sz); + } + + fn wrote(&mut self, sz: usize) { + self.tx_bytes_total += sz; + self.metrics.tx_bytes.incr(sz); + } +} +impl Drop for SrcCtx { + fn drop(&mut self) { + self.metrics + .rx_bytes_per_conn + .add(self.rx_bytes_total as u64); + self.metrics + .tx_bytes_per_conn + .add(self.tx_bytes_total as u64); + } +} diff --git a/src/app/sni.rs b/src/server/sni.rs similarity index 57% rename from src/app/sni.rs rename to src/server/sni.rs index 1f7daa2..5ad411f 100644 --- a/src/app/sni.rs +++ b/src/server/sni.rs @@ -1,6 +1,5 @@ - - -use app::config::TlsServerIdentity as IdentityConfig; +use super::config::TlsServerIdentityConfig; +use super::super::ConfigError; use rustls::{Certificate, ResolvesServerCert, SignatureScheme, sign}; use rustls::internal::pemfile; use std::collections::HashMap; @@ -8,31 +7,35 @@ use std::fs::File; use std::io::BufReader; use std::sync::Arc; +pub fn new(identities: &Option>, + default: &Option) + -> Result { + let n_identities = identities.as_ref().map(|ids| 
ids.len()).unwrap_or(0); + if default.is_none() && n_identities > 0 { + return Err("No TLS server identities specified".into()); + } + let sni = Sni { + default: default.as_ref().map(|c| ServerIdentity::load(c)), + identities: { + let mut ids = HashMap::with_capacity(n_identities); + if let Some(identities) = identities.as_ref() { + for (k, c) in identities { + let k: String = (*k).clone(); + let v = ServerIdentity::load(c); + ids.insert(k, v); + } + } + Arc::new(ids) + }, + }; + Ok(sni) +} + pub struct Sni { default: Option, identities: Arc>, } -impl Sni { - pub fn new(identities: &Option>, - default: &Option) - -> Sni { - Sni { - default: default.as_ref().map(|c| ServerIdentity::load(c)), - identities: { - let mut ids = HashMap::new(); - if let Some(ref identities) = *identities { - for (k, c) in identities { - let k = k.clone(); - ids.insert(k, ServerIdentity::load(c)); - } - } - Arc::new(ids) - }, - } - } -} - fn to_chain_and_signer(id: &ServerIdentity) -> sign::CertChainAndSigner { (id.certs.clone(), id.key.clone()) } @@ -43,14 +46,15 @@ impl ResolvesServerCert for Sni { _sigschemes: &[SignatureScheme]) -> Option { debug!("finding cert resolver for {:?}", server_name); - server_name.and_then(|n| { - debug!("found match for {}", n); - self.identities.get(n) - }) + server_name + .and_then(|n| { + debug!("found match for {}", n); + self.identities.get(n) + }) .or_else(|| { - debug!("reverting to default"); - self.default.as_ref() - }) + debug!("reverting to default"); + self.default.as_ref() + }) .map(to_chain_and_signer) } } @@ -61,7 +65,7 @@ struct ServerIdentity { } impl ServerIdentity { - fn load(c: &IdentityConfig) -> ServerIdentity { + fn load(c: &TlsServerIdentityConfig) -> ServerIdentity { let mut certs = vec![]; for p in &c.certs { certs.append(&mut load_certs(p));