diff --git a/.env.development b/.env.development
new file mode 100644
index 0000000..da96fb5
--- /dev/null
+++ b/.env.development
@@ -0,0 +1 @@
+PUBLIC_RELAY_HOST="localhost:4443"
diff --git a/.env.production b/.env.production
new file mode 100644
index 0000000..7a9770e
--- /dev/null
+++ b/.env.production
@@ -0,0 +1 @@
+PUBLIC_RELAY_HOST="relay.quic.video"
diff --git a/.github/logo.svg b/.github/logo.svg
new file mode 100644
index 0000000..109b070
--- /dev/null
+++ b/.github/logo.svg
@@ -0,0 +1,348 @@
+
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 0000000..71e12c2
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,55 @@
+name: main
+
+on:
+ push:
+ branches: ["main"]
+
+env:
+ REGISTRY: docker.io
+ IMAGE: ${{ github.repository }}
+ SERVICE: web
+
+jobs:
+ deploy:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ packages: write
+ id-token: write
+
+ # Only one release at a time and cancel prior releases
+ concurrency:
+ group: release
+ cancel-in-progress: true
+
+ steps:
+ - uses: actions/checkout@v3
+
+ # I'm paying for Depot for faster ARM builds.
+ - uses: depot/setup-action@v1
+
+ # Log in to the Docker registry using the stored credentials
+ - uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+
+ # Build and push Docker image with Depot
+ - uses: depot/build-push-action@v1
+ id: depot
+ with:
+ project: hppp3x0qjc
+ context: .
+ push: true
+ tags: ${{env.REGISTRY}}/${{env.IMAGE}}
+
+ # Log in to GCP
+ - uses: google-github-actions/auth@v1
+ with:
+ credentials_json: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }}
+
+ # Deploy to Cloud Run
+ - uses: google-github-actions/deploy-cloudrun@v1
+ with:
+ service: web
+ image: ${{env.REGISTRY}}/${{env.IMAGE}}@${{steps.depot.outputs.imageid}}
diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml
new file mode 100644
index 0000000..5548639
--- /dev/null
+++ b/.github/workflows/pr.yml
@@ -0,0 +1,19 @@
+name: pr
+
+on:
+ pull_request:
+ branches: ["main"]
+
+jobs:
+ check:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - uses: oven-sh/setup-bun@v2
+ - uses: biomejs/setup-biome@v2
+ with:
+ version: latest
+
+ - run: bun install
+ - run: bun check
diff --git a/.gitignore b/.gitignore
index a590a6c..8dc7837 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,37 +1,26 @@
-# Local .terraform directories
-**/.terraform/*
-
-# .tfstate files
-*.tfstate
-*.tfstate.*
-
-# Crash log files
-crash.log
-crash.*.log
-
-# Exclude all .tfvars files, which are likely to contain sensitive data, such as
-# password, private keys, and other secrets. These should not be part of version
-# control as they are data points which are potentially sensitive and subject
-# to change depending on the environment.
-#*.tfvars
-#*.tfvars.json
-
-# Ignore override files as they are usually used to override resources locally and so
-# are not checked in
-override.tf
-override.tf.json
-*_override.tf
-*_override.tf.json
-
-# Include override files you do wish to add to version control using negated pattern
-# !example_override.tf
-
-# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
-# example: *tfplan*
-
-# Ignore CLI configuration files
-.terraformrc
-terraform.rc
-
-# OSX pls
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+
+# NPM
+node_modules/*
+
+# Build output
+dist/
+.astro/
+
+# Just in case somebody runs the wrong command
+yarn.lock
+pnpm-lock.yaml
+tsconfig.tsbuildinfo
+package-lock.json
+
+# Mac shit
.DS_Store
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..d6456f9
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,10 @@
+FROM oven/bun:latest
+
+WORKDIR /app
+
+COPY . .
+RUN bun install --frozen-lockfile --production
+RUN bun pack
+
+ENV HOST="0.0.0.0"
+CMD [ "bun", "./dist/server/entry.mjs" ]
diff --git a/LICENSE-APACHE b/LICENSE-APACHE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/LICENSE-APACHE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/LICENSE-MIT b/LICENSE-MIT
new file mode 100644
index 0000000..fbd437c
--- /dev/null
+++ b/LICENSE-MIT
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Luke Curley
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
index ca0ccdb..9ed0c58 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,33 @@
-# quic.video
+<p align="center">
+	<img src=".github/logo.svg" alt="quic.video">
+</p>
-This repo contains the infrastructure for [quic.video](https://quic.video).
-The actual website is in [moq-js](https://github.com/kixelated/moq-js) until I find a good way to split the library and app.
+This repository contains the code for [quic.video](https://quic.video).
+This is only the client.
+You'll either need to run a local server using [moq-rs](https://github.com/kixelated/moq-rs) or use a public server such as [relay.quic.video](https://quic.video/relay).
+
+Join the [Discord](https://discord.gg/FCYF3p99mr) for updates and discussion.
+
+## Setup
+
+Install the dependencies with `bun`:
+
+```bash
+bun i
+```
+
+## Development
+
+Run the development web server:
+
+```bash
+bun dev
+```
+
+## License
+
+Licensed under either:
+
+- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
diff --git a/astro.config.ts b/astro.config.ts
new file mode 100644
index 0000000..1bf3e37
--- /dev/null
+++ b/astro.config.ts
@@ -0,0 +1,53 @@
+import fs from "node:fs";
+import path from "node:path";
+import mdx from "@astrojs/mdx";
+import nodejs from "@astrojs/node";
+import solidJs from "@astrojs/solid-js";
+import tailwind from "@astrojs/tailwind";
+import { defineConfig } from "astro/config";
+
+import mkcert from "vite-plugin-mkcert";
+import wasm from "vite-plugin-wasm";
+
+// https://astro.build/config
+export default defineConfig({
+ integrations: [
+ mdx(),
+ solidJs(),
+ tailwind({
+ // Disable injecting a basic `base.css` import on every page.
+ applyBaseStyles: false,
+ }),
+ ],
+ // Renders any non-static pages using node
+ adapter: nodejs({
+ mode: "standalone",
+ }),
+ // Default to static rendering, but allow server rendering per-page
+ output: "hybrid",
+ vite: {
+ build: {
+ target: "esnext",
+ },
+ base: "./",
+ server: {
+ fs: {
+ allow: [
+ ".",
+ // Allow `bun link @kixelated/moq`
+ fs.realpathSync(path.resolve("node_modules/@kixelated/moq")),
+ ],
+ },
+ },
+ plugins: [mkcert(), wasm()],
+ worker: {
+ format: "es",
+ plugins: () => [wasm()],
+ },
+ resolve: {
+ alias: {
+ "@": "/src",
+ },
+ },
+ },
+});
diff --git a/biome.json b/biome.json
new file mode 100644
index 0000000..323aecd
--- /dev/null
+++ b/biome.json
@@ -0,0 +1,30 @@
+{
+ "$schema": "https://biomejs.dev/schemas/1.9.4/schema.json",
+ "vcs": {
+ "enabled": true,
+ "clientKind": "git",
+ "useIgnoreFile": true
+ },
+ "files": {
+ "ignoreUnknown": false
+ },
+ "formatter": {
+ "enabled": true,
+ "indentStyle": "tab",
+ "lineWidth": 120
+ },
+ "organizeImports": {
+ "enabled": true
+ },
+ "linter": {
+ "enabled": true,
+ "rules": {
+ "recommended": true
+ }
+ },
+ "javascript": {
+ "formatter": {
+ "quoteStyle": "double"
+ }
+ }
+}
diff --git a/bun.lockb b/bun.lockb
new file mode 100755
index 0000000..be86360
Binary files /dev/null and b/bun.lockb differ
diff --git a/env.d.ts b/env.d.ts
new file mode 100644
index 0000000..4df96c2
--- /dev/null
+++ b/env.d.ts
@@ -0,0 +1,9 @@
+/// <reference types="astro/client" />
+
+interface ImportMetaEnv {
+ readonly PUBLIC_RELAY_HOST: string;
+}
+
+interface ImportMeta {
+ readonly env: ImportMetaEnv;
+}
diff --git a/infra/.gitignore b/infra/.gitignore
new file mode 100644
index 0000000..ab7d960
--- /dev/null
+++ b/infra/.gitignore
@@ -0,0 +1,38 @@
+# Local .terraform directories
+**/.terraform/*
+
+# .tfstate files
+*.tfstate
+*.tfstate.*
+
+# Crash log files
+crash.log
+crash.*.log
+
+# Exclude all .tfvars files, which are likely to contain sensitive data, such as
+# password, private keys, and other secrets. These should not be part of version
+# control as they are data points which are potentially sensitive and subject
+# to change depending on the environment.
+#*.tfvars
+#*.tfvars.json
+
+# Ignore override files as they are usually used to override resources locally and so
+# are not checked in
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json
+
+# Include override files you do wish to add to version control using negated pattern
+# !example_override.tf
+
+# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
+# example: *tfplan*
+
+# Ignore CLI configuration files
+.terraformrc
+terraform.rc
+
+# OSX pls
+.DS_Store
+node_modules
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..8987d53
--- /dev/null
+++ b/package.json
@@ -0,0 +1,30 @@
+{
+ "name": "quic.video",
+ "type": "module",
+ "version": "0.0.1",
+ "private": true,
+ "scripts": {
+ "dev": "astro dev --open",
+ "pack": "astro build",
+ "preview": "astro preview",
+ "check": "biome ci && tsc --noEmit",
+ "fix": "biome check --write"
+ },
+ "dependencies": {
+ "@astrojs/mdx": "3.1.9",
+ "@astrojs/node": "8.3.4",
+ "@astrojs/solid-js": "4.4.4",
+ "@astrojs/tailwind": "5.1.2",
+ "@kixelated/moq": "^0.3.4",
+ "@tailwindcss/forms": "^0.5.9",
+ "@tailwindcss/typography": "^0.5.15",
+ "astro": "4.16.16",
+ "solid-js": "^1.9.3",
+ "tailwindcss": "^3.4.15",
+ "vite-plugin-mkcert": "^1.17.6",
+ "vite-plugin-wasm": "^3.3.0"
+ },
+ "devDependencies": {
+ "@types/node": "^22.10.1"
+ }
+}
diff --git a/pub/Dockerfile b/pub/Dockerfile
deleted file mode 100644
index 9b97717..0000000
--- a/pub/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM kixelated/moq-rs@sha256:f8abcef43f43b250b0c7e8bd64bb0bdc383e35749e342fcb477af6afb8cc6456
-
-# Install required utilities and ffmpeg
-RUN apt-get update && \
- apt-get install -y ffmpeg wget
-
-# Copy the publish script into the image
-COPY ./publish /usr/local/bin/publish
-
-# Use our publish script
-CMD [ "publish" ]
diff --git a/pub/deploy b/pub/deploy
deleted file mode 100755
index 652e023..0000000
--- a/pub/deploy
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-set -euo pipefail
-
-# Function to find the first available command
-find_command() {
- for cmd in "$@"; do
- if command -v "$cmd" &> /dev/null; then
- echo "$cmd"
- return
- fi
- done
- echo "Install one of: $@" >&2
- exit 1
-}
-
-# Find the first available command among podman and docker
-# Originally, depot was on the list but it doesn't seem to play nicely with auth?
-CMD=$(find_command podman docker)
-
-# Build with arm/amd and push.
-${CMD} buildx build --platform linux/arm64,linux/amd64 -t docker.io/kixelated/moq-pub .
-${CMD} push docker.io/kixelated/moq-pub
diff --git a/pub/publish b/pub/publish
deleted file mode 100755
index 9723977..0000000
--- a/pub/publish
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-set -euo pipefail
-
-ADDR=${ADDR:-"https://relay.quic.video"}
-NAME=${NAME:-"bbb"}
-URL=${URL:-"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4"}
-REGION=${REGION:-"server"}
-
-# Download the funny bunny
-wget -nv "${URL}" -O "${NAME}.mp4"
-
-# ffmpeg
-# -hide_banner: Hide the banner
-# -v quiet: and any other output
-# -stats: But we still want some stats on stderr
-# -stream_loop -1: Loop the broadcast an infinite number of times
-# -re: Output in real-time
-# -i "${INPUT}": Read from a file on disk
-# -vf "drawtext": Render the current time in the corner of the video
-# -an: Disable audio for now
-# -b:v 3M: Output video at 3Mbps
-# -preset ultrafast: Don't use much CPU at the cost of quality
-# -tune zerolatency: Optimize for latency at the cost of quality
-# -f mp4: Output to mp4 format
-# -movflags: Build a fMP4 file with a frame per fragment
-# - | moq-pub: Output to stdout and moq-pub to publish
-
-# Run ffmpeg
-ffmpeg \
- -stream_loop -1 \
- -hide_banner \
- -v quiet \
- -re \
- -i "${NAME}.mp4" \
- -vf "drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf:text='${REGION}\: %{gmtime\: %H\\\\\:%M\\\\\:%S.%3N}':x=(W-tw)-24:y=24:fontsize=48:fontcolor=white:box=1:boxcolor=black@0.5" \
- -an \
- -b:v 3M \
- -preset ultrafast \
- -tune zerolatency \
- -f mp4 \
- -movflags empty_moov+frag_every_frame+separate_moof+omit_tfhd_offset \
- - | moq-pub "${ADDR}" --name "${NAME}"
diff --git a/public/blog/forward-error-correction/graph.jpeg b/public/blog/forward-error-correction/graph.jpeg
new file mode 100644
index 0000000..d362fd4
Binary files /dev/null and b/public/blog/forward-error-correction/graph.jpeg differ
diff --git a/public/blog/forward-error-correction/mfw.jpeg b/public/blog/forward-error-correction/mfw.jpeg
new file mode 100644
index 0000000..e051b57
Binary files /dev/null and b/public/blog/forward-error-correction/mfw.jpeg differ
diff --git a/public/blog/forward-error-correction/tubes.png b/public/blog/forward-error-correction/tubes.png
new file mode 100644
index 0000000..da9dae1
Binary files /dev/null and b/public/blog/forward-error-correction/tubes.png differ
diff --git a/public/blog/kixelCat.png b/public/blog/kixelCat.png
new file mode 100644
index 0000000..aa75c51
Binary files /dev/null and b/public/blog/kixelCat.png differ
diff --git a/public/blog/moq-onion/cancel.png b/public/blog/moq-onion/cancel.png
new file mode 100644
index 0000000..1798790
Binary files /dev/null and b/public/blog/moq-onion/cancel.png differ
diff --git a/public/blog/moq-onion/karp.png b/public/blog/moq-onion/karp.png
new file mode 100644
index 0000000..d4962c4
Binary files /dev/null and b/public/blog/moq-onion/karp.png differ
diff --git a/public/blog/moq-onion/onion.png b/public/blog/moq-onion/onion.png
new file mode 100644
index 0000000..49e33e9
Binary files /dev/null and b/public/blog/moq-onion/onion.png differ
diff --git a/public/blog/moq-onion/sfu.png b/public/blog/moq-onion/sfu.png
new file mode 100644
index 0000000..6aa9526
Binary files /dev/null and b/public/blog/moq-onion/sfu.png differ
diff --git a/public/blog/moq-onion/track.png b/public/blog/moq-onion/track.png
new file mode 100644
index 0000000..770e253
Binary files /dev/null and b/public/blog/moq-onion/track.png differ
diff --git a/public/blog/never-use-datagrams/bbr.png b/public/blog/never-use-datagrams/bbr.png
new file mode 100644
index 0000000..d1d25d1
Binary files /dev/null and b/public/blog/never-use-datagrams/bbr.png differ
diff --git a/public/blog/never-use-datagrams/bodies.jpeg b/public/blog/never-use-datagrams/bodies.jpeg
new file mode 100644
index 0000000..f393b71
Binary files /dev/null and b/public/blog/never-use-datagrams/bodies.jpeg differ
diff --git a/public/blog/never-use-datagrams/denver.jpeg b/public/blog/never-use-datagrams/denver.jpeg
new file mode 100644
index 0000000..c8c35c2
Binary files /dev/null and b/public/blog/never-use-datagrams/denver.jpeg differ
diff --git a/public/blog/never-use-datagrams/glitch.gif b/public/blog/never-use-datagrams/glitch.gif
new file mode 100644
index 0000000..56e5bf2
Binary files /dev/null and b/public/blog/never-use-datagrams/glitch.gif differ
diff --git a/public/blog/quic-powers/anycast.png b/public/blog/quic-powers/anycast.png
new file mode 100644
index 0000000..6d64815
Binary files /dev/null and b/public/blog/quic-powers/anycast.png differ
diff --git a/public/blog/quic-powers/anycast2.png b/public/blog/quic-powers/anycast2.png
new file mode 100644
index 0000000..7d19b01
Binary files /dev/null and b/public/blog/quic-powers/anycast2.png differ
diff --git a/public/blog/quic-powers/http.png b/public/blog/quic-powers/http.png
new file mode 100644
index 0000000..28cf6e0
Binary files /dev/null and b/public/blog/quic-powers/http.png differ
diff --git a/public/blog/quic-powers/perf.jpg b/public/blog/quic-powers/perf.jpg
new file mode 100644
index 0000000..20005e0
Binary files /dev/null and b/public/blog/quic-powers/perf.jpg differ
diff --git a/public/blog/quic-powers/quic.png b/public/blog/quic-powers/quic.png
new file mode 100644
index 0000000..6218fb3
Binary files /dev/null and b/public/blog/quic-powers/quic.png differ
diff --git a/public/blog/quic-powers/standards.png b/public/blog/quic-powers/standards.png
new file mode 100644
index 0000000..85712dd
Binary files /dev/null and b/public/blog/quic-powers/standards.png differ
diff --git a/public/blog/quic-powers/tcp.png b/public/blog/quic-powers/tcp.png
new file mode 100644
index 0000000..2167da5
Binary files /dev/null and b/public/blog/quic-powers/tcp.png differ
diff --git a/public/blog/quic-powers/tuple-quic.png b/public/blog/quic-powers/tuple-quic.png
new file mode 100644
index 0000000..71f4a96
Binary files /dev/null and b/public/blog/quic-powers/tuple-quic.png differ
diff --git a/public/blog/quic-powers/tuple-quic2.png b/public/blog/quic-powers/tuple-quic2.png
new file mode 100644
index 0000000..2e6794e
Binary files /dev/null and b/public/blog/quic-powers/tuple-quic2.png differ
diff --git a/public/blog/quic-powers/tuple-quic3.png b/public/blog/quic-powers/tuple-quic3.png
new file mode 100644
index 0000000..a4eb4ef
Binary files /dev/null and b/public/blog/quic-powers/tuple-quic3.png differ
diff --git a/public/blog/quic-powers/tuple-tcp.png b/public/blog/quic-powers/tuple-tcp.png
new file mode 100644
index 0000000..7895c97
Binary files /dev/null and b/public/blog/quic-powers/tuple-tcp.png differ
diff --git a/public/blog/quic-powers/tuple-tcp2.png b/public/blog/quic-powers/tuple-tcp2.png
new file mode 100644
index 0000000..75e864d
Binary files /dev/null and b/public/blog/quic-powers/tuple-tcp2.png differ
diff --git a/public/blog/quic-powers/tuple-webrtc.png b/public/blog/quic-powers/tuple-webrtc.png
new file mode 100644
index 0000000..7800e35
Binary files /dev/null and b/public/blog/quic-powers/tuple-webrtc.png differ
diff --git a/public/blog/quic-powers/tuple-webrtc2.png b/public/blog/quic-powers/tuple-webrtc2.png
new file mode 100644
index 0000000..045e7be
Binary files /dev/null and b/public/blog/quic-powers/tuple-webrtc2.png differ
diff --git a/public/blog/quic-powers/vegeta.png b/public/blog/quic-powers/vegeta.png
new file mode 100644
index 0000000..2aacbeb
Binary files /dev/null and b/public/blog/quic-powers/vegeta.png differ
diff --git a/public/blog/replacing-hls-dash/buffering.gif b/public/blog/replacing-hls-dash/buffering.gif
new file mode 100644
index 0000000..b5ad3f7
Binary files /dev/null and b/public/blog/replacing-hls-dash/buffering.gif differ
diff --git a/public/blog/replacing-hls-dash/carrot.png b/public/blog/replacing-hls-dash/carrot.png
new file mode 100644
index 0000000..3805906
Binary files /dev/null and b/public/blog/replacing-hls-dash/carrot.png differ
diff --git a/public/blog/replacing-hls-dash/green.jpg b/public/blog/replacing-hls-dash/green.jpg
new file mode 100644
index 0000000..07eb2cd
Binary files /dev/null and b/public/blog/replacing-hls-dash/green.jpg differ
diff --git a/public/blog/replacing-hls-dash/troll.webp b/public/blog/replacing-hls-dash/troll.webp
new file mode 100644
index 0000000..d400246
Binary files /dev/null and b/public/blog/replacing-hls-dash/troll.webp differ
diff --git a/public/blog/replacing-webrtc/artifact.png b/public/blog/replacing-webrtc/artifact.png
new file mode 100644
index 0000000..9ddc28c
Binary files /dev/null and b/public/blog/replacing-webrtc/artifact.png differ
diff --git a/public/blog/replacing-webrtc/layers.png b/public/blog/replacing-webrtc/layers.png
new file mode 100644
index 0000000..92235e0
Binary files /dev/null and b/public/blog/replacing-webrtc/layers.png differ
diff --git a/public/blog/replacing-webrtc/sfu.png b/public/blog/replacing-webrtc/sfu.png
new file mode 100644
index 0000000..516f6a9
Binary files /dev/null and b/public/blog/replacing-webrtc/sfu.png differ
diff --git a/public/blog/replacing-webrtc/toothpaste.jpg b/public/blog/replacing-webrtc/toothpaste.jpg
new file mode 100644
index 0000000..6f836b6
Binary files /dev/null and b/public/blog/replacing-webrtc/toothpaste.jpg differ
diff --git a/public/blog/to-wasm/duck.jpeg b/public/blog/to-wasm/duck.jpeg
new file mode 100644
index 0000000..01ae9ef
Binary files /dev/null and b/public/blog/to-wasm/duck.jpeg differ
diff --git a/public/blog/to-wasm/slide.png b/public/blog/to-wasm/slide.png
new file mode 100644
index 0000000..d4809fe
Binary files /dev/null and b/public/blog/to-wasm/slide.png differ
diff --git a/public/blog/to-wasm/spooky.jpg b/public/blog/to-wasm/spooky.jpg
new file mode 100644
index 0000000..fbc7108
Binary files /dev/null and b/public/blog/to-wasm/spooky.jpg differ
diff --git a/public/blog/to-wasm/thonk.png b/public/blog/to-wasm/thonk.png
new file mode 100644
index 0000000..92fffeb
Binary files /dev/null and b/public/blog/to-wasm/thonk.png differ
diff --git a/public/blog/transfork/fork.png b/public/blog/transfork/fork.png
new file mode 100644
index 0000000..fbafdcb
Binary files /dev/null and b/public/blog/transfork/fork.png differ
diff --git a/public/home/ietf.svg b/public/home/ietf.svg
new file mode 100644
index 0000000..e77361d
--- /dev/null
+++ b/public/home/ietf.svg
@@ -0,0 +1,655 @@
+
diff --git a/public/home/logo.svg b/public/home/logo.svg
new file mode 100644
index 0000000..109b070
--- /dev/null
+++ b/public/home/logo.svg
@@ -0,0 +1,348 @@
+
diff --git a/public/home/quic.svg b/public/home/quic.svg
new file mode 100644
index 0000000..a45fdd9
--- /dev/null
+++ b/public/home/quic.svg
@@ -0,0 +1,205 @@
+
diff --git a/public/issues/warning.svg b/public/issues/warning.svg
new file mode 100644
index 0000000..1d44cf3
--- /dev/null
+++ b/public/issues/warning.svg
@@ -0,0 +1,63 @@
+
\ No newline at end of file
diff --git a/public/layout/discord.svg b/public/layout/discord.svg
new file mode 100644
index 0000000..c85cccf
--- /dev/null
+++ b/public/layout/discord.svg
@@ -0,0 +1,189 @@
+
diff --git a/public/layout/explain.svg b/public/layout/explain.svg
new file mode 100644
index 0000000..ee86665
--- /dev/null
+++ b/public/layout/explain.svg
@@ -0,0 +1,155 @@
+
diff --git a/public/layout/favicon.svg b/public/layout/favicon.svg
new file mode 100644
index 0000000..50d4749
--- /dev/null
+++ b/public/layout/favicon.svg
@@ -0,0 +1,9 @@
+
+
diff --git a/public/layout/github.svg b/public/layout/github.svg
new file mode 100644
index 0000000..7409c57
--- /dev/null
+++ b/public/layout/github.svg
@@ -0,0 +1,165 @@
+
diff --git a/public/layout/logo.svg b/public/layout/logo.svg
new file mode 100644
index 0000000..e1d2b60
--- /dev/null
+++ b/public/layout/logo.svg
@@ -0,0 +1,267 @@
+
diff --git a/public/layout/publish.svg b/public/layout/publish.svg
new file mode 100644
index 0000000..e6d4bad
--- /dev/null
+++ b/public/layout/publish.svg
@@ -0,0 +1,140 @@
+
diff --git a/public/layout/source.svg b/public/layout/source.svg
new file mode 100644
index 0000000..74efed4
--- /dev/null
+++ b/public/layout/source.svg
@@ -0,0 +1,143 @@
+
diff --git a/public/layout/watch.svg b/public/layout/watch.svg
new file mode 100644
index 0000000..06ffa00
--- /dev/null
+++ b/public/layout/watch.svg
@@ -0,0 +1,107 @@
+
diff --git a/public/watch/bunny.png b/public/watch/bunny.png
new file mode 100644
index 0000000..0efbed5
Binary files /dev/null and b/public/watch/bunny.png differ
diff --git a/src/components/fail.tsx b/src/components/fail.tsx
new file mode 100644
index 0000000..ce36b5e
--- /dev/null
+++ b/src/components/fail.tsx
@@ -0,0 +1,19 @@
+import { Show, createEffect } from "solid-js";
+
+export default function Fail(props: { error?: Error }) {
+ createEffect(() => {
+ if (props.error) {
+ console.error(props.error);
+ }
+ });
+
+ return (
+ <Show when={props.error}>
+ {(error) => (
+ <div>
+ {error().name}: {error().message}
+ <br />
+ This is a proof-of-concept. Check out the numerous issues.
+ </div>
+ )}
+ </Show>
+ );
+}
diff --git a/src/components/watch.tsx b/src/components/watch.tsx
new file mode 100644
index 0000000..6f78204
--- /dev/null
+++ b/src/components/watch.tsx
@@ -0,0 +1,16 @@
+import "@kixelated/moq";
+
+export default function Watch(props: { path: string }) {
+ // Use query params to allow overriding environment variables.
+ const urlSearchParams = new URLSearchParams(window.location.search);
+ const params = Object.fromEntries(urlSearchParams.entries());
+ const server = params.server ?? import.meta.env.PUBLIC_RELAY_HOST;
+
+ const url = `https://${server}/${props.path}`;
+
+ return (
+ // `@kixelated/moq` registers its web components on import; the element name
+ // here is an assumption about that package's API.
+ <moq-watch url={url} />
+ );
+}
diff --git a/src/elements.d.ts b/src/elements.d.ts
new file mode 100644
index 0000000..b4d334a
--- /dev/null
+++ b/src/elements.d.ts
@@ -0,0 +1,17 @@
+// Provides TypeScript support for any web components that add themselves to HTMLElementTagNameMap.
+// See: https://github.com/solidjs/solid/issues/616#issuecomment-1144074821
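+// Example (hypothetical element): <my-player prop:volume={0.5} /> sets the
+// element's `volume` property rather than an HTML attribute.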
+declare module "solid-js" {
+ namespace JSX {
+ type ElementProps<T> = {
+ // Add both the element's prefixed properties and the attributes
+ [K in keyof T]: Props<T[K]> & HTMLAttributes<T[K]>;
+ };
+ // Prefixes all properties with `prop:` to match Solid's property setting syntax
+ type Props<T> = {
+ [K in keyof T as `prop:${string & K}`]?: T[K];
+ };
+ interface IntrinsicElements extends ElementProps<HTMLElementTagNameMap> {}
+ }
+}
+
+export {};
diff --git a/src/env.d.ts b/src/env.d.ts
new file mode 100644
index 0000000..acef35f
--- /dev/null
+++ b/src/env.d.ts
@@ -0,0 +1,2 @@
+/// <reference path="../.astro/types.d.ts" />
+/// <reference types="astro/client" />
diff --git a/src/layouts/global.astro b/src/layouts/global.astro
new file mode 100644
index 0000000..e9b7cfd
--- /dev/null
+++ b/src/layouts/global.astro
@@ -0,0 +1,64 @@
+---
+import "./global.css";
+
+// NOTE: This is magically used as the type for Astro.props
+interface Props {
+ title: string;
+
+ frontmatter?: {
+ title: string;
+ date: string;
+ };
+}
+
+let { title, frontmatter } = Astro.props;
+if (frontmatter?.title) title = frontmatter.title;
+---
+
+
+
+
+
+
+
+
+
+ {title ? `${title} - Media over QUIC` : "Media over QUIC"}
+
+
+
+
+
+ {
+ frontmatter?.date && (
+
+ published {new Date(frontmatter.date).toLocaleDateString()}
+
+ )
+ }
+
+
+
+
+
diff --git a/src/layouts/global.css b/src/layouts/global.css
new file mode 100644
index 0000000..7df573f
--- /dev/null
+++ b/src/layouts/global.css
@@ -0,0 +1,39 @@
+@tailwind base;
+@tailwind components;
+@tailwind utilities;
+
+@layer components {
+ /* see https://tailwindcss.com/docs/typography-plugin */
+ .markdown {
+ @apply prose prose-lg lg:prose-xl;
+ @apply prose-moq;
+ @apply prose-headings:underline prose-headings:decoration-green-500 prose-headings:decoration-4 prose-headings:underline-offset-4;
+ @apply prose-h3:text-2xl;
+ @apply prose-th:no-underline;
+ @apply prose-h1:mb-8 prose-h1:mt-12 prose-h1:decoration-8;
+ @apply prose-h2:decoration-4;
+ @apply prose-h3:decoration-2;
+ @apply prose-li:my-1;
+ @apply prose-img:rounded-lg;
+ @apply prose-figure:object-center prose-figure:text-center prose-img:m-auto;
+ @apply prose-a:font-bold prose-a:text-green-500 prose-a:underline prose-a:decoration-green-500 prose-a:decoration-2 prose-a:underline-offset-4;
+ }
+
+ .tagline {
+ @apply my-12 text-center text-2xl font-bold text-white underline decoration-green-500 decoration-4 underline-offset-4 md:text-3xl;
+ }
+
+ button,
+ .form-button {
+ @apply rounded-md border-0 bg-slate-700 font-bold shadow-sm hover:bg-slate-800 focus-visible:outline focus-visible:outline-2 focus-visible:outline-offset-2;
+ @apply px-12 py-4 text-2xl;
+ }
+
+ input,
+ select,
+ input[type="text"],
+ .form-input,
+ .form-select {
+ @apply rounded-md border-0 bg-slate-700 px-4 py-2 text-xl text-slate-100 shadow-sm focus:ring-1 focus:ring-inset focus:ring-green-600;
+ }
+}
diff --git a/src/pages/404.mdx b/src/pages/404.mdx
new file mode 100644
index 0000000..e518593
--- /dev/null
+++ b/src/pages/404.mdx
@@ -0,0 +1,8 @@
+---
+layout: "@/layouts/global.astro"
+title: 404
+---
+
+# 4 0 4
+
+Page not found
diff --git a/src/pages/blog/distribution-at-twitch.mdx b/src/pages/blog/distribution-at-twitch.mdx
new file mode 100644
index 0000000..c7bf05b
--- /dev/null
+++ b/src/pages/blog/distribution-at-twitch.mdx
@@ -0,0 +1,264 @@
+---
+layout: "@/layouts/global.astro"
+title: Distribution @ Twitch
+author: kixelated
+description: Eight years of progress at Twitch with various distribution protocols.
+cover: "/blog/kixelCat.png"
+date: 2022-02-15
+---
+
+# Source
+
+[This document](https://docs.google.com/document/d/1OTnJunbpSJchdj8XI3GU9Fo-RUUFBqLO1AhlaKk5Alo/edit?usp=sharing) was originally shared on [MoQ IETF mailing list](https://mailarchive.ietf.org/arch/msg/moq/3-9tqDmEuzij7Iz3Xf7kIsR1ZYo/).
+This is a verbatim copy of the original, preserved here outside of Google Docs.
+
+# Introduction
+
+This is an attempt to document the issues that Twitch/Amazon IVS has encountered with various distribution protocols over the last 8 years.
+
+# HLS
+
+We initially used RTMP for distribution but switched to HLS something like 8 years ago.
+We use MSE to feed the player buffer on web platforms.
+This assumes 2s segments.
+
+### Congestion
+
+- Must finish downloading the current segment before ABR can switch renditions
+- ABR cannot differentiate between broadcaster starvation and network congestion
+- Depleting the buffer causes a jarring pause while it refills
+
+### Latency
+
+- Segments must be finished before they are added to the playlist, adding 2s
+- Playlist updates must be polled by the player, adding up to 2s
+- Segments are downloaded sequentially, adding 200ms? between requests
+- MSE has a minimum buffer size, adding 100ms?
+- BBRv1 introduces latency during the PROBE_RTT phase, adding 400ms?
+- Depleting the buffer increases latency for the remainder of the session
+
+### Time to Video
+
+- Fetching the first rendition playlist takes 3 RTTs.
+- Fetching the first segment takes 3 RTTs.
+- The buffer needs to be sufficiently filled before playback starts.
+
+### Clients
+
+- Our server can't do anything to improve dumb clients
+- Limited 3rd party metrics
+
+# LHLS
+
+We made our own fork of HLS to address some of the issues mentioned above.
+Segments are advertised in the playlist ahead of time and delivered frame-by-frame with HTTP chunked-transfer.
+
+
+AS COMPARED TO _**HLS**_
+
+### Congestion
+
+- **(new)** Individual frames are often not large enough to saturate a network.
+- **(new)** ABR has difficulty determining when it can switch up
+
+### Latency
+
+- ~~Segments must be finished before they can be downloaded, adding 2-4s~~
+- ~~Playlist updates must be polled by the player, adding 0-2s~~
+- **(new)** We benchmark the network between segments, adding 200ms?
+
+### Clients
+
+- ~~Our server can't do anything to improve dumb clients~~
+- ~~Limited 3rd party metrics~~
+- **(new)** 3rd party clients are not supported
+
+### Performance
+
+- **(new)** Frame delivery is more expensive due to context switching
+
+# LL-HLS
+
+Apple went ahead and made their own low latency HLS solution.
+Segments are split into sub-segments and updates are requested more frequently.
+We have not implemented this yet so some of these bullet points may be inaccurate or missing.
+This assumes 2s segments and 500ms sub-segments
+
+
+AS COMPARED TO _**LHLS**_
+
+### Congestion
+
+- ~~Individual frames are often not large enough to saturate a network.~~
+- ~~ABR has difficulty determining when it can switch up~~
+
+### Latency
+
+- ~~A blocking speed test between segments is required to saturate the network~~
+- **(new)** Sub-segments must be finished before they are added to the playlist, adding 500ms
+- **(new)** Playlist updates need to be pushed to the player, adding 100ms?
+
+### Clients
+
+- ~~3rd party clients are not supported~~
+- **(back)** Our server can't do anything to improve dumb clients
+- **(back)** Limited 3rd party metrics
+
+### Performance
+
+- ~~Frame delivery is more expensive due to context switching~~
+- **(new)** 4x the number of playlist and segment requests
+- **(new)** Playlist long-polling involves more context switching
+
+# WebRTC
+
+We decided that the only way to further reduce latency was to use WebRTC.
+This project involved using WebRTC for last-mile delivery; our existing video system (RTMP ingest, HLS distribution) was used for everything prior.
+We tried twice; once using libwebrtc and another time using a heavily forked [pion](https://github.com/pion/webrtc).
+
+Some of these issues would not be present if we replaced our entire video pipeline with WebRTC instead of this hybrid approach.
+That would have been a gigantic undertaking and was absolutely not feasible.
+
+
+AS COMPARED TO _**LHLS**_
+
+### Congestion
+
+- ~~ABR has difficulty determining when it can switch up~~
+- ~~No way to differentiate between broadcaster starvation and network congestion~~
+- ~~Individual frames are often not large enough to saturate a network.~~
+- ~~Depleting the buffer causes a jarring pause while it refills~~
+- ~~Adds latency for the remainder of the session~~
+- **(modified)** Depleting the jitter buffer causes frame dropping
+- **(new)** Dropping reference frames causes artifacts or freezing until the next IDR
+- **(new)** RTCP receiver report is awful for congestion control; required transport wide CC instead
+
+### Latency
+
+- ~~Segments are downloaded sequentially, adding 100ms between requests~~
+- ~~We saturate the network between segments, adding 200ms~~
+- ~~BBR causes latency during PROBE_RTT phase~~
+- ~~Depleting the buffer increases latency for the remainder of the session~~
+- **(new)** An additional jitter buffer is required in front of WebRTC, adding 100ms?
+
+### Quality
+
+- **(new)** H.264 B-frames are not supported, causing a VQ loss
+- **(new)** Producing fewer reference frames causes a VQ loss
+- **(new)** Excess jitter in the video pipeline (ex. ingest) causes playback jitter
+- **(new)** Any transcoding changes also impacts our HLS stack (VQ loss)
+
+### Time to Video
+
+- ~~Fetching the first rendition playlist takes 2-3 RTTs.~~
+- ~~Fetching the first segment takes 2-3 RTTs.~~
+- ~~The buffer needs to be sufficiently filled before playback starts.~~
+- **(new)** Negotiating SDP via HTTPS takes 3 RTTs.
+- **(new)** Negotiating ICE takes 2-3? RTTs
+- **(new)** Negotiating DTLS takes 2? RTTs
+
+### Clients
+
+- **(new)** libwebrtc is obnoxious to build
+- **(new)** libwebrtc bloats the size of our player
+- **(new)** Limited user experience metrics
+- **(new)** Transport wide CC extension was not supported by all clients/browsers
+
+### Features
+
+- **(new)** Does not support DRM
+
+### Performance
+
+- **(new)** UDP delivery is more expensive than TCP in practice
+- **(new)** Requires transcoding to remove B-frames
+- **(new)** Requires transcoding to convert AAC to OPUS
+- **(new)** libwebrtc did not scale to hundreds of viewers, let alone thousands
+
+# Frames over WebRTC data channels
+
+When WebRTC was not working, we tried to switch over to WebRTC data channels (SCTP over DTLS).
+Each frame was sent as a WebRTC data channel message.
+These frames could be fed into the player via MSE.
+
+It didn’t work.
+SCTP deadlocks when messages are too large because they count towards flow control until fully received.
+The flow control limits in Chrome and Firefox are hard-coded and are often smaller than a single I-frame.
+SCTP cannot drop messages out of order.
+
+# RTP over WebRTC data(grams)
+
+Since data channels weren’t working as intended, we decided to send each RTP packet as an unreliable message.
+This was then reassembled by the application and fed into the player.
+
+
+AS COMPARED TO _**LHLS**_
+
+### Congestion
+
+- ~~ABR has difficulty determining when it can switch up~~
+- **(new)** SCTP has poor congestion control (I forget why)
+
+### Latency
+
+- ~~We saturate the network between segments, adding 200ms~~
+- ~~Segments are downloaded sequentially, adding 200ms? between requests~~
+
+### Time to Video
+
+- ~~Fetching the first playlist takes 2-3 RTTs.~~
+- ~~Fetching the first segment takes 2-3 RTTs.~~
+- **(new)** Negotiating SDP via HTTPS takes 3 RTTs.
+- **(new)** Negotiating ICE takes 2-3? RTTs
+- **(new)** Negotiating DTLS takes 2? RTTs
+- **(new)** Negotiating SCTP takes 2? RTTs
+- **(new)** Negotiating data channels takes 1 RTT
+
+### Features
+
+- **(new)** Does not support 3rd party CDNs
+
+### Performance
+
+- **(new)** UDP delivery is more expensive than TCP in practice
+- **(new)** SCTP ACKs cause excessive UDP packets to be sent/received
+
+# Warp
+
+Warp is conceptually similar to LHLS, but segments are pushed in parallel via QUIC/WebTransport.
+Prioritization is used to avoid segments fighting for bandwidth, delivering newer media first (especially audio) during congestion.
+
+
+AS COMPARED TO _**LHLS**_
+
+### Congestion
+
+- ~~ABR has difficulty determining when it can switch up~~
+- ~~Must finish downloading the current segment before ABR can switch renditions~~
+- ~~No way to differentiate between broadcaster starvation and network congestion~~
+- ~~Individual frames are often not large enough to saturate a network.~~
+- ~~Depleting the buffer causes a jarring pause while it refills~~
+- **(modified)** Depleting the audio buffer causes a jarring pause while it refills
+- **(new)** Depleting the video buffer causes frames to be skipped
+- **(new)** PING packets must be sent to occasionally saturate the network
+
+### Latency
+
+- ~~We benchmark the network between segments, adding 200ms?~~
+- ~~Segments are downloaded sequentially, adding 200ms? between requests~~
+- ~~Depleting the buffer increases latency for the remainder of the session~~
+- **(modified)** Depleting the **audio** buffer increases latency for the remainder of the session
+
+### Time to Video
+
+- ~~Fetching the first rendition playlist takes 3 RTTs.~~
+- ~~Fetching the first segment takes 3 RTTs.~~
+- **(new)** WebTransport handshake takes 2 RTTs
+
+### Clients
+
+- **(new)** Chrome only WebTransport support
+- **(new)** Chrome only video underflow support
+
+### Features
+
+- **(new)** Does not support 3rd party CDNs
+
+### Performance
+
+- **(new)** UDP delivery is more expensive than TCP in practice
diff --git a/src/pages/blog/forward-error-correction.mdx b/src/pages/blog/forward-error-correction.mdx
new file mode 100644
index 0000000..8debf70
--- /dev/null
+++ b/src/pages/blog/forward-error-correction.mdx
@@ -0,0 +1,197 @@
+---
+layout: "@/layouts/global.astro"
+title: Forward? Error? Correction?
+author: kixelated
+description: Concealing packet loss is harder than you think.
+cover: "/blog/forward-error-correction/mfw.jpeg"
+date: 2024-02-17
+---
+
+# Forward? Error? Correction?
+
+So I absolutely _dunked_ on datagrams in the [last blog post](/blog/never-use-datagrams).
+Now it's time to dunk on the last remaining hope for datagrams: [Forward Error Correction](https://www.techtarget.com/searchmobilecomputing/definition/forward-error-correction) (FEC).
+
+## OPUS
+
+[Opus](https://opus-codec.org/) is an amazing audio codec.
+Full disclosure, I haven't had the opportunity to work with it directly;
+I was stuck in [AAC](https://en.wikipedia.org/wiki/Advanced_Audio_Coding) land at Twitch.
+But that's not going to stop me from talking out of my ass.
+
+I want to rant about OPUS' built-in support for FEC.
+And to be clear, this isn't a rant specific to OPUS.
+Somebody _inevitably_ asks for FEC in every networking protocol (like [MoQ](https://github.com/moq-wg/moq-transport/issues/320)) and you can link them this post now.
+
+The general idea behind FEC is to send redundant data so the receiver can paper over small amounts of packet loss.
+It's conceptually similar to [RAID](https://en.wikipedia.org/wiki/RAID) but for packets spread over time instead of hard drives.
+There are so many possible FEC schemes, many of which are patented, and I would do the subject a disservice if I even understood them.
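+
+To make that concrete, here's a toy single-parity sketch (the simplest possible scheme, nothing production-grade): XOR a group of equal-sized packets together and send the result as one extra parity packet. Lose exactly one packet from the group and you can rebuild it from the survivors.
+
+```ts
+// Toy single-parity FEC: the parity packet is the XOR of every packet in the group.
+// Assumes all packets are padded to the same size.
+function xorInto(acc: Uint8Array, pkt: Uint8Array): void {
+	for (let i = 0; i < pkt.length; i++) acc[i] ^= pkt[i];
+}
+
+function parity(packets: Uint8Array[], size: number): Uint8Array {
+	const p = new Uint8Array(size);
+	for (const pkt of packets) xorInto(p, pkt);
+	return p;
+}
+
+// Rebuild the single missing packet by XORing the parity with the survivors.
+function recover(survivors: Uint8Array[], par: Uint8Array): Uint8Array {
+	const missing = Uint8Array.from(par);
+	for (const pkt of survivors) xorInto(missing, pkt);
+	return missing;
+}
+```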
+
+Conveniently, audio "frames" are so small that they fit into a single datagram.
+So rather than deal with retransmissions at the _disgusting_ transport layer, the audio encoder can just encode redundancy via FEC.
+🪦 RIP packet loss: 1983-2024 🪦
+
+But despite being a great idea on paper, there's _so_ many things wrong with this.
+
+## Networks are Complicated
+
+I worked with some very smart people at [Twitch](https://www.twitch.tv/).
+However, I will never forget a presentation maybe 4 years ago where a very smart codec engineer pitched using FEC.
+
+There was a graph that showed the TCP throughput during random packet loss.
+Wow, TCP sure has a low bitrate at 30% packet loss, it sucks!
+But look at this other green line!
+It's a custom protocol using UDP+FEC and it's multiple times faster than TCP!
+
+
+ ![TCP vs UDP+FEC throughput under simulated packet loss](/blog/forward-error-correction/graph.jpeg)
+ [Abridged recreation of the meeting](https://www.youtube.com/watch?v=sz2mmM-kN1I)
+
+
+If somebody shows you any results based on simulated, random packet loss, you should politely tell them: **no, that's not how the internet works**.
+
+Networking is not quantum mechanics.
+There are no dice involved and packet loss is _not random_.
+It depends on the underlying transport.
+
+- Sometimes it occurs randomly due to signal interference.
+- Sometimes it occurs in bursts due to batching.
+- Sometimes it occurs due to congestion.
+- Sometimes it occurs because ???
+
+Unfortunately, there's no magic loophole on the internet.
+There's no one trick that has eluded TCP for 40 years, and yet the UDP geniuses have figured it out.
+You can't send 10x the data to mask packet loss.
+
+In fact, if you ever see a number like 30% packet loss in the real world (yikes), it's likely due to congestion.
+You're sending 30% _too much_ and fully saturating a link.
+The solution is to send _less data_, not parity bits. 🤯
+
+**Fun-fact**: That's the fundamental difference between loss-based congestion control (ex. Reno, CUBIC) and delay-based congestion control (ex. BBR, COPA).
+BBRv1 doesn't even use packet loss as a signal; it's all about RTT.
+
+## Expertise
+
+These packet loss misconceptions come up surprisingly often in the live video space.
+The hyperfocus on packet loss is a symptom of a larger problem: media experts suddenly have to become networking experts.
+
+Even modern media protocols are built directly on top of UDP; for example [WebRTC](https://webrtc.org/), [SRT](https://www.haivision.com/products/srt-secure-reliable-transport/), [Sye](https://nscreenmedia.com/amazon-buys-sye/), [RIST](https://www.rist.tv/).
+And for good reason, as the head-of-line blocking of TCP is a non-starter for real-time media.
+But with great power (UDP) comes great responsibility.
+
+
+ ![mfw](/blog/forward-error-correction/mfw.jpeg)
+
+ [\> mfw](https://knowyourmeme.com/memes/im-going-to-die-spider-man-3-qte) a new protocol over UDP is announced.
+
+
+
+And the same mistakes keep getting repeated.
+I can't tell you the number of times I've talked to an engineer at a video conference who decries congestion control, and in the next breath claims FEC is the solution to all their problems.
+Frankly, I'm just jaded at this point.
+
+But it is definitely possible to have both media and networking expertise.
+The Google engineers who built WebRTC are a testament to that.
+However, the complexity of WebRTC speaks volumes to the difficulty of the task.
+
+This is one of the many reasons why we need **Media over QUIC**.
+Let the network engineers handle the network and the media engineers handle the media.
+
+## End-to-End
+
+But my beef with FEC in OPUS is more fundamental.
+
+When I speak into a microphone, the audio data is encoded into packets via a codec like OPUS.
+That packet then traverses multiple hops, potentially going over WiFi, Ethernet, 4G, fiber, satellites, etc.
+It switches between different cell towers, routers, ISPs, transit providers, business units, and who knows what else.
+Until finally, finally, the packet reaches ur Mom's iPhone and my words replay into her ear.
+Tell her I miss her. 😢
+
+Unfortunately, each of those hops have different properties and packet loss scenarios.
+Many of them already have FEC built-in or don't need it at all.
+
+By performing FEC in the application layer, specifically the audio codec, we're making a decision that's **end-to-end**.
+It's suboptimal by definition because packet loss is a **hop-by-hop** property.
+
+## Hop-by-Hop
+
+If not the audio codec, where should we perform FEC instead?
+
+In my ideal world, each hop uses a tailored loss recovery mechanism.
+This is based on the properties of the hop, and if they expect:
+
+- **burst loss**: delayed parity.
+- **random loss**: interleaved parity.
+- **low RTT**: retransmit packets.
+- **congestion**: drop packets.
+
+But at which layer?
+A protocol like WiFi doesn't know the contents of each packet, especially if they're encrypted like every modern protocol.
+Throughput matters when you're downloading a movie, but latency matters when you're on a conference call.
+
+Our time-sensitive audio packets need to have different behavior than other traffic.
+There are ways to signal [QoS](https://en.wikipedia.org/wiki/Quality_of_service) in IP packets, but unfortunately, support is limited as is the granularity.
+All it takes is one router in the chain to ignore your flag and everything falls apart.
+
+That's why it absolutely makes sense to perform FEC at a higher level.
+If the transport layer knows the desired properties, then it can make the best decision.
+Not the audio codec.
+
+## QUIC
+
+So I just dunked on FEC in OPUS.
+"Don't do FEC in the audio codec, do it in QUIC instead."
+
+Well QUIC doesn't support FEC yet.
+Oops.
+There are [some proposals](https://www.ietf.org/archive/id/draft-michel-quic-fec-01.html) but I imagine it will be a long time before anything materializes.
+
+QUIC is primarily designed and used by CDN companies.
+Their whole purpose is to put edge nodes as close to the user as possible in order to improve the user experience.
+When your RTT to the Google/CloudFlare/Akamai/Fastly/etc edge is 20ms, then FEC is strictly worse than retransmissions.
+FEC can only ever be an improvement when `target_latency < 2*RTT`; with a 20ms RTT, a retransmission recovers the loss in roughly 40ms, well under most latency budgets.
+
+Additionally, there might not even be a need for FEC in QUIC.
+WebRTC supports [RED](https://webrtchacks.com/red-improving-audio-quality-with-redundancy/) which was [added to RTP in 1997](https://datatracker.ietf.org/doc/html/rfc2198).
+The idea is to just transmit the same packet multiple times and let the receiver discard duplicates.
+
+RED actually works natively in QUIC without any extensions.
+A QUIC library can send redundant [STREAM frames](https://www.rfc-editor.org/rfc/rfc9000.html#name-stream-frames) and the receiver will transparently discard duplicates.
+It's wasteful but it's simple and might be good enough for some hops.
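+
+To see why no extension is needed, here's a grossly simplified sketch of receiver-side reassembly (my own illustration, not any real QUIC library):
+
+```ts
+// QUIC reassembles STREAM frames by byte offset within each stream. A
+// redundant copy of an already-received range carries nothing new, so it's
+// dropped without the application ever knowing.
+class Reassembler {
+  private chunks = new Map<number, Uint8Array>(); // offset -> data
+
+  insert(offset: number, data: Uint8Array) {
+    if (this.chunks.has(offset)) return; // duplicate: silently discarded
+    this.chunks.set(offset, data);
+  }
+}
+```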
+
+## In Defense of FEC
+
+This is a hot topic and I am quite ignorant.
+I don't want to be too dismissive.
+
+There are absolutely scenarios where FEC is the best solution.
+When you're sending data over a satellite link, you're dealing with a high RTT and burst loss.
+And there are totally scenarios where you won't have intermediate hops that can perform retransmissions, like a P2P connection.
+When the RTT gets high enough, you need FEC.
+
+And performing that FEC in OPUS gives you an extra property that I haven't mentioned yet: partial reconstruction.
+You might not be able to reconstruct the entire audio bitstream, but you can fill in the blanks so to speak.
+The fact that OPUS can partially decode a bitstream with only a fraction of the packets, regardless of FEC, is frankly amazing.
+
+And most importantly, you might not have control over the lower layers.
+I'm used to working at a company with a global network and a CDN but that's not a common reality.
+If the only thing you can control is the audio codec, then ratchet that FEC up to 11 and see what happens.
+
+My point is that the transport knows best.
+The audio encoder shouldn't know that there's a satellite link in the chain.
+
+## Conclusion
+
+Audio is important.
+Networks are complicated.
+This is not haiku.
+
+FEC should not be in an audio codec, but rather closer to the source of packet loss.
+But at the end of the day, I'm just shoving blame down the stack.
+Do what works best for your users at whatever layer you control.
+
+Just please, never show me a graph based on random packet loss again.
+
+Written by [@kixelated](https://github.com/kixelated).
+
+
diff --git a/src/pages/blog/index.astro b/src/pages/blog/index.astro
new file mode 100644
index 0000000..5d7621b
--- /dev/null
+++ b/src/pages/blog/index.astro
@@ -0,0 +1,41 @@
+---
+import MainLayout from "../../layouts/global.astro";
+
+// The frontmatter shape shared by the blog posts below.
+interface Frontmatter {
+ title: string;
+ cover: string;
+ description: string;
+ author: string;
+ date: string;
+}
+
+const posts = await Astro.glob("./*.mdx");
+posts.sort((a, b) => {
+ const dateA = Date.parse(a.frontmatter.date);
+ const dateB = Date.parse(b.frontmatter.date);
+ return dateB - dateA;
+});
+---
+
+
+
+
+
+ ))
+ }
+
+
diff --git a/src/pages/blog/moq-onion.mdx b/src/pages/blog/moq-onion.mdx
new file mode 100644
index 0000000..51a620e
--- /dev/null
+++ b/src/pages/blog/moq-onion.mdx
@@ -0,0 +1,340 @@
+---
+layout: "@/layouts/global.astro"
+title: The MoQ Onion
+author: kixelated
+description: Media over Transfork over WebTransport over QUIC over UDP over IP over Ethernet over Fiber over Light over Space over Time
+cover: "/blog/moq-onion/onion.png"
+date: 2024-11-17
+---
+
+# The MoQ Onion
+Today we slice the onion.
+The most critical, and least documented, thing to understand is the layering behind MoQ.
+
+Without further blabbering, from the bottom to top:
+- **QUIC**: The network layer.
+- **WebTransport**: Browser compatibility.
+- **MoQ Transfork**: Media-like pub/sub.
+- **MoQ Karp**: A media playlist and container.
+- **Ur App**: Your application.
+
+This layering is the most crucial concept behind MoQ.
+We explicitly want to avoid building yet another monolithic and inflexible media protocol built directly on top of UDP.
+Sorry SRT, you're not invited to this party.
+
+This guide will also help explain the IETF drafts.
+Of course I decided to [fork them](/blog/transfork), but the high level concepts are still very similar.
+Just gotta rename a few things:
+
+- [Transfork](https://datatracker.ietf.org/doc/draft-lcurley-moq-transfork/) -> [Transport](https://datatracker.ietf.org/doc/draft-ietf-moq-transport/)
+- Karp -> [Warp](https://datatracker.ietf.org/doc/draft-law-moq-warpstreamingformat/)
+
+Yes, I used unoriginal names on purpose.
+Your mental model should thank me.
+
+## QUIC
+If you've used TCP before (and you have), you'll know that it's fully reliable and ordered.
+It's a FIFO over the internet.
+But the internet is full of rough seas.
+Step outside and you'll encounter a swirling typhoon in the form of a 🌀 loading 🌀 animation.
+Sometimes we don't want to wait for everything; sometimes we want to skip.
+
+This is not a new problem.
+There have been many attempts to fix head-of-line blocking in HTTP:
+
+- With **HTTP/1**, browsers would utilize multiple TCP connections to each host.
+However, each connection involves a relatively expensive TCP/TLS handshake and these connections would compete for resources.
+
+- With **HTTP/2**, browsers would utilize a single, shared TCP connection to each host.
+However, despite the illusion of independent requests and a complex prioritization scheme, it all gets interleaved into a single pipe.
+
+- With **HTTP/3**, TCP was replaced with QUIC.
+Head-of-line blocking is no more\*.
+What?
+How?
+
+Rage-bait stolen from another blog stolen from another presentation.
+
+QUIC combines the two approaches by sharing some state (like HTTP/2) while providing independent streams (like HTTP/1).
+Each HTTP request is a QUIC stream that can be created, delivered, and closed in parallel with minimal overhead.
+All of the encryption, congestion control, and flow control is shared at the connection level.
+
+But hang on, why not build on top of UDP like scores of other live media protocols?
+It's pretty simple actually:
+- QUIC is wicked smart: check out my [QUIC POWERS](/blog/quic-powers) post for more info.
+- QUIC is available in the browser.
+- QUIC benefits from economies of scale (many implementations).
+
+The point of MoQ is to fully utilize QUIC's features to deliver live media.
+We're not reinventing the wheel nor are we checking a box for marketing reasons.
+
+## WebTransport
+I just said QUIC was created for HTTP/3... so why not use HTTP/3?
+That's QUIC right?
+
+Well you can totally use HTTP/3 to implement something like MoQ, but the HTTP semantics add more hoops to jump through.
+
+My main gripe with HTTP is that it's not designed for live content.
+The client has to know what content to request, which is kind of a problem when that content doesn't exist yet.
+You end up relying on everybody's favorite hacks: **polling** and **_long_-polling**.
+Also, the client/server model means that contribution (client->server) and distribution (server->client) must be handled separately.
+
+Somebody else should and will make "Media over HTTP/3" (hit up my DMs @pantos).
+
+I'm interested in [WebTransport](https://developer.mozilla.org/en-US/docs/Web/API/WebTransport_API) instead.
+It's a browser API that exposes QUIC to browser applications, similar to how WebSockets exposes TCP\*.
+The connection-oriented nature makes it significantly easier to push media without the HTTP request/response song and dance.
+There's not much else to say, it's basically the same thing as QUIC.
+
+...except that the underlying implementation is gross.
+A WebTransport session shares a QUIC connection with HTTP/3 requests and potentially other WebTransport sessions ([spec](https://datatracker.ietf.org/doc/draft-ietf-webtrans-http3/)).
+This *pooling* feature is responsible for a ton of headaches but the IETF went ahead and standardized it anyway (despite my best efforts).
+
+So we use WebTransport for WebSupport.
+That's why this layer exists; we're forced to use it.
+Choose a WebTransport library ([like mine!](https://docs.rs/web-transport-quinn/latest/web_transport_quinn/)) and pretend it's just QUIC.
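+
+In the browser there's nothing to install; the API is built in. A minimal sketch (the URL is made up):
+
+```ts
+// Connect to a hypothetical relay and open a bidirectional stream.
+const wt = new WebTransport("https://relay.example.com:4443/moq");
+await wt.ready; // resolves after the QUIC + TLS + WebTransport handshake
+const stream = await wt.createBidirectionalStream();
+```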
+
+## WebRTC
+Before we get into the good stuff (my protocol ofc), it's best to learn from the failures of other protocols.
+
+One of the biggest headaches with WebRTC is _scale_.
+WebRTC supports peer-to-peer communication, but this requires the broadcaster to send a copy of each packet to every viewer.
+Obviously this falls down as the number of participants grows, so the usual solution is to have all participants send to a central server instead: a **Selective Forwarding Unit** (SFU).
+Our server can then use the power of DATA CENTERS and PEERING to fan the content out to viewers.
+
+A comprehensive diagram of the differences between P2P and SFU.
+
+Unfortunately, WebRTC does a poor job of facilitating this.
+The network transport (RTP) is tightly coupled to the application (also RTP).
+Without going into too much detail, an SFU has to pretend to be another participant, performing hacks along the way to stay compliant with the standard.
+Ultimately, every major service makes a custom SFU implementation to handle the quirks of their application.
+If you want a longer rant, check out my [Replacing WebRTC](/blog/replacing-webrtc).
+
+My bold claim:
+**WebRTC doesn't scale because it's not generic**.
+
+For those with a HTTP background, this is something we take for granted.
+Want to scale your application?
+Throw nginx in front of it.
+Still doesn't scale?
+Throw a CDN in front of it.
+Still doesn't scale?
+Throw more money at it.
+
+But HTTP is a pain for live content.
+It's totally possible, but like mentioned in the previous section, you're jumping through hoops.
+What we really want are the great HTTP cache/fanout semantics but with the ability to PUSH.
+
+## MoqTransfork
+So here we are.
+[MoQ Transfork](https://datatracker.ietf.org/doc/draft-lcurley-moq-transfork/) is an ambitious attempt to learn from the successes of HTTP and the aforementioned failures of WebRTC.
+But you know, for live content and not *hyper-text*.
+
+The idea is that generic relays and CDNs implement Transfork _but not the media layer_.
+The Transfork headers contain enough information to facilitate optimal caching and fanout even in congested scenarios.
+The application can encode whatever it wants into the payload; it doesn't need to be media and it could even be encrypted.
+
+Here's the general flow:
+- A client establishes a **Session** to a server.
+- Both sides can **Announce** available tracks by name.
+- Both sides can **Subscribe** to a live track by name with a priority.
+- A **Track** is broken into groups, each delivered independently with a priority.
+- A **Group** consists of ordered frames, delivered reliably unless cancelled.
+- A **Frame** is a sized chunk of data.
+
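+In code, the hierarchy above looks roughly like this; the names are mine, not the draft's wire format:
+
+```ts
+// A rough sketch of the Transfork object model described above.
+interface Frame {
+  payload: Uint8Array; // a sized chunk of data
+}
+
+interface Group {
+  sequence: number; // each group maps to its own QUIC stream
+  frames: Frame[]; // ordered, delivered reliably unless cancelled
+}
+
+interface Track {
+  name: string; // announced and subscribed to by name
+  priority: number; // used to pick a winner under congestion
+  groups: Group[];
+}
+```
+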
+---
+
+The most crucial concept is how live media is mapped to Transfork.
+This is the main reason why I forked the IETF draft; it needed to be done right.
+
+You see, video encoding is a form of delta encoding.
+To over-simplify, a video stream involves a periodic I-frame (static image) followed by P-frames (delta).
+If we lose a frame, then our diffs get out of sync and there are ugly artifacts until the next I-frame.
+You've probably seen this before; it looks like you're tripping balls.
+
+And yet we also need a plan for congestion.
+We don't want to ⏸️ pause ⏸️ and 🌀 buffer 🌀 during a conference call so we need to drop _something_.
+This is a contentious subject but I think there's only one option: skip frames until the next I-frame.
+In order to do that, we need to split up the live stream and deliver it in pieces.
+
+These are known as a [Group of Pictures](https://en.wikipedia.org/wiki/Group_of_pictures) aka a __GoP__ aka a [HLS/DASH segment](https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP) aka a __Transfork Group__.
+Unlike the IETF draft, Transfork maps a GoP _directly_ to a __QUIC Stream__ which provides reliability and ordering.
+There's no need for a reassembly buffer in our application; QUIC will make sure that every frame is delivered and in the correct order.
+
+
+Some visuals always help.
+
+And crucially, QUIC streams can be prioritized.
+We tell the QUIC layer to transmit packets according to a (deterministic) priority when bandwidth is limited.
+The most important groups are delivered (ex. newer audio) while the least important groups will be starved (ex. older video).
+And if any group is starved for too long, either side can explicitly cancel it and move on.
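+
+As a sketch, "deterministic priority" could be as simple as this (my own scheme for illustration, not the draft's):
+
+```ts
+// Hypothetical send order: a higher track priority always wins, and within a
+// track, newer groups beat older ones. Larger value = transmitted first.
+function sendOrder(trackPriority: number, groupSequence: number): bigint {
+  return (BigInt(trackPriority) << 32n) | BigInt(groupSequence);
+}
+```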
+
+
+Some visuals might help?
+
+And just like that, we can achieve latency close to WebRTC levels with limited complexity.
+We still need to improve QUIC's congestion control to reach parity with WebRTC, but that's a problem for another day.
+
+But what about other latency targets?
+Well this is why I very specifically said we _starve_ streams, not _drop_ them.
+If a viewer is willing to accept higher latency to avoid stalls, they do so by having a larger buffer.
+A larger buffer means more time for network starvation to recover, and thus group starvation to recover.
+We only cancel a QUIC stream after the viewer decides to skip ahead, if that ever happens (ex. VOD).
+
+__Prioritization__ is a superset of __dropping__.
+We don't need no stinking UDP.
+
+---
+
+One last ~~thing~~ rant.
+I think the IETF is overfixated on the "generic" angle.
+MoqTransport has become a solution in search of a problem (more use-cases).
+Unfortunately, the core live media use-case has been neglected in favor of convoluted abstractions and useless features.
+
+I think this stems from a misunderstanding of the layering.
+Transfork _SHOULD NOT_ be thought of as a generic HTTP replacement.
+Instead, Transfork draws a clear line in the sand: it's **the bare minimum that a live media client/server _need_ to support**.
+
+The server doesn't care about the media encoding which is why it's delegated to a higher layer (spoilers).
+It's a side-benefit that the transport is generic, not the explicit goal.
+I think Transfork could become less generic, for example by adding timestamps to each frame, but only if this information would be useful for the server.
+
+## Karp
+Okay okay, so it's finally time for the big reveal: the __M__ in MoQ.
+__Karp__ is a layer on top of MoqTransfork that actually does the **M**edia stuff.
+It's also the simplest.
+Wowee.
+
+
+WIP Logo
+
+Karp is modeled after the [WebCodecs API](https://developer.mozilla.org/en-US/docs/Web/API/WebCodecs_API).
+It's just enough metadata to initialize a decoder and render a frame.
+It's not an on-disk media container like MP4 or MKV; instead, it's optimized for low-overhead live streaming.
+
+Karp consists of:
+- A **catalog**: A JSON blob describing the tracks.
+- A **container**: A simple header in front of the codec data.
+
+---
+
+The __Catalog__ is delivered over a Transfork track, commonly named `catalog.json`.
+It contains available tracks within a broadcast and metadata about them.
+The viewer subscribes to this catalog, determines if a track is desired/decodable, and then subscribes to individual media tracks.
+
+Let's just dive into an example because it explains itself:
+
+```json
+{
+  "video": [{
+    // Transfork information
+    "track": {
+      "name": "720p",
+      "priority": 2
+    },
+    // The codec in use.
+    "codec": "avc1.64001f",
+    // The resolution of the video
+    "resolution": {
+      "width": 1280,
+      "height": 720
+    },
+    // The maximum bitrate (3Mb/s)
+    "bitrate": 3000000
+    // etc
+  }]
+}
+```
+
+Of course, the catalog can contain multiple tracks, audio and video but also alternative renditions.
+For example, two tracks could have the same content but different settings, like `480p` vs `1080p`.
+A viewer can choose which one it wants based on the indicated bitrate and resolution.
+
+The concept is similar to a HLS/DASH playlist or SDP monstrosity, but it's a strongly-typed JSON schema.
+But unlike those crummy aforementioned formats, this catalog can be updated; it's a live track itself!
+Viewers will **Subscribe** to the catalog and receive updates via **Groups** or delta **Frames**.
+Just like that, you can add or remove tracks on the fly.
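+
+As a sketch, a viewer could consume the catalog like any other track; `subscribe` here is a stand-in for whatever MoQ library you use:
+
+```ts
+// Hypothetical: assume each group carries a full catalog snapshot.
+// Delta frames would patch the previous object instead of replacing it.
+async function watchCatalog(subscribe: (track: string) => AsyncIterable<Uint8Array>) {
+  for await (const snapshot of subscribe("catalog.json")) {
+    const catalog = JSON.parse(new TextDecoder().decode(snapshot));
+    console.log("available tracks:", catalog.video?.map((v: any) => v.track.name));
+  }
+}
+```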
+
+---
+
+The __Container__ is even less interesting.
+Originally, Warp used fMP4.
+This is great for a company like Twitch who already uses fMP4 (aka CMAF) for HLS/DASH delivery.
+
+Unfortunately, this container is not designed for live streaming.
+You can minimize latency by fragmenting (the f in fMP4) at each frame but this involves ~100 bytes of overhead.
+This is nearly the size of an OPUS audio packet; doubling our network usage for audio-only streams is unacceptable.
+
+So we're not using fMP4, we're using our own container.
+It consists of:
+- A 1-8 byte presentation timestamp.
+- A payload.
+
+That's it.
+That's it!
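+
+As a sketch, writing one frame could look like this. How the 1-8 byte timestamp is delimited is my assumption (a length prefix); the real container may differ:
+
+```ts
+// Encode a Karp-style frame: a minimal big-endian timestamp, then the payload.
+// The leading length byte is my invention to keep the sketch decodable.
+function encodeFrame(timestampMs: bigint, payload: Uint8Array): Uint8Array {
+  const ts: number[] = [];
+  let v = timestampMs;
+  do {
+    ts.unshift(Number(v & 0xffn));
+    v >>= 8n;
+  } while (v > 0n);
+
+  const out = new Uint8Array(1 + ts.length + payload.length);
+  out[0] = ts.length; // 1-8 bytes
+  out.set(ts, 1);
+  out.set(payload, 1 + ts.length);
+  return out;
+}
+```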
+
+Obviously we'll need more functionality in the future so expect to see updates on Karp.
+For example, keying information used for encryption or DRM (not the same thing lul).
+But the goal is to keep it simple.
+
+## UrApp
+MoQ can provide video and audio delivery.
+You're on your own for the rest.
+Fly free little developer.
+
+Of course there are libraries to help.
+Nobody is expecting you to write your own QUIC, Transfork, or Karp implementation.
+Likewise I expect there will be other layers on top of MoqTransfork, for example a chat protocol.
+
+But ultimately, you're responsible for your application.
+It shouldn't be generic, it's yours!
+
+But neither should MoQ be one-size-fits-all.
+You will inevitably need to extend MoQ to fit your needs.
+I don't want that to involve clicking the "Feature Request" button.
+
+But how do you extend MoQ?
+Well of course it depends on the layer, perhaps:
+
+- A QUIC extension?
+- A WebTransport extension?
+- A MoqTransfork extension?
+- A MoqTransfork track.
+- A MoqKarp extension?
+
+One of those in particular should stick out: you can create arbitrary tracks.
+
+For example, you could create a custom `controller` or `chat` track alongside the Karp tracks.
+You get all of the benefits of MoqTransfork, like prioritization and caching, without having to reinvent the wheel.
+But you do need to figure out how to delta encode your content into **Groups** and **Frames**.
+
+For example, the MoQ relays use tracks to gossip available broadcasts and routes.
+Each node creates a `cluster.` track and subscribes to all other `cluster.*` tracks.
+The track is just a series of + or - deltas indicating when a broadcast has started or stopped.
+Dogfooding to the max.
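+
+A sketch of what consuming that track could look like; the exact framing is my guess:
+
+```ts
+// Hypothetical: each frame is "+name" when a broadcast starts and "-name"
+// when it stops. Replaying the track from the start rebuilds the full set.
+function applyDelta(active: Set<string>, frame: string) {
+  const name = frame.slice(1);
+  if (frame.startsWith("+")) active.add(name);
+  else if (frame.startsWith("-")) active.delete(name);
+}
+```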
+
+You too can abuse MoQ to deliver non-media content.
+Perhaps one day this blog post will be delivered over MoQ too...
+
+## Out of Date
+MoQ is evolving rapidly.
+The core concepts are stable but everything else keeps evolving, even the terminology.
+I'm sure this blog post is already out of date.
+
+[Join the conversation](https://discord.gg/FCYF3p99mr) and evolve with me, platonically of course.
+
+Written by [@kixelated](https://github.com/kixelated).
+
+
diff --git a/src/pages/blog/never-use-datagrams.mdx b/src/pages/blog/never-use-datagrams.mdx
new file mode 100644
index 0000000..ce04aa4
--- /dev/null
+++ b/src/pages/blog/never-use-datagrams.mdx
@@ -0,0 +1,260 @@
+---
+layout: "@/layouts/global.astro"
+title: Never* use Datagrams
+author: kixelated
+description: Rethink your approach. *Unless you're doing something dope.
+cover: "/blog/never-use-datagrams/bodies.jpeg"
+date: 2024-02-17
+---
+
+# Never\* use Datagrams
+
+Click-bait title, but hear me out.
+
+## TCP vs UDP
+
+So you're reading this blog over the internet.
+I would wager you do a lot of things over the internet.
+
+If you've built an application on the internet, you've undoubtedly had to decide whether to use TCP or UDP.
+Maybe you're trying to make, oh I dunno, a live video protocol or something.
+There are more choices than just those two but let's pretend like we're a networking textbook from the 90s.
+
+The common wisdom is:
+
+- use **TCP** if you want **reliable** delivery
+- use **UDP** if you want **unreliable** delivery
+
+What the fuck does that mean?
+Who _wants_ unreliability?
+
+- You don't want a hard-drive that fails 5% of writes.
+- You don't want something with random holes in the middle (unless it's cheese).
+- You don't want a service that is randomly unavailable because ¯\\\_(ツ)\_/¯.
+
+Nobody\* wants memory corruption or deadzones or artifacts or cosmic rays.
+Unreliability is a consequence, not a goal.
+
+
+\*Unless you're making some cursed GIF art. [Source](https://en.wikipedia.org/wiki/Glitch_art)
+
+## Properties
+
+So what do we actually want?
+
+If you go low enough level, you can use electrical impulses to do neat stuff like:
+
+- Power on LEDs in a desired configuration.
+- Spin magnets at ludicrous speeds.
+- Make objects tingle and shake.
+- etc you get the idea.
+
+But we don't want to deal with electrical impulses.
+We want higher level functionality.
+
+Fortunately, software engineering is all about standing on the shoulders of others.
+There are layers on top of layers on top of layers of abstraction.
+Each layer provides properties so you don't have to reinvent the personal computer every time.
+
+Our job as developers is to decide which shoulders we want to stand on.
+But some shoulders are awful, so we have to be selective.
+Over-abstraction is bad but so is under-abstraction.
+
+What user experience are we trying to build, and how can we leverage the properties of existing layers to achieve that?
+
+## "Unreliable"
+
+There was a recent [MoQ interim](https://datatracker.ietf.org/wg/moq/meetings/) in Denver.
+For those unaware, it's basically a meetup of masochistic super nerds who want to design a live video protocol.
+We spent hours debating the semantic differences between **FETCH** and **SUBSCRIBE** among other riveting topics.
+
+
+I'm the one in the back right corner, the one with the stupid grin on their face.
+
+A few times, it was stated that **SUBSCRIBE** should be _unreliable_.
+The room cringed, and I hard cringed enough to write this blog post.
+
+What I actually want is **timeliness**.
+If the internet can choose between delivering two pieces of data, I want it to deliver the newest one.
+
+In the live video scenario, this is the difference between buffering and skipping ahead.
+If you're trying to have a conversation with someone on the internet, there can't be a delay.
+You don't want a buffering spinner on top of their face, nor do you want to hear what they said 5 seconds ago.
+
+To accomplish timeliness, the live video industry often uses UDP datagrams instead of TCP streams.
+As does the video game industry apparently.
+But why?
+
+## Datagrams
+
+A datagram, aka an IP packet, is an envelope of 0s and 1s that gets sent from a source address to a destination address.
+Each device has a different maximum size allowed, which is super annoying, but 1200 bytes is generally safe.
+And of course, they can be silently lost or even arrive out of order.
+
+But the physical world doesn't work in discrete packets; it's yet another layer of abstraction.
+I'm not a scientist-man, but the data is converted to analog signals and sent through some medium.
+It all gets serialized and deserialized and buffered and queued and retransmitted and dropped and corrupted and delayed and reordered and duplicated and lost and all sorts of other things.
+
+So why does this abstraction exist?
+
+## Internet of Queues
+
+It's pretty simple actually: something's got to give.
+
+
+Let the packets hit the FLOOR
+
+When there's too much data sent over the network, the network has to decide what to do.
+In theory it could drop random bits but oh lord that is a nightmare, as evidenced by over-the-air TV.
+So instead, a bunch of smart people got together and decided that routers should drop at packet boundaries.
+
+But why drop packets again?
+Why can't we just queue and deliver them later?
+Well yeah, that's what a lot of routers do these days since RAM is cheap.
+It's a phenomenon called [bufferbloat](https://en.wikipedia.org/wiki/Bufferbloat) and my [coworkers](https://discord.com) can attest that it's my favorite thing to talk about. 🐷
+
+But RAM is a finite resource so the packets will eventually get dropped.
+Then you finally get the **unreliability** you wanted all along...
+
+## Oh no
+
+Oh shit, I forgot, I actually want **timeliness** and bufferbloat is the worst possible scenario.
+Naively, you would expect the internet to deliver packets immediately, with some random packets getting dropped.
+However bufferbloat causes _all_ packets to get queued, possibly for seconds, ruling out any hope of timely delivery.
+
+How do you avoid this?
+Basically, the only way to avoid queuing is to detect it, and then send less.
+The sender uses some feedback from the receiver to determine how long it took a packet to arrive.
+We can use that signal to infer when routers are queuing packets, and back off to drain any queues.
+
+This is called [congestion control](https://en.wikipedia.org/wiki/TCP_congestion_control) and it's a huge, never ending area of research.
+I briefly summarized it in the [Replacing WebRTC](/blog/replacing-webrtc) post if you want more CONTENT.
+But all you need to know is that sending packets at unlimited rate is a recipe for disaster.
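+
+The core loop is simple even if the tuning never is; a toy sketch:
+
+```ts
+// Toy delay-based congestion control: an RTT above the observed minimum means
+// queues are building somewhere, so send less until they drain.
+function nextRate(rateBps: number, rttMs: number, minRttMs: number): number {
+  const queuingMs = rttMs - minRttMs;
+  if (queuingMs > 25) return rateBps * 0.9; // back off to drain the queues
+  return rateBps * 1.02; // gently probe for more bandwidth
+}
+```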
+
+
+[Source](https://datatracker.ietf.org/meeting/99/materials/slides-99-iccrg-iccrg-presentation-2-00.pdf): Riveting slides from IETF meetings that you're missing out on.
+
+
+## You, The Application Developer
+
+Speaking of a recipe for disaster.
+Let's say you made the mistake of using UDP directly because you want them datagrams.
+You're bound to mess up, and you won't even realize why.
+
+If you want to build your own transport protocol on top of UDP, you "need" to implement:
+
+- [retransmissions](https://www.rfc-editor.org/rfc/rfc9000.html#name-packetization-and-reliabili)
+- [congestion control](https://www.rfc-editor.org/rfc/rfc9002.html#name-congestion-control)
+
+And if you want a great protocol, you also need:
+
+- [encryption](https://www.rfc-editor.org/rfc/rfc9001)
+- [RTT estimates](https://www.rfc-editor.org/rfc/rfc9002.html#name-estimating-the-round-trip-t)
+- [path validation](https://www.rfc-editor.org/rfc/rfc9000.html#name-address-validation)
+- [path migration](https://www.rfc-editor.org/rfc/rfc9000.html#name-connection-migration)
+- [pacing](https://www.rfc-editor.org/rfc/rfc9002.html#name-pacing)
+- [flow control](https://www.rfc-editor.org/rfc/rfc9000.html#name-flow-control)
+- [version negotiation](https://www.rfc-editor.org/rfc/rfc9000.html#name-version-negotiation)
+- [extensions](https://www.rfc-editor.org/rfc/rfc9000.html#name-transport-parameter-encodin)
+- [prioritization](https://www.rfc-editor.org/rfc/rfc9000.html#name-stream-prioritization)
+- [keep-alives](https://www.rfc-editor.org/rfc/rfc9000.html#name-idle-timeout)
+- [multiplexing](https://www.rfc-editor.org/rfc/rfc9000.html#name-streams)
+
+And if you want an AMAZING protocol, you also need:
+
+- [web support](https://developer.mozilla.org/en-US/docs/Web/API/WebTransport_API)
+- [port reuse](https://www.rfc-editor.org/rfc/rfc9000.html#name-matching-packets-to-connect)
+- [dynamic MTUs](https://www.rfc-editor.org/rfc/rfc9000.html#name-datagram-size)
+- [multi-path](https://datatracker.ietf.org/doc/draft-ietf-quic-multipath/)
+- [stateless load balancing](https://datatracker.ietf.org/doc/draft-ietf-quic-load-balancers/)
+- [anycast load balancing](https://www.rfc-editor.org/rfc/rfc9000.html#section-9.6.1)
+
+Let's be honest, you don't even know what half of those are, nor why they are worth implementing.
+Just use a [QUIC](https://en.wikipedia.org/wiki/QUIC) library instead.
+
+But if you still insist on UDP, you're actually in good company with a lot of the video industry.
+Building a live video protocol on top of UDP is all the rage; for example, [WebRTC](https://webrtc.org/), [SRT](https://www.haivision.com/products/srt-secure-reliable-transport/), [Sye](https://nscreenmedia.com/amazon-buys-sye/), [RIST](https://www.rist.tv/), etc.
+With the exception of Google, it's very easy to make a terrible protocol on top of UDP.
+Look forward to the upcoming **Replacing RTMP _but please not with SRT_** blog post!
+
+## Timeliness
+
+But remember, I ultimately want to achieve **timeliness**.
+How can we do that with QUIC?
+
+1. **Avoid bloating the buffers** 🐷.
+ Use a delay-based congestion controller like [BBR](https://www.ietf.org/archive/id/draft-cardwell-iccrg-bbr-congestion-control-01.html) that will detect queueing and back off.
+ There are better ways of doing this, like how WebRTC uses [transport-wide-cc](https://webrtc.googlesource.com/src/+/refs/heads/main/docs/native-code/rtp-hdrext/transport-wide-cc-02/README.md), which I'll personally make sure gets added to QUIC.
+
+2. **Split data into streams**.
+ The bytes within each stream are ordered, reliable, and can be any size; it's nice and convenient.
+ Each stream could be a video frame, or a game update, or a chat message, or a JSON blob, or really any atomic unit.
+
+3. **Prioritize the streams**.
+ Streams are independent and can arrive in any order.
+ But you can tell the QUIC stack to focus on delivering important streams first.
+ The low priority streams will be starved, and can be closed to avoid wasting bandwidth.
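+
+Here's what those three steps can look like in the browser, assuming WebTransport with `sendOrder` support (still rolling out across browsers):
+
+```ts
+// One QUIC stream per frame. A larger sendOrder wins under congestion, so
+// newer frames preempt older ones; stale streams can be aborted entirely.
+async function sendFrame(wt: WebTransport, frame: Uint8Array, seq: number) {
+  const stream = await wt.createUnidirectionalStream({ sendOrder: seq });
+  const writer = stream.getWriter();
+  await writer.write(frame);
+  await writer.close(); // reliable delivery, unless abort() is called later
+}
+```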
+
+That's it.
+That's the secret behind [Media over QUIC](https://datatracker.ietf.org/wg/moq/about/).
+Now all that's left is to bikeshed the details.
+
+And guess what?
+This approach works with higher latency targets too.
+It turns out that the fire-and-forget nature of datagrams only works when you need real-time latency.
+For everything else, there's QUIC streams.
+
+You don't need datagrams.
+
+
+
+## In Defense of Datagrams
+
+**Never\* use Datagrams** got you to click, but the direction of QUIC and MoQ seems to tell another story:
+
+1. QUIC [has support for datagrams](https://datatracker.ietf.org/doc/html/rfc9221) via an extension.
+2. WebTransport [requires support for datagrams](https://datatracker.ietf.org/doc/html/draft-ietf-webtrans-http3-08#section-3.1-6).
+3. The latest MoQ version [adds support for datagrams](https://www.ietf.org/archive/id/draft-ietf-moq-transport-02.html#object-message-formats).
+4. The next MoQ version [will require support for datagrams](https://github.com/moq-wg/moq-transport/pull/384).
+
+Like all things designed by committee, there's going to be some compromise.
+There are some folks who think datagram support is important.
+And frankly, it's trivial to support and allow people to experiment.
+For example, OPUS has FEC support built-in, which is why MoQ supports the ability to send each audio "frame" as a datagram.
+
+But it's a trap.
+Designed to lure in developers who don't know any better.
+Who wouldn't give up their precious UDP datagrams otherwise.
+
+If you want some more of my hot-takes:
+
+- The next [blog post about FEC in OPUS](/blog/forward-error-correction), and why layers are important.
+- The previous [blog post gushed over QUIC](/blog/quic-powers), except for the datagram extension which is frankly terrible.
+
+## Conclusion
+
+There is no conclusion.
+This is a rant.
+
+Please don't design your application on top of datagrams.
+Old protocols like [DNS](https://en.wikipedia.org/wiki/Domain_Name_System) get a pass, but be like [DNS over HTTPS](https://en.wikipedia.org/wiki/DNS_over_HTTPS) instead.
+
+And please, please don't make yet another video protocol on top of UDP.
+Get involved with [Media over QUIC](https://datatracker.ietf.org/wg/moq/about/) instead!
+Join our [Discord](https://discord.gg/FCYF3p99mr) and tell me how wrong I am.
+
+Written by [@kixelated](https://github.com/kixelated).
+
+
diff --git a/src/pages/blog/quic-powers.mdx b/src/pages/blog/quic-powers.mdx
new file mode 100644
index 0000000..8914226
--- /dev/null
+++ b/src/pages/blog/quic-powers.mdx
@@ -0,0 +1,452 @@
+---
+layout: "@/layouts/global.astro"
+title: QUIC's (hidden) Super Powers
+author: kixelated
+description: Most of QUIC's killer features aren't obvious or documented. Time to change that.
+cover: "/blog/quic-powers/vegeta.png"
+date: 2023-12-17
+---
+
+# QUIC's (hidden) Super Powers
+
+Most of QUIC's killer features aren't obvious or documented.
+Time to change that.
+
+# The Basics
+
+Before we can cover the advanced stuff, we need to cover the basics.
+
+
+rage bait but it's true
+
+You can still use TCP (or WebSockets) in \[CURRENT_YEAR\] but you're missing out.
+[QUIC](https://en.wikipedia.org/wiki/QUIC) is the new transport protocol on the block and it's going to slowly take over the internet.
+
+## QUIC
+
+Imagine you're a humble (and handsome) application developer like myself.
+You hear about this cool new protocol from this cool website with a cool domain name.
+Life is good, but you could always spice it up with some _new technology_.
+
+Before we can answer the **WHY** let's cover the **HOW**.
+What is the QUIC API? How do I use it?
+
+
+Shamelessly taken from my presentation at MHV 2023
+
+To oversimplify, QUIC gives you a byte stream like TCP.
+But unlike TCP, there's no cost to open/close additional streams on either side.
+
+Why is this useful?
+
+- If you've ever **used a connection pool**, delete that shit and use QUIC.
+- If you've ever **multiplexed streams**, delete that shit and use QUIC.
+
+## HTTP
+
+And that's exactly why QUIC was created.
+It turns out HTTP suffers from both of these issues depending on the version:
+
+- HTTP/1 **used a connection pool**, often up to 6 connections _per domain_.
+- HTTP/2 **multiplexed requests**, introducing _head-of-line blocking_.
+
+Delete that shit and use HTTP/3.
+
+
+Shamelessly vandalized from my presentation at MHV 2023
+
+However, as a humble (and handsome) application developer, **I don't care about HTTP/3**.
+
+The problem is that the HTTP version is transparent to my application.
+If you visit my website, you can use any version of HTTP and basically it all works the same.
+However, if I issue a ton of requests in parallel in an attempt to utilize QUIC's concurrency, then my application will absolutely choke on the older HTTP versions.
+
+Don't get me wrong, HTTP/3 _might_ be an improvement, but it's an incremental improvement.
+It's not exciting unless you're a CDN vendor for all the reasons I'll mention below.
+
+## WebTransport
+
+And frankly, it's often just not worth fighting against HTTP semantics.
+If you want to make a web application that utilizes QUIC, then just use QUIC directly.
+
+Fortunately we have [WebTransport](https://developer.mozilla.org/en-US/docs/Web/API/WebTransport_API) for that.
+It _mostly_ exposes the QUIC API and is supported in Chrome/Edge/Firefox with Safari coming soon™.
+
+You can think of WebTransport as a WebSocket replacement, but using QUIC instead of TCP.
+
+# The Advanced Shit
+
+Anyway, time to get technical.
+Most of this won't matter for your everyday humble (and handsome) application developer, but maybe you find something useful.
+
+I figured out most of these while writing my own QUIC implementation.
+But don't be me.
+Use an [existing implementation](https://github.com/quicwg/base-drafts/wiki/Implementations) instead.
+I'm now a Rust fanboye so I'm using [Quinn](https://github.com/quinn-rs/quinn) with [WebTransport](https://docs.rs/webtransport-quinn/latest/webtransport_quinn/).
+CloudFlare's [Quiche](https://github.com/cloudflare/quiche) is also quite nice and interfaces with C/C++.
+
+But you can read the RFCs if you're mega bored on a plane.
+They're extremely well-written and designed if you're into that sort of thing.
+
+- [RFC9000](https://www.rfc-editor.org/rfc/rfc9000.html): QUIC Transport
+- [RFC9001](https://www.rfc-editor.org/rfc/rfc9001.html): QUIC Security
+- [RFC9002](https://www.rfc-editor.org/rfc/rfc9002.html): QUIC Congestion Control
+
+
+yes i did just spend an hour of my rapidly depleting time left on this earth to trace a dead meme about the RFC numbers
+
+
+## Connection ID
+
+If you took a networking class, I'm sorry.
+But you _might_ remember that TCP connections are identified by a 4-tuple.
+The kernel uses this information to map incoming packets to the correct socket/connection:
+
+- source IP
+- source port
+- destination IP
+- destination port
+
+Each IP packet contains the source ip/port and destination ip/port, so you know who sent it and where to send the reply.
+Kinda like mail if anybody is old enough to remember that.
+
+
+A thrilling diagram of a TCP connection.
+
+That's gone with QUIC.
+
+QUIC connections are instead identified by a [Connection ID](https://www.rfc-editor.org/rfc/rfc9000.html#name-connection-id).
+This is an opaque blob of data, chosen by the receiver, sent in the header of every QUIC packet.
+
+
+Now each QUIC packet also contains a connection ID
+
+This might seem inconsequential at first, but it's actually a **huge** deal.
+
+### Roaming
+
+Ever switch between cellular and WiFi?
+Of course you have, you have a phone.
+When you switch networks, your source IP/port changes.
+
+**With TCP**, this changes the 4-tuple.
+The server will silently discard any packets from the unknown source, severing the connection.
+
+The application needs to detect this and retry, dialing a new connection and reissuing any pending requests.
+Retry logic is _always_ a pain to get right and often users will just have to refresh manually.
+
+
+There's no way to associate the new IP/port. RIP your connection.
+
+**With QUIC**, the source address will change but the Connection ID remains constant.
+The QUIC stack will transparently [validate the new path](https://www.rfc-editor.org/rfc/rfc9000.html#name-path-validation) (to prevent spoofing attacks) before switching to it.
+The application doesn't need to do anything, it just works™
+
+
+Wow thanks for identifying yourself, Mr Connection ID.
+
+### NAT Rebinding
+
+In the above scenario, the client could know that its IP address changed when switching networks.
+Rather than wait for the connection to time out, a good client can proactively reconnect.
+
+However that's not true with NATs.
+NATs suck.
+A NAT can transparently rebind your address without your knowledge, usually after an unspecified period of inactivity but sometimes just because they suck.
+
+**In TCP land**, a NAT rebinding is fatal, as the 4-tuple changes and packets will get silently discarded.
+The application has no idea that this happened and the connection appears to hang.
+It's obnoxious and why you need some form of keep-alive to both detect and fend off any NAT rebinding.
+
+**In QUIC land**, the Connection ID saves the day.
+It still takes 1 RTT for QUIC to verify the new path but it just works™
+
+### Firewalls
+
+I've been focusing on TCP quite a lot to simplify the narrative, but now it's time to pick on WebRTC instead.
+It's complicated, but WebRTC _actually_ identifies connections based on the 2-tuple:
+
+- destination IP
+- destination port
+
+This means the source IP/port can change without severing the connection: ez roaming and NAT rebinding support!
+
+
+The chad 2-tuple doesn't care about the source address. The unique destination port tells us the connection.
+
+
+But why doesn't QUIC do this instead?
+Two terrifying words: _corporate firewalls_.
+
+The server needs to open a unique port for each connection.
+However, corporate firewalls often block all but a handful of ports for "security reasons".
+At one point I probed the Twitch corporate network and found that only ~30 UDP ports were open; everything else was blocked.
+
+
+The beta 2-tuple failed to connect, oops. Turns out the port was too unique for the firewall.
+
+
+QUIC instead uses `udp/443` for _all_ connections.
+This is huge because firewalls will often allow port 443 for HTTPS but block almost everything else.
+If you use QUIC, you automatically leverage this firewall hole punching.
+
+Note that you can (and I have) hack a WebRTC server to listen on `udp/443` instead.
+However, just like TCP, you lose roaming support because you rely on the 4-tuple to identify connections.
+
+**Fun fact**: a QUIC client can also use the same IP and port for all outgoing connections.
+There's no need for ephemeral ports!
+
+## Load Balancing
+
+When you use the internet, you're almost never connecting to a single server.
+There's secretly a fleet of servers behind a fleet of load balancers, distributing the load.
+
+You can't see them, but they're watching.
+Waiting.
+
+### Connection ID
+
+Like I mentioned earlier, the Connection ID is an opaque blob of data chosen by the _receiver_.
+The receiver can use multiple IDs, issuing or retiring them at will, and the length is unbounded.
+
+This is **huge**.
+Your friendly neighborhood server admin is salivating.
+
+Since a server choose it's own connection ID, it can encode session information into it.
+This can include routing information, echoed by the client in the header of each _packet_.
+Basically you can encode whatever load balancing information you want.
+
+
+don't tell the EU that connection IDs are kinda like cookies
+
+There's an [entire draft](https://datatracker.ietf.org/doc/draft-ietf-quic-load-balancers/) dedicated to cool shit you could do with this.
+Here's a free idea: encode the name of the backend server into the Connection ID.
+Now you have sticky sessions without a lookup table!
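+
+A sketch of that free idea, loosely in the spirit of the draft; key management and rotation are hand-waved:
+
+```ts
+import { createCipheriv, randomBytes } from "node:crypto";
+
+// Pack a backend ID plus a random nonce into one AES block. The resulting
+// 16-byte connection ID looks opaque to clients but is decryptable by the LB.
+const key = randomBytes(16); // shared between servers and load balancers
+
+function newConnectionId(backendId: number): Buffer {
+  const plain = Buffer.alloc(16);
+  plain.writeUInt16BE(backendId, 0); // routing info
+  randomBytes(14).copy(plain, 2); // per-connection uniqueness
+  const cipher = createCipheriv("aes-128-ecb", key, null);
+  cipher.setAutoPadding(false);
+  return Buffer.concat([cipher.update(plain), cipher.final()]);
+}
+```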
+
+That being said:
+
+- **Overhead matters**: Don't hog a 1.2KB QUIC packet with a 1KB connection ID.
+- **Security matters**: The connection ID is the _only_ plaintext data so just make sure it's encrypted/unguessable.
+
+But there's a lot of creative stuff you can do!
+
+### Anycast
+
+**OH BOY**.
+This one is extremely well-hidden but extremely powerful.
+
+During the handshake, the server can reply with an optional and kinda obscure [preferred_address](https://datatracker.ietf.org/doc/html/rfc9000#section-9.6.1).
+The client will try to send packets to this address instead, [after validating it of course](https://datatracker.ietf.org/doc/html/rfc9000#forgery-spa).
+Why on earth does this matter?
+
+So there's this thing called [anycast](https://en.wikipedia.org/wiki/Anycast).
+Anycast allows multiple servers to advertise the same IP address and any packets will get routed to the "closest" server (based on [BGP](https://www.cloudflare.com/learning/security/glossary/what-is-bgp/)).
+This is amazing for global load balancing since it also gives network administrators control over routing.
+The path to the closest server might be congested, so ISPs can choose a different "closest" server instead.
+
+
+All regions advertise the same address. Your packet gets routed to the closest one.
+
+However, if the "closest" server changes for any reason (flapping), unfortunately a different server will receive your packets.
+Even with QUIC, the new server will discard these unknown packets and the connection is severed.
+
+
+Who knows why the route changed, but it does happen.
+
+Anycast is usually reserved for UDP protocols like DNS because of this flapping issue.
+It's usually better to use [unicast](https://en.wikipedia.org/wiki/Unicast) with TCP to ensure consistent routing.
+
+However, what if you could use **anycast for discovery** and then switch to **unicast for consistency**?
+That's exactly what `preferred_address` is for.
+
+Every server can advertise a shared anycast address used for the TLS handshake _only_.
+Afterwards, the clients are told to switch to the server's unique unicast address.
+The end result is a stable connection to the closest server, transparent to the application!
+
+There are other ways of implementing this today, but they kind of suck.
+[GeoDNS](https://en.wikipedia.org/wiki/GeoDNS) is quite inaccurate since it (usually) involves a crude IP lookup database.
+An application can use something like [Happy Eyeballs](https://en.wikipedia.org/wiki/Happy_Eyeballs) to try multiple addresses in parallel, but it's expensive and racy.
+
+My **bold** prediction is that QUIC's anycast handshake will take over once it's widely deployed.
+
+## Privacy
+
+QUIC takes a paranoid approach to privacy.
+TLS encryption is required and even packet headers are encrypted to piss off middleboxes.
+
+What are middleboxes?
+Well it's a fancy word for routers, the boxes that figure out how to get IP packets from point A to point B.
+The problem is that these middleboxes can inspect and potentially modify packets.
+A middlebox with evil intentions can monitor traffic, throttle traffic, or even [inject ads](https://superuser.com/questions/902635/isp-is-inserting-ads-into-web-pages).
+
+The QUIC solution is to encrypt everything\*.
+Even the packet number is encrypted because fuck middleboxes.
+But please note that this is not perfect:
+
+- The QUIC handshake uses a [hard-coded initial secret](https://www.rfc-editor.org/rfc/rfc9001.html#name-initial-secrets).
+ A middlebox can decrypt this traffic to determine the [server name](https://www.cloudflare.com/learning/ssl/what-is-sni/) unless you're using [ESNI](https://www.cloudflare.com/learning/ssl/what-is-encrypted-sni/).
+- The QUIC connection ID will leak information about the number of connections and their activity.
+ [MASQUE](https://datatracker.ietf.org/wg/masque/about/) allows you to nest multiple QUIC connections, effectively operating as a VPN.
+
+And just to pick on WebRTC some more: the [SRTP](https://datatracker.ietf.org/doc/html/rfc3711) header (including any extensions) is not encrypted.
+A middlebox can easily inspect the header, determine the type of WebRTC traffic and even figure out which participants are currently talking.
+And of course, WebRTC is notorious for [leaking your IP address](https://nordvpn.com/blog/webrtc/).
+
+## Attacks
+
+UDP protocols are generally quite vulnerable to attacks, often amplifying them by accident.
+TCP SYN floods are a common DDoS vector too.
+[QUIC is not immune to attacks](https://datatracker.ietf.org/doc/html/rfc9000#section-21.1) either but there are some neat mitigation techniques.
+
+Of course, you should know by now that **Connection ID is amazing**.
+A QUIC server can sign the Connection ID to prevent the client from tampering with or spoofing it.
+
+Why does this matter?
+Well, now a cooperating router (L3) can drop abusive packets as they enter the network.
+Otherwise, the packet would have to reach the QUIC server (L7) before it could be processed.
+This can be done in hardware for maximum efficiency, especially since it requires no state.
+
+In fact, the router could even send a [stateless reset](https://datatracker.ietf.org/doc/html/rfc9000#name-stateless-reset) to close a connection if it detects abuse.
+The server chooses the [reset token](https://www.rfc-editor.org/rfc/rfc9000.html#section-10.3) during the handshake, and if it's deterministic then the router can compute it too.
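+
+For example, the token can be an HMAC of the connection ID under a static key, a construction [RFC 9000 itself suggests](https://www.rfc-editor.org/rfc/rfc9000.html#section-10.3.2):
+
+```ts
+import { createHmac, randomBytes } from "node:crypto";
+
+// Both the server and a trusted router hold the static key, so either one can
+// compute the 16-byte reset token for any connection ID, with zero state.
+const staticKey = randomBytes(32);
+
+function resetToken(connectionId: Buffer): Buffer {
+  return createHmac("sha256", staticKey).update(connectionId).digest().subarray(0, 16);
+}
+```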
+
+There's a world of creative architectures that are just waiting to be explored.
+I'm sure the folks at Google and CloudFlare are working on some cool shit right now.
+
+## Congestion Control
+
+[QUIC's congestion control](https://www.rfc-editor.org/rfc/rfc9002.html) is modeled after TCP but there's some [important differences](https://www.rfc-editor.org/rfc/rfc9002.html#section-4) to call out:
+
+- [QUIC packets have unique numbers](https://www.rfc-editor.org/rfc/rfc9002.html#section-4.2) so the receiver can distinguish between retransmissions.
+- [QUIC ACKs include unbounded\* ranges](https://www.rfc-editor.org/rfc/rfc9002.html#section-4.5) so the receiver can more accurately report individual losses.
+- [QUIC ACKs include the batching delay](https://www.rfc-editor.org/rfc/rfc9002.html#name-explicit-correction-for-del) so the receiver has accurate RTT measurements.
+
+Does this make a noticeable difference?
+Not really, but QUIC's congestion control should be marginally better.
+QUIC implementations are still being tuned and optimized so take these initial results with a grain of salt:
+
+
+HTTP rollout stats from a global CDN (2023). These are percentages, green = good
+
+**Fun fact**: QUIC ACKs are themselves acknowledged, so you effectively ACK an ACK.
+Subsequent ACKs [don't include the ACK'd ACK packet numbers](https://datatracker.ietf.org/doc/html/rfc9000#name-managing-ack-ranges), saving bandwidth!
+
+### Deployable
+
+However, there is a **MAJOR** difference between TCP and QUIC congestion control.
+TCP is implemented in the kernel which means it's difficult or impossible to modify.
+
+- A Windows client is stuck with the crappy TCP implementation in Windows.
+- An OSX client is stuck with the crappy TCP implementation in OSX.
+- An Android client is stuck with the crappy TCP implementation in Android.
+- An iOS client is stuck with the crappy TCP implementation in iOS.
+- A Linux client is stuck with the crappy TCP implementation in Linux.
+
+You get the point.
+The default TCP congestion control for every OS (as far as I can tell) is loss-based and suffers from [bufferbloat](https://en.wikipedia.org/wiki/Bufferbloat), making it poor for latency sensitive applications.
+Note that you can [configure](https://wiki.crowncloud.net/?How_to_enable_BBR_on_Ubuntu_20_04) or install a custom kernel to change TCP's behavior, but that's primarily for power users or server operators.
+
+QUIC, on the other hand, is **not implemented in the kernel** (yet).
+When you ship your client, you ship your own, vendored QUIC implementation.
+This means your application can ship better congestion control algorithms (ex. [BBR](https://en.wikipedia.org/wiki/TCP_congestion_control#TCP_BBR)), which has been impossible until now.
+
+In fact, you can even experiment with your own congestion control algorithms.
+I've made some improvements to BBR to better support [application-limited](https://datatracker.ietf.org/doc/html/rfc9002#section-7.8) traffic like live video.
+It's also significantly easier to run experiments in userspace.
+
+## Datagrams
+
+Datagrams are basically UDP packets.
+They are unreliable, unordered, and have a maximum size (at least 1.2KB).
+
+QUIC supports datagrams via [an extension](https://www.rfc-editor.org/rfc/rfc9221.html).
+This extension is required as part of [WebTransport](https://www.w3.org/TR/webtransport/), which means datagram support in the browser!
+
+However, there are some caveats:
+
+1. **Datagrams are congestion controlled**. QUIC datagrams are acknowledged behind the scenes _solely_ to compute the max send rate. An application can't implement its own congestion control since it's throttled by the QUIC congestion control.
+2. **Datagrams are acknowledged [but not exposed](https://github.com/quicwg/datagram/issues/15)**. An application has to implement its own reliability mechanism instead, so the QUIC ACKs are mostly wasted.
+3. **Datagrams can't be sent to arbitrary destinations**. If you want to send packets to multiple ports, then you have to establish separate QUIC connections.
+4. **Datagrams may be coalesced into a single QUIC packet**. This is great for efficiency because it means fewer packets sent. However, it means an application can't rely on datagrams _actually_ being independent, which is kind of the point.
+
+**HOT TAKE ALERT**: never use QUIC datagrams.
+They have all the downsides of UDP, none of the benefits, and throw some foot-guns into the mix.
+They suck.
+
+You should use QUIC streams instead.
+Make a QUIC stream for each logical unit (ex. video frame) and prioritize/close them as needed.
+You get fragmentation, ordering, reliability, flow control, etc for free and never have to think about [MTUs](https://en.wikipedia.org/wiki/Maximum_transmission_unit) again.
+
+### Media over QUIC
+
+I know this is positive blog post but I want to dunk on QUIC datagrams a bit more.
+
+The usage of datagrams is actually a core difference between the [Media over QUIC](https://datatracker.ietf.org/wg/moq/about/) and the [RTP over QUIC](https://datatracker.ietf.org/doc/draft-ietf-avtcore-rtp-over-quic/) efforts.
+Both are trying to improve WebRTC, but I'm on team "make a brand new protocol".
+
+
+obligatory [XKCD](https://xkcd.com/927/)
+
+All of the reasons above actually prevent implementing RTP naively by using QUIC datagrams instead of UDP:
+
+1. **Datagrams are congestion controlled**. It means you can't implement [GCC](https://datatracker.ietf.org/doc/html/draft-ietf-rmcat-gcc-02) or [SCReAM](https://datatracker.ietf.org/doc/html/rfc8298) on top of QUIC datagrams.
+2. **Datagrams are acknowledged but you can't use them**. You end up sending both ACKs and NACKs in different layers, increasing overhead and hurting performance.
+3. **Datagrams can't be sent to arbitrary destinations**. It means you can't send RTP and RTCP to different ports; they have to be muxed. (not a big deal)
+4. **Datagrams may be coalesced into a single QUIC packet**. It makes it more difficult to implement [FEC](https://www.rfc-editor.org/rfc/rfc8854.html), since QUIC datagrams may be secretly coalesced.
+
+This is just an example for RTP, but the same is probably true for your favorite UDP-based protocol.
+Use QUIC streams instead.
+
+**NOTE**: Media over QUIC will likely support datagrams, primarily for experimentation.
+I've already stated my opinion but unfortunately, I'm not the boss of the IETF.
+
+# STREAM_FIN
+
+Written by [@kixelated](https://github.com/kixelated).
+
+I'm super excited about QUIC and WebTransport.
+There's never been a better time to be a transport protocol nerd.
+
+On a personal note, I'm now [gainfully employed](https://discord.com/).
+That means I'm getting paid to actually make useful stuff instead of writing ~informative~ blog posts.
+Unfortunate for you, but fortunate for me since I get health insurance now (thanks America).
+
+I won't be able to devote as much time cheer-leading for Media over QUIC but the standard is still full steam ahead.
+Remember: it's co-authored by individuals from Google, Meta, Cisco, and Akamai, along with the IETF as a whole.
+
+Join the [discord server](https://discord.gg/FCYF3p99mr) though.
+
+
diff --git a/src/pages/blog/replacing-hls-dash.mdx b/src/pages/blog/replacing-hls-dash.mdx
new file mode 100644
index 0000000..028fdae
--- /dev/null
+++ b/src/pages/blog/replacing-hls-dash.mdx
@@ -0,0 +1,372 @@
+---
+layout: "@/layouts/global.astro"
+title: Replacing HLS/DASH
+author: kixelated
+description: Low-latency, high bitrate, mass fan-out is hard. Who knew?
+cover: "/blog/replacing-hls-dash/carrot.png"
+date: 2023-11-21
+---
+
+# Replacing HLS/DASH
+
+Low-latency, high bitrate, mass fan-out is hard. Who knew?
+
+See [Replacing WebRTC](https://quic.video/blog/replacing-webrtc) for the previous post in this series.
+
+## tl;dr
+
+If you're using HLS/DASH and your main priority is...
+
+- **cost**: wait until there are CDN offerings.
+- **latency**: you should seriously consider MoQ.
+- **features**: it will take a while to implement everything.
+- **vod**: it works great, why replace it?
+
+## Intro
+
+Thanks for the positive reception on [Hacker News](https://news.ycombinator.com/item?id=38069974)!
+Anyway, I'm back.
+
+I spent the last 9 years working on literally all facets of HLS and Twitch's extension: [LHLS](https://www.theoplayer.com/blog/low-latency-hls-lhls).
+We hit a latency wall and my task was to find an alternative, originally WebRTC but that eventually pivoted into **Media over QUIC**.
+
+Hopefully this time I won't be _"dunning-Krugerering off a cliff"_. Thanks random Reddit user for that confidence boost.
+
+## Why HLS/DASH?
+
+Simple answer: [Apple](https://developer.apple.com/library/archive/documentation/NetworkingInternet/Conceptual/StreamingMediaGuide/UsingHTTPLiveStreaming/UsingHTTPLiveStreaming.html)
+
+> If your app delivers video over cellular networks, and the video exceeds either 10 minutes duration or 5 MB of data in a five minute period, you are required to use HTTP Live Streaming.
+
+It's an anti-climactic answer, but Twitch migrated from [RTMP](https://en.wikipedia.org/wiki/Real-Time_Messaging_Protocol) to [HLS](https://en.wikipedia.org/wiki/HTTP_Live_Streaming) to avoid getting kicked off the App Store.
+The next sentence gives a hint as to why:
+
+> If your app uses HTTP Live Streaming over cellular networks, you are required to provide at least one stream at 64 Kbps or lower bandwidth.
+
+This was back in 2009 when the iPhone 3GS was released and AT&T's network was [struggling to meet the demand](https://www.wired.com/2010/07/ff-att-fail/).
+The key feature of HLS was [ABR](https://en.wikipedia.org/wiki/Adaptive_bitrate_streaming): multiple copies of the same content at different bitrates.
+This allowed the Apple-controlled HLS player to reduce the bitrate rather than pummel a poor megacorp's cellular network.
+
+[DASH](https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP) came afterwards in an attempt to standardize HLS, minus the controlled-by-Apple part.
+There's definitely some cool features in DASH but the [core concepts are the same](https://www.cloudflare.com/learning/video/what-is-mpeg-dash/) and they even share the same [media container](https://www.wowza.com/blog/what-is-cmaf) now.
+So the two get bundled together as **HLS/DASH**.
+
+But I'll focus more on HLS since that's my shit.
+
+## The Good Stuff
+
+While we were forced to switch protocols at the tech equivalent of gunpoint, HLS actually has some amazing benefits.
+The biggest one is that it uses **HTTP**.
+
+HLS/DASH works by breaking media into "segments", each containing a few seconds of media.
+The player will individually request each segment via a HTTP request and seamlessly stitch them together.
+New segments are constantly being generated and announced to the player via a "playlist".
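+
+In player terms, the whole loop is surprisingly mundane. Here's a minimal sketch of it in TypeScript (the playlist parsing is simplified, and it ignores `SourceBuffer` pacing via `updateend`, which a real player must handle):
+
+```ts
+// Hypothetical sketch of an HLS player's fetch loop.
+function parsePlaylist(m3u8: string): string[] {
+	// Every line that isn't a #EXT tag is a segment URL.
+	return m3u8.split("\n").filter((line) => line !== "" && !line.startsWith("#"));
+}
+
+async function play(playlistUrl: string, buffer: SourceBuffer) {
+	const seen = new Set<string>();
+
+	for (;;) {
+		const playlist = await (await fetch(playlistUrl)).text();
+
+		for (const url of parsePlaylist(playlist)) {
+			if (seen.has(url)) continue;
+			seen.add(url);
+
+			// Each segment is just another HTTP request.
+			const segment = await (await fetch(url)).arrayBuffer();
+			buffer.appendBuffer(segment);
+		}
+
+		// Poll for new segments; a real player would use the target duration.
+		await new Promise((resolve) => setTimeout(resolve, 2000));
+	}
+}
+```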
+
+
+ 
+ Thanks for the filler image, DALL·E
+
+
+Because HLS uses HTTP, a service like Twitch can piggyback on the existing infrastructure of the internet.
+There's a plethora of optimized CDNs, servers, and clients that all speak HTTP and can be used to transport media.
+You do have to do some extra work to massage live video into HTTP semantics, but it's worth it.
+
+The key is utilizing [economies of scale](https://napkinfinance.com/napkin/what-are-economies-of-scale/) to make it cheap to mass distribute live media.
+Crafting individual IP packets might be the _correct_ way to send live media with minimal latency (ie. WebRTC), but it's not the most cost-effective.
+
+## The Bad Stuff
+
+I hope you weren't expecting a fluff piece.
+
+### Latency
+
+We were somewhat sad to bid farewell to Flash (_gasp_).
+Twitch's latency went from something like 3 seconds with RTMP to 15 seconds with HLS.
+
+There's a boatload of latency sources, anywhere from the duration of segments to the frequency of playlist updates.
+Over the years we were slowly able to chip away at the problem, eventually extending HLS to get latency back down to theoretical RTMP levels.
+I [documented our journey](/blog/distribution-at-twitch) if you're interested in the gritty details.
+
+But one big source of latency remains: **T** **C** **P**
+
+I went into more detail with my [previous blog post](/blog/replacing-webrtc), but the problem is head-of-line blocking.
+Once you flush a frame to the TCP socket, it will be delivered reliably and in order.
+However, when the network is congested, the encoded media bitrate will exceed the network bitrate and queues will grow.
+Frames will take longer and longer to reach the player until the buffer is depleted and the viewer gets to see their least favorite spinny boye.
+
+
+ 
+ > tfw HLS/DASH
+
+
+A HLS/DASH player can detect queuing and switch to a lower bitrate via ABR.
+However, it can only do this at infrequent (ex. 2s) segment boundaries, and it can't take back any frames already flushed to the socket.
+So if you're watching 1080p video and your network takes a dump, well you still need to download seconds of unsustainable 1080p video before you can switch down to a reasonable 360p.
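+
+To make that concrete, here's a sketch of the throughput heuristic behind client-side ABR (the renditions and the 20% safety margin are illustrative, not from any spec):
+
+```ts
+interface Rendition {
+	name: string;
+	bitrate: number; // bits per second
+}
+
+// Sorted highest to lowest.
+const renditions: Rendition[] = [
+	{ name: "1080p", bitrate: 6_000_000 },
+	{ name: "720p", bitrate: 3_000_000 },
+	{ name: "360p", bitrate: 1_000_000 },
+];
+
+// Measure each segment download, then pick a rendition at the next
+// segment boundary; bytes already in flight can't be taken back.
+function nextRendition(bytes: number, millis: number): Rendition {
+	const throughput = (bytes * 8 * 1000) / millis; // bits per second
+	const budget = throughput * 0.8; // leave 20% headroom
+
+	// The highest rendition that fits, else the lowest available.
+	return renditions.find((r) => r.bitrate <= budget) ?? renditions[renditions.length - 1];
+}
+```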
+
+You can't just put the toothpaste back in the tube if you squeeze out too much.
+You gotta use all of the toothpaste, even if it takes much longer to brush your teeth.
+
+
+ 
+
+ [Source](https://knowyourmeme.com/memes/shitting-toothpaste-pooping-toothpaste). The analogy falls apart but I
+ get to use this image again.
+
+
+
+### Clients
+
+HLS utilizes "smart" clients and "dumb" servers.
+The client decides what, when, why, and how to download each media playlist, segment, and frame.
+Meanwhile the server just sits there and serves HTTP requests.
+
+The problem really depends on your perspective. If you control:
+
+- **client only**: Life is great!
+- **client and server**: Life is great! You can even extend the protocol!
+- **server only**: Life is _pain_.
+
+For a service like Twitch, the solution might seem simple: build your own client and server!
+And we did, including a baremetal live CDN designed exclusively for HLS.
+
+But [until quite recently](https://bitmovin.com/managed-media-source), we have been forced to use the Apple HLS player on iOS for AirPlay or Safari support.
+And of course TVs, consoles, casting devices, and others have their own HLS players.
+And if you're offering your baremetal live CDN [to the public](https://aws.amazon.com/ivs/), you can't exactly force customers to use your proprietary player.
+
+So you're stuck with a _dumb_ server and a bunch of _dumb_ clients.
+These _dumb_ clients make _dumb_ decisions based on imperfect information, with no cooperation from the server.
+
+### Ownership
+
+I love the simplicity of HLS compared to DASH.
+There's something so satisfying about a text-based playlist that you can actually read, versus an XML monstrosity designed by committee.
+
+```
+#EXTM3U
+#EXT-X-TARGETDURATION:10
+#EXT-X-VERSION:3
+#EXTINF:9.009,
+http://media.example.com/first.ts
+#EXTINF:9.009,
+http://media.example.com/second.ts
+#EXTINF:3.003,
+http://media.example.com/third.ts
+#EXT-X-ENDLIST
+```
+
+
+ [Orgasmic](https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis/#section-9.1).
+
+
+But unfortunately Apple controls HLS.
+
+There's a misalignment of incentives between Apple and the rest of the industry.
+I'm not even sure how Apple uses HLS, or why they would care about latency, or why they insist on being the sole arbiter of a live streaming protocol.
+[Pantos](https://www.crunchbase.com/person/roger-pantos) has done a great and thankless job, but it feels like a stand-off.
+
+For example, LL-HLS originally [required HTTP/2 server push](https://www.theoplayer.com/blog/impact-of-apple-ll-hls-update-2020) and it took nearly the entire industry to convince Apple that this was a bad idea.
+The upside is that we got [a mailing list](https://lists.apple.com/mailman/listinfo/hls-announce) so they can announce changes to developers first... but don't expect the ability to propose changes any time soon.
+
+DASH is its own can of worms as it's controlled by [MPEG](https://en.wikipedia.org/wiki/Moving_Picture_Experts_Group).
+The specifications are [behind a paywall](https://www.iso.org/standard/79106.html) and apparently [require patent licensing](https://www.streamingmedia.com/Articles/ReadArticle.aspx?ArticleID=133508).
+I can't even tell if I'm going to [get sued](https://www.mpegla.com/wp-content/uploads/DASHWeb.pdf) for parsing a DASH playlist without paying the troll toll.
+
+
+ 
+
+ [Source](https://itsalwayssunny.fandom.com/wiki/The_Nightman_Cometh). 🎵 You gotta pay the Troll Toll 🎵
+
+
+
+# What's next?
+
+You're given a blank canvas and a brush to paint the greenest of fields. What do you make?
+
+
+ 
+
+ [Source](https://www.freeimageslive.co.uk/free_stock_image/green-field-painting-jpg). Wow. That's quite the
+ green field.
+
+
+
+## TCP
+
+After my [previous blog post](/blog/replacing-webrtc), I had a few people hit up my DMs and claim they can do real-time latency with TCP.
+And I'm sure a few more people will too after this post, so you get your own section that muddles the narrative.
+
+Yes, you can do real-time latency with TCP (or WebSockets) under ideal conditions.
+
+However, it just won't work well enough on poor networks.
+Congestion and buffer-bloat will absolutely wreck your protocol.
+A lot of my time at Twitch was spent optimizing for the 90th percentile: the shoddy cellular networks in Brazil or India or Australia.
+
+If you are going to reinvent RTMP, there are [some ways to reduce queuing](https://www.youtube.com/watch?v=cpYhm74zp0U), but they are quite limited.
+This is _especially_ true in a browser environment when limited to HTTP or [WebSockets](https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API).
+
+See my next blog post about **Replacing RTMP**.
+
+## HTTP
+
+Notably absent thus far has been any mention of [LL-HLS](https://www.theoplayer.com/blog/low-latency-hls-lhls) and [LL-DASH](https://www.wowza.com/blog/what-is-low-latency-dash).
+These two protocols are meant to lower HLS/DASH latency respectively by breaking media segments into smaller chunks.
+
+The chunks might be smaller, but they're still served sequentially over TCP.
+The latency floor is lower but the latency ceiling is still just as high, and you're still going to buffer during congestion.
+
+
+ 
+ > tfw LL-HLS/LL-DASH
+
+
+We're also approaching the limit of what you can do with HTTP semantics.
+
+- **LL-HLS** has configurable latency at the cost of an exponential number of sequential requests in the critical path. For example, 20 HTTP requests a second _per track_ still only gets you +100ms of latency, which is not viable for real-time.
+- **LL-DASH** can be configured down to +0ms added latency, delivering frame-by-frame with chunked-transfer. However, it absolutely wrecks client-side ABR algorithms. Twitch [hosted a challenge](https://blog.twitch.tv/en/2020/01/15/twitch-invites-you-to-take-on-our-acm-mmsys-2020-grand-challenge/) to improve this but I'm convinced it's impossible without server feedback.
+
+[HESP](https://www.theoplayer.com/solutions/hesp-high-efficiency-streaming) also gets a special shout-out because it's cool.
+It works by canceling HTTP requests during congestion and frankensteining the video encoding, which is quite ~~hacky~~ clever, but it suffers a similar fate.
+
+We've hit a wall with HTTP over TCP.
+
+## HTTP/3
+
+If you're an astute hyper text transport protocol aficionado, you might have noticed that I said "HTTP over TCP" above.
+But [HTTP/3](https://www.cloudflare.com/learning/performance/what-is-http3) uses [QUIC](https://www.rfc-editor.org/rfc/rfc9000.html) instead of TCP.
+Problem solved! We can replace any mention of ~~TCP~~ with QUIC!
+
+Well, not quite. To use another complicated topic as a metaphor:
+
+- A TCP connection is a single-core CPU.
+- A QUIC connection is a multi-core CPU.
+
+If you take a single threaded program and run it on a multi-core machine, it will run just as slow, and perhaps even slower.
+This is the case with HLS/DASH as each segment request is made _sequentially_.
+HTTP/3 is not a magic bullet and only has marginal benefits when used with HLS/DASH.
+
+
+The key to using QUIC is to embrace concurrency.
+
+This means utilizing multiple, independent streams that share a connection.
+You can prioritize a stream so it gets more bandwidth during congestion, much like you can use `nice` on Linux to prioritize a process when CPU starved.
+If a stream is taking too long, you can cancel it much like you can `kill` a process.
+
+For live media, you want to prioritize new media over old media in order to skip old content.
+You also want to prioritize audio over video, so you can hear what someone is saying without necessarily seeing their lips move.
+If you can only transmit half of a media stream in time, make sure it's the most important half.
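+
+With WebTransport, this maps almost directly onto `sendOrder`, where streams with higher values are transmitted first.
+A minimal sketch, with a made-up priority scheme (not Warp's actual mapping):
+
+```ts
+// Newer groups beat older groups; within a group, audio beats video.
+function priority(track: "audio" | "video", group: number): number {
+	return group * 2 + (track === "audio" ? 1 : 0);
+}
+
+async function sendGroup(wt: WebTransport, track: "audio" | "video", group: number, data: Uint8Array) {
+	const stream = await wt.createUnidirectionalStream({ sendOrder: priority(track, group) });
+	const writer = stream.getWriter();
+	await writer.write(data);
+	await writer.close();
+}
+```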
+
+To Apple/Pantos' credit, LL-HLS is exploring [prioritization using HTTP/3](https://mailarchive.ietf.org/arch/msg/hls-interest/RcZ2SG8Sz_zZEcjWnDKzcM_-TJk/).
+It doesn't go far enough (yet!) and HTTP semantics get in the way, but it's absolutely the right direction.
+I'm convinced that somebody will make a [HTTP/3 only media protocol](https://mailarchive.ietf.org/arch/msg/moq/S3eOPU5XnvQ4kn1zJyDThG5U4sA/) at some point.
+
+But of course I'm biased towards...
+
+# Media over QUIC
+
+MoQ utilizes WebTransport/QUIC directly to avoid TCP and HTTP.
+But what about that whole **economies of scale** stuff?
+
+Well, there are some important differences between Media over QUIC as compared to your standard _not invented here_ protocol:
+
+## Reason 0: QUIC
+
+QUIC is the future of the internet.
+TCP is a relic of the past.
+
+
+
+ You're going to see a lot of this logo, although not crudely traced or green.
+
+
+It's a **bold** claim I know.
+But I struggle to think of a single reason why you would use TCP over QUIC going forward.
+There are still some corporate firewalls that block UDP (used by QUIC) and hardware offload doesn't exist yet, but I mean that's about it.
+
+It will take a few years, but every library, server, load balancer, and NIC will be optimized for QUIC delivery.
+Media over QUIC offloads as much as possible into this powerful layer.
+We also benefit from any new features, including proposals such as [multi-path](https://datatracker.ietf.org/doc/draft-ietf-quic-multipath/), [FEC](https://datatracker.ietf.org/doc/draft-michel-quic-fec/), [congestion control](https://datatracker.ietf.org/doc/rfc9330/), etc.
+I don't want network features in my media layer _thank you very much_ (looking at you WebRTC).
+
+It might not be obvious, but HTTP/3 is actually a thin layer on top of QUIC.
+Likewise MoQ is also meant to be a thin layer on top of QUIC, effectively just providing pub/sub semantics.
+We get all of the benefits of QUIC without the baggage of HTTP, and yet still achieve web support via [WebTransport](https://developer.mozilla.org/en-US/docs/Web/API/WebTransport_API).
+
+We can focus on the important stuff instead: **live media**.
+
+## Reason 1: Relay Layer
+
+To avoid [the mistakes of WebRTC](/blog/replacing-webrtc), we need to decouple the application from the transport.
+If a relay (ie. CDN) knows anything about media encoding, we have failed.
+
+The idea is to break MoQ into layers.
+
+[MoqTransport](https://datatracker.ietf.org/doc/draft-ietf-moq-transport/) is the base layer and is a typical pub/sub protocol, although catered toward QUIC.
+The application splits data into "objects", annotated with a header providing simple instructions on how the relay needs to deliver it.
+These are generic signals, including stuff like the priority, reliability, grouping, expiration, etc.
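+
+As a rough sketch, the metadata a relay acts on looks something like this (field names are illustrative; the actual wire format lives in the draft):
+
+```ts
+interface ObjectHeader {
+	track: bigint; // which subscription the object belongs to
+	group: bigint; // grouping, ex. a group of pictures
+	object: bigint; // ordering within the group
+	priority: number; // what to favor during congestion
+	expires?: number; // seconds until the object is no longer useful
+}
+```
+
+Note what's absent: codecs, containers, timestamps.
+The relay forwards opaque bytes according to these hints; only the endpoints know it's media.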
+
+MoqTransport is designed to be used for arbitrary applications.
+Some examples include:
+
+- live chat
+- end-to-end encryption
+- game state
+- live playlists
+- or even a clock!
+
+This is a huge draw for CDN vendors.
+Instead of building a custom WebRTC CDN that targets one specific niche, you can cast a much wider net with MoqTransport.
+Akamai, Google, and Cloudflare have been involved in the standardization process thus far and CDN support is inevitable.
+
+## Reason 2: Media Layer
+
+There will be at least one media layer on top of MoqTransport.
+We're focused on the transport right now so there's no official "adopted" draft yet.
+
+However, my proposal is [Warp](https://datatracker.ietf.org/doc/draft-law-moq-warpstreamingformat/).
+It uses CMAF so it's backwards compatible with HLS/DASH while still capable of real-time latency.
+I think this is critically important, as any migration has to be done piecewise, client-by-client and user-by-user.
+The same media segments can be served for a mixed roll-out and for VoD.
+
+This website uses Warp! [Try it out!](/watch) Or watch one of my [presentations](https://www.youtube.com/watch?v=PncdrMPVaNc).
+
+There will absolutely be other mappings and containers; MoQ is not married to CMAF.
+The important part is that only the encoder/decoder understand this media layer and not any relays in the middle.
+There's a lot of cool ideas floating around, such as a [live playlist format](https://datatracker.ietf.org/doc/draft-wilaw-moq-catalogformat/) and a [low-overhead container](https://datatracker.ietf.org/doc/draft-mzanaty-moq-loc/).
+
+## Reason 3: IETF
+
+Media over QUIC is an [IETF working group](https://datatracker.ietf.org/wg/moq/about/).
+
+
+
+ I crudely traced and recolored this logo too.
+
+
+If you know nothing about the IETF, just know that it's the standards body behind favorites such as HTTP, DNS, TLS, QUIC, and even WebRTC.
+But I think [this part](https://www.ietf.org/about/introduction/) is especially important:
+
+> There is no membership in the IETF. Anyone can participate by signing up to a working group mailing list (more on that below), or registering for an IETF meeting. All IETF participants are considered volunteers and expected to participate as individuals, including those paid to participate.
+
+It's not a protocol owned by a company.
+It's not a protocol owned by lawyers.
+
+[Join the mailing list](https://www.ietf.org/mailman/listinfo/moq).
+
+# What's missing?
+
+Okay cool, so hopefully I sold you on MoQ.
+Why can't you use it today to replace HLS/DASH?
+
+1. **It's not done yet**: The IETF is many things, but fast is not one of them.
+2. **Cost**: QUIC is a new protocol that has yet to be fully optimized to match TCP. It's possible and apparently Google is [near parity](https://conferences.sigcomm.org/sigcomm/2020/files/slides/epiq/0%20QUIC%20and%20HTTP_3%20CPU%20Performance.pdf).
+3. **Support**: Your favorite language/library/cdn/cloud/browser might not even provide HTTP/3 support yet, let alone WebTransport or QUIC.
+4. **Features**: Somebody has to reimplement all of the annoying HLS/DASH features like DRM and server-side advertisements....
+5. **VoD**: MoQ is currently live only. HLS/DASH works great for VoD, why replace it?
+
+We'll get there eventually.
+
+Feel free to use our [Rust](https://github.com/kixelated/moq-rs) or [Typescript](https://github.com/kixelated/moq-js) implementations if you want to experiment.
+Join the [Discord](https://discord.gg/FCYF3p99mr) if you want to help!
+
+Written by [@kixelated](https://github.com/kixelated).
+
+
diff --git a/src/pages/blog/replacing-webrtc.mdx b/src/pages/blog/replacing-webrtc.mdx
new file mode 100644
index 0000000..c98daf1
--- /dev/null
+++ b/src/pages/blog/replacing-webrtc.mdx
@@ -0,0 +1,321 @@
+---
+layout: "@/layouts/global.astro"
+title: Replacing WebRTC
+author: kixelated
+description: The long path to use something else for real-time media.
+cover: "/blog/replacing-webrtc/artifact.png"
+date: 2023-10-10
+---
+
+# Replacing WebRTC
+
+The long path to use _something else_ for real-time media.
+
+## tl;dr
+
+If you primarily use WebRTC for...
+
+- **real-time media**: it will take a while to replace WebRTC; we're working on it.
+- **data channels**: WebTransport is amazing and _actually_ works.
+- **peer-to-peer**: you're stuck with WebRTC for the foreseeable future.
+
+## Disclaimer
+
+I spent almost two years building/optimizing a partial WebRTC stack @ Twitch using [pion](https://github.com/pion/webrtc).
+Our use-case was quite custom and we ultimately scrapped it, but your mileage may vary.
+
+## Why WebRTC?
+
+Google released WebRTC in 2011 as a way of fixing a very specific problem:
+
+> How do we build Google Meet?
+
+Back then, the web was a very different place.
+Flash was the only way to do live media and it was a _mess_.
+HTML5 video was primarily for pre-recorded content.
+It personally took me until 2015 to write a [HTML5 player for Twitch](https://reddit.com/r/Twitch/comments/3hqfkw/the_csgo_client_embeds_the_twitch_html5_player/) using [MSE](https://developer.mozilla.org/en-US/docs/Web/API/Media_Source_Extensions_API), and we're still talking 5+ seconds of latency on a good day.
+
+Transmitting video over the internet _in real-time_ is hard.
+
+You need a tight coupling between the video encoding and the network to avoid any form of queuing, which adds latency.
+This effectively rules out TCP and forces you to use UDP.
+But now you also need a video encoder/decoder that can deal with packet loss without spewing artifacts everywhere.
+
+
+ 
+
+ [Source](https://flashphoner.com/10-important-webrtc-streaming-metrics-and-configuring-prometheus-grafana-monitoring/).
+ Example of artifacts caused by packet loss.
+
+
+
+Google (correctly) determined that it would be impossible to solve these problems piecewise with new web standards.
+The approach instead was to create [libwebrtc](https://webrtc.googlesource.com/src/), the de facto WebRTC implementation that still ships with all browsers.
+It does everything, from networking to video encoding/decoding to data transfer, and it does it remarkably well.
+It's actually quite a feat of software engineering, _especially_ the part where Google managed to convince Apple/Mozilla to embed a full media/networking stack into their browsers.
+
+My favorite part about WebRTC is that it manages to leverage existing standards.
+WebRTC is not really a protocol, but rather a collection of protocols: [ICE](https://datatracker.ietf.org/doc/html/rfc8445), [STUN](https://datatracker.ietf.org/doc/html/rfc5389), [TURN](https://datatracker.ietf.org/doc/html/rfc5766), [DTLS](https://datatracker.ietf.org/doc/html/rfc6347), [RTP/RTCP](https://datatracker.ietf.org/doc/html/rfc3550), [SRTP](https://datatracker.ietf.org/doc/html/rfc3711), [SCTP](https://datatracker.ietf.org/doc/html/rfc4960), [SDP](https://datatracker.ietf.org/doc/html/rfc4566), [mDNS](https://datatracker.ietf.org/doc/html/rfc6762), etc.
+Throw a [Javascript API](https://www.w3.org/TR/webrtc/) on top of these and you have WebRTC.
+
+
+ 
+ [Source](https://hpbn.co/webrtc/). That's a lot of protocols layered on top of each other.
+
+
+## Why not WebRTC?
+
+I wouldn't be writing this blog post if WebRTC was perfect.
+The core issue is that WebRTC is not a protocol; it's a monolith.
+
+WebRTC does a lot of things, so let's break it down piece by piece:
+
+- [Media](#media): a full capture/encoding/networking/rendering pipeline.
+- [Data](#data): reliable/unreliable messages.
+- [P2P](#p2p): peer-to-peer connectability.
+- [SFU](#sfu): a relay that selectively forwards media.
+
+### Media
+
+The WebRTC media stack is designed for conferencing and does an amazing job at it.
+The problems start when you try to use it for anything else.
+
+My final project at Twitch was to reduce latency by replacing HLS with WebRTC for delivery.
+This seems like a no-brainer at first, but it quickly turned into [death by a thousand cuts](/blog/distribution-at-twitch).
+The biggest issue was that the user experience was just terrible.
+Twitch doesn't need the same aggressive latency as Google Meet, but WebRTC is hard-coded to compromise on quality.
+
+In general, it's quite difficult to customize WebRTC outside of a few configurable modes.
+It's a black box that you turn on, and if it works it works.
+And if it doesn't work, then you have to deal with the pain that is [forking libwebrtc](https://github.com/webrtc-sdk/libwebrtc)... or just hope Google fixes it for you.
+
+The protocol has some wiggle room and I really enjoyed my time tinkering with [pion](https://github.com/pion/webrtc).
+But you're ultimately bound by the browser implementation, unless you don't need web support, in which case you don't need WebRTC.
+
+### Data
+
+WebRTC also has a data channel API, which is particularly useful because [until recently](#webtransport), it's been the only way to send/receive "unreliable" messages from a browser.
+In fact, many companies use WebRTC data channels to avoid the WebRTC media stack (ex. Zoom).
+
+I went down this path too, attempting to send each video frame as an unreliable message, but it didn't work due to fundamental flaws with [SCTP](https://www.rfc-editor.org/rfc/rfc9260.html).
+I won't go into detail in this post, but I eventually hacked "datagram" support into SCTP by breaking frames into unreliable messages below the MTU size.
+
+Finally! UDP\* in the browser, but at what cost:
+
+- a convoluted handshake that takes at least 10 (!) round trips.
+- 2x the packets, because libsctp immediately ACKs every "datagram".
+- a custom SCTP implementation, which means the browser can't send "datagrams".
+
+Oof.
+
+### P2P
+
+The best and worst part about WebRTC is that it supports peer-to-peer.
+
+The [ICE handshake](https://developer.mozilla.org/en-US/docs/Web/API/WebRTC_API/Connectivity) is extremely complicated, even from the [application's point of view](https://developer.mozilla.org/en-US/docs/Web/API/WebRTC_API/Signaling_and_video_calling).
+Without going into detail, there's an explosion of permutations that you need to handle based on the network topology.
+Some networks block P2P (ex. symmetric NATs) while others outright block UDP, forcing you to use a TURN server a [non-insignificant amount of the time](https://twitter.com/HCornflower/status/894600051506515968).
+
+Most conferencing solutions are client-server anyway, relying on their own private network instead of public transit (aka a CDN).
+However, the server is still forced to perform the complicated ICE handshake, which has major architectural ramifications, but I'll save that for another blog post.
+
+Note that there are rumblings of [P2P WebTransport](https://w3c.github.io/p2p-webtransport/) and [P2P QUIC](https://datatracker.ietf.org/doc/draft-seemann-quic-nat-traversal/), but I wouldn't hold my breath.
+
+### SFU
+
+Last but not least, WebRTC scales using SFUs (Selective Forwarding Units).
+
+
+ 
+
+ [Source](https://blog.livekit.io/scaling-webrtc-with-distributed-mesh/). Participants send to a central server,
+ rather than directly to each other.
+
+
+
+The problem with SFUs is subtle: they're custom.
+
+It requires a lot of business logic to determine _where_ to forward packets.
+A single server like the one in that diagram won't scale, nor will all of the participants be located in the same geo.
+Each SFU needs to be made aware of the network topology and the location of each participant _somehow_.
+
+Additionally, a good SFU will avoid dropping packets based on dependencies, otherwise you waste bandwidth on undecodable packets.
+Unfortunately, determining this requires parsing each RTP packet on a _per-codec_ basis.
+For example, here's a [h.264 depacketizer](https://webrtc.googlesource.com/src/+/refs/heads/main/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.cc) for libwebrtc.
+
+But the biggest issue at Twitch was that SFUs share very little in common with CDNs.
+One team is optimizing WebRTC, another team is optimizing HTTP, and they're not talking to each other.
+
+This is why HLS/DASH uses HTTP instead: **economies of scale**
+
+# Replacing WebRTC
+
+Okay enough ranting about what's wrong, let's fix it.
+
+First off, **WebRTC is not going anywhere**. It does a fantastic job at what it was designed for: conferencing.
+It will take a long time before anything will reach feature/latency parity with WebRTC.
+
+Before you can replace **Web**RTC, you need **Web**Support.
+Fortunately, we now have **Web**Codecs and **Web**Transport.
+
+## WebCodecs
+
+[WebCodecs](https://developer.mozilla.org/en-US/docs/Web/API/WebCodecs_API) is a new API for encoding/decoding media in the browser.
+It's remarkably simple:
+
+1. Capture input via [canvas](https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API) or a [media device](https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia).
+2. [VideoEncoder](https://developer.mozilla.org/en-US/docs/Web/API/VideoEncoder): Input raw frames, output encoded frames.
+3. Transfer those frames somehow. (ex. [WebTransport](#webtransport))
+4. [VideoDecoder](https://developer.mozilla.org/en-US/docs/Web/API/VideoDecoder): Input encoded frames, output raw frames.
+5. Render output via [canvas](https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API) or just marvel at the pixel data.
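+
+Steps 1 and 2 look something like this minimal sketch (`MediaStreamTrackProcessor` is Chromium-only at the time of writing, and `send` is a hypothetical stand-in for step 3):
+
+```ts
+declare function send(chunk: EncodedVideoChunk): void; // step 3 goes here
+
+const encoder = new VideoEncoder({
+	output: (chunk) => send(chunk),
+	error: (err) => console.error(err),
+});
+
+encoder.configure({
+	codec: "avc1.42E01E", // h.264 Baseline; illustrative
+	width: 1280,
+	height: 720,
+	bitrate: 3_000_000,
+	latencyMode: "realtime",
+});
+
+// Step 1: capture raw frames from the camera and feed the encoder.
+const media = await navigator.mediaDevices.getUserMedia({ video: true });
+const [track] = media.getVideoTracks();
+const reader = new MediaStreamTrackProcessor({ track }).readable.getReader();
+
+for (;;) {
+	const { value: frame, done } = await reader.read();
+	if (done) break;
+	encoder.encode(frame);
+	frame.close(); // raw frames are scarce; release them promptly
+}
+```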
+
+The catch is that the application is responsible for all timing.
+That means you need to choose when to render each frame via [requestAnimationFrame](https://developer.mozilla.org/en-US/docs/Web/API/window/requestAnimationFrame).
+In fact, you need to choose when to render each audio _sample_ via [AudioWorklet](https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet).
+
+The upside is that now your web application gets full control of how to render media.
+It's now possible to implement WebRTC-like behavior, like temporarily freezing video and desyncing A/V.
+
+Check [caniuse](https://caniuse.com/webcodecs) for current browser support.
+
+## WebTransport
+
+[WebTransport](https://developer.mozilla.org/en-US/docs/Web/API/WebTransport_API) is a new API for transmitting data over the network.
+Think of it like WebSockets, but with a few key differences:
+
+- [QUIC](https://www.rfc-editor.org/rfc/rfc9000.html) not TCP.
+- [Reliable streams](https://developer.mozilla.org/en-US/docs/Web/API/WebTransport_API#reliable_transmission_via_streams) that are delivered in order.
+- **Semi-reliable streams** by closing a stream (with an error code) to drop the tail.
+- [Unreliable datagrams](https://developer.mozilla.org/en-US/docs/Web/API/WebTransport/datagrams) that may be dropped during congestion.
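+
+A minimal sketch of the API surface (the URL is a placeholder):
+
+```ts
+const wt = new WebTransport("https://relay.example.com:4443/session");
+await wt.ready;
+
+// Reliable, ordered bytes within a single stream.
+const stream = await wt.createUnidirectionalStream();
+const writer = stream.getWriter();
+await writer.write(new TextEncoder().encode("delivered in order"));
+await writer.close();
+
+// Unreliable datagrams; may be dropped during congestion.
+const datagrams = wt.datagrams.writable.getWriter();
+await datagrams.write(new TextEncoder().encode("delivered, maybe"));
+```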
+
+QUIC has too many benefits to enumerate, but some highlights:
+
+- Fully encrypted
+- Congestion controlled (even datagrams)
+- Independent streams (no head-of-line blocking)
+- 1-RTT handshake
+- Multiplexed over a single UDP port
+- Transparent network migration (ex. switching from Wifi to LTE)
+- Used for HTTP/3
+
+That last one is surprisingly important: WebTransport will share all of the optimizations that HTTP/3 receives.
+A HTTP/3 server can simultaneously serve multiple WebTransport sessions and HTTP requests over the same connection.
+
+Check [caniuse](https://caniuse.com/webtransport) for current browser support.
+Use my [Rust library](https://docs.rs/webtransport-quinn/latest/webtransport_quinn/) for servers and native clients!
+
+## But how?
+
+Okay, so we have WebCodecs and WebTransport, but are they actually useful?
+
+I alluded to the secret behind latency earlier: avoiding queues.
+Queuing can occur at any point in the media pipeline.
+
+| Capture/Encode | Send/Receive | Decode/Render |
+| :------------: | :----------: | :-----------: |
+| --> | --> | --> |
+
+Let's start with the easy one.
+[WebCodecs](#webcodecs) allows you to avoid queuing almost entirely.
+
+| Capture/Encode | Send/Receive | Decode/Render |
+| :------------: | :----------: | :-----------: |
+| **WebCodecs** | ? | **WebCodecs** |
+
+The tricky part is the bit in the middle, the network.
+It's not as simple as throwing your hands into the air and proclaiming "UDP has no queues!"
+
+### The Internet of Queues
+
+The internet is a [series of tubes](https://en.wikipedia.org/wiki/Series_of_tubes).
+You put packets in one end and they eventually come out of the other end, kinda.
+This section will get an entire blog post in the future, but until then, let's over-simplify things.
+
+Every packet you send will fight with other packets on the internet.
+
+- If routers have sufficient throughput, **packets arrive on time**.
+- If routers have limited throughput, **packets will be queued**.
+- If those queues are full, **packets will be dropped**.
+
+There can be random packet loss, but 99% of the time we care about loss due to queuing.
+Note that even datagrams may be queued by the network; a firehose of packets is never the answer.
+
+### Detecting Queuing
+
+The goal of congestion control is to detect queuing and back off.
+
+Different congestion control algorithms use different signals to detect queuing.
+This is a gross oversimplification of a topic with an immense amount of research, but here's a rough breakdown:
+
+| Signal | Description | Latency | Examples |
+| :----------: | :---------------------------------------------------- | :-----------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------: |
+| Packet Loss | Wait until the queue is full and packets are dropped. | [High](https://en.wikipedia.org/wiki/Bufferbloat) | [Reno](https://en.wikipedia.org/wiki/TCP_congestion_control), [CUBIC](https://en.wikipedia.org/wiki/CUBIC_TCP) |
+| ACK Delay | Indirectly measure the queue size via ACK RTT. | Medium | [BBR](https://research.google/pubs/pub45646/), [COPA](https://web.mit.edu/copa/) |
+| Packet Delay | Indirectly measure the queue size via packet RTT. | Low | [GCC](https://datatracker.ietf.org/doc/html/draft-ietf-rmcat-gcc-02), [SCReAM](https://github.com/EricssonResearch/scream) |
+| ECN | Get told by the router to back off. | None\* | [L4S](https://datatracker.ietf.org/doc/rfc9330/) |
+
+There's no single "best" congestion control algorithm; it depends on your use-case, network, and target latency.
+But this is one area where WebRTC has an advantage thanks to [transport-wide-cc](https://webrtc.googlesource.com/src/+/refs/heads/main/docs/native-code/rtp-hdrext/transport-wide-cc-02/README.md).
+
+### Reducing Bitrate
+
+Once you detect queuing, the application needs to send fewer bytes.
+
+In some situations we can just reduce the encoder bitrate, however:
+
+- This only applies to future frames.
+- We don't want one viewer to degrade the experience for all.
+- It's too expensive to encode on a per-viewer basis.
+
+So basically, we have to drop encoded media in response to congestion.
+
+This is the fundamental problem with TCP.
+Once you queue data on a TCP socket, it can't be undone without closing the connection.
+You can't put the toothpaste back in the tube.
+
+
+ 
+
+ [Source](https://knowyourmeme.com/memes/shitting-toothpaste-pooping-toothpaste). You earned a meme for making it
+ this far.
+
+
+
+However, there are actually quite a few ways of dropping media with [WebTransport](#webtransport):
+
+1. Use datagrams and choose which packets to transmit. (like WebRTC)
+2. Use QUIC streams and close them to stop transmissions, as sketched below. (like [RUSH](https://www.ietf.org/archive/id/draft-kpugin-rush-00.html))
+3. Use QUIC streams and prioritize them. (like [Warp](https://www.youtube.com/watch?v=PncdrMPVaNc))
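+
+For a taste of option 2, here's a minimal sketch: one QUIC stream per group of frames, aborted if it misses a deadline (the deadline and abort reason are illustrative):
+
+```ts
+async function sendGroup(wt: WebTransport, data: Uint8Array, deadlineMs: number) {
+	const stream = await wt.createUnidirectionalStream();
+	const writer = stream.getWriter();
+
+	// Stop (re)transmitting if the group can no longer arrive in time.
+	const timeout = setTimeout(() => {
+		writer.abort("deadline exceeded").catch(() => {}); // ignore if already closed
+	}, deadlineMs);
+
+	try {
+		await writer.write(data);
+		await writer.close();
+	} finally {
+		clearTimeout(timeout);
+	}
+}
+```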
+
+I'm biased because I made the 3rd one.
+WebTransport's [sendOrder](https://www.w3.org/TR/webtransport/#dom-webtransportsendstreamoptions-sendorder) can be used to instruct the QUIC stack what should be sent during congestion.
+But that deserves an entire blog post on its own.
+
+# Replacing WebRTC
+
+But to actually replace WebRTC, we need a standard. Anybody can make their own UDP-based protocol (_and they do_), using this new web tech (_and they will_).
+
+What sets [Media over QUIC](https://datatracker.ietf.org/wg/moq/about/) apart is that we're doing it through the IETF, the same organization that standardized WebRTC... and virtually every internet protocol.
+
+It's going to take years.
+It's going to take a lot of idiots like myself who want to replace WebRTC.
+It's going to take a lot of companies who are willing to bet on a new standard.
+
+And there are major flaws with both **WebCodecs** and **WebTransport** that still need to be addressed before we'll ever reach WebRTC parity.
+To name a few:
+
+- We need better [congestion control](https://www.w3.org/TR/webtransport/#dom-webtransportoptions-congestioncontrol) in browsers.
+- We need something like [transport-wide-cc](https://webrtc.googlesource.com/src/+/refs/heads/main/docs/native-code/rtp-hdrext/transport-wide-cc-02/README.md) in QUIC: [like this proposal](https://www.ietf.org/archive/id/draft-smith-quic-receive-ts-00.html)
+- We need echo cancellation in [WebAudio](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API), which might be possible?
+- We may need [FEC](https://en.wikipedia.org/wiki/Error_correction_code#Forward_error_correction) in QUIC: [like this proposal](https://datatracker.ietf.org/doc/draft-michel-quic-fec/)
+- We may need more encoding options, like non-reference frames or SVC.
+- Oh yeah and full browser support: [WebCodecs](https://caniuse.com/webcodecs) - [WebTransport](https://caniuse.com/webtransport)
+
+## So yeah...
+
+Written by [@kixelated](https://github.com/kixelated).
+Hit me up on [Discord](https://discord.gg/FCYF3p99mr) if you want to help!
+
+Tune in for next week's episode: **Replacing HLS/DASH** and then **Replacing RTMP**.
+
+
diff --git a/src/pages/blog/to-wasm.mdx b/src/pages/blog/to-wasm.mdx
new file mode 100644
index 0000000..14a6d17
--- /dev/null
+++ b/src/pages/blog/to-wasm.mdx
@@ -0,0 +1,174 @@
+---
+layout: "@/layouts/global.astro"
+title: To WASM, or not to WASM
+author: kixelated
+description: Have our benevolent W3C overlords allowed us to use Rust in the browser yet?
+cover: "/blog/to-wasm/duck.jpeg"
+date: 2024-10-24
+---
+
+# To WASM, or not to WASM
+
+I'm losing sleep over whether the web client should be written in Rust or TypeScript.
+I need your opinion, my beautiful little rubber duckies.
+
+
+ 
+ you irl
+
+
+## Frontend
+
+But first, I'm going to spew my own opinion.
+The UI layer will absolutely be written in TypeScript using a flavor-of-the-month web framework.
+I'm not going to use [Yew](https://yew.rs/) or any other React clone written in Rust.
+
+Why?
+It's pretty simple, I need frontend contributors.
+I don't think it's fair to ask a frontend savant to learn Rust and deal with the restrictions imposed by the language.
+Unpopular opinion: a UI should be flashy and not _safe_.
+
+Additionally, JavaScript web frameworks are more mature and used in production.
+I'm not suggesting that you can't use a Rust frontend library, just that it won't be nearly as polished or feature complete.
+
+"@kixelated ur dumb" if you disagree.
+
+My plan is to create a `` [custom element](https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_custom_elements) that abstracts away the underlying implementation; similar to the `