From a3503d8e21673f31b551779f04196a413c3556f3 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 13:38:01 +0000 Subject: [PATCH 1/8] Add interactive turn detection visualizer component Co-Authored-By: Dan Ince --- .../components/TurnDetectionVisualizer.tsx | 495 ++++++++++++++++++ .../universal-streaming/turn-detection.mdx | 2 + 2 files changed, 497 insertions(+) create mode 100644 fern/assets/components/TurnDetectionVisualizer.tsx diff --git a/fern/assets/components/TurnDetectionVisualizer.tsx b/fern/assets/components/TurnDetectionVisualizer.tsx new file mode 100644 index 00000000..113a998f --- /dev/null +++ b/fern/assets/components/TurnDetectionVisualizer.tsx @@ -0,0 +1,495 @@ +"use client"; +import * as React from "react"; + +const SPEECH_SEGMENTS = [ + { start: 0.5, end: 2.8, label: "Hello, I'd like to check on my order status...", confidence: 0.85 }, + { start: 4.2, end: 7.1, label: "um... it was placed last Tuesday I think", confidence: 0.35 }, + { start: 9.0, end: 11.5, label: "The order number is 4-5-7-2", confidence: 0.92 }, +]; + +const TOTAL_DURATION = 14; +const WAVEFORM_POINTS = 280; + +function generateWaveform(segments: typeof SPEECH_SEGMENTS): number[] { + const points: number[] = []; + for (let i = 0; i < WAVEFORM_POINTS; i++) { + const time = (i / WAVEFORM_POINTS) * TOTAL_DURATION; + let amplitude = 0; + for (const seg of segments) { + if (time >= seg.start && time <= seg.end) { + const segProgress = (time - seg.start) / (seg.end - seg.start); + const envelope = Math.sin(segProgress * Math.PI); + const noise = Math.sin(time * 47) * 0.3 + Math.sin(time * 123) * 0.2 + Math.sin(time * 67) * 0.15; + amplitude = envelope * (0.5 + noise * 0.5); + amplitude = Math.max(0.08, Math.min(1, amplitude)); + break; + } + } + points.push(amplitude); + } + return points; +} + +interface EotMarker { + time: number; + type: "semantic" | "acoustic"; + segmentIndex: number; +} + +function computeEotMarkers( + confidenceThreshold: number, + minSilenceWhenConfident: number, + maxTurnSilence: number +): EotMarker[] { + const markers: EotMarker[] = []; + for (let i = 0; i < SPEECH_SEGMENTS.length; i++) { + const seg = SPEECH_SEGMENTS[i]; + const speechEnd = seg.end; + const minSilenceSec = minSilenceWhenConfident / 1000; + const maxSilenceSec = maxTurnSilence / 1000; + + if (seg.confidence >= confidenceThreshold) { + const eotTime = speechEnd + minSilenceSec; + if (eotTime <= TOTAL_DURATION) { + markers.push({ time: eotTime, type: "semantic", segmentIndex: i }); + } + } else { + const eotTime = speechEnd + maxSilenceSec; + if (eotTime <= TOTAL_DURATION) { + markers.push({ time: eotTime, type: "acoustic", segmentIndex: i }); + } + } + } + return markers; +} + +const PRESETS = { + aggressive: { confidence: 0.4, minSilence: 160, maxSilence: 400 }, + balanced: { confidence: 0.4, minSilence: 400, maxSilence: 1280 }, + conservative: { confidence: 0.7, minSilence: 800, maxSilence: 3600 }, +}; + +export function TurnDetectionVisualizer() { + const [isOpen, setIsOpen] = React.useState(false); + const [confidence, setConfidence] = React.useState(0.4); + const [minSilence, setMinSilence] = React.useState(400); + const [maxSilence, setMaxSilence] = React.useState(1280); + const [playbackTime, setPlaybackTime] = React.useState(-1); + const [isPlaying, setIsPlaying] = React.useState(false); + const animRef = React.useRef(null); + const startTimeRef = React.useRef(0); + + const waveform = React.useMemo(() => 
generateWaveform(SPEECH_SEGMENTS), []); + const eotMarkers = React.useMemo( + () => computeEotMarkers(confidence, minSilence, maxSilence), + [confidence, minSilence, maxSilence] + ); + + const startPlayback = () => { + setIsPlaying(true); + setPlaybackTime(0); + startTimeRef.current = performance.now(); + const animate = (now: number) => { + const elapsed = (now - startTimeRef.current) / 1000; + if (elapsed >= TOTAL_DURATION) { + setPlaybackTime(TOTAL_DURATION); + setIsPlaying(false); + return; + } + setPlaybackTime(elapsed); + animRef.current = requestAnimationFrame(animate); + }; + animRef.current = requestAnimationFrame(animate); + }; + + const stopPlayback = () => { + if (animRef.current) cancelAnimationFrame(animRef.current); + setIsPlaying(false); + setPlaybackTime(-1); + }; + + React.useEffect(() => { + return () => { + if (animRef.current) cancelAnimationFrame(animRef.current); + }; + }, []); + + const applyPreset = (preset: keyof typeof PRESETS) => { + const p = PRESETS[preset]; + setConfidence(p.confidence); + setMinSilence(p.minSilence); + setMaxSilence(p.maxSilence); + stopPlayback(); + }; + + if (!isOpen) { + return ( +

+ { + e.preventDefault(); + setIsOpen(true); + }} + style={{ + color: "var(--accent-9, #4f46e5)", + textDecoration: "underline", + cursor: "pointer", + fontWeight: 500, + }} + > + Open the interactive turn detection visualizer + {" "} + to see how these parameters affect end-of-turn detection. +

+ ); + } + + const containerStyle: React.CSSProperties = { + border: "1px solid var(--grayscale-a4, #e5e7eb)", + borderRadius: "12px", + padding: "24px", + backgroundColor: "var(--grayscale-2, #f9fafb)", + marginTop: "16px", + marginBottom: "16px", + }; + + const headerStyle: React.CSSProperties = { + display: "flex", + justifyContent: "space-between", + alignItems: "center", + marginBottom: "20px", + }; + + const titleStyle: React.CSSProperties = { + fontSize: "16px", + fontWeight: 600, + color: "var(--grayscale-12, #111827)", + margin: 0, + }; + + const closeBtnStyle: React.CSSProperties = { + background: "none", + border: "1px solid var(--grayscale-a4, #d1d5db)", + borderRadius: "6px", + padding: "4px 12px", + cursor: "pointer", + fontSize: "13px", + color: "var(--grayscale-11, #6b7280)", + }; + + const waveformContainerStyle: React.CSSProperties = { + position: "relative", + height: "180px", + backgroundColor: "var(--grayscale-1, #ffffff)", + borderRadius: "8px", + border: "1px solid var(--grayscale-a4, #e5e7eb)", + marginBottom: "20px", + overflow: "hidden", + }; + + const sliderContainerStyle: React.CSSProperties = { + display: "grid", + gridTemplateColumns: "1fr 1fr 1fr", + gap: "16px", + marginBottom: "20px", + }; + + const sliderGroupStyle: React.CSSProperties = { + display: "flex", + flexDirection: "column", + gap: "6px", + }; + + const sliderLabelStyle: React.CSSProperties = { + fontSize: "12px", + fontWeight: 500, + color: "var(--grayscale-11, #6b7280)", + }; + + const sliderValueStyle: React.CSSProperties = { + fontSize: "14px", + fontWeight: 600, + color: "var(--grayscale-12, #111827)", + }; + + const sliderStyle: React.CSSProperties = { + width: "100%", + accentColor: "var(--accent-9, #4f46e5)", + cursor: "pointer", + }; + + const presetBtnStyle = (active: boolean): React.CSSProperties => ({ + padding: "6px 14px", + border: active ? "2px solid var(--accent-9, #4f46e5)" : "1px solid var(--grayscale-a4, #d1d5db)", + borderRadius: "6px", + background: active ? "var(--accent-3, #eef2ff)" : "var(--grayscale-1, #ffffff)", + cursor: "pointer", + fontSize: "13px", + fontWeight: active ? 600 : 400, + color: active ? "var(--accent-11, #4338ca)" : "var(--grayscale-12, #111827)", + }); + + const playBtnStyle: React.CSSProperties = { + padding: "8px 20px", + border: "none", + borderRadius: "6px", + background: "var(--accent-9, #4f46e5)", + color: "#ffffff", + cursor: "pointer", + fontSize: "13px", + fontWeight: 500, + }; + + const legendItemStyle = (color: string): React.CSSProperties => ({ + display: "inline-flex", + alignItems: "center", + gap: "6px", + fontSize: "12px", + color: "var(--grayscale-11, #6b7280)", + }); + + const legendDotStyle = (color: string): React.CSSProperties => ({ + width: "10px", + height: "10px", + borderRadius: "50%", + backgroundColor: color, + display: "inline-block", + }); + + const activePreset = + confidence === PRESETS.aggressive.confidence && + minSilence === PRESETS.aggressive.minSilence && + maxSilence === PRESETS.aggressive.maxSilence + ? "aggressive" + : confidence === PRESETS.balanced.confidence && + minSilence === PRESETS.balanced.minSilence && + maxSilence === PRESETS.balanced.maxSilence + ? "balanced" + : confidence === PRESETS.conservative.confidence && + minSilence === PRESETS.conservative.minSilence && + maxSilence === PRESETS.conservative.maxSilence + ? 
"conservative" + : null; + + const timeToX = (time: number, width: number) => (time / TOTAL_DURATION) * width; + + const svgWidth = 700; + const svgHeight = 160; + const waveformTop = 20; + const waveformHeight = 80; + const barWidth = svgWidth / WAVEFORM_POINTS; + + return ( +
+
+

Turn Detection Visualizer

+ +
+ +
+ Presets: + {(["aggressive", "balanced", "conservative"] as const).map((p) => ( + + ))} +
+ +
+
+ +
+ + + + {SPEECH_SEGMENTS.map((seg, i) => { + const x1 = timeToX(seg.start, svgWidth); + const x2 = timeToX(seg.end, svgWidth); + return ( + + ); + })} + + {waveform.map((amp, i) => { + const x = i * barWidth; + const h = amp * waveformHeight * 0.9; + const y = waveformTop + (waveformHeight - h) / 2; + const time = (i / WAVEFORM_POINTS) * TOTAL_DURATION; + const inSpeech = SPEECH_SEGMENTS.some((s) => time >= s.start && time <= s.end); + const isPast = playbackTime >= 0 && time <= playbackTime; + let fill = inSpeech ? "var(--accent-7, #818cf8)" : "var(--grayscale-a4, #d1d5db)"; + if (isPast && inSpeech) fill = "var(--accent-9, #4f46e5)"; + if (isPast && !inSpeech) fill = "var(--grayscale-6, #9ca3af)"; + return ( + + ); + })} + + {SPEECH_SEGMENTS.map((seg, i) => { + const minSilenceSec = minSilence / 1000; + const maxSilenceSec = maxSilence / 1000; + const silenceEnd = seg.confidence >= confidence ? seg.end + minSilenceSec : seg.end + maxSilenceSec; + const silenceBarStart = timeToX(seg.end, svgWidth); + const silenceBarEnd = timeToX(Math.min(silenceEnd, TOTAL_DURATION), svgWidth); + const isSemanticPath = seg.confidence >= confidence; + const color = isSemanticPath ? "#22c55e" : "#f59e0b"; + return ( + + ); + })} + + {eotMarkers.map((marker, i) => { + const x = timeToX(marker.time, svgWidth); + const color = marker.type === "semantic" ? "#22c55e" : "#f59e0b"; + const show = playbackTime < 0 || playbackTime >= marker.time; + if (!show) return null; + return ( + + + + + EoT + + + ); + })} + + {SPEECH_SEGMENTS.map((seg, i) => { + const x = timeToX(seg.start, svgWidth); + const confColor = seg.confidence >= confidence ? "#22c55e" : "#f59e0b"; + return ( + + + conf: {seg.confidence.toFixed(2)} + + + + ); + })} + + {playbackTime >= 0 && ( + + )} + + {Array.from({ length: TOTAL_DURATION + 1 }).map((_, i) => { + const x = timeToX(i, svgWidth); + return ( + + + {i}s + + + ); + })} + +
+ +
+ + Semantic EoT (confidence met + min silence) + + + Acoustic EoT (max silence exceeded) + +
+ +
+
+ end_of_turn_confidence_threshold + {confidence.toFixed(2)} + setConfidence(parseFloat(e.target.value))} + style={sliderStyle} + /> + 0 = instant EoT on silence, 1 = disable semantic +
+
+ min_end_of_turn_silence_when_confident + {minSilence} ms + setMinSilence(parseInt(e.target.value))} + style={sliderStyle} + /> + Silence required when confidence is above threshold +
+
+ max_turn_silence + {maxSilence} ms + setMaxSilence(parseInt(e.target.value))} + style={sliderStyle} + /> + Max silence before EoT regardless of confidence +
+
+ +
+

+ The example above shows three speech segments with different confidence scores. + Drag the sliders to see how adjusting each parameter changes when end-of-turn (EoT) is detected. +

+

+ Semantic detection (green) triggers when confidence {"\u2265"} threshold and silence exceeds min_end_of_turn_silence_when_confident.{" "} + Acoustic detection (amber) triggers when confidence {"<"} threshold and silence exceeds max_turn_silence. +

+
+
+ ); +} diff --git a/fern/pages/02-speech-to-text/universal-streaming/turn-detection.mdx b/fern/pages/02-speech-to-text/universal-streaming/turn-detection.mdx index 0163173b..49cd7dd3 100644 --- a/fern/pages/02-speech-to-text/universal-streaming/turn-detection.mdx +++ b/fern/pages/02-speech-to-text/universal-streaming/turn-detection.mdx @@ -74,6 +74,8 @@ These configurations are just starting points and can be fine-tuned based on you ### How it works + + The turn detection model uses a neural network to detect when someone has finished speaking. It has two ways to detect end-of-turn: From f935ab15209037670777214dc9ad92e312ace098 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 13:45:33 +0000 Subject: [PATCH 2/8] Add missing import for TurnDetectionVisualizer component Co-Authored-By: Dan Ince --- .../02-speech-to-text/universal-streaming/turn-detection.mdx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fern/pages/02-speech-to-text/universal-streaming/turn-detection.mdx b/fern/pages/02-speech-to-text/universal-streaming/turn-detection.mdx index 49cd7dd3..5d4571f0 100644 --- a/fern/pages/02-speech-to-text/universal-streaming/turn-detection.mdx +++ b/fern/pages/02-speech-to-text/universal-streaming/turn-detection.mdx @@ -3,6 +3,8 @@ title: "Turn detection" description: "Intelligent turn detection with Streaming Speech-to-Text" --- +import { TurnDetectionVisualizer } from "../../../assets/components/TurnDetectionVisualizer"; + ### Overview AssemblyAI's turn detection model uses a neural network to detect when someone has finished speaking. Unlike traditional voice activity detection that only listens for silence, our model understands the meaning and flow of speech to make better decisions about when a turn has ended. From 6876c33d0a53c4f5069ced34ca6d167cd2c87b9f Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 13:56:57 +0000 Subject: [PATCH 3/8] Simplify visualizer: cleaner layout, less clutter, responsive grid Co-Authored-By: Dan Ince --- .../components/TurnDetectionVisualizer.tsx | 687 +++++++----------- 1 file changed, 269 insertions(+), 418 deletions(-) diff --git a/fern/assets/components/TurnDetectionVisualizer.tsx b/fern/assets/components/TurnDetectionVisualizer.tsx index 113a998f..78f91888 100644 --- a/fern/assets/components/TurnDetectionVisualizer.tsx +++ b/fern/assets/components/TurnDetectionVisualizer.tsx @@ -1,495 +1,346 @@ "use client"; import * as React from "react"; -const SPEECH_SEGMENTS = [ - { start: 0.5, end: 2.8, label: "Hello, I'd like to check on my order status...", confidence: 0.85 }, - { start: 4.2, end: 7.1, label: "um... 
it was placed last Tuesday I think", confidence: 0.35 }, - { start: 9.0, end: 11.5, label: "The order number is 4-5-7-2", confidence: 0.92 }, +const SEGMENTS = [ + { start: 0.5, end: 2.8, confidence: 0.85 }, + { start: 4.2, end: 7.1, confidence: 0.35 }, + { start: 9.0, end: 11.5, confidence: 0.92 }, ]; -const TOTAL_DURATION = 14; -const WAVEFORM_POINTS = 280; +const DURATION = 14; -function generateWaveform(segments: typeof SPEECH_SEGMENTS): number[] { - const points: number[] = []; - for (let i = 0; i < WAVEFORM_POINTS; i++) { - const time = (i / WAVEFORM_POINTS) * TOTAL_DURATION; - let amplitude = 0; - for (const seg of segments) { - if (time >= seg.start && time <= seg.end) { - const segProgress = (time - seg.start) / (seg.end - seg.start); - const envelope = Math.sin(segProgress * Math.PI); - const noise = Math.sin(time * 47) * 0.3 + Math.sin(time * 123) * 0.2 + Math.sin(time * 67) * 0.15; - amplitude = envelope * (0.5 + noise * 0.5); - amplitude = Math.max(0.08, Math.min(1, amplitude)); - break; - } - } - points.push(amplitude); +const PRESETS = { + aggressive: { confidence: 0.4, minSilence: 160, maxSilence: 400 }, + balanced: { confidence: 0.4, minSilence: 400, maxSilence: 1280 }, + conservative: { confidence: 0.7, minSilence: 800, maxSilence: 3600 }, +}; + +function getActivePreset(c: number, min: number, max: number): string | null { + for (const [name, p] of Object.entries(PRESETS)) { + if (c === p.confidence && min === p.minSilence && max === p.maxSilence) return name; } - return points; + return null; } -interface EotMarker { +interface EotResult { time: number; type: "semantic" | "acoustic"; - segmentIndex: number; + segIndex: number; } -function computeEotMarkers( - confidenceThreshold: number, - minSilenceWhenConfident: number, - maxTurnSilence: number -): EotMarker[] { - const markers: EotMarker[] = []; - for (let i = 0; i < SPEECH_SEGMENTS.length; i++) { - const seg = SPEECH_SEGMENTS[i]; - const speechEnd = seg.end; - const minSilenceSec = minSilenceWhenConfident / 1000; - const maxSilenceSec = maxTurnSilence / 1000; - - if (seg.confidence >= confidenceThreshold) { - const eotTime = speechEnd + minSilenceSec; - if (eotTime <= TOTAL_DURATION) { - markers.push({ time: eotTime, type: "semantic", segmentIndex: i }); - } +function computeEot(conf: number, minMs: number, maxMs: number): EotResult[] { + const results: EotResult[] = []; + for (let i = 0; i < SEGMENTS.length; i++) { + const seg = SEGMENTS[i]; + if (seg.confidence >= conf) { + const t = seg.end + minMs / 1000; + if (t <= DURATION) results.push({ time: t, type: "semantic", segIndex: i }); } else { - const eotTime = speechEnd + maxSilenceSec; - if (eotTime <= TOTAL_DURATION) { - markers.push({ time: eotTime, type: "acoustic", segmentIndex: i }); - } + const t = seg.end + maxMs / 1000; + if (t <= DURATION) results.push({ time: t, type: "acoustic", segIndex: i }); } } - return markers; + return results; } -const PRESETS = { - aggressive: { confidence: 0.4, minSilence: 160, maxSilence: 400 }, - balanced: { confidence: 0.4, minSilence: 400, maxSilence: 1280 }, - conservative: { confidence: 0.7, minSilence: 800, maxSilence: 3600 }, -}; +function SliderParam(props: { + label: string; + value: number; + displayValue: string; + min: number; + max: number; + step: number; + onChange: (v: number) => void; + parseValue: (s: string) => number; +}) { + return ( +
+
+ {props.label} + {props.displayValue} +
+ props.onChange(props.parseValue(e.target.value))} + style={{ width: "100%", accentColor: "var(--accent-9, #4f46e5)", cursor: "pointer" }} + /> +
+ ); +} export function TurnDetectionVisualizer() { const [isOpen, setIsOpen] = React.useState(false); const [confidence, setConfidence] = React.useState(0.4); const [minSilence, setMinSilence] = React.useState(400); const [maxSilence, setMaxSilence] = React.useState(1280); - const [playbackTime, setPlaybackTime] = React.useState(-1); - const [isPlaying, setIsPlaying] = React.useState(false); - const animRef = React.useRef(null); - const startTimeRef = React.useRef(0); - const waveform = React.useMemo(() => generateWaveform(SPEECH_SEGMENTS), []); const eotMarkers = React.useMemo( - () => computeEotMarkers(confidence, minSilence, maxSilence), + () => computeEot(confidence, minSilence, maxSilence), [confidence, minSilence, maxSilence] ); - const startPlayback = () => { - setIsPlaying(true); - setPlaybackTime(0); - startTimeRef.current = performance.now(); - const animate = (now: number) => { - const elapsed = (now - startTimeRef.current) / 1000; - if (elapsed >= TOTAL_DURATION) { - setPlaybackTime(TOTAL_DURATION); - setIsPlaying(false); - return; - } - setPlaybackTime(elapsed); - animRef.current = requestAnimationFrame(animate); - }; - animRef.current = requestAnimationFrame(animate); - }; - - const stopPlayback = () => { - if (animRef.current) cancelAnimationFrame(animRef.current); - setIsPlaying(false); - setPlaybackTime(-1); - }; - - React.useEffect(() => { - return () => { - if (animRef.current) cancelAnimationFrame(animRef.current); - }; - }, []); - - const applyPreset = (preset: keyof typeof PRESETS) => { - const p = PRESETS[preset]; - setConfidence(p.confidence); - setMinSilence(p.minSilence); - setMaxSilence(p.maxSilence); - stopPlayback(); - }; + const activePreset = getActivePreset(confidence, minSilence, maxSilence); if (!isOpen) { return (

{ - e.preventDefault(); - setIsOpen(true); - }} - style={{ - color: "var(--accent-9, #4f46e5)", - textDecoration: "underline", - cursor: "pointer", - fontWeight: 500, - }} + onClick={(e) => { e.preventDefault(); setIsOpen(true); }} + style={{ color: "var(--accent-9, #4f46e5)", textDecoration: "underline", cursor: "pointer", fontWeight: 500 }} > Open the interactive turn detection visualizer - {" "} - to see how these parameters affect end-of-turn detection. + + {" "}to see how parameters affect end-of-turn detection.

); } - const containerStyle: React.CSSProperties = { - border: "1px solid var(--grayscale-a4, #e5e7eb)", - borderRadius: "12px", - padding: "24px", - backgroundColor: "var(--grayscale-2, #f9fafb)", - marginTop: "16px", - marginBottom: "16px", - }; - - const headerStyle: React.CSSProperties = { - display: "flex", - justifyContent: "space-between", - alignItems: "center", - marginBottom: "20px", - }; - - const titleStyle: React.CSSProperties = { - fontSize: "16px", - fontWeight: 600, - color: "var(--grayscale-12, #111827)", - margin: 0, - }; - - const closeBtnStyle: React.CSSProperties = { - background: "none", - border: "1px solid var(--grayscale-a4, #d1d5db)", - borderRadius: "6px", - padding: "4px 12px", - cursor: "pointer", - fontSize: "13px", - color: "var(--grayscale-11, #6b7280)", - }; - - const waveformContainerStyle: React.CSSProperties = { - position: "relative", - height: "180px", - backgroundColor: "var(--grayscale-1, #ffffff)", - borderRadius: "8px", - border: "1px solid var(--grayscale-a4, #e5e7eb)", - marginBottom: "20px", - overflow: "hidden", - }; - - const sliderContainerStyle: React.CSSProperties = { - display: "grid", - gridTemplateColumns: "1fr 1fr 1fr", - gap: "16px", - marginBottom: "20px", - }; - - const sliderGroupStyle: React.CSSProperties = { - display: "flex", - flexDirection: "column", - gap: "6px", - }; - - const sliderLabelStyle: React.CSSProperties = { - fontSize: "12px", - fontWeight: 500, - color: "var(--grayscale-11, #6b7280)", - }; - - const sliderValueStyle: React.CSSProperties = { - fontSize: "14px", - fontWeight: 600, - color: "var(--grayscale-12, #111827)", - }; + const toX = (t: number) => (t / DURATION) * 100; - const sliderStyle: React.CSSProperties = { - width: "100%", - accentColor: "var(--accent-9, #4f46e5)", - cursor: "pointer", - }; - - const presetBtnStyle = (active: boolean): React.CSSProperties => ({ - padding: "6px 14px", - border: active ? "2px solid var(--accent-9, #4f46e5)" : "1px solid var(--grayscale-a4, #d1d5db)", - borderRadius: "6px", - background: active ? "var(--accent-3, #eef2ff)" : "var(--grayscale-1, #ffffff)", - cursor: "pointer", - fontSize: "13px", - fontWeight: active ? 600 : 400, - color: active ? "var(--accent-11, #4338ca)" : "var(--grayscale-12, #111827)", - }); - - const playBtnStyle: React.CSSProperties = { - padding: "8px 20px", - border: "none", - borderRadius: "6px", - background: "var(--accent-9, #4f46e5)", - color: "#ffffff", - cursor: "pointer", - fontSize: "13px", - fontWeight: 500, + const presetBtn = (name: "aggressive" | "balanced" | "conservative") => { + const isActive = activePreset === name; + return ( + + ); }; - const legendItemStyle = (color: string): React.CSSProperties => ({ - display: "inline-flex", - alignItems: "center", - gap: "6px", - fontSize: "12px", - color: "var(--grayscale-11, #6b7280)", - }); - - const legendDotStyle = (color: string): React.CSSProperties => ({ - width: "10px", - height: "10px", - borderRadius: "50%", - backgroundColor: color, - display: "inline-block", - }); - - const activePreset = - confidence === PRESETS.aggressive.confidence && - minSilence === PRESETS.aggressive.minSilence && - maxSilence === PRESETS.aggressive.maxSilence - ? "aggressive" - : confidence === PRESETS.balanced.confidence && - minSilence === PRESETS.balanced.minSilence && - maxSilence === PRESETS.balanced.maxSilence - ? "balanced" - : confidence === PRESETS.conservative.confidence && - minSilence === PRESETS.conservative.minSilence && - maxSilence === PRESETS.conservative.maxSilence - ? 
"conservative" - : null; - - const timeToX = (time: number, width: number) => (time / TOTAL_DURATION) * width; - - const svgWidth = 700; - const svgHeight = 160; - const waveformTop = 20; - const waveformHeight = 80; - const barWidth = svgWidth / WAVEFORM_POINTS; - return ( -
-
-

Turn Detection Visualizer

+
+
+ + Turn Detection Visualizer +
-
- Presets: - {(["aggressive", "balanced", "conservative"] as const).map((p) => ( - - ))} -
- -
+
+ {presetBtn("aggressive")} + {presetBtn("balanced")} + {presetBtn("conservative")}
-
- - - - {SPEECH_SEGMENTS.map((seg, i) => { - const x1 = timeToX(seg.start, svgWidth); - const x2 = timeToX(seg.end, svgWidth); - return ( - - ); - })} - - {waveform.map((amp, i) => { - const x = i * barWidth; - const h = amp * waveformHeight * 0.9; - const y = waveformTop + (waveformHeight - h) / 2; - const time = (i / WAVEFORM_POINTS) * TOTAL_DURATION; - const inSpeech = SPEECH_SEGMENTS.some((s) => time >= s.start && time <= s.end); - const isPast = playbackTime >= 0 && time <= playbackTime; - let fill = inSpeech ? "var(--accent-7, #818cf8)" : "var(--grayscale-a4, #d1d5db)"; - if (isPast && inSpeech) fill = "var(--accent-9, #4f46e5)"; - if (isPast && !inSpeech) fill = "var(--grayscale-6, #9ca3af)"; +
+
+ {SEGMENTS.map((seg, i) => { + const left = toX(seg.start); + const width = toX(seg.end) - left; + const isAbove = seg.confidence >= confidence; return ( - +
+
+
+ {"conf: " + seg.confidence} +
+
); })} - {SPEECH_SEGMENTS.map((seg, i) => { - const minSilenceSec = minSilence / 1000; - const maxSilenceSec = maxSilence / 1000; - const silenceEnd = seg.confidence >= confidence ? seg.end + minSilenceSec : seg.end + maxSilenceSec; - const silenceBarStart = timeToX(seg.end, svgWidth); - const silenceBarEnd = timeToX(Math.min(silenceEnd, TOTAL_DURATION), svgWidth); - const isSemanticPath = seg.confidence >= confidence; - const color = isSemanticPath ? "#22c55e" : "#f59e0b"; + {SEGMENTS.map((seg, i) => { + const isAbove = seg.confidence >= confidence; + const silenceMs = isAbove ? minSilence : maxSilence; + const silenceEnd = seg.end + silenceMs / 1000; + if (silenceEnd > DURATION) return null; + const startX = toX(seg.end); + const endX = toX(silenceEnd); + const color = isAbove ? "#22c55e" : "#f59e0b"; return ( - +
+
+
); })} - {eotMarkers.map((marker, i) => { - const x = timeToX(marker.time, svgWidth); - const color = marker.type === "semantic" ? "#22c55e" : "#f59e0b"; - const show = playbackTime < 0 || playbackTime >= marker.time; - if (!show) return null; + {eotMarkers.map((m, i) => { + const x = toX(m.time); + const color = m.type === "semantic" ? "#22c55e" : "#f59e0b"; + const label = m.type === "semantic" ? "semantic" : "acoustic"; return ( - - - - +
+
+
EoT - - - ); - })} - - {SPEECH_SEGMENTS.map((seg, i) => { - const x = timeToX(seg.start, svgWidth); - const confColor = seg.confidence >= confidence ? "#22c55e" : "#f59e0b"; - return ( - - - conf: {seg.confidence.toFixed(2)} - - - - ); - })} - - {playbackTime >= 0 && ( - - )} - - {Array.from({ length: TOTAL_DURATION + 1 }).map((_, i) => { - const x = timeToX(i, svgWidth); - return ( - - - {i}s - - +
+
+ {label} +
+
); })} - +
-
- - Semantic EoT (confidence met + min silence) +
+ + + Semantic EoT - - Acoustic EoT (max silence exceeded) + + + Acoustic EoT
-
-
- end_of_turn_confidence_threshold - {confidence.toFixed(2)} - setConfidence(parseFloat(e.target.value))} - style={sliderStyle} - /> - 0 = instant EoT on silence, 1 = disable semantic -
-
- min_end_of_turn_silence_when_confident - {minSilence} ms - setMinSilence(parseInt(e.target.value))} - style={sliderStyle} - /> - Silence required when confidence is above threshold -
-
- max_turn_silence - {maxSilence} ms - setMaxSilence(parseInt(e.target.value))} - style={sliderStyle} - /> - Max silence before EoT regardless of confidence -
+
+ + +
-
-

- The example above shows three speech segments with different confidence scores. - Drag the sliders to see how adjusting each parameter changes when end-of-turn (EoT) is detected. -

-

- Semantic detection (green) triggers when confidence {"\u2265"} threshold and silence exceeds min_end_of_turn_silence_when_confident.{" "} - Acoustic detection (amber) triggers when confidence {"<"} threshold and silence exceeds max_turn_silence. -

-
+

+ Drag the sliders to see how end-of-turn detection changes.{" "} + Semantic EoT triggers when confidence {"\u2265"} threshold after min_end_of_turn_silence_when_confident.{" "} + Acoustic EoT triggers when confidence {"<"} threshold after max_turn_silence. +

); } From 90b90d70f4b8e56d591ef4220d0e7b38b883f7c7 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 14:03:35 +0000 Subject: [PATCH 4/8] Fix text overflow: constrain content within component bounds Co-Authored-By: Dan Ince --- fern/assets/components/TurnDetectionVisualizer.tsx | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/fern/assets/components/TurnDetectionVisualizer.tsx b/fern/assets/components/TurnDetectionVisualizer.tsx index 78f91888..e7501626 100644 --- a/fern/assets/components/TurnDetectionVisualizer.tsx +++ b/fern/assets/components/TurnDetectionVisualizer.tsx @@ -56,7 +56,7 @@ function SliderParam(props: { return (
- {props.label} + {props.label} {props.displayValue}
@@ -336,10 +339,10 @@ export function TurnDetectionVisualizer() { />
-

+

Drag the sliders to see how end-of-turn detection changes.{" "} - Semantic EoT triggers when confidence {"\u2265"} threshold after min_end_of_turn_silence_when_confident.{" "} - Acoustic EoT triggers when confidence {"<"} threshold after max_turn_silence. + Semantic EoT triggers when confidence {"\u2265"} threshold after min_end_of_turn_silence_when_confident.{" "} + Acoustic EoT triggers when confidence {"<"} threshold after max_turn_silence.

); From dffecbe05c910b0e86161732a25087f04a422868 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 14:30:09 +0000 Subject: [PATCH 5/8] Remove sliders, add waveforms, fix turn continuation logic - Replace sliders with preset-only buttons (Aggressive/Balanced/Conservative) - Display parameter values as text instead of interactive sliders - Restore speech waveform visualization with confidence-based coloring - Fix EoT logic: when silence period extends into next speech segment, turn continues - In conservative mode, segment 2 (conf 0.35 < threshold 0.7) correctly shows turn continuing into segment 3 - Add 'turn continues' indicator between connected segments Co-Authored-By: Dan Ince --- .../components/TurnDetectionVisualizer.tsx | 318 ++++++++++-------- 1 file changed, 171 insertions(+), 147 deletions(-) diff --git a/fern/assets/components/TurnDetectionVisualizer.tsx b/fern/assets/components/TurnDetectionVisualizer.tsx index e7501626..7b1db8d5 100644 --- a/fern/assets/components/TurnDetectionVisualizer.tsx +++ b/fern/assets/components/TurnDetectionVisualizer.tsx @@ -9,19 +9,12 @@ const SEGMENTS = [ const DURATION = 14; -const PRESETS = { - aggressive: { confidence: 0.4, minSilence: 160, maxSilence: 400 }, - balanced: { confidence: 0.4, minSilence: 400, maxSilence: 1280 }, - conservative: { confidence: 0.7, minSilence: 800, maxSilence: 3600 }, +const PRESETS: Record = { + aggressive: { confidence: 0.4, minSilence: 160, maxSilence: 400, label: "Aggressive" }, + balanced: { confidence: 0.4, minSilence: 400, maxSilence: 1280, label: "Balanced" }, + conservative: { confidence: 0.7, minSilence: 800, maxSilence: 3600, label: "Conservative" }, }; -function getActivePreset(c: number, min: number, max: number): string | null { - for (const [name, p] of Object.entries(PRESETS)) { - if (c === p.confidence && min === p.minSilence && max === p.maxSilence) return name; - } - return null; -} - interface EotResult { time: number; type: "semantic" | "acoustic"; @@ -32,58 +25,50 @@ function computeEot(conf: number, minMs: number, maxMs: number): EotResult[] { const results: EotResult[] = []; for (let i = 0; i < SEGMENTS.length; i++) { const seg = SEGMENTS[i]; + const nextSeg = i < SEGMENTS.length - 1 ? SEGMENTS[i + 1] : null; if (seg.confidence >= conf) { const t = seg.end + minMs / 1000; - if (t <= DURATION) results.push({ time: t, type: "semantic", segIndex: i }); + if (t <= DURATION && (!nextSeg || t < nextSeg.start)) { + results.push({ time: t, type: "semantic", segIndex: i }); + } } else { const t = seg.end + maxMs / 1000; - if (t <= DURATION) results.push({ time: t, type: "acoustic", segIndex: i }); + if (t <= DURATION && (!nextSeg || t < nextSeg.start)) { + results.push({ time: t, type: "acoustic", segIndex: i }); + } } } return results; } -function SliderParam(props: { - label: string; - value: number; - displayValue: string; - min: number; - max: number; - step: number; - onChange: (v: number) => void; - parseValue: (s: string) => number; -}) { - return ( -
-
- {props.label} - {props.displayValue} -
- props.onChange(props.parseValue(e.target.value))} - style={{ width: "100%", accentColor: "var(--accent-9, #4f46e5)", cursor: "pointer" }} - /> -
- ); +function generateWaveformBars(start: number, end: number, seed: number): number[] { + const count = Math.max(8, Math.floor((end - start) * 12)); + const bars: number[] = []; + let s = seed; + for (let i = 0; i < count; i++) { + s = (s * 1103515245 + 12345) & 0x7fffffff; + const frac = i / count; + const envelope = Math.sin(frac * Math.PI) * 0.6 + 0.3; + const noise = (s % 100) / 100; + bars.push(Math.max(0.15, Math.min(1, envelope * (0.5 + noise * 0.5)))); + } + return bars; } export function TurnDetectionVisualizer() { const [isOpen, setIsOpen] = React.useState(false); - const [confidence, setConfidence] = React.useState(0.4); - const [minSilence, setMinSilence] = React.useState(400); - const [maxSilence, setMaxSilence] = React.useState(1280); + const [preset, setPreset] = React.useState("balanced"); + const config = PRESETS[preset]; const eotMarkers = React.useMemo( - () => computeEot(confidence, minSilence, maxSilence), - [confidence, minSilence, maxSilence] + () => computeEot(config.confidence, config.minSilence, config.maxSilence), + [config.confidence, config.minSilence, config.maxSilence] ); - const activePreset = getActivePreset(confidence, minSilence, maxSilence); + const waveforms = React.useMemo( + () => SEGMENTS.map((seg, i) => generateWaveformBars(seg.start, seg.end, (i + 1) * 7919)), + [] + ); if (!isOpen) { return ( @@ -102,32 +87,17 @@ export function TurnDetectionVisualizer() { const toX = (t: number) => (t / DURATION) * 100; - const presetBtn = (name: "aggressive" | "balanced" | "conservative") => { - const isActive = activePreset === name; - return ( - - ); - }; + const turnsNotEnded: Set = new Set(); + for (let i = 0; i < SEGMENTS.length; i++) { + const seg = SEGMENTS[i]; + const nextSeg = i < SEGMENTS.length - 1 ? SEGMENTS[i + 1] : null; + const isAbove = seg.confidence >= config.confidence; + const silenceMs = isAbove ? config.minSilence : config.maxSilence; + const eotTime = seg.end + silenceMs / 1000; + if (nextSeg && eotTime >= nextSeg.start) { + turnsNotEnded.add(i); + } + } return (
-
+
Turn Detection Visualizer @@ -163,10 +133,28 @@ export function TurnDetectionVisualizer() {
-
- {presetBtn("aggressive")} - {presetBtn("balanced")} - {presetBtn("conservative")} +
+ {Object.entries(PRESETS).map(([key, p]) => { + const isActive = preset === key; + return ( + + ); + })}
-
+
{SEGMENTS.map((seg, i) => { const left = toX(seg.start); const width = toX(seg.end) - left; - const isAbove = seg.confidence >= confidence; + const isAbove = seg.confidence >= config.confidence; + const bars = waveforms[i]; + const continued = turnsNotEnded.has(i); return (
+ > + {bars.map((h, bi) => ( +
+ ))} +
{"conf: " + seg.confidence}
+ {continued && ( +
+ )}
); })} {SEGMENTS.map((seg, i) => { - const isAbove = seg.confidence >= confidence; - const silenceMs = isAbove ? minSilence : maxSilence; + const isAbove = seg.confidence >= config.confidence; + const silenceMs = isAbove ? config.minSilence : config.maxSilence; const silenceEnd = seg.end + silenceMs / 1000; + const nextSeg = i < SEGMENTS.length - 1 ? SEGMENTS[i + 1] : null; if (silenceEnd > DURATION) return null; + if (nextSeg && silenceEnd >= nextSeg.start) return null; const startX = toX(seg.end); const endX = toX(silenceEnd); const color = isAbove ? "#22c55e" : "#f59e0b"; @@ -234,11 +254,11 @@ export function TurnDetectionVisualizer() { position: "absolute" as const, left: startX + "%", width: (endX - startX) + "%", - top: "40px", + top: "42px", height: "8px", borderRadius: "4px", background: color, - opacity: 0.3, + opacity: 0.25, }} />
@@ -255,8 +275,8 @@ export function TurnDetectionVisualizer() { style={{ position: "absolute" as const, left: x + "%", - top: "6px", - bottom: "10px", + top: "10px", + bottom: "30px", width: "2px", background: color, transform: "translateX(-1px)", @@ -266,36 +286,49 @@ export function TurnDetectionVisualizer() { style={{ position: "absolute" as const, left: x + "%", - top: "68px", + top: "78px", transform: "translateX(-50%)", fontSize: "10px", fontWeight: 600, color: color, whiteSpace: "nowrap" as const, + textAlign: "center" as const, + lineHeight: 1.3, }} > - EoT -
-
- {label} + {"EoT "} + {label}
); })} + + {turnsNotEnded.size > 0 && Array.from(turnsNotEnded).map((idx) => { + const seg = SEGMENTS[idx]; + const nextSeg = SEGMENTS[idx + 1]; + const midX = toX((seg.end + nextSeg.start) / 2); + return ( +
+ turn continues +
+ ); + })}
-
+
Semantic EoT @@ -304,45 +337,36 @@ export function TurnDetectionVisualizer() { Acoustic EoT + {turnsNotEnded.size > 0 && ( + + + Turn continues + + )}
-
- - - +
+ end_of_turn_confidence_threshold + {config.confidence} + min_end_of_turn_silence_when_confident + {config.minSilence} ms + max_turn_silence + {config.maxSilence} ms
-

- Drag the sliders to see how end-of-turn detection changes.{" "} - Semantic EoT triggers when confidence {"\u2265"} threshold after min_end_of_turn_silence_when_confident.{" "} - Acoustic EoT triggers when confidence {"<"} threshold after max_turn_silence. +

+ Semantic{" = confidence \u2265 threshold + min silence elapsed. "} + Acoustic{" = confidence < threshold + max silence elapsed. "} + When the silence period extends into the next speech segment, the turn is not ended.

); From b3e848482ef80c7eddaa9190fd2d2e49683543f1 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 16:38:27 +0000 Subject: [PATCH 6/8] Rename EoT labels to use parameter names instead of semantic/acoustic Co-Authored-By: Dan Ince --- .../components/TurnDetectionVisualizer.tsx | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/fern/assets/components/TurnDetectionVisualizer.tsx b/fern/assets/components/TurnDetectionVisualizer.tsx index 7b1db8d5..63a81624 100644 --- a/fern/assets/components/TurnDetectionVisualizer.tsx +++ b/fern/assets/components/TurnDetectionVisualizer.tsx @@ -268,7 +268,7 @@ export function TurnDetectionVisualizer() { {eotMarkers.map((m, i) => { const x = toX(m.time); const color = m.type === "semantic" ? "#22c55e" : "#f59e0b"; - const label = m.type === "semantic" ? "semantic" : "acoustic"; + const label = m.type === "semantic" ? "min_silence" : "max_silence"; return (
- {"EoT "} - {label} + {"EoT"} +
+ {label}
); @@ -331,11 +332,11 @@ export function TurnDetectionVisualizer() {
- Semantic EoT + EoT (min_silence_when_confident) - Acoustic EoT + EoT (max_turn_silence) {turnsNotEnded.size > 0 && ( @@ -364,9 +365,9 @@ export function TurnDetectionVisualizer() {

- Semantic{" = confidence \u2265 threshold + min silence elapsed. "} - Acoustic{" = confidence < threshold + max silence elapsed. "} - When the silence period extends into the next speech segment, the turn is not ended. + When confidence {"\u2265"} threshold, EoT triggers after min_end_of_turn_silence_when_confident.{" "} + When confidence {"<"} threshold, EoT triggers after max_turn_silence.{" "} + If the silence period extends into the next speech segment, the turn continues.

); From 9cb7dc5a8c3b6c34a34f3e4517d0e6ebcbbb1dba Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 16:39:50 +0000 Subject: [PATCH 7/8] Make visualizer always visible inline, remove hyperlink toggle Co-Authored-By: Dan Ince --- .../components/TurnDetectionVisualizer.tsx | 34 ++----------------- 1 file changed, 2 insertions(+), 32 deletions(-) diff --git a/fern/assets/components/TurnDetectionVisualizer.tsx b/fern/assets/components/TurnDetectionVisualizer.tsx index 63a81624..b1d8fb99 100644 --- a/fern/assets/components/TurnDetectionVisualizer.tsx +++ b/fern/assets/components/TurnDetectionVisualizer.tsx @@ -56,7 +56,6 @@ function generateWaveformBars(start: number, end: number, seed: number): number[ } export function TurnDetectionVisualizer() { - const [isOpen, setIsOpen] = React.useState(false); const [preset, setPreset] = React.useState("balanced"); const config = PRESETS[preset]; @@ -70,22 +69,7 @@ export function TurnDetectionVisualizer() { [] ); - if (!isOpen) { - return ( -

- { e.preventDefault(); setIsOpen(true); }} - style={{ color: "var(--accent-9, #4f46e5)", textDecoration: "underline", cursor: "pointer", fontWeight: 500 }} - > - Open the interactive turn detection visualizer - - {" "}to see how parameters affect end-of-turn detection. -

- ); - } - - const toX = (t: number) => (t / DURATION) * 100; + const toX= (t: number) => (t / DURATION) * 100; const turnsNotEnded: Set = new Set(); for (let i = 0; i < SEGMENTS.length; i++) { @@ -113,24 +97,10 @@ export function TurnDetectionVisualizer() { boxSizing: "border-box" as const, }} > -
+
Turn Detection Visualizer -
From 8f062696652cf92ab0cd0d54b6e65d2862c6e9a1 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 17:54:23 +0000 Subject: [PATCH 8/8] Move EoT labels and parameter names above vertical lines Co-Authored-By: Dan Ince --- .../components/TurnDetectionVisualizer.tsx | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/fern/assets/components/TurnDetectionVisualizer.tsx b/fern/assets/components/TurnDetectionVisualizer.tsx index b1d8fb99..c435d456 100644 --- a/fern/assets/components/TurnDetectionVisualizer.tsx +++ b/fern/assets/components/TurnDetectionVisualizer.tsx @@ -138,7 +138,7 @@ export function TurnDetectionVisualizer() { overflowX: "auto" as const, }} > -
+
{SEGMENTS.map((seg, i) => { const left = toX(seg.start); const width = toX(seg.end) - left; @@ -152,7 +152,7 @@ export function TurnDetectionVisualizer() { position: "absolute" as const, left: left + "%", width: width + "%", - top: "22px", + top: "32px", height: "48px", display: "flex", alignItems: "center", @@ -180,7 +180,7 @@ export function TurnDetectionVisualizer() { position: "absolute" as const, left: left + "%", width: width + "%", - top: "6px", + top: "16px", fontSize: "10px", color: "var(--grayscale-10, #4b5563)", textAlign: "center" as const, @@ -195,7 +195,7 @@ export function TurnDetectionVisualizer() { position: "absolute" as const, left: toX(seg.end) + "%", width: toX(SEGMENTS[i + 1].start) - toX(seg.end) + "%", - top: "42px", + top: "52px", height: "4px", background: "var(--accent-5, #a5b4fc)", opacity: 0.5, @@ -224,7 +224,7 @@ export function TurnDetectionVisualizer() { position: "absolute" as const, left: startX + "%", width: (endX - startX) + "%", - top: "42px", + top: "52px", height: "8px", borderRadius: "4px", background: color, @@ -245,7 +245,7 @@ export function TurnDetectionVisualizer() { style={{ position: "absolute" as const, left: x + "%", - top: "10px", + top: "20px", bottom: "30px", width: "2px", background: color, @@ -256,19 +256,19 @@ export function TurnDetectionVisualizer() { style={{ position: "absolute" as const, left: x + "%", - top: "78px", + top: "0px", transform: "translateX(-50%)", fontSize: "10px", fontWeight: 600, color: color, whiteSpace: "nowrap" as const, textAlign: "center" as const, - lineHeight: 1.3, + lineHeight: 1.2, }} > - {"EoT"} -
{label} +
+ {"EoT"}
); @@ -284,7 +284,7 @@ export function TurnDetectionVisualizer() { style={{ position: "absolute" as const, left: midX + "%", - top: "78px", + top: "88px", transform: "translateX(-50%)", fontSize: "9px", color: "var(--grayscale-9, #9ca3af)",
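
Taken together, the series leaves `fern/pages/02-speech-to-text/universal-streaming/turn-detection.mdx` wired up roughly as sketched below. The frontmatter, the import (patch 2), and the "### Overview" / "### How it works" anchors appear verbatim in the hunks above; the `<TurnDetectionVisualizer />` tag itself is inferred from the two lines patch 1 inserts and from patch 2's subject, so treat its exact placement as an assumption, and the surrounding prose is elided:

```mdx
---
title: "Turn detection"
description: "Intelligent turn detection with Streaming Speech-to-Text"
---

import { TurnDetectionVisualizer } from "../../../assets/components/TurnDetectionVisualizer";

### Overview

{/* ... overview prose ... */}

### How it works

<TurnDetectionVisualizer />

{/* ... explanation of the two end-of-turn detection paths ... */}
```

The end-of-turn rule the final component implements is split between `computeEot` (marker placement, patch 3) and the inline `turnsNotEnded` loop (turn continuation, patch 5), which repeat the same confidence branch. A minimal consolidated sketch of that rule, one decision per speech segment; the `endOfTurn` helper and `Segment` type are illustrative names, not part of the component, and the `DURATION` clamp the visualizer applies for rendering is omitted:

```ts
interface Segment {
  start: number;      // seconds
  end: number;        // seconds
  confidence: number; // model confidence that the turn is complete
}

// Pick the silence window from the confidence branch, then end the turn
// only if that window elapses before the next segment starts (the
// continuation fix from patch 5).
function endOfTurn(
  seg: Segment,
  next: Segment | undefined,
  threshold: number,    // end_of_turn_confidence_threshold
  minSilenceMs: number, // min_end_of_turn_silence_when_confident
  maxSilenceMs: number  // max_turn_silence
): { time: number; type: "semantic" | "acoustic" } | null {
  const confident = seg.confidence >= threshold;
  const eotTime = seg.end + (confident ? minSilenceMs : maxSilenceMs) / 1000;
  if (next && eotTime >= next.start) return null; // turn continues
  return { time: eotTime, type: confident ? "semantic" : "acoustic" };
}
```

Under the balanced preset (threshold 0.4, 400 ms, 1280 ms) the second segment (confidence 0.35) takes the acoustic path and ends 1.28 s after speech stops, at 8.38 s; under the conservative preset its 3.6 s window reaches the third segment at 9.0 s, so the turn continues, which is the case patch 5's commit message calls out.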