From aa15b2880f9e3bc35aa47c10159b8a955ff558fc Mon Sep 17 00:00:00 2001 From: Tavis Date: Sat, 21 Mar 2026 18:50:29 -1000 Subject: [PATCH 01/15] semaphore to enforce max 5 concurrent renders --- internal/server/render_utils.go | 10 ++++++++++ internal/server/server.go | 2 ++ 2 files changed, 12 insertions(+) diff --git a/internal/server/render_utils.go b/internal/server/render_utils.go index 4216b084..19e22aea 100644 --- a/internal/server/render_utils.go +++ b/internal/server/render_utils.go @@ -143,6 +143,16 @@ func (s *Server) possiblyRender(ctx context.Context, app *data.App, device *data now := time.Now() // uinterval is minutes if time.Since(app.LastRender) > time.Duration(app.UInterval)*time.Minute { + // Acquire render semaphore to limit concurrent renders + // This prevents thundering herd from overwhelming CPU + select { + case s.RenderSem <- struct{}{}: + defer func() { <-s.RenderSem }() + case <-ctx.Done(): + slog.Warn("Context cancelled waiting for render slot", "app", appBasename) + return false + } + slog.Info("Rendering app", "app", appBasename) startTime := time.Now() diff --git a/internal/server/server.go b/internal/server/server.go index 5f567607..35986e40 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -46,6 +46,7 @@ type Server struct { Upgrader *websocket.Upgrader PromRegistry prometheus.Registerer PromGatherer prometheus.Gatherer + RenderSem chan struct{} // Semaphore to limit concurrent renders systemAppsCache []apps.AppMetadata systemAppsCacheMutex sync.RWMutex @@ -87,6 +88,7 @@ func NewServer(db *gorm.DB, cfg *config.Settings) *Server { }, PromRegistry: prometheus.DefaultRegisterer, PromGatherer: prometheus.DefaultGatherer, + RenderSem: make(chan struct{}, 5), // Limit to 5 concurrent renders } // Load Settings from DB From 18bad880b2cbb502a750c737856e1a0b17ceb21a Mon Sep 17 00:00:00 2001 From: Tavis Date: Sat, 21 Mar 2026 19:10:33 -1000 Subject: [PATCH 02/15] add render metrics --- internal/server/render_metrics.go 
| 64 +++++++++++++++++++++++++++++++ internal/server/render_utils.go | 4 ++ internal/server/server.go | 16 ++++++++ 3 files changed, 84 insertions(+) create mode 100644 internal/server/render_metrics.go diff --git a/internal/server/render_metrics.go b/internal/server/render_metrics.go new file mode 100644 index 00000000..4b659ba6 --- /dev/null +++ b/internal/server/render_metrics.go @@ -0,0 +1,64 @@ +package server + +import ( + "log/slog" + "sync/atomic" + "time" +) + +type RenderMetrics struct { + activeCount atomic.Int64 + queuedCount atomic.Int64 + totalCount atomic.Int64 + failedCount atomic.Int64 + totalDur int64 // nanoseconds + maxDur int64 +} + +var renderMetrics RenderMetrics + +func (m *RenderMetrics) StartRender() { + m.activeCount.Add(1) + m.queuedCount.Add(1) +} + +func (m *RenderMetrics) EndRender(dur time.Duration, failed bool) { + m.activeCount.Add(-1) + m.queuedCount.Add(-1) + m.totalCount.Add(1) + atomic.AddInt64(&m.totalDur, int64(dur)) + + currentMax := atomic.LoadInt64(&m.maxDur) + if int64(dur) > currentMax { + atomic.StoreInt64(&m.maxDur, int64(dur)) + } + + if failed { + m.failedCount.Add(1) + } +} + +func (m *RenderMetrics) LogStats() { + slog.Info("Render stats", + "active", m.activeCount.Load(), + "queued", m.queuedCount.Load(), + "total", m.totalCount.Load(), + "failed", m.failedCount.Load(), + ) +} + +func (m *RenderMetrics) ActiveCount() int64 { + return m.activeCount.Load() +} + +func (m *RenderMetrics) AvgDuration() time.Duration { + total := m.totalCount.Load() + if total == 0 { + return 0 + } + return time.Duration(m.totalDur / total) +} + +func (m *RenderMetrics) MaxDuration() time.Duration { + return time.Duration(atomic.LoadInt64(&m.maxDur)) +} diff --git a/internal/server/render_utils.go b/internal/server/render_utils.go index 19e22aea..3fe872e9 100644 --- a/internal/server/render_utils.go +++ b/internal/server/render_utils.go @@ -153,12 +153,16 @@ func (s *Server) possiblyRender(ctx context.Context, app *data.App, device 
*data return false } + renderMetrics.StartRender() + slog.Info("Rendering app", "app", appBasename) startTime := time.Now() imgBytes, messages, err := s.RenderApp(ctx, device, app, appPath, nil) renderDur := time.Since(startTime) + renderMetrics.EndRender(renderDur, err != nil) + for _, msg := range messages { slog.Debug("Render message", "app", appBasename, "message", msg) } diff --git a/internal/server/server.go b/internal/server/server.go index 35986e40..a8573d17 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -304,6 +304,22 @@ func (s *Server) routes() { s.Router.HandleFunc("GET /debug/pprof/symbol", pprof.Symbol) s.Router.HandleFunc("GET /debug/pprof/trace", pprof.Trace) } + + // Start periodic render stats logger + go func() { + ticker := time.NewTicker(1 * time.Minute) + defer ticker.Stop() + for range ticker.C { + if count := renderMetrics.ActiveCount(); count > 0 { + slog.Warn("Render metrics", + "active_renders", count, + "avg_render_ms", renderMetrics.AvgDuration().Milliseconds(), + "max_render_ms", renderMetrics.MaxDuration().Milliseconds(), + ) + } + renderMetrics.LogStats() + } + }() } func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { From 869108f374cfad9b6bc221bc0a813530a09f5d3c Mon Sep 17 00:00:00 2001 From: Tavis Date: Sun, 22 Mar 2026 10:13:31 -1000 Subject: [PATCH 03/15] env var for maxconcurrentrenders --- docker-compose.dev.yaml | 1 + docker-compose.https.yaml | 1 + docker-compose.postgres.yaml | 1 + docker-compose.redis.yaml | 1 + docker-compose.yaml | 1 + internal/config/config.go | 1 + internal/server/server.go | 8 +++++++- 7 files changed, 13 insertions(+), 1 deletion(-) diff --git a/docker-compose.dev.yaml b/docker-compose.dev.yaml index 510004d2..107e99f8 100644 --- a/docker-compose.dev.yaml +++ b/docker-compose.dev.yaml @@ -22,6 +22,7 @@ services: - LOG_LEVEL - ENABLE_USER_REGISTRATION - GITHUB_TOKEN + - MAX_CONCURRENT_RENDERS=${MAX_CONCURRENT_RENDERS:-5} volumes: go_modules: diff --git 
a/docker-compose.https.yaml b/docker-compose.https.yaml index c067a3b9..d344e938 100644 --- a/docker-compose.https.yaml +++ b/docker-compose.https.yaml @@ -13,6 +13,7 @@ services: - PRODUCTION - ENABLE_USER_REGISTRATION - GITHUB_TOKEN + - MAX_CONCURRENT_RENDERS=${MAX_CONCURRENT_RENDERS:-5} healthcheck: test: ["CMD", "/app/tronbyt-server", "health"] diff --git a/docker-compose.postgres.yaml b/docker-compose.postgres.yaml index 0203b845..fefff7f7 100644 --- a/docker-compose.postgres.yaml +++ b/docker-compose.postgres.yaml @@ -16,6 +16,7 @@ services: - ENABLE_USER_REGISTRATION - SINGLE_USER_AUTO_LOGIN - GITHUB_TOKEN + - MAX_CONCURRENT_RENDERS=${MAX_CONCURRENT_RENDERS:-5} - DB_DSN=host=db user=tronbyt password=tronbyt dbname=tronbyt port=5432 sslmode=disable TimeZone=UTC depends_on: - db diff --git a/docker-compose.redis.yaml b/docker-compose.redis.yaml index 669ed7db..186cbce0 100644 --- a/docker-compose.redis.yaml +++ b/docker-compose.redis.yaml @@ -17,6 +17,7 @@ services: - ENABLE_USER_REGISTRATION - SINGLE_USER_AUTO_LOGIN - GITHUB_TOKEN + - MAX_CONCURRENT_RENDERS=${MAX_CONCURRENT_RENDERS:-5} healthcheck: test: ["CMD", "/app/tronbyt-server", "health"] diff --git a/docker-compose.yaml b/docker-compose.yaml index 787b1629..bfca84d4 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -16,6 +16,7 @@ services: - ENABLE_USER_REGISTRATION - SINGLE_USER_AUTO_LOGIN - GITHUB_TOKEN + - MAX_CONCURRENT_RENDERS=${MAX_CONCURRENT_RENDERS:-5} healthcheck: test: ["CMD", "/app/tronbyt-server", "health"] diff --git a/internal/config/config.go b/internal/config/config.go index 28401f49..14b37468 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -28,6 +28,7 @@ type Settings struct { TrustedProxies string `env:"TRONBYT_TRUSTED_PROXIES" envDefault:"*"` LogLevel string `env:"LOG_LEVEL" envDefault:"INFO"` EnableUpdateChecks bool `env:"ENABLE_UPDATE_CHECKS" envDefault:"true"` + MaxConcurrentRenders int `env:"MAX_CONCURRENT_RENDERS" envDefault:"5"` } // 
TemplateConfig holds configuration values needed in templates. diff --git a/internal/server/server.go b/internal/server/server.go index a8573d17..3355a938 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -88,9 +88,15 @@ func NewServer(db *gorm.DB, cfg *config.Settings) *Server { }, PromRegistry: prometheus.DefaultRegisterer, PromGatherer: prometheus.DefaultGatherer, - RenderSem: make(chan struct{}, 5), // Limit to 5 concurrent renders } + // Initialize render semaphore (default to 5 for zero/negative values) + maxRenders := cfg.MaxConcurrentRenders + if maxRenders <= 0 { + maxRenders = 5 + } + s.RenderSem = make(chan struct{}, maxRenders) + // Load Settings from DB // Secret Key secretKey, err := s.getSetting("secret_key") From 1dbc50d253e53f7437dbae41ae8b0092d302e8b9 Mon Sep 17 00:00:00 2001 From: Tavis Date: Sun, 22 Mar 2026 10:15:54 -1000 Subject: [PATCH 04/15] print 10 sec stats to log. --- internal/server/handlers_device_api.go | 2 ++ internal/server/render_metrics.go | 38 ++++++++++++++++++++++++++ internal/server/render_utils.go | 1 + internal/server/server.go | 9 ++++++ 4 files changed, 50 insertions(+) diff --git a/internal/server/handlers_device_api.go b/internal/server/handlers_device_api.go index 985c6cb7..8b5382bc 100644 --- a/internal/server/handlers_device_api.go +++ b/internal/server/handlers_device_api.go @@ -142,6 +142,8 @@ func (s *Server) handleNextApp(w http.ResponseWriter, r *http.Request) { dwell := device.GetEffectiveDwellTime(app) w.Header().Set("Tronbyt-Dwell-Secs", fmt.Sprintf("%d", dwell)) + webpMetrics.RecordWebPServed(len(imgData)) + if _, err := w.Write(imgData); err != nil { slog.Error("Failed to write image data to response", "error", err) // Log error, but can't change HTTP status after writing headers. 
diff --git a/internal/server/render_metrics.go b/internal/server/render_metrics.go index 4b659ba6..37ebd35c 100644 --- a/internal/server/render_metrics.go +++ b/internal/server/render_metrics.go @@ -17,6 +17,15 @@ type RenderMetrics struct { var renderMetrics RenderMetrics +type WebPMetrics struct { + servedCount atomic.Int64 + renderCount atomic.Int64 + cacheHitCount atomic.Int64 + bytesServed atomic.Int64 +} + +var webpMetrics WebPMetrics + func (m *RenderMetrics) StartRender() { m.activeCount.Add(1) m.queuedCount.Add(1) @@ -62,3 +71,32 @@ func (m *RenderMetrics) AvgDuration() time.Duration { func (m *RenderMetrics) MaxDuration() time.Duration { return time.Duration(atomic.LoadInt64(&m.maxDur)) } + +func (w *WebPMetrics) RecordWebPServed(bytes int) { + w.servedCount.Add(1) + w.bytesServed.Add(int64(bytes)) +} + +func (w *WebPMetrics) RecordRender() { + w.renderCount.Add(1) +} + +func (w *WebPMetrics) RecordCacheHit() { + w.cacheHitCount.Add(1) +} + +func (w *WebPMetrics) LogStats() { + served := w.servedCount.Swap(0) + renders := w.renderCount.Swap(0) + cacheHits := w.cacheHitCount.Swap(0) + bytes := w.bytesServed.Swap(0) + + mbServed := float64(bytes) / (1024 * 1024) + + slog.Info("WebP stats (10s window)", + "webp_served", served, + "renders", renders, + "cache_hits", cacheHits, + "mb_served", mbServed, + ) +} diff --git a/internal/server/render_utils.go b/internal/server/render_utils.go index 3fe872e9..220d5eb5 100644 --- a/internal/server/render_utils.go +++ b/internal/server/render_utils.go @@ -154,6 +154,7 @@ func (s *Server) possiblyRender(ctx context.Context, app *data.App, device *data } renderMetrics.StartRender() + webpMetrics.RecordRender() slog.Info("Rendering app", "app", appBasename) diff --git a/internal/server/server.go b/internal/server/server.go index 3355a938..5fb0a5ca 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -326,6 +326,15 @@ func (s *Server) routes() { renderMetrics.LogStats() } }() + + // Start periodic WebP 
stats logger (every 10 seconds) + go func() { + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + for range ticker.C { + webpMetrics.LogStats() + } + }() } func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { From 394240f323eb46128efd30ad7bdc24054dbcf0df Mon Sep 17 00:00:00 2001 From: Tavis Date: Sun, 22 Mar 2026 10:42:06 -1000 Subject: [PATCH 05/15] modify 10 sec stats --- internal/server/handlers_device_api.go | 4 ++++ internal/server/render_metrics.go | 23 +++++++++++++++++------ internal/server/websockets.go | 2 ++ 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/internal/server/handlers_device_api.go b/internal/server/handlers_device_api.go index 8b5382bc..45c19604 100644 --- a/internal/server/handlers_device_api.go +++ b/internal/server/handlers_device_api.go @@ -107,9 +107,13 @@ func (s *Server) handleNextApp(w http.ResponseWriter, r *http.Request) { // Send default image if error (or not found) slog.Error("Failed to get next app image", "device", device.ID, "error", err) s.sendDefaultImage(w, r, device) + webpMetrics.RecordWebPServed(0) + webpMetrics.RecordUniqueDevice(device.ID) return } + webpMetrics.RecordUniqueDevice(device.ID) + // For HTTP devices, we assume "Sent" equals "Displaying" (or roughly so). // We update DisplayingApp here so the Preview uses the explicit field instead of fallback. 
if app != nil { diff --git a/internal/server/render_metrics.go b/internal/server/render_metrics.go index 37ebd35c..81603303 100644 --- a/internal/server/render_metrics.go +++ b/internal/server/render_metrics.go @@ -2,6 +2,7 @@ package server import ( "log/slog" + "sync" "sync/atomic" "time" ) @@ -20,8 +21,9 @@ var renderMetrics RenderMetrics type WebPMetrics struct { servedCount atomic.Int64 renderCount atomic.Int64 - cacheHitCount atomic.Int64 bytesServed atomic.Int64 + uniqueMu sync.Mutex + uniqueDevices map[string]bool } var webpMetrics WebPMetrics @@ -81,22 +83,31 @@ func (w *WebPMetrics) RecordRender() { w.renderCount.Add(1) } -func (w *WebPMetrics) RecordCacheHit() { - w.cacheHitCount.Add(1) +func (w *WebPMetrics) RecordUniqueDevice(deviceID string) { + w.uniqueMu.Lock() + defer w.uniqueMu.Unlock() + if w.uniqueDevices == nil { + w.uniqueDevices = make(map[string]bool) + } + w.uniqueDevices[deviceID] = true } func (w *WebPMetrics) LogStats() { served := w.servedCount.Swap(0) renders := w.renderCount.Swap(0) - cacheHits := w.cacheHitCount.Swap(0) bytes := w.bytesServed.Swap(0) + w.uniqueMu.Lock() + uniqueDevs := len(w.uniqueDevices) + w.uniqueDevices = make(map[string]bool) // Reset for next window + w.uniqueMu.Unlock() + mbServed := float64(bytes) / (1024 * 1024) - slog.Info("WebP stats (10s window)", + slog.Info("Stats 10s -- ", "webp_served", served, "renders", renders, - "cache_hits", cacheHits, + "unique_devices", uniqueDevs, "mb_served", mbServed, ) } diff --git a/internal/server/websockets.go b/internal/server/websockets.go index f8f0c4d7..4cbf3735 100644 --- a/internal/server/websockets.go +++ b/internal/server/websockets.go @@ -274,6 +274,8 @@ func (s *Server) wsWriteLoop(ctx context.Context, conn *websocket.Conn, initialD if err := conn.WriteMessage(websocket.BinaryMessage, imgData); err != nil { return } + webpMetrics.RecordWebPServed(len(imgData)) + webpMetrics.RecordUniqueDevice(device.ID) if sendImmediate { if err := 
conn.WriteJSON(map[string]bool{"immediate": true}); err != nil { From 56742176fbf69b9d0f161906370b7d63d02b5131 Mon Sep 17 00:00:00 2001 From: Tavis Date: Sun, 22 Mar 2026 11:11:42 -1000 Subject: [PATCH 06/15] add admin stats dashboard --- internal/server/handlers_device_api.go | 2 + internal/server/handlers_user.go | 21 +++ internal/server/helpers.go | 5 + internal/server/render_metrics.go | 147 +++++++++++++++++++- internal/server/server.go | 26 ++-- web/templates/base.html | 6 + web/templates/manager/admin_dashboard.html | 154 +++++++++++++++++++++ 7 files changed, 348 insertions(+), 13 deletions(-) create mode 100644 web/templates/manager/admin_dashboard.html diff --git a/internal/server/handlers_device_api.go b/internal/server/handlers_device_api.go index 45c19604..3e402765 100644 --- a/internal/server/handlers_device_api.go +++ b/internal/server/handlers_device_api.go @@ -15,6 +15,8 @@ import ( func (s *Server) handleNextApp(w http.ResponseWriter, r *http.Request) { id := r.PathValue("id") + renderMetrics.RecordRequest() + var device *data.Device if d, err := DeviceFromContext(r.Context()); err == nil { device = d diff --git a/internal/server/handlers_user.go b/internal/server/handlers_user.go index 860c1bf4..5b763b92 100644 --- a/internal/server/handlers_user.go +++ b/internal/server/handlers_user.go @@ -429,3 +429,24 @@ func (s *Server) handleRefreshSystemRepo(w http.ResponseWriter, r *http.Request) http.Redirect(w, r, "/auth/edit", http.StatusSeeOther) } + +func (s *Server) handleAdminDashboard(w http.ResponseWriter, r *http.Request) { + user := GetUser(r) + if !user.IsAdmin { + http.Error(w, "Forbidden", http.StatusForbidden) + return + } + + var totalDevices, totalUsers int64 + s.DB.Model(&data.Device{}).Count(&totalDevices) + s.DB.Model(&data.User{}).Count(&totalUsers) + + stats := GetStatsSnapshot() + + s.renderTemplate(w, r, "admin_dashboard", TemplateData{ + User: user, + TotalDevices: totalDevices, + TotalUsers: totalUsers, + Stats: stats, + }) +} 
diff --git a/internal/server/helpers.go b/internal/server/helpers.go index 5dabe641..4d4a2382 100644 --- a/internal/server/helpers.go +++ b/internal/server/helpers.go @@ -86,6 +86,11 @@ type TemplateData struct { AppConfig map[string]any AppMetadata *apps.AppMetadata + // Admin Dashboard + TotalDevices int64 + TotalUsers int64 + Stats StatsSnapshot + // Device Update Extras ColorFilterOptions []ColorFilterOption ShowFullAnimationOptions []ShowFullAnimationOption diff --git a/internal/server/render_metrics.go b/internal/server/render_metrics.go index 81603303..81ccdfad 100644 --- a/internal/server/render_metrics.go +++ b/internal/server/render_metrics.go @@ -14,6 +14,11 @@ type RenderMetrics struct { failedCount atomic.Int64 totalDur int64 // nanoseconds maxDur int64 + + // Sliding window tracking (timestamps of events in last 60 seconds) + mu sync.Mutex + rendersByMinute []int64 // timestamps of renders + reqsByMinute []int64 // timestamps of requests } var renderMetrics RenderMetrics @@ -24,10 +29,17 @@ type WebPMetrics struct { bytesServed atomic.Int64 uniqueMu sync.Mutex uniqueDevices map[string]bool + + // Sliding window tracking + mu sync.Mutex + webpsByMinute []int64 // timestamps of webp serves + devicesByMinute []int64 // timestamps of unique devices } var webpMetrics WebPMetrics +const windowDuration = 60 * time.Second + func (m *RenderMetrics) StartRender() { m.activeCount.Add(1) m.queuedCount.Add(1) @@ -47,6 +59,11 @@ func (m *RenderMetrics) EndRender(dur time.Duration, failed bool) { if failed { m.failedCount.Add(1) } + + now := time.Now().Unix() + m.mu.Lock() + m.rendersByMinute = append(m.rendersByMinute, now) + m.mu.Unlock() } func (m *RenderMetrics) LogStats() { @@ -58,6 +75,13 @@ func (m *RenderMetrics) LogStats() { ) } +func (m *RenderMetrics) RecordRequest() { + now := time.Now().Unix() + m.mu.Lock() + m.reqsByMinute = append(m.reqsByMinute, now) + m.mu.Unlock() +} + func (m *RenderMetrics) ActiveCount() int64 { return m.activeCount.Load() } @@ 
-74,9 +98,52 @@ func (m *RenderMetrics) MaxDuration() time.Duration { return time.Duration(atomic.LoadInt64(&m.maxDur)) } +func (m *RenderMetrics) TotalCount() int64 { + return m.totalCount.Load() +} + +func (m *RenderMetrics) FailedCount() int64 { + return m.failedCount.Load() +} + +func (m *RenderMetrics) QueuedCount() int64 { + return m.queuedCount.Load() +} + +func (m *RenderMetrics) RendersPerMin() int64 { + m.mu.Lock() + defer m.mu.Unlock() + cutoff := time.Now().Add(-windowDuration).Unix() + var count int64 + for _, t := range m.rendersByMinute { + if t >= cutoff { + count++ + } + } + return count +} + +func (m *RenderMetrics) ReqsPerMin() int64 { + m.mu.Lock() + defer m.mu.Unlock() + cutoff := time.Now().Add(-windowDuration).Unix() + var count int64 + for _, t := range m.reqsByMinute { + if t >= cutoff { + count++ + } + } + return count +} + func (w *WebPMetrics) RecordWebPServed(bytes int) { w.servedCount.Add(1) w.bytesServed.Add(int64(bytes)) + + now := time.Now().Unix() + w.mu.Lock() + w.webpsByMinute = append(w.webpsByMinute, now) + w.mu.Unlock() } func (w *WebPMetrics) RecordRender() { @@ -85,11 +152,19 @@ func (w *WebPMetrics) RecordRender() { func (w *WebPMetrics) RecordUniqueDevice(deviceID string) { w.uniqueMu.Lock() - defer w.uniqueMu.Unlock() + alreadySeen := w.uniqueDevices[deviceID] if w.uniqueDevices == nil { w.uniqueDevices = make(map[string]bool) } w.uniqueDevices[deviceID] = true + w.uniqueMu.Unlock() + + if !alreadySeen { + now := time.Now().Unix() + w.mu.Lock() + w.devicesByMinute = append(w.devicesByMinute, now) + w.mu.Unlock() + } } func (w *WebPMetrics) LogStats() { @@ -111,3 +186,73 @@ func (w *WebPMetrics) LogStats() { "mb_served", mbServed, ) } + +func (w *WebPMetrics) ServedCount() int64 { + return w.servedCount.Load() +} + +func (w *WebPMetrics) RenderCount() int64 { + return w.renderCount.Load() +} + +func (w *WebPMetrics) BytesServed() int64 { + return w.bytesServed.Load() +} + +func (w *WebPMetrics) WebpsPerMin() int64 { + 
w.mu.Lock() + defer w.mu.Unlock() + cutoff := time.Now().Add(-windowDuration).Unix() + var count int64 + for _, t := range w.webpsByMinute { + if t >= cutoff { + count++ + } + } + return count +} + +func (w *WebPMetrics) UniqueDevicesPerMin() int64 { + w.mu.Lock() + defer w.mu.Unlock() + cutoff := time.Now().Add(-windowDuration).Unix() + var count int64 + for _, t := range w.devicesByMinute { + if t >= cutoff { + count++ + } + } + return count +} + +type StatsSnapshot struct { + ActiveRenders int64 + QueuedRenders int64 + TotalRenders int64 + FailedRenders int64 + AvgRenderMs int64 + MaxRenderMs int64 + RendersPerMin int64 + ReqsPerMin int64 + WebpsServed int64 + WebpsPerMin int64 + BytesServedMB float64 + UniqueDevsPerMin int64 +} + +func GetStatsSnapshot() StatsSnapshot { + return StatsSnapshot{ + ActiveRenders: renderMetrics.ActiveCount(), + QueuedRenders: renderMetrics.QueuedCount(), + TotalRenders: renderMetrics.TotalCount(), + FailedRenders: renderMetrics.FailedCount(), + AvgRenderMs: renderMetrics.AvgDuration().Milliseconds(), + MaxRenderMs: renderMetrics.MaxDuration().Milliseconds(), + RendersPerMin: renderMetrics.RendersPerMin(), + ReqsPerMin: renderMetrics.ReqsPerMin(), + WebpsServed: webpMetrics.ServedCount(), + WebpsPerMin: webpMetrics.WebpsPerMin(), + BytesServedMB: float64(webpMetrics.BytesServed()) / (1024 * 1024), + UniqueDevsPerMin: webpMetrics.UniqueDevicesPerMin(), + } +} diff --git a/internal/server/server.go b/internal/server/server.go index 5fb0a5ca..fe07987e 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -57,18 +57,19 @@ type Server struct { // Map template names to their file paths relative to web/templates. 
var templateFiles = map[string]string{ - "index": "manager/index.html", - "adminindex": "manager/adminindex.html", - "login": "auth/login.html", - "register": "auth/register.html", - "edit": "auth/edit.html", - "create": "manager/create.html", - "addapp": "manager/addapp.html", - "configapp": "manager/configapp.html", - "uploadapp": "manager/uploadapp.html", - "firmware": "manager/firmware.html", - "update": "manager/update.html", - "device_tv": "manager/device_tv.html", + "index": "manager/index.html", + "adminindex": "manager/adminindex.html", + "admin_dashboard": "manager/admin_dashboard.html", + "login": "auth/login.html", + "register": "auth/register.html", + "edit": "auth/edit.html", + "create": "manager/create.html", + "addapp": "manager/addapp.html", + "configapp": "manager/configapp.html", + "uploadapp": "manager/uploadapp.html", + "firmware": "manager/firmware.html", + "update": "manager/update.html", + "device_tv": "manager/device_tv.html", } func NewServer(db *gorm.DB, cfg *config.Settings) *Server { @@ -230,6 +231,7 @@ func (s *Server) routes() { // Web UI s.Router.HandleFunc("GET /", s.RequireLogin(s.handleIndex)) s.Router.HandleFunc("GET /admin", s.RequireLogin(s.handleAdminIndex)) + s.Router.HandleFunc("GET /admin/dashboard", s.RequireLogin(s.handleAdminDashboard)) s.Router.HandleFunc("DELETE /admin/users/{username}", s.RequireLogin(s.handleDeleteUser)) s.Router.HandleFunc("GET /devices/create", s.RequireLogin(s.handleCreateDeviceGet)) diff --git a/web/templates/base.html b/web/templates/base.html index 32bf1321..5e27fb7a 100644 --- a/web/templates/base.html +++ b/web/templates/base.html @@ -47,6 +47,9 @@

  • {{ t .Localizer "Create User" }}
  • +
  • + {{ t .Localizer "Dashboard" }} +
  • {{ end }}
  • {{ .User.Username }} @@ -119,6 +122,9 @@

  • {{ t .Localizer "Create User" }}
  • +
  • + {{ t .Localizer "Dashboard" }} +
  • {{ end }}
  • {{ .User.Username }} diff --git a/web/templates/manager/admin_dashboard.html b/web/templates/manager/admin_dashboard.html new file mode 100644 index 00000000..bf02dc9f --- /dev/null +++ b/web/templates/manager/admin_dashboard.html @@ -0,0 +1,154 @@ +{{ define "admin_dashboard" }} +{{ template "base" . }} +{{ end }} +{{ define "title" }}{{ t .Localizer "Admin Dashboard" }}{{ end }} +{{ define "header" }} + + +{{ end }} +{{ define "content" }} +
    +

    {{ t .Localizer "Admin Dashboard" }}

    +
    +
    +
    +
    +

    + {{ t .Localizer "Total Users" }} +

    +

    {{ .TotalUsers }}

    +
    +
    +
    +
    +

    + {{ t .Localizer "Total Devices" }} +

    +

    {{ .TotalDevices }}

    +
    +
    +
    +
    +

    + {{ t .Localizer "Reqs / Min" }} +

    +

    {{ .Stats.ReqsPerMin }}

    +
    +
    +
    +
    +
    +
    +
    +

    + {{ t .Localizer "WebP Stats" }} +

    +
    + + + + + + + + + + + + + + + + + +
    + {{ t .Localizer "Total WebPs Served" }} + {{ .Stats.WebpsServed }}
    + {{ t .Localizer "WebPs / Min (avg)" }} + {{ .Stats.WebpsPerMin }}
    + {{ t .Localizer "MB Served" }} + {{ printf "%.2f" .Stats.BytesServedMB }} MB
    + {{ t .Localizer "Unique Devices / Min" }} + {{ .Stats.UniqueDevsPerMin }}
    +
    +
    +
    +
    +

    + {{ t .Localizer "Render Stats" }} +

    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + {{ t .Localizer "Active Renders" }} + {{ .Stats.ActiveRenders }}
    + {{ t .Localizer "Queued Renders" }} + {{ .Stats.QueuedRenders }}
    + {{ t .Localizer "Total Renders" }} + {{ .Stats.TotalRenders }}
    + {{ t .Localizer "Failed Renders" }} + {{ .Stats.FailedRenders }}
    + {{ t .Localizer "Renders / Min (avg)" }} + {{ .Stats.RendersPerMin }}
    + {{ t .Localizer "Avg Render Time" }} + {{ .Stats.AvgRenderMs }} ms
    + {{ t .Localizer "Max Render Time" }} + {{ .Stats.MaxRenderMs }} ms
    +
    +
    +
    +
    + +
    + +{{ end }} From 8cb8288f6e3d60a8bbc5709251713713a413de85 Mon Sep 17 00:00:00 2001 From: Tavis Date: Sun, 22 Mar 2026 12:24:29 -1000 Subject: [PATCH 07/15] auto refresh the admin dashboard --- internal/server/render_metrics.go | 43 ++++++++++++---------- web/templates/manager/admin_dashboard.html | 40 ++++++++++++++++++++ 2 files changed, 63 insertions(+), 20 deletions(-) diff --git a/internal/server/render_metrics.go b/internal/server/render_metrics.go index 81ccdfad..2cb65d3f 100644 --- a/internal/server/render_metrics.go +++ b/internal/server/render_metrics.go @@ -28,12 +28,11 @@ type WebPMetrics struct { renderCount atomic.Int64 bytesServed atomic.Int64 uniqueMu sync.Mutex - uniqueDevices map[string]bool + uniqueDevices map[string]int64 // device ID -> last seen timestamp // Sliding window tracking - mu sync.Mutex - webpsByMinute []int64 // timestamps of webp serves - devicesByMinute []int64 // timestamps of unique devices + mu sync.Mutex + webpsByMinute []int64 // timestamps of webp serves } var webpMetrics WebPMetrics @@ -151,20 +150,13 @@ func (w *WebPMetrics) RecordRender() { } func (w *WebPMetrics) RecordUniqueDevice(deviceID string) { + now := time.Now().Unix() w.uniqueMu.Lock() - alreadySeen := w.uniqueDevices[deviceID] if w.uniqueDevices == nil { - w.uniqueDevices = make(map[string]bool) + w.uniqueDevices = make(map[string]int64) } - w.uniqueDevices[deviceID] = true + w.uniqueDevices[deviceID] = now w.uniqueMu.Unlock() - - if !alreadySeen { - now := time.Now().Unix() - w.mu.Lock() - w.devicesByMinute = append(w.devicesByMinute, now) - w.mu.Unlock() - } } func (w *WebPMetrics) LogStats() { @@ -172,9 +164,20 @@ func (w *WebPMetrics) LogStats() { renders := w.renderCount.Swap(0) bytes := w.bytesServed.Swap(0) + cutoff := time.Now().Add(-windowDuration).Unix() w.uniqueMu.Lock() - uniqueDevs := len(w.uniqueDevices) - w.uniqueDevices = make(map[string]bool) // Reset for next window + var uniqueDevs int64 + for _, lastSeen := range w.uniqueDevices { + if 
lastSeen >= cutoff { + uniqueDevs++ + } + } + // Clean up old entries + for id, lastSeen := range w.uniqueDevices { + if lastSeen < cutoff { + delete(w.uniqueDevices, id) + } + } w.uniqueMu.Unlock() mbServed := float64(bytes) / (1024 * 1024) @@ -213,12 +216,12 @@ func (w *WebPMetrics) WebpsPerMin() int64 { } func (w *WebPMetrics) UniqueDevicesPerMin() int64 { - w.mu.Lock() - defer w.mu.Unlock() cutoff := time.Now().Add(-windowDuration).Unix() + w.uniqueMu.Lock() + defer w.uniqueMu.Unlock() var count int64 - for _, t := range w.devicesByMinute { - if t >= cutoff { + for _, lastSeen := range w.uniqueDevices { + if lastSeen >= cutoff { count++ } } diff --git a/web/templates/manager/admin_dashboard.html b/web/templates/manager/admin_dashboard.html index bf02dc9f..d699e5f2 100644 --- a/web/templates/manager/admin_dashboard.html +++ b/web/templates/manager/admin_dashboard.html @@ -8,6 +8,12 @@ {{ end }} {{ define "content" }}
    +
    + +

    {{ t .Localizer "Admin Dashboard" }}


    @@ -151,4 +157,38 @@

    border: 1px solid var(--border-color); } + {{ end }} From c652fb416626c8c8d3d7d68f2be23d8a6f448f8c Mon Sep 17 00:00:00 2001 From: Tavis Date: Sun, 22 Mar 2026 16:18:58 -1000 Subject: [PATCH 08/15] skip render if no slots open --- internal/server/render_utils.go | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/internal/server/render_utils.go b/internal/server/render_utils.go index 220d5eb5..9797ed85 100644 --- a/internal/server/render_utils.go +++ b/internal/server/render_utils.go @@ -143,14 +143,28 @@ func (s *Server) possiblyRender(ctx context.Context, app *data.App, device *data now := time.Now() // uinterval is minutes if time.Since(app.LastRender) > time.Duration(app.UInterval)*time.Minute { - // Acquire render semaphore to limit concurrent renders - // This prevents thundering herd from overwhelming CPU - select { - case s.RenderSem <- struct{}{}: - defer func() { <-s.RenderSem }() - case <-ctx.Done(): - slog.Warn("Context cancelled waiting for render slot", "app", appBasename) - return false + // Try to acquire render semaphore without blocking. + // If no slot available, only skip rendering if we have a valid cached image. + // If last render was empty or failed, we must attempt rendering. 
+ hasValidCachedImage := app.LastSuccessfulRender != nil && !app.EmptyLastRender + + if hasValidCachedImage { + select { + case s.RenderSem <- struct{}{}: + defer func() { <-s.RenderSem }() + default: + slog.Debug("Skipping render - no slot available", "app", appBasename) + return true // Use existing cached image + } + } else { + // No valid cached image, must render - use non-blocking but wait briefly + select { + case s.RenderSem <- struct{}{}: + defer func() { <-s.RenderSem }() + case <-ctx.Done(): + slog.Warn("Context cancelled waiting for render slot", "app", appBasename) + return false // Cannot render, skip to next app + } } renderMetrics.StartRender() From cbb6a8df9d9478e81cc2a3eb3553ac050ca63068 Mon Sep 17 00:00:00 2001 From: Tavis Date: Sun, 22 Mar 2026 19:19:36 -1000 Subject: [PATCH 09/15] print load avg in stats line --- internal/server/render_metrics.go | 32 ++++++++++++++++++++++++------- internal/server/render_utils.go | 4 ++-- 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/internal/server/render_metrics.go b/internal/server/render_metrics.go index 2cb65d3f..ca6c6866 100644 --- a/internal/server/render_metrics.go +++ b/internal/server/render_metrics.go @@ -2,6 +2,9 @@ package server import ( "log/slog" + "os" + "strconv" + "strings" "sync" "sync/atomic" "time" @@ -162,7 +165,6 @@ func (w *WebPMetrics) RecordUniqueDevice(deviceID string) { func (w *WebPMetrics) LogStats() { served := w.servedCount.Swap(0) renders := w.renderCount.Swap(0) - bytes := w.bytesServed.Swap(0) cutoff := time.Now().Add(-windowDuration).Unix() w.uniqueMu.Lock() @@ -180,16 +182,32 @@ func (w *WebPMetrics) LogStats() { } w.uniqueMu.Unlock() - mbServed := float64(bytes) / (1024 * 1024) + loadAvg1m := getLoadAverage() - slog.Info("Stats 10s -- ", - "webp_served", served, - "renders", renders, - "unique_devices", uniqueDevs, - "mb_served", mbServed, + slog.Info("Stats ----- ", + "served", served, + "renderred", renders, + "devices", uniqueDevs, + "load", loadAvg1m, 
) } +func getLoadAverage() float64 { + data, err := os.ReadFile("/proc/loadavg") + if err != nil { + return 0 + } + parts := strings.Split(string(data), " ") + if len(parts) < 1 { + return 0 + } + f, err := strconv.ParseFloat(parts[0], 64) + if err != nil { + return 0 + } + return f +} + func (w *WebPMetrics) ServedCount() int64 { return w.servedCount.Load() } diff --git a/internal/server/render_utils.go b/internal/server/render_utils.go index 9797ed85..1ae73510 100644 --- a/internal/server/render_utils.go +++ b/internal/server/render_utils.go @@ -170,12 +170,12 @@ func (s *Server) possiblyRender(ctx context.Context, app *data.App, device *data renderMetrics.StartRender() webpMetrics.RecordRender() - slog.Info("Rendering app", "app", appBasename) - startTime := time.Now() imgBytes, messages, err := s.RenderApp(ctx, device, app, appPath, nil) renderDur := time.Since(startTime) + slog.Info("Rendered app", "app", appBasename, "duration", renderDur) + renderMetrics.EndRender(renderDur, err != nil) for _, msg := range messages { From f24a782a64500235c40d9fbaded7deebc6a58079 Mon Sep 17 00:00:00 2001 From: Tavis Date: Sun, 22 Mar 2026 19:21:06 -1000 Subject: [PATCH 10/15] typo --- internal/server/render_metrics.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/server/render_metrics.go b/internal/server/render_metrics.go index ca6c6866..8de8fd15 100644 --- a/internal/server/render_metrics.go +++ b/internal/server/render_metrics.go @@ -186,7 +186,7 @@ func (w *WebPMetrics) LogStats() { slog.Info("Stats ----- ", "served", served, - "renderred", renders, + "rendered", renders, "devices", uniqueDevs, "load", loadAvg1m, ) From ee1889c2a47e085c12a3fff9b0d18f29529521e9 Mon Sep 17 00:00:00 2001 From: Tavis Date: Sun, 22 Mar 2026 19:40:39 -1000 Subject: [PATCH 11/15] remove periodic render stats --- internal/server/render_metrics.go | 20 ++++---------------- internal/server/render_utils.go | 2 +- internal/server/server.go | 16 ---------------- 3 files
changed, 5 insertions(+), 33 deletions(-) diff --git a/internal/server/render_metrics.go b/internal/server/render_metrics.go index 8de8fd15..1410eea2 100644 --- a/internal/server/render_metrics.go +++ b/internal/server/render_metrics.go @@ -1,6 +1,7 @@ package server import ( + "fmt" "log/slog" "os" "strconv" @@ -68,15 +69,6 @@ func (m *RenderMetrics) EndRender(dur time.Duration, failed bool) { m.mu.Unlock() } -func (m *RenderMetrics) LogStats() { - slog.Info("Render stats", - "active", m.activeCount.Load(), - "queued", m.queuedCount.Load(), - "total", m.totalCount.Load(), - "failed", m.failedCount.Load(), - ) -} - func (m *RenderMetrics) RecordRequest() { now := time.Now().Unix() m.mu.Lock() @@ -183,13 +175,9 @@ func (w *WebPMetrics) LogStats() { w.uniqueMu.Unlock() loadAvg1m := getLoadAverage() - - slog.Info("Stats ----- ", - "served", served, - "rendered", renders, - "devices", uniqueDevs, - "load", loadAvg1m, - ) + if served > 0 { + slog.Info(fmt.Sprintf("Stats ------ : %.1f - %d / %d ", loadAvg1m, served, renders)) + } } func getLoadAverage() float64 { diff --git a/internal/server/render_utils.go b/internal/server/render_utils.go index 1ae73510..3cc165ea 100644 --- a/internal/server/render_utils.go +++ b/internal/server/render_utils.go @@ -174,7 +174,7 @@ func (s *Server) possiblyRender(ctx context.Context, app *data.App, device *data imgBytes, messages, err := s.RenderApp(ctx, device, app, appPath, nil) renderDur := time.Since(startTime) - slog.Info("Rendered app", "app", appBasename, "duration", renderDur) + slog.Info(fmt.Sprintf("Rendered in %dms %s ", renderDur.Milliseconds(), appBasename)) renderMetrics.EndRender(renderDur, err != nil) diff --git a/internal/server/server.go b/internal/server/server.go index fe07987e..48bf2d12 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -313,22 +313,6 @@ func (s *Server) routes() { s.Router.HandleFunc("GET /debug/pprof/trace", pprof.Trace) } - // Start periodic render stats logger - go func() 
{ - ticker := time.NewTicker(1 * time.Minute) - defer ticker.Stop() - for range ticker.C { - if count := renderMetrics.ActiveCount(); count > 0 { - slog.Warn("Render metrics", - "active_renders", count, - "avg_render_ms", renderMetrics.AvgDuration().Milliseconds(), - "max_render_ms", renderMetrics.MaxDuration().Milliseconds(), - ) - } - renderMetrics.LogStats() - } - }() - // Start periodic WebP stats logger (every 10 seconds) go func() { ticker := time.NewTicker(10 * time.Second) From 71d3ffd4286fac987aea1c236a7a5e0de9259e28 Mon Sep 17 00:00:00 2001 From: Tavis Date: Sun, 22 Mar 2026 23:43:27 -1000 Subject: [PATCH 12/15] semaphores for /next --- internal/server/handlers_device_api.go | 9 ++++ internal/server/server.go | 73 ++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) diff --git a/internal/server/handlers_device_api.go b/internal/server/handlers_device_api.go index 3e402765..c0ef0b1c 100644 --- a/internal/server/handlers_device_api.go +++ b/internal/server/handlers_device_api.go @@ -17,6 +17,15 @@ func (s *Server) handleNextApp(w http.ResponseWriter, r *http.Request) { renderMetrics.RecordRequest() + // Acquire per-device semaphore to prevent queue backup from same device. + // If already processing a request for this device, return cached image immediately.
+ if !s.acquireDeviceSemaphore(id) { + slog.Debug("Device busy, serving cached image", "device", id) + s.serveCachedImageForDevice(w, r, id) + return + } + defer s.releaseDeviceSemaphore(id) + var device *data.Device if d, err := DeviceFromContext(r.Context()); err == nil { device = d diff --git a/internal/server/server.go b/internal/server/server.go index 48bf2d12..083a9bf9 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -20,6 +20,7 @@ import ( "tronbyt-server/internal/apps" "tronbyt-server/internal/config" + "tronbyt-server/internal/data" syncer "tronbyt-server/internal/sync" "tronbyt-server/web" @@ -48,6 +49,9 @@ type Server struct { PromGatherer prometheus.Gatherer RenderSem chan struct{} // Semaphore to limit concurrent renders + deviceSemaphores map[string]chan struct{} + deviceSemaphoresMutex sync.Mutex + systemAppsCache []apps.AppMetadata systemAppsCacheMutex sync.RWMutex @@ -98,6 +102,9 @@ func NewServer(db *gorm.DB, cfg *config.Settings) *Server { } s.RenderSem = make(chan struct{}, maxRenders) + // Initialize device semaphores map + s.deviceSemaphores = make(map[string]chan struct{}) + // Load Settings from DB // Secret Key secretKey, err := s.getSetting("secret_key") @@ -369,3 +376,69 @@ func (s *Server) handleDots(w http.ResponseWriter, r *http.Request) { slog.Error("Failed to write dots SVG", "error", err) } } + +// acquireDeviceSemaphore tries to acquire a slot for processing a device request. +// Returns true if acquired, false if device is already processing a request. +// When false is returned, the caller should serve a cached image. 
+func (s *Server) acquireDeviceSemaphore(deviceID string) bool { + s.deviceSemaphoresMutex.Lock() + ch, exists := s.deviceSemaphores[deviceID] + if !exists { + ch = make(chan struct{}, 1) + s.deviceSemaphores[deviceID] = ch + } + s.deviceSemaphoresMutex.Unlock() + + select { + case ch <- struct{}{}: + return true + default: + return false + } +} + +// releaseDeviceSemaphore releases the slot for a device after processing. +func (s *Server) releaseDeviceSemaphore(deviceID string) { + s.deviceSemaphoresMutex.Lock() + ch, exists := s.deviceSemaphores[deviceID] + s.deviceSemaphoresMutex.Unlock() + if exists { + <-ch + } +} + +// serveCachedImageForDevice returns a cached image when the device is busy processing another request. +// This prevents queue buildup from retry storms. +func (s *Server) serveCachedImageForDevice(w http.ResponseWriter, r *http.Request, deviceID string) { + device, err := gorm.G[data.Device](s.DB).Preload("Apps", nil).Where("id = ?", deviceID).First(r.Context()) + if err != nil { + slog.Error("Failed to fetch device for cached image", "device", deviceID, "error", err) + http.Error(w, "Device not found", http.StatusNotFound) + return + } + + imgData, app, err := s.GetCurrentAppImage(r.Context(), &device) + if err != nil || imgData == nil { + slog.Error("Failed to get cached image", "device", deviceID, "error", err) + http.Error(w, "Image not found", http.StatusNotFound) + return + } + + w.Header().Set("Content-Type", "image/webp") + w.Header().Set("Cache-Control", "public, max-age=0, must-revalidate") + w.Header().Set("X-Cached", "true") + + if app != nil { + w.Header().Set("Tronbyt-App", app.Iname) + } + + brightness := device.GetEffectiveBrightness() + w.Header().Set("Tronbyt-Brightness", fmt.Sprintf("%d", brightness)) + + dwell := device.GetEffectiveDwellTime(app) + w.Header().Set("Tronbyt-Dwell-Secs", fmt.Sprintf("%d", dwell)) + + if _, err := w.Write(imgData); err != nil { + slog.Error("Failed to write cached image", "error", err) + } +} 
From c864b683f1d46a4a5add5b08b582d9393c2baf2a Mon Sep 17 00:00:00 2001 From: Tavis Date: Mon, 23 Mar 2026 00:52:03 -1000 Subject: [PATCH 13/15] use atomic.Int64 --- internal/server/render_metrics.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/server/render_metrics.go b/internal/server/render_metrics.go index 1410eea2..ed57cdef 100644 --- a/internal/server/render_metrics.go +++ b/internal/server/render_metrics.go @@ -17,7 +17,7 @@ type RenderMetrics struct { totalCount atomic.Int64 failedCount atomic.Int64 totalDur int64 // nanoseconds - maxDur int64 + maxDur atomic.Int64 // Sliding window tracking (timestamps of events in last 60 seconds) mu sync.Mutex @@ -54,9 +54,9 @@ func (m *RenderMetrics) EndRender(dur time.Duration, failed bool) { m.totalCount.Add(1) atomic.AddInt64(&m.totalDur, int64(dur)) - currentMax := atomic.LoadInt64(&m.maxDur) + currentMax := m.maxDur.Load() if int64(dur) > currentMax { - atomic.StoreInt64(&m.maxDur, int64(dur)) + m.maxDur.Store(int64(dur)) } if failed { @@ -89,7 +89,7 @@ func (m *RenderMetrics) AvgDuration() time.Duration { } func (m *RenderMetrics) MaxDuration() time.Duration { - return time.Duration(atomic.LoadInt64(&m.maxDur)) + return time.Duration(m.maxDur.Load()) } func (m *RenderMetrics) TotalCount() int64 { From 4cd7681726763601c6ac9001f8ff11a0035b8854 Mon Sep 17 00:00:00 2001 From: Tavis Date: Mon, 23 Mar 2026 08:56:30 -1000 Subject: [PATCH 14/15] Apply suggestions from code review Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- internal/server/server.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/server/server.go b/internal/server/server.go index 083a9bf9..dab83a9a 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -96,11 +96,12 @@ func NewServer(db *gorm.DB, cfg *config.Settings) *Server { } // Initialize render semaphore (default to 5 for zero/negative values) - 
maxRenders := cfg.MaxConcurrentRenders + maxRenders := cfg.MaxConcurrentRenders if maxRenders <= 0 { maxRenders = 5 + slog.Warn("MaxConcurrentRenders is zero or negative, using default value of 5") } - s.RenderSem = make(chan struct{}, maxRenders) + // Initialize device semaphores map s.deviceSemaphores = make(map[string]chan struct{}) From 5bb4f4d4f0b97e6e1b0c83d6e2cfc6fcd56ea030 Mon Sep 17 00:00:00 2001 From: Tavis Date: Mon, 23 Mar 2026 09:49:07 -1000 Subject: [PATCH 15/15] fix rendersem --- internal/server/server.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/server/server.go b/internal/server/server.go index dab83a9a..f48b25fe 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -96,12 +96,12 @@ func NewServer(db *gorm.DB, cfg *config.Settings) *Server { } // Initialize render semaphore (default to 5 for zero/negative values) - maxRenders := cfg.MaxConcurrentRenders + maxRenders := cfg.MaxConcurrentRenders if maxRenders <= 0 { maxRenders = 5 - slog.Warn("MaxConcurrentRenders is zero or negative, using default value of 5") + slog.Warn("MaxConcurrentRenders is zero or negative, using default value of 5") } - + s.RenderSem = make(chan struct{}, maxRenders) // Initialize device semaphores map s.deviceSemaphores = make(map[string]chan struct{})