forked from helixml/helix
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathstack
More file actions
executable file
·2197 lines (1893 loc) · 84 KB
/
stack
File metadata and controls
executable file
·2197 lines (1893 loc) · 84 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env bash
# Helix development stack helper. Strict mode: exit on error, error on unset
# variables, and fail pipelines if any stage fails.
set -euo pipefail
# Split words only on newlines/tabs so unquoted expansions don't break on spaces.
IFS=$'\n\t'
# Absolute directory containing this script (the helix checkout root).
export DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Parent directory expected to hold sibling checkouts (zed/, wolf/, ...).
export PROJECTS_ROOT="$(dirname "$DIR")"
export HELIX_HOST_HOME="$DIR"
# All of the following may be preset in the caller's environment.
# Empty string means "feature off" for the WITH_*/STOP_* toggles.
export TMUX_SESSION=${TMUX_SESSION:="helix"}
export WITH_RUNNER=${WITH_RUNNER:=""}
export WITH_DEMOS=${WITH_DEMOS:=""}
export STOP_KEYCLOAK=${STOP_KEYCLOAK:=""}
export STOP_POSTGRES=${STOP_POSTGRES:=""}
export STOP_PGVECTOR=${STOP_PGVECTOR:=""}
# NOTE(review): the default "0" is a non-empty string, so any `[[ -n "$WIPE_SLOTS" ]]`
# check downstream is always true unless the user explicitly sets WIPE_SLOTS="" — confirm intent.
export WIPE_SLOTS=${WIPE_SLOTS:="0"}
export COMPOSE_PROFILES=${COMPOSE_PROFILES:=""}
# Desktop categories for build-sandbox
# Production desktops are always built; experimental require opt-in via EXPERIMENTAL_DESKTOPS
# Note: Using arrays because IFS is set to '\n\t' (no space splitting)
PRODUCTION_DESKTOPS=(sway ubuntu)
AVAILABLE_EXPERIMENTAL_DESKTOPS=(zorin xfce)
# EXPERIMENTAL_DESKTOPS can be set as space-separated string, converted to array below
# Configure host networking for Docker-in-Docker support
# Enables route_localnet/ip_forward sysctls and raises inotify limits for Zed
# file watching. All sysctl writes are best-effort ('|| true') so hosts where
# sudo is unavailable degrade gracefully instead of aborting under 'set -e'.
function setup_dev_networking() {
    echo "🌐 Configuring networking for Docker-in-Docker support..."
    # Check if already configured
    local NEEDS_SETUP=false
    if [[ $(cat /proc/sys/net/ipv4/conf/all/route_localnet 2>/dev/null) != "1" ]]; then
        NEEDS_SETUP=true
    fi
    if [[ "$NEEDS_SETUP" == "true" ]]; then
        # route_localnet: Allow 127.x.x.x addresses on non-loopback interfaces
        # Required for localhost:PORT forwarding to container networks via DNAT
        sudo sysctl -w net.ipv4.conf.all.route_localnet=1 >/dev/null 2>&1 || true
        sudo sysctl -w net.ipv4.conf.default.route_localnet=1 >/dev/null 2>&1 || true
        sudo sysctl -w net.ipv4.ip_forward=1 >/dev/null 2>&1 || true
        echo "✅ Docker-in-Docker networking configured (route_localnet, ip_forward)"
    else
        echo "✅ Docker-in-Docker networking already configured"
    fi
    # Also ensure inotify limits are sufficient for Zed file watching
    # Declaration split from assignment so a failed read is not masked by
    # 'local' always returning 0 (ShellCheck SC2155).
    local CURRENT_WATCHES
    CURRENT_WATCHES=$(cat /proc/sys/fs/inotify/max_user_watches 2>/dev/null || echo "0")
    local TARGET_WATCHES=1048576
    if [[ "$CURRENT_WATCHES" -lt "$TARGET_WATCHES" ]]; then
        echo "📁 Increasing inotify limits for Zed file watching..."
        sudo sysctl -w fs.inotify.max_user_watches="$TARGET_WATCHES" >/dev/null 2>&1 || true
        sudo sysctl -w fs.inotify.max_user_instances=8192 >/dev/null 2>&1 || true
        echo "✅ inotify limits increased"
    fi
}
# Helper function to check for GPU and set appropriate runner profile
# Detection precedence: explicit FORCE_CPU override, then NVIDIA, then AMD
# (ROCm), then Intel (encode-only), then plain CPU fallback. Exports
# RUNNER_CONTAINER, RUNNER_PROFILE, DEV_CPU_ONLY_CMD and VLLM_ENV_VARS.
function setup_runner_profile() {
    export FORCE_CPU=${FORCE_CPU:=""}
    # Phase 1: decide which hardware mode we are in.
    local mode
    if [[ -n "$FORCE_CPU" ]]; then
        mode="forced_cpu"
    elif command -v nvidia-smi &> /dev/null && nvidia-smi &> /dev/null; then
        mode="nvidia"
    elif [[ -e "/dev/kfd" ]] && [[ -d "/dev/dri" ]] && command -v lspci &> /dev/null && lspci | grep -iE "(VGA|3D|Display).*AMD" &> /dev/null; then
        mode="amd"
    elif [[ -d "/dev/dri" ]] && command -v lspci &> /dev/null && lspci | grep -iE "(VGA|3D|Display).*(Intel|Iris)" &> /dev/null; then
        mode="intel"
    else
        mode="cpu"
    fi
    # Phase 2: export the runner settings for that mode.
    case "$mode" in
        forced_cpu)
            echo "💻 FORCE_CPU is set, forcing CPU mode regardless of GPU detection"
            export RUNNER_CONTAINER="runner"
            export RUNNER_PROFILE="--profile runner"
            export DEV_CPU_ONLY_CMD="DEVELOPMENT_CPU_ONLY=true "
            export VLLM_ENV_VARS="VLLM_DEVICE=cpu VLLM_LOGGING_LEVEL=DEBUG"
            ;;
        nvidia)
            echo "🚀 NVIDIA GPU detected, using GPU support"
            export RUNNER_CONTAINER="runner_gpu"
            export RUNNER_PROFILE="--profile runner_gpu"
            export DEV_CPU_ONLY_CMD=""
            export VLLM_ENV_VARS=""
            ;;
        amd)
            echo "🚀 AMD GPU detected (ROCm), using AMD GPU support"
            export RUNNER_CONTAINER="runner_gpu_amd"
            export RUNNER_PROFILE="--profile runner_gpu_amd"
            export DEV_CPU_ONLY_CMD=""
            export VLLM_ENV_VARS=""
            ;;
        intel)
            # Intel: video encoding works (QSV) but LLM inference stays on CPU.
            echo "🖥️ Intel GPU detected - sandbox/video encoding supported (QSV), external LLM recommended for AI inference"
            export RUNNER_CONTAINER="runner"
            export RUNNER_PROFILE="--profile runner"
            export DEV_CPU_ONLY_CMD="DEVELOPMENT_CPU_ONLY=true "
            export VLLM_ENV_VARS="VLLM_DEVICE=cpu VLLM_LOGGING_LEVEL=DEBUG"
            ;;
        *)
            echo "❌ No supported GPU detected, running without GPU support"
            export RUNNER_CONTAINER="runner"
            export RUNNER_PROFILE="--profile runner"
            export DEV_CPU_ONLY_CMD="DEVELOPMENT_CPU_ONLY=true "
            export VLLM_ENV_VARS="VLLM_DEVICE=cpu VLLM_LOGGING_LEVEL=DEBUG"
            ;;
    esac
}
# Helper function to determine sandbox service and container names based on COMPOSE_PROFILES
# Sets SANDBOX_SERVICE (docker-compose service name) and SANDBOX_CONTAINER (docker container name)
function get_sandbox_names() {
    # First matching pattern wins, mirroring the software > amd-intel > nvidia
    # precedence of the original if/elif chain.
    case "${COMPOSE_PROFILES:-}" in
        *code-software*)
            export SANDBOX_SERVICE="sandbox-software"
            export SANDBOX_CONTAINER="helix-sandbox-software-1"
            ;;
        *code-amd-intel*)
            export SANDBOX_SERVICE="sandbox-amd-intel"
            export SANDBOX_CONTAINER="helix-sandbox-amd-intel-1"
            ;;
        *)
            # Default to NVIDIA (code-nvidia profile)
            export SANDBOX_SERVICE="sandbox-nvidia"
            export SANDBOX_CONTAINER="helix-sandbox-nvidia-1"
            ;;
    esac
}
# Helper function to detect GPU type and set appropriate sandbox profile
# Sets COMPOSE_PROFILES to include 'code-nvidia' (NVIDIA) or 'code-amd-intel' (AMD/Intel) if not already set
# Precedence: environment COMPOSE_PROFILES > COMPOSE_PROFILES in .env > auto-detection.
# Side effects: sources "$DIR/.env", force-removes conflicting sandbox containers,
# and exports COMPOSE_PROFILES / SANDBOX_SERVICE / SANDBOX_CONTAINER.
function setup_sandbox_profile() {
    # Check if COMPOSE_PROFILES was explicitly set to a non-empty value in environment
    # (empty string triggers auto-detection, non-empty respects user's choice)
    # Captured BEFORE sourcing .env so environment and .env origins can be told apart.
    local env_was_set=""
    if [[ -n "${COMPOSE_PROFILES:-}" ]]; then
        env_was_set="true"
    fi
    # Load existing .env if present
    if [[ -f "$DIR/.env" ]]; then
        source "$DIR/.env"
    fi
    # Stop conflicting sandbox containers before starting
    # All sandbox variants use the same static IP (172.19.0.50), so only one can run at a time
    # Silently remove any sandbox containers that might conflict with the one we're about to start
    local current_profile="${COMPOSE_PROFILES:-}"
    if [[ "$current_profile" == *"code-software"* ]]; then
        # Starting software sandbox - stop GPU sandboxes
        docker rm -f helix-sandbox-nvidia-1 helix-sandbox-amd-intel-1 2>/dev/null || true
    elif [[ "$current_profile" == *"code-amd-intel"* ]]; then
        # Starting AMD/Intel sandbox - stop other sandboxes
        docker rm -f helix-sandbox-nvidia-1 helix-sandbox-software-1 2>/dev/null || true
    else
        # Starting NVIDIA sandbox (default) - stop other sandboxes
        docker rm -f helix-sandbox-software-1 helix-sandbox-amd-intel-1 2>/dev/null || true
    fi
    # If COMPOSE_PROFILES was set in environment before .env (even to empty), respect it
    if [[ "$env_was_set" == "true" ]]; then
        echo "🎮 Using COMPOSE_PROFILES from environment: '${COMPOSE_PROFILES:-<empty>}'"
        get_sandbox_names
        return
    fi
    # If COMPOSE_PROFILES is explicitly set in .env (even to empty), respect it
    if [[ -f "$DIR/.env" ]] && grep -q "^COMPOSE_PROFILES=" "$DIR/.env"; then
        echo "🎮 Using COMPOSE_PROFILES from .env: '${COMPOSE_PROFILES:-<empty>}'"
        get_sandbox_names
        return
    fi
    # Auto-detect GPU type
    local gpu_profile=""
    # Check for NVIDIA GPU first
    if command -v nvidia-smi &> /dev/null && nvidia-smi &> /dev/null; then
        gpu_profile="code-nvidia"
        echo "🎮 NVIDIA GPU detected, using 'code-nvidia' sandbox profile"
    # Check for AMD GPU (ROCm)
    elif [[ -e "/dev/kfd" ]] && [[ -d "/dev/dri" ]] && command -v lspci &> /dev/null && lspci | grep -iE "(VGA|3D|Display).*AMD" &> /dev/null; then
        gpu_profile="code-amd-intel"
        echo "🎮 AMD GPU detected (ROCm), using 'code-amd-intel' sandbox profile"
    # Check for Intel GPU (or generic /dev/dri)
    elif [[ -d "/dev/dri" ]] && [[ -n "$(ls -A /dev/dri 2>/dev/null)" ]]; then
        gpu_profile="code-amd-intel" # Intel uses same profile as AMD (no nvidia runtime)
        echo "🎮 Intel/Generic GPU detected, using 'code-amd-intel' sandbox profile"
    else
        echo "⚠️ No GPU detected, sandbox features may not work"
        get_sandbox_names
        return
    fi
    # Add to COMPOSE_PROFILES if we detected a GPU
    if [[ -n "$gpu_profile" ]]; then
        if [[ -n "${COMPOSE_PROFILES:-}" ]]; then
            export COMPOSE_PROFILES="${COMPOSE_PROFILES},${gpu_profile}"
        else
            export COMPOSE_PROFILES="$gpu_profile"
        fi
        echo "📋 COMPOSE_PROFILES set to: $COMPOSE_PROFILES"
    fi
    # Set sandbox service/container names based on profile
    get_sandbox_names
}
# Build the runner binary and launch it in mock mode against a local API.
# Extra CLI args are forwarded to helix-runner unchanged.
function mock-runner() {
    echo "🔨 Building helix-runner binary for mock runner..."
    build-runner || return 1
    echo "🚀 Starting mock runner..."
    # Collect the fixed flags in an array; caller arguments are appended last.
    local runner_flags=(
        --mock-runner
        --server-port 8090
        --api-host http://localhost:8080
        --api-token oh-hallo-insecure-token
        --memory 24GB
        --runner-id mock
        --label gpu=4090
    )
    ./helix-runner "${runner_flags[@]}" "$@"
}
# Build docker images. WITH_RUNNER selects the GPU-aware runner profile
# (ensuring the Zed source/binary exist first); WITH_DEMOS builds the demos
# profile; with neither set, every base service is built.
function build() {
    # GPU detection first so $RUNNER_CONTAINER is populated for profile builds.
    setup_runner_profile
    local compose=(docker compose -f docker-compose.dev.yaml)

    if [[ -n "$WITH_RUNNER" ]]; then
        # The runner needs the Zed checkout next to helix, and a built binary.
        if [[ ! -d "$PROJECTS_ROOT/zed" ]]; then
            echo "❌ ERROR: Zed source code not found at $PROJECTS_ROOT/zed/"
            echo ""
            echo "The Zed runner requires the Zed source code to be checked out alongside Helix."
            echo ""
            echo "Please run:"
            echo " cd .."
            echo " git clone https://github.com/helixml/zed.git"
            echo " cd helix"
            echo " WITH_RUNNER=1 ./stack build"
            exit 1
        fi
        if [[ ! -f "./zed-build/zed" ]]; then
            echo "🔨 Zed binary not found, building automatically..."
            build-zed || {
                echo "❌ Failed to build Zed. Please check the error messages above."
                echo "Note: Rust/Cargo is required to build Zed. Install from: https://rustup.rs/"
                exit 1
            }
        fi
        echo "🔨 Building runner: $RUNNER_CONTAINER"
        "${compose[@]}" --profile "$RUNNER_CONTAINER" build
        return
    fi

    if [[ -n "$WITH_DEMOS" ]]; then
        echo "🔨 Building demos"
        "${compose[@]}" --profile demos build
        return
    fi

    # No profiles specified, just build everything
    echo "🔨 Building all services"
    "${compose[@]}" build
}
# Compile a fully static 'helix' binary (no libc dependency).
# NOTE(review): 'export CGO_ENABLED=0' persists in this shell after the
# function returns; other build functions re-export their own value.
function static-compile() {
    export CGO_ENABLED=0
    go build -ldflags '-extldflags "-static"' -o helix .
}
# Build the helix-runner binary (CGO enabled), stamping the version into
# github.com/helixml/helix/api/pkg/data.Version via -ldflags.
# APP_VERSION may be preset in the environment; defaults to "v0.0.0+dev".
# Returns 0 on success, 1 on build failure.
function build-runner() {
    echo "🔨 Building helix-runner binary..."
    export CGO_ENABLED=1
    local APP_VERSION=${APP_VERSION:-"v0.0.0+dev"}
    # Fix: the whole -ldflags value is now one double-quoted string, so
    # $APP_VERSION can no longer word-split out of the flag (it was previously
    # expanded unquoted outside the single-quoted prefix).
    # NOTE(review): 'go build -tags' takes a comma-separated list of tags to
    # *enable*; '!rocm' is not negation syntax here — confirm intent.
    if go build -buildvcs=false -tags '!rocm' -ldflags "-s -w -X github.com/helixml/helix/api/pkg/data.Version=${APP_VERSION}" -o helix-runner ./runner-cmd/helix-runner; then
        echo "✅ Successfully built helix-runner binary"
    else
        echo "❌ Failed to build helix-runner binary"
        return 1
    fi
}
# Build the runner Docker image.
# $1 - image tag (default: "test")
# $2 - base image tag (default: "latest-empty")
function build-runner-image() {
    echo "🐳 Building runner Docker image..."
    local IMAGE_TAG="${1:-test}"
    # Declaration split from assignment so the git exit status is not masked
    # by 'local' (ShellCheck SC2155); falls back to a dev version outside git.
    local APP_VERSION
    APP_VERSION=$(git rev-parse HEAD 2>/dev/null || echo "v0.0.0+dev")
    # Base image tag - default to latest-empty for fast test builds (skips ~16GB download)
    # Use 'latest-small' for production builds that need full base image
    local BASE_TAG="${2:-latest-empty}"
    echo " Output: registry.helixml.tech/helix/runner:$IMAGE_TAG"
    echo " Base: registry.helixml.tech/helix/runner-base:$BASE_TAG"
    echo " Version: $APP_VERSION"
    echo " Note: ROCm vLLM build takes ~10 minutes"
    echo ""
    docker build \
        -f Dockerfile.runner \
        --build-arg TAG="$BASE_TAG" \
        --build-arg APP_VERSION="$APP_VERSION" \
        -t "registry.helixml.tech/helix/runner:$IMAGE_TAG" \
        .
}
# Build the Zed editor (with the external_websocket_sync feature) inside an
# Ubuntu 22.04 container and install the binary into ./zed-build/zed.
# $1 - build type: "dev" (default, incremental + debug symbols) or "release".
# Returns 1 on validation or build failure; writes test-settings.json on success.
function build-zed() {
    # ====================================================================
    # Build Zed inside Ubuntu 22.04 container for glibc 2.35 compatibility
    # ====================================================================
    # This produces a binary that works on Ubuntu 22.04+ (including 25.04)
    # Building in Docker ensures consistent, portable binaries regardless
    # of the host OS version.
    # ====================================================================
    echo "🔨 Building Zed with External WebSocket Thread Sync (Docker build)..."
    local ZED_SOURCE_DIR="$PROJECTS_ROOT/zed"
    local ZED_OUTPUT_DIR="./zed-build"
    local BUILD_TYPE="${1:-dev}"
    # Validate build type
    if [[ "$BUILD_TYPE" != "dev" && "$BUILD_TYPE" != "release" ]]; then
        echo "❌ Error: BUILD_TYPE must be 'dev' or 'release'"
        echo "Usage: ./stack build-zed [dev|release]"
        echo ""
        echo "Build types:"
        echo " dev - Fast incremental builds with debug symbols (default)"
        echo " release - Optimized production builds (slower)"
        return 1
    fi
    # Check if Zed source directory exists
    if [ ! -d "$ZED_SOURCE_DIR" ]; then
        echo "❌ Zed source directory not found at: $ZED_SOURCE_DIR"
        echo "Expected directory structure:"
        echo " helix/ (current directory)"
        echo " zed/ (Zed fork with external_websocket_sync)"
        return 1
    fi
    # Check if external_websocket_sync exists in Zed source
    if [ ! -d "$ZED_SOURCE_DIR/crates/external_websocket_sync" ]; then
        echo "❌ external_websocket_sync crate not found in Zed source"
        echo "Make sure you're using the Zed fork with External WebSocket Thread Sync"
        return 1
    fi
    # Create output directory and ensure we own it
    # (Docker bind mounts may have created it as root on fresh installs)
    mkdir -p "$ZED_OUTPUT_DIR"
    if [ ! -w "$ZED_OUTPUT_DIR" ]; then
        echo "⚠️ $ZED_OUTPUT_DIR exists but is not writable"
        echo " Fixing permissions..."
        sudo chown -R "$USER:$USER" "$ZED_OUTPUT_DIR"
    fi
    echo "🐳 Building Zed inside Ubuntu 22.04 container..."
    echo " Source: $ZED_SOURCE_DIR"
    echo " Output: $ZED_OUTPUT_DIR"
    echo " Mode: $BUILD_TYPE"
    # Build the builder image if needed
    if ! docker image inspect zed-builder:ubuntu22 &> /dev/null; then
        echo "📦 Building zed-builder:ubuntu22 image (first time only, ~2-3 min)..."
        docker build -t zed-builder:ubuntu22 -f Dockerfile.zed-build .
        # NOTE(review): under 'set -e' (line 2 of this script) a failed
        # 'docker build' exits the script before this check runs.
        if [ $? -ne 0 ]; then
            echo "❌ Failed to build zed-builder:ubuntu22 image"
            return 1
        fi
    fi
    # Setup cargo cache for faster rebuilds
    # Note: We mount registry and git separately to avoid shadowing /root/.cargo/bin
    local CARGO_CACHE="$HOME/.cargo-docker-cache"
    mkdir -p "$CARGO_CACHE/registry" "$CARGO_CACHE/git"
    # Build command based on type
    # Use a separate target directory to avoid conflicts with host-compiled build scripts
    # (host build scripts link against glibc 2.39+ which doesn't exist in Ubuntu 22.04)
    local BUILD_CMD
    local BINARY_PATH
    local TARGET_DIR="target-ubuntu22"
    if [ "$BUILD_TYPE" = "release" ]; then
        echo "🔨 Building in release mode (optimized, stripped, slower build)..."
        BUILD_CMD="CARGO_TARGET_DIR=$TARGET_DIR RUSTFLAGS='-C link-arg=-s' cargo build --release --features external_websocket_sync"
        BINARY_PATH="$TARGET_DIR/release/zed"
    else
        echo "🔨 Building in dev mode (fast incremental builds with debug symbols)..."
        BUILD_CMD="CARGO_TARGET_DIR=$TARGET_DIR cargo build --features external_websocket_sync"
        BINARY_PATH="$TARGET_DIR/debug/zed"
    fi
    # Get absolute path to Zed source
    local ZED_ABS_PATH
    ZED_ABS_PATH=$(cd "$ZED_SOURCE_DIR" && pwd)
    # Get absolute path to output directory
    local OUTPUT_ABS_PATH
    OUTPUT_ABS_PATH=$(cd "$ZED_OUTPUT_DIR" && pwd)
    echo "🚀 Starting Docker build (this may take a while on first run)..."
    # Run build inside container
    # Mount:
    # - Zed source at /zed
    # - Cargo registry cache at /root/.cargo/registry (crates index and sources)
    # - Cargo git cache at /root/.cargo/git (git dependencies)
    # - Output directory at /output
    # Note: We don't mount the entire /root/.cargo to avoid shadowing the cargo binary
    docker run --rm \
        -v "$ZED_ABS_PATH:/zed" \
        -v "$CARGO_CACHE/registry:/root/.cargo/registry" \
        -v "$CARGO_CACHE/git:/root/.cargo/git" \
        -v "$OUTPUT_ABS_PATH:/output" \
        -w /zed \
        zed-builder:ubuntu22 \
        bash -c "$BUILD_CMD && cp $BINARY_PATH /output/zed.new && chmod +x /output/zed.new"
    # NOTE(review): under 'set -e' a failed 'docker run' exits the script
    # before this check runs.
    if [ $? -ne 0 ]; then
        echo "❌ Docker build failed"
        return 1
    fi
    # Atomic rename (works even if old binary is in use)
    if [ -f "$ZED_OUTPUT_DIR/zed" ]; then
        mv "$ZED_OUTPUT_DIR/zed" "$ZED_OUTPUT_DIR/zed.old" 2>/dev/null || true
    fi
    mv "$ZED_OUTPUT_DIR/zed.new" "$ZED_OUTPUT_DIR/zed"
    rm -f "$ZED_OUTPUT_DIR/zed.old" 2>/dev/null || true
    # Verify the binary
    local BINARY_SIZE=$(du -h "$ZED_OUTPUT_DIR/zed" | cut -f1)
    echo "✅ Zed binary built successfully"
    echo "📦 Binary size: $BINARY_SIZE"
    echo "🎯 Compatible with: Ubuntu 22.04+ (glibc 2.35+)"
    # Verify external WebSocket sync is included
    if strings "$ZED_OUTPUT_DIR/zed" | grep -q "external_websocket_sync"; then
        echo "✅ External WebSocket Thread Sync detected in binary"
    else
        echo "⚠️ External WebSocket Thread Sync not clearly detectable (this might be normal)"
    fi
    # Note: Release builds are already stripped via RUSTFLAGS during compilation
    if [ "$BUILD_TYPE" = "release" ]; then
        echo "✅ Binary built with symbols stripped (via linker flags)"
    fi
    # Create test configuration
    # (heredoc body deliberately unindented: it is written verbatim to the file)
cat > "$ZED_OUTPUT_DIR/test-settings.json" << EOF
{
"external_websocket_sync": {
"enabled": true,
"server": {
"enabled": true,
"host": "127.0.0.1",
"port": 3030
},
"websocket_sync": {
"enabled": true,
"external_url": "localhost:8080",
"use_tls": false,
"auto_reconnect": true
}
}
}
EOF
    echo "✅ Created test configuration: $ZED_OUTPUT_DIR/test-settings.json"
    echo "🎉 Zed build completed successfully!"
    echo ""
    echo "Next steps:"
    echo " 1. Test the binary: cd $ZED_OUTPUT_DIR && ./zed --version"
    echo " 2. Build Sway container with Zed: ./stack build-sway"
    echo " 3. Start services: ./stack start"
}
# Start the full dev stack: ensure Zed/Wolf/Moonlight artifacts exist,
# configure host networking, bring up docker compose with the detected
# profiles, wait for postgres, then attach a tmux session with a 3x2 grid of
# log panes plus a full-width "hacking" terminal. Attaches (and blocks) on
# an existing session if one is already running.
function start() {
    if tmux has-session -t "$TMUX_SESSION" 2>/dev/null; then
        echo "📺 Session $TMUX_SESSION already exists. Attaching..."
        sleep 1
        tmux -2 attach -t $TMUX_SESSION
        exit 0;
    fi
    # Check for Zed dependency and build if needed
    if [ ! -d "$PROJECTS_ROOT/zed" ]; then
        echo "❌ ERROR: Zed source code not found at $PROJECTS_ROOT/zed/"
        echo ""
        echo "The Zed runner requires the Zed source code to be checked out alongside Helix."
        echo ""
        echo "Please run:"
        echo " cd $PROJECTS_ROOT"
        echo " git clone https://github.com/helixml/zed.git"
        echo " cd helix"
        echo " ./stack start"
        exit 1
    fi
    if [ ! -f "./zed-build/zed" ]; then
        echo "🔨 Zed binary not found, building automatically..."
        build-zed || {
            echo "❌ Failed to build Zed. Please check the error messages above."
            exit 1
        }
    fi
    # Check if Wolf container exists, build if needed
    # (build-wolf is defined elsewhere in this script)
    if ! docker image inspect wolf:helix-fixed &> /dev/null; then
        echo "🐺 Wolf container not found, building automatically..."
        build-wolf || {
            echo "❌ Failed to build Wolf. Please check the error messages above."
            exit 1
        }
    fi
    # Check if Moonlight Web container exists, build if needed
    if ! docker image inspect helix-moonlight-web:helix-fixed &> /dev/null; then
        echo "🌙 Moonlight Web container not found, building automatically..."
        build-moonlight-web || {
            echo "❌ Failed to build Moonlight Web. Please check the error messages above."
            exit 1
        }
    fi
    export MANUALRUN=1
    export LOG_LEVEL=debug
    # Configure host networking for Docker-in-Docker
    setup_dev_networking
    echo "🐳 Starting docker compose"
    # Setup runner profiles first
    setup_runner_profile
    # Setup sandbox profile based on GPU detection (also sets $SANDBOX_SERVICE)
    setup_sandbox_profile
    # Clean Wolf and Moonlight Web pairing state for fresh startup
    echo "🧹 Cleaning Wolf and Moonlight Web pairing state..."
    # Stop sandbox if running (Wolf + Moonlight Web are unified in sandbox now)
    docker compose -f docker-compose.dev.yaml stop "$SANDBOX_SERVICE" 2>/dev/null || true
    # Remove state files to force fresh pairing on startup
    # These are bind-mounted into the sandbox container
    rm -f "$DIR/wolf/config.toml" "$DIR/moonlight-web-config/data.json" 2>/dev/null || true
    echo "✅ Pairing state cleaned (will auto-pair on startup)"
    # Start services based on enabled profiles
    if [[ -n "$WITH_RUNNER" ]]; then
        if [[ -n "$WITH_DEMOS" ]]; then
            # Both runner and demos
            echo "🚀 Starting services with runner ($RUNNER_CONTAINER) and demos profiles"
            docker compose -f docker-compose.dev.yaml --profile "$RUNNER_CONTAINER" --profile demos up -d
        else
            # Just runner
            echo "🚀 Starting services with runner ($RUNNER_CONTAINER) profile"
            docker compose -f docker-compose.dev.yaml --profile "$RUNNER_CONTAINER" up -d
        fi
    elif [[ -n "$WITH_DEMOS" ]]; then
        # Just demos
        echo "🚀 Starting services with demos profile"
        docker compose -f docker-compose.dev.yaml --profile demos up -d
    else
        # No special profiles
        echo "🚀 Starting base services"
        docker compose -f docker-compose.dev.yaml up -d
    fi
    sleep 2
    # Wait for postgres to be ready before trying to wipe slots
    echo "⏳ Waiting for postgres to be ready..."
    timeout=60
    while ! docker compose -f docker-compose.dev.yaml exec postgres pg_isready -h localhost -p 5432 >/dev/null 2>&1; do
        timeout=$((timeout - 1))
        if [[ $timeout -eq 0 ]]; then
            echo "⚠️ Warning: Postgres not ready after 60 seconds, continuing anyway"
            break
        fi
        echo "⏳ Waiting for postgres... ($timeout seconds remaining)"
        sleep 1
    done
    # Check if WIPE_SLOTS is set and wipe slots if requested
    # NOTE(review): WIPE_SLOTS defaults to "0" at the top of the script, which
    # is non-empty, so this branch runs unless the user sets WIPE_SLOTS="".
    # (wipe-slots is defined elsewhere in this script.)
    if [[ -n "$WIPE_SLOTS" ]]; then
        echo "🧹 WIPE_SLOTS is set, wiping slots from database..."
        if ! wipe-slots; then
            echo "⚠️ Warning: Failed to wipe slots, but continuing startup..."
        fi
    fi
    echo "📺 Creating tmux session $TMUX_SESSION with 3x2 grid layout + full-width hacking terminal..."
    tmux -2 new-session -d -s "$TMUX_SESSION"
    # Create a 3x2 grid layout with full-width hacking terminal at bottom
    # First create top and middle rows for logs
    tmux split-window -v -d
    tmux split-window -v -d
    # Split the top row into 3 columns (Frontend, API, Haystack)
    tmux select-pane -t 0
    tmux split-window -h -d
    tmux select-pane -t 1
    tmux split-window -h -d
    # Split the middle row into 3 columns (Zed Agent, Zed Process, GPU Runner)
    tmux select-pane -t 3
    tmux split-window -h -d
    tmux select-pane -t 4
    tmux split-window -h -d
    # Bottom pane (6) stays full-width for hacking terminal
    # Set pane titles and start processes in 3x2 + full-width layout
    # Top row (0-2): Frontend, API, Haystack
    tmux select-pane -t 0 -T "Frontend Logs"
    tmux send-keys -t 0 'docker compose -f docker-compose.dev.yaml logs -f frontend' C-m
    tmux select-pane -t 1 -T "API Logs"
    tmux send-keys -t 1 'docker compose -f docker-compose.dev.yaml logs -f api' C-m
    tmux select-pane -t 2 -T "Haystack Logs"
    tmux send-keys -t 2 'docker compose -f docker-compose.dev.yaml logs -f haystack' C-m
    # Middle row (3-5): Context-aware based on WITH_RUNNER
    if [[ -n "$WITH_RUNNER" ]]; then
        # WITH_RUNNER mode: Sandbox logs in pane 3
        tmux select-pane -t 3 -T "Sandbox Logs (Wolf + Moonlight)"
        tmux send-keys -t 3 "docker compose -f docker-compose.dev.yaml logs -f $SANDBOX_SERVICE" C-m
        tmux select-pane -t 4 -T "🔨 HACKING TERMINAL"
        tmux send-keys -t 4 'echo "🔨 Hacking terminal ready!" && echo "💡 Tip: Use this for development, debugging, and building"' C-m
        # GPU runner logs with air hot reloading
        tmux select-pane -t 5 -T "GPU Runner ($RUNNER_CONTAINER)"
        tmux send-keys -t 5 'echo "Monitoring GPU Runner logs (with air hot reloading)..." && sleep 3 && docker compose -f docker-compose.dev.yaml --profile '"$RUNNER_CONTAINER"' logs -f '"$RUNNER_CONTAINER" C-m
    else
        # WITHOUT_RUNNER mode: Unified sandbox logs (Wolf + Moonlight Web in one container)
        tmux select-pane -t 3 -T "Sandbox Logs (Wolf + Moonlight)"
        tmux send-keys -t 3 "docker compose -f docker-compose.dev.yaml logs -f $SANDBOX_SERVICE" C-m
        tmux select-pane -t 4 -T "🔨 HACKING TERMINAL"
        tmux send-keys -t 4 'echo "🔨 Hacking terminal ready!" && echo "💡 Tip: Use this for development, debugging, and building"' C-m
        # Middle right pane (5) - contextual based on demos
        if [[ -n "$WITH_DEMOS" ]]; then
            # Demos interactive session
            tmux select-pane -t 5 -T "Demos"
            tmux send-keys -t 5 'docker compose -f docker-compose.dev.yaml --profile demos exec demos bash' C-m
        else
            # Hacking terminal
            tmux select-pane -t 5 -T "🔨 HACKING TERMINAL"
            tmux send-keys -t 5 'echo "🔨 Hacking terminal ready!" && echo "💡 Tip: Use this for development, debugging, and building"' C-m
        fi
    fi
    # Bottom full-width pane (6) - HACKING TERMINAL! 🔨
    tmux select-pane -t 6 -T "🔨 HACKING TERMINAL"
    tmux send-keys -t 6 'echo "🔨 Full-width hacking terminal ready!" && echo "💡 Tip: Use this for development, debugging, and building"' C-m
    if [[ -n "$WITH_DEMOS" && -n "$WITH_RUNNER" ]]; then
        echo "Note: Both GPU runner and demos enabled - demos available in background. Run manually with: docker compose -f docker-compose.dev.yaml --profile demos exec demos bash"
    fi
    # Enable pane titles display
    tmux set-option -g pane-border-status top
    tmux set-option -g pane-border-format "#{pane_index}: #{pane_title}"
    # Make all panes equal size
    tmux select-layout even-horizontal
    tmux select-layout tiled
    tmux -2 attach-session -t $TMUX_SESSION
}
# Stop the dev stack: remove ephemeral Wolf/Moonlight pairing state, bring
# compose services down (keeping keycloak/postgres/pgvector unless the
# corresponding STOP_* env var is set), then kill the tmux session.
function stop() {
    echo "🛑 Stopping docker containers and tmux session..."
    # Clean up Wolf config and certificates to ensure fresh start next time
    if [ -f "wolf/config.toml" ]; then
        echo "🗑️ Removing Wolf config.toml (will be regenerated from template on next start)"
        rm -f wolf/config.toml
    fi
    if [ -f "wolf/cert.pem" ] || [ -f "wolf/key.pem" ]; then
        echo "🗑️ Removing Wolf SSL certificates (will be regenerated on next start)"
        rm -f wolf/cert.pem wolf/key.pem
    fi
    # Clean up Moonlight Web pairing data to ensure fresh pairing with Wolf's new certs
    if [ -f "moonlight-web-config/data.json" ]; then
        echo "🗑️ Removing Moonlight Web pairing data (will re-pair with Wolf on next start)"
        rm -f moonlight-web-config/data.json
    fi
    # Build exclude pattern for services that should not be stopped
    # (the STOP_* env vars opt *in* to stopping these stateful services)
    local exclude_services=()
    [[ -z "$STOP_KEYCLOAK" ]] && exclude_services+=("keycloak")
    [[ -z "$STOP_POSTGRES" ]] && exclude_services+=("postgres")
    [[ -z "$STOP_PGVECTOR" ]] && exclude_services+=("pgvector")
    # Setup runner profiles first so $RUNNER_CONTAINER matches what start() used
    setup_runner_profile
    if [[ ${#exclude_services[@]} -eq 0 ]]; then
        echo "🗑️ Removing all docker containers"
        # Stop containers based on enabled profiles
        if [[ -n "$WITH_RUNNER" ]]; then
            if [[ -n "$WITH_DEMOS" ]]; then
                # Both runner and demos
                echo "🔄 Stopping services with runner ($RUNNER_CONTAINER) and demos profiles"
                docker compose -f docker-compose.dev.yaml --profile "$RUNNER_CONTAINER" --profile demos down -t 1 || echo "⚠️ Some services may not exist"
            else
                # Just runner
                echo "🔄 Stopping services with runner ($RUNNER_CONTAINER) profile"
                docker compose -f docker-compose.dev.yaml --profile "$RUNNER_CONTAINER" down -t 1 || echo "⚠️ Some services may not exist"
            fi
        elif [[ -n "$WITH_DEMOS" ]]; then
            # Just demos
            echo "🔄 Stopping services with demos profile"
            docker compose -f docker-compose.dev.yaml --profile demos down -t 1 || echo "⚠️ Some services may not exist"
        else
            # Include all profiles when no environment variables are set.
            # Fix: runner_gpu_amd is included too — setup_runner_profile can
            # select it on AMD hosts, and it was previously left running here.
            echo "🔄 Stopping all services (all profiles)"
            docker compose -f docker-compose.dev.yaml --profile runner --profile runner_gpu --profile runner_gpu_amd --profile demos down -t 1 || echo "⚠️ Some services may not exist"
        fi
    else
        # Create exclude list for display and grep pattern.
        # Declarations split from assignments so subshell failures are not
        # masked by 'local' (ShellCheck SC2155).
        local exclude_list exclude_pattern
        exclude_list=$(IFS=', '; echo "${exclude_services[*]}")
        exclude_pattern=$(IFS='|'; echo "${exclude_services[*]}")
        echo "🗑️ Removing docker containers (except: $exclude_list)"
        # Get list of services to stop (excluding the ones we want to keep)
        local services=""
        if [[ -n "$WITH_RUNNER" ]]; then
            if [[ -n "$WITH_DEMOS" ]]; then
                echo "🔄 Stopping services with runner ($RUNNER_CONTAINER) and demos profiles (except: $exclude_list)"
                services=$(docker compose -f docker-compose.dev.yaml --profile "$RUNNER_CONTAINER" --profile demos config --services 2>/dev/null | grep -v -E "$exclude_pattern" || true)
            else
                echo "🔄 Stopping services with runner ($RUNNER_CONTAINER) profile (except: $exclude_list)"
                services=$(docker compose -f docker-compose.dev.yaml --profile "$RUNNER_CONTAINER" config --services 2>/dev/null | grep -v -E "$exclude_pattern" || true)
            fi
        elif [[ -n "$WITH_DEMOS" ]]; then
            echo "🔄 Stopping services with demos profile (except: $exclude_list)"
            services=$(docker compose -f docker-compose.dev.yaml --profile demos config --services 2>/dev/null | grep -v -E "$exclude_pattern" || true)
        else
            echo "🔄 Stopping all services (all profiles, except: $exclude_list)"
            services=$(docker compose -f docker-compose.dev.yaml --profile runner --profile runner_gpu --profile runner_gpu_amd --profile demos config --services 2>/dev/null | grep -v -E "$exclude_pattern" || true)
        fi
        # Stop only the non-excluded services
        if [[ -n "$services" ]]; then
            # Join the newline-separated list with spaces via parameter
            # expansion instead of relying on unquoted word-splitting.
            echo "🗑️ Going to remove containers: ${services//$'\n'/ }"
            # Stop and remove containers using a while loop to avoid xargs command line length issues
            while IFS= read -r service; do
                if [[ -n "$service" ]]; then
                    echo "🗑️ Stopping and removing: $service"
                    docker compose -f docker-compose.dev.yaml stop "$service" 2>/dev/null && \
                    docker compose -f docker-compose.dev.yaml rm -f "$service" 2>/dev/null || \
                    echo "⚠️ Could not stop/remove $service"
                fi
            done <<< "$services"
        else
            echo "✨ No services to stop (all are excluded)"
        fi
    fi
    echo "📺 Stopping tmux session $TMUX_SESSION..."
    if tmux has-session -t $TMUX_SESSION 2>/dev/null; then
        tmux kill-session -t $TMUX_SESSION || echo "⚠️ Failed to kill tmux session, but continuing..."
    else
        echo "📺 Tmux session $TMUX_SESSION not found"
    fi
    echo "✨ Stop completed successfully!"
}
function up() {
  # Bring up the dev stack. The Wolf source tree must exist next to helix
  # (sandbox images are built from it), so clone it on first run.
  if [ ! -d "$PROJECTS_ROOT/wolf" ]; then
    echo "🐺 Wolf source code not found at $PROJECTS_ROOT/wolf/"
    echo "📥 Cloning Wolf repository..."
    # Check the clone result: previously a failed clone still printed success
    # and fell through to docker compose with a missing source tree.
    if ! git clone https://github.com/games-on-whales/wolf.git "$PROJECTS_ROOT/wolf"; then
      echo "❌ Failed to clone Wolf repository"
      return 1
    fi
    echo "✅ Wolf repository cloned successfully"
  fi
  # Setup sandbox profile based on GPU detection (if not already in .env)
  setup_sandbox_profile
  # Sandbox services are enabled via COMPOSE_PROFILES in .env or auto-detected above
  # Profile 'code-nvidia' = NVIDIA GPU, 'code-amd-intel' = AMD/Intel GPU
  # Quote "$@" so extra compose args with spaces are forwarded intact.
  docker compose -f docker-compose.dev.yaml up -d "$@"
}
function build-zed-agent() {
  # Build the helix-sway Docker image that embeds the locally built Zed
  # binary. Requires ./zed-build/zed (produced by './stack build-zed').
  # Returns: 0 on success, 1 if the binary is missing or the build fails.
  echo "🔨 Building Zed agent Docker image..."
  if [ ! -f "./zed-build/zed" ]; then
    echo "❌ Zed binary not found. Run './stack build-zed' first."
    return 1
  fi
  # Test the build command directly instead of inspecting $? afterwards,
  # which is fragile if a statement is ever inserted in between.
  if docker build -t helix-sway:latest -f Dockerfile.sway-helix .; then
    echo "✅ Zed agent Docker image built successfully"
  else
    echo "❌ Failed to build Zed agent Docker image"
    return 1
  fi
}
function zed-agent-up() {
  # Bring up the Zed agent compose stack, building the Zed binary and the
  # agent image on demand when either is missing, then print the service
  # endpoints and quick smoke-test commands.
  local compose_file="docker-compose.zed-agent.yaml"
  echo "Starting Zed agent services..."
  # No local Zed binary yet -> build it before anything else.
  if [ ! -f "./zed-build/zed" ]; then
    echo "Zed binary not found, building first..."
    build-zed || return 1
  fi
  # No agent image yet -> build it from the binary.
  docker image inspect helix/zed-agent:latest &> /dev/null || {
    echo "Zed agent image not found, building first..."
    build-zed-agent || return 1
  }
  docker compose -f "$compose_file" up -d
  echo "✅ Zed agent services started"
  echo "📋 Services running:"
  echo " - Helix API: http://localhost:8080"
  echo " - Zed HTTP API: http://localhost:3030"
  echo " - VNC Web Client: http://localhost:6080"
  echo ""
  echo "🧪 Test commands:"
  echo " curl http://localhost:8080/health # Helix API"
  echo " curl http://localhost:3030/health # Zed integration API"
}
function zed-agent-down() {
  # Tear down the Zed agent compose stack.
  local compose_file="docker-compose.zed-agent.yaml"
  echo "Stopping Zed agent services..."
  docker compose -f "$compose_file" down
}
function zed-agent-logs() {
  # Follow logs for one Zed agent service.
  # Arguments: $1 - service name (default: zed-agent-runner)
  local service="${1:-zed-agent-runner}"
  docker compose -f docker-compose.zed-agent.yaml logs -f "$service"
}
function rebuild() {
  # Rebuild images and restart changed services; any extra arguments
  # (e.g. specific service names) are forwarded to docker compose.
  # Quote "$@" so each argument is passed as a single word.
  docker compose -f docker-compose.dev.yaml up -d --build "$@"
}
# Helper function to build image tags string (commit hash + git tag if available)
#######################################
# Build the docker "-t" tag arguments for an image: always the short commit
# hash, plus the git tag when HEAD sits exactly on one.
# Arguments: $1 - image base name (e.g. registry.example.com/repo)
# Outputs:   "-t BASE:HASH[ -t BASE:TAG]" on stdout, no trailing newline;
#            the tag-detection notice goes to stderr so callers can capture
#            stdout cleanly.
#######################################
function get_image_tags() {
  local IMAGE_BASE=$1
  local COMMIT_HASH GIT_TAG
  # Split declaration from assignment so a git failure is not masked by
  # 'local' (which would otherwise always report status 0).
  # Note: the previous IFS save/restore was dead code — every expansion here
  # is quoted and IFS was restored before returning, so it affected nothing.
  COMMIT_HASH=$(git rev-parse --short HEAD)
  GIT_TAG=$(git describe --exact-match --tags HEAD 2>/dev/null || echo "")
  local TAG_STRING="-t ${IMAGE_BASE}:${COMMIT_HASH}"
  if [ -n "$GIT_TAG" ]; then
    TAG_STRING="${TAG_STRING} -t ${IMAGE_BASE}:${GIT_TAG}"
    echo "🏷️ Git tag detected: ${GIT_TAG}" >&2
  fi
  printf "%s" "${TAG_STRING}" # printf avoids a trailing newline
}
# Helper function to push all image tags
#######################################
# Push the commit-hash tag for an image, plus the git tag when one applies.
# Respects an already-exported GIT_TAG (set by build-and-push-helix-code),
# otherwise detects an exact tag on HEAD.
# Arguments: $1 - image base name
# Returns:   0 when every push succeeds, 1 on the first failure.
#######################################
function push_image_tags() {
  local IMAGE_BASE=$1
  local COMMIT_HASH=$(git rev-parse --short HEAD)
  # Use exported GIT_TAG if available (from build-and-push-helix-code), otherwise detect from git
  local GIT_TAG="${GIT_TAG:-$(git describe --exact-match --tags HEAD 2>/dev/null || echo "")}"
  local tag
  # ${GIT_TAG:+"$GIT_TAG"} appends the git tag to the push list only when
  # it is non-empty, so the commit hash is always pushed first.
  for tag in "$COMMIT_HASH" ${GIT_TAG:+"$GIT_TAG"}; do
    echo "📤 Pushing ${IMAGE_BASE}:${tag}"
    if ! docker push "${IMAGE_BASE}:${tag}"; then
      echo "⚠️ Failed to push ${IMAGE_BASE}:${tag}"
      return 1
    fi
  done
  return 0
}
function build-wolf() {
  # Build the Wolf container image from the Wolf source checkout next to
  # helix. Tags: wolf:helix-fixed plus registry tags for the commit hash
  # and, when HEAD sits exactly on a git tag, the tag itself.
  # Exits the script when the source tree is missing (CLI-dispatch
  # behavior); returns 1 on build or cd failure.
  echo "🐺 Building Wolf container with latest source code..."
  # Check if Wolf source directory exists
  if [ ! -d "$PROJECTS_ROOT/wolf" ]; then
    echo "❌ ERROR: Wolf source code not found at $PROJECTS_ROOT/wolf/"
    echo ""
    echo "The Wolf integration requires the Wolf source code to be checked out alongside Helix."
    echo ""
    echo "Please run:"
    echo " cd $PROJECTS_ROOT"
    echo " git clone https://github.com/games-on-whales/wolf.git"
    echo " cd helix"
    echo " ./stack build-wolf"
    exit 1
  fi
  # Split declaration from assignment so git failures are not masked by 'local'.
  local COMMIT_HASH GIT_TAG
  COMMIT_HASH=$(git rev-parse --short HEAD)
  GIT_TAG=$(git describe --exact-match --tags HEAD 2>/dev/null || echo "")
  echo "🔨 Building Wolf container from source..."
  # Guard the cd: building from the wrong directory would silently use the
  # wrong build context.
  if ! cd "$PROJECTS_ROOT/wolf"; then
    echo "❌ Failed to enter $PROJECTS_ROOT/wolf"
    return 1
  fi
  local build_status
  if [ -n "$GIT_TAG" ]; then
    echo "🏷️ Git tag detected: ${GIT_TAG}"
    docker build -f docker/wolf.Dockerfile \
      -t wolf:helix-fixed \
      -t "registry.helixml.tech/helix/wolf:${COMMIT_HASH}" \
      -t "registry.helixml.tech/helix/wolf:${GIT_TAG}" \
      .
  else
    docker build -f docker/wolf.Dockerfile \
      -t wolf:helix-fixed \
      -t "registry.helixml.tech/helix/wolf:${COMMIT_HASH}" \
      .
  fi
  # Capture the build status immediately instead of relying on $? later.
  build_status=$?
  if [ $build_status -eq 0 ]; then
    echo "✅ Wolf container built successfully"
  else
    echo "❌ Failed to build Wolf container"
    cd - > /dev/null
    return 1
  fi
  cd - > /dev/null
  # Note: Wolf image is an intermediate build artifact, embedded in helix-sandbox
  # It's not pushed to registry separately - only the sandbox container is pushed
  # Wolf runs inside the sandbox container now, not as a standalone service
}
function build-qwen-code() {
# ====================================================================
# Build Qwen Code inside container for consistent, reproducible builds
# ====================================================================
# This avoids issues with host npm/node versions and husky prepare scripts.
# The build produces a pre-bundled qwen-code that can be installed globally
# in the sway container without triggering prepare hooks.
# ====================================================================
echo "📦 Building Qwen Code (containerized build)..."
local QWEN_SOURCE_DIR="$PROJECTS_ROOT/qwen-code"
local QWEN_OUTPUT_DIR="./qwen-code-build"
# Check if qwen-code source directory exists
if [ ! -d "$QWEN_SOURCE_DIR" ]; then
echo "❌ qwen-code source directory not found at: $QWEN_SOURCE_DIR"
echo "Clone it next to helix with: cd $PROJECTS_ROOT && git clone git@github.com:helixml/qwen-code.git"
return 1
fi
# Get current qwen-code git commit hash
local QWEN_CODE_HASH=$(cd "$QWEN_SOURCE_DIR" && git rev-parse HEAD)
local SAVED_HASH=""
if [ -f "$QWEN_OUTPUT_DIR/.git-commit-hash" ]; then
SAVED_HASH=$(cat "$QWEN_OUTPUT_DIR/.git-commit-hash")
fi
# Check if we need to rebuild
local NEEDS_REBUILD=false
if [ ! -d "$QWEN_OUTPUT_DIR" ] || [ ! -f "$QWEN_OUTPUT_DIR/package.json" ]; then
NEEDS_REBUILD=true
echo "📦 qwen-code-build directory missing or incomplete"
elif [ ! -d "$QWEN_OUTPUT_DIR/dist" ]; then
NEEDS_REBUILD=true
echo "📦 qwen-code dist directory missing (bundle not built)"
elif [ "$QWEN_CODE_HASH" != "$SAVED_HASH" ]; then
NEEDS_REBUILD=true
echo "📦 qwen-code changed: ${SAVED_HASH:0:8} -> ${QWEN_CODE_HASH:0:8}"
fi
if [ "$NEEDS_REBUILD" != "true" ]; then
echo "✅ Using existing qwen-code build at $QWEN_OUTPUT_DIR (${SAVED_HASH:0:8})"
return 0
fi
echo "🐳 Building qwen-code inside Node.js 20 container..."
echo " Source: $QWEN_SOURCE_DIR"
echo " Output: $QWEN_OUTPUT_DIR"
# Build the builder image if needed
if ! docker image inspect qwen-code-builder:node20 &> /dev/null; then
echo "📦 Building qwen-code-builder:node20 image (first time only)..."
docker build -t qwen-code-builder:node20 -f Dockerfile.qwen-code-build .
if [ $? -ne 0 ]; then