#!/bin/bash
# set a project name, no spaces or special characters allowed
export PROJECT_NAME=cdff
# path to your docker registry, leave blank if you don't have one
# e.g. my.registry.com, ghcr.io/dfki-ric, docker.pkg.github.com, harbor.mydomain.com
export DOCKER_REGISTRY=d-reg.hb.dfki.de
# When your registry has subfolders/groups, you can add them here
export DOCKER_REGISTRY_GROUP=developmentimage
# in case you are not using a single registry, you can push images to different ones
# e.g. store base images on hub.docker.com and others in a local registry
export BASE_REGISTRY=
export DEVEL_REGISTRY=$DOCKER_REGISTRY
export FROZEN_REGISTRY=$DOCKER_REGISTRY
export RELEASE_REGISTRY=$DOCKER_REGISTRY
# when your images should be stored in different groups of your registry, you can change it here
# (or completely customize by setting WORKSPACE_DEVEL_IMAGE, WORKSPACE_RELEASE_IMAGE, WORKSPACE_CD_IMAGE below directly)
export DEVEL_REGISTRY_GROUP=$DOCKER_REGISTRY_GROUP
export FROZEN_REGISTRY_GROUP=$DOCKER_REGISTRY_GROUP
export RELEASE_REGISTRY_GROUP=$DOCKER_REGISTRY_GROUP
export CD_REGISTRY_GROUP=$DOCKER_REGISTRY_GROUP
# should exec and build scripts auto-pull updated images from the registry?
export DOCKER_REGISTRY_AUTOPULL=false
### The default release mode to use if no mode parameter is given to ./exec.bash or ./stop.bash
### The checked-in version should reflect the image status and be the highest available image (base - devel - release)
# export DEFAULT_EXECMODE="base" # Use this only for setting up the initial devel image (modify setup_workspace.bash)
export DEFAULT_EXECMODE="devel" # This is used while deveoping code and preparing a relase
# export DEFAULT_EXECMODE="frozen" # use the release as default
# export DEFAULT_EXECMODE="release" # use the release as default
# export DEFAULT_EXECMODE="CD" # use the continuous deployment image as default
### The base image used when building a workspace image (one of the ones built in base_images)
# export WORKSPACE_BASE_IMAGE=developmentimage/rock_master_20.04:base # image with rock core dependencies installed
# export WORKSPACE_BASE_IMAGE=developmentimage/rock_master_22.04:base # image with rock core dependencies installed
export WORKSPACE_BASE_IMAGE=developmentimage/rock_master_24.04:base # image with rock core dependencies installed
# export WORKSPACE_BASE_IMAGE=developmentimage/ros_noetic_20.04:base # image with basic ros noetic installed
# export WORKSPACE_BASE_IMAGE=developmentimage/ros2_foxy_20.04:base # image with ros2 foxy desktop installed
# export WORKSPACE_BASE_IMAGE=developmentimage/ros2_humble_22.04:base # image with ros2 humble desktop installed
# export WORKSPACE_BASE_IMAGE=developmentimage/ros2_jazzy_24.04:base # image with ros2 jazzy desktop installed
# export WORKSPACE_BASE_IMAGE=developmentimage/plain_20.04:base # plain image with build-essential installed
# export WORKSPACE_BASE_IMAGE=developmentimage/plain_22.04:base # plain image with build-essential installed
# export WORKSPACE_BASE_IMAGE=developmentimage/plain_24.04:base # plain image with build-essential installed
# The name of the workspace image to use
# you should add a workspace name folder and an image name
# e.g. MY_PROJECT/docker_image_development:devel
# under normal circumstances you should not need to change the values here
export WORKSPACE_DEVEL_IMAGE=${DEVEL_REGISTRY_GROUP}/${PROJECT_NAME}:devel
export WORKSPACE_FROZEN_IMAGE=${FROZEN_REGISTRY_GROUP}/${PROJECT_NAME}:frozen
export WORKSPACE_RELEASE_IMAGE=${RELEASE_REGISTRY_GROUP}/${PROJECT_NAME}:release
export WORKSPACE_CD_IMAGE=${CD_REGISTRY_GROUP}/${PROJECT_NAME}:CD
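# Example: with the defaults above, WORKSPACE_DEVEL_IMAGE resolves to
#   developmentimage/cdff:devel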
# In case your docker container needs special run parameters
# like open ports, additional mounts etc.
# When you change this, you need to recreate the container;
# the easiest way to do this is to run the delete_container.bash script
# often used params:
# --dns-search=mydomain
# --net=host
# --privileged
export ADDITIONAL_DOCKER_RUN_ARGS="
--privileged \
--net=host \
-e NVIDIA_DRIVER_CAPABILITIES=all \
--dns-search=dfki.uni-bremen.de \
-v /dev:/dev
"
# List of system services to be started in the container
# e.g. SERVICES="neo4j redis-server"
# These should be installed in the devel image via Dockerfile or workspace_os_dependencies.txt
export SERVICES=""
# Make the exec script talk more for debugging/docker setup purposes.
# This may also be set on the command line: $> VERBOSE=true ./exec.bash
# export VERBOSE=true
# Make the output as quiet as possible (does not apply to programs started in the container)
# export SILENT=false
# make the local ssh-agent available in the container
# with this you can check out code into your devel container using ssh
# export MOUNT_SSH_AGENT=true
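# Sketch, assuming a standard ssh-agent setup on the host (the key path is an
# example): start the agent and add your key before calling ./exec.bash:
#   eval "$(ssh-agent -s)" && ssh-add ~/.ssh/id_ed25519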
# mount a ccache volume; if enabled, a volume name based on the base image name
# is generated and mounted to /ccache. This way multiple workspaces in
# docker_image_development can share a single ccache. CCACHE_DIR is automatically
# set in the env; just install and enable ccache for your builds
# export MOUNT_CCACHE_VOLUME=true
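# Sketch of enabling ccache for a CMake build inside the container (assumes
# ccache is installed in the image; CCACHE_DIR already points at the volume):
#   cmake -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ..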
# Icecc will only work in a single container at a time.
# For icecc to work, you need to make it available from the host, so either use
# "--net=host" or "-p 10245:10245 -p 8765:8765/udp" in the ADDITIONAL_DOCKER_RUN_ARGS.
# If neither is added there, "-p 10245:10245 -p 8765:8765/udp" will be added automatically with a warning.
# WARNING: using "-p 10245:10245 -p 8765:8765/udp" blocks the ports on the host, allowing only one container using them to exist at a time.
# Stop the container using the ports before launching the next one, or use --net=host for the containers and stop the iceccd service.
# You'll have to enable the use of icecc for your workspace manually; this only sets up the availability of icecc in the container
# export USE_ICECC=true
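# Sketch, assuming the Debian/Ubuntu icecc package layout inside the container:
# enable icecc for a build by putting its compiler wrappers first in PATH:
#   export PATH=/usr/lib/icecc/bin:$PATH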
# connect to the xserver via [mount, xpra, none, auto]
# auto will detect if the container is started over an ssh session; if yes, xpra is used, mount otherwise
export DOCKER_XSERVER_TYPE=auto
# XPRA_PORT may be set if --net=host is used; otherwise, please use -p in the
# ADDITIONAL_DOCKER_RUN_ARGS to assign a port for the xpra server. DOCKER_XSERVER_TYPE needs to be "xpra"
export XPRA_PORT="14500"
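# Example, assuming the xpra client is installed on your local machine: attach
# to the container's xpra server on the port configured above:
#   xpra attach tcp://localhost:14500/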
# always update the DISPLAY variable for new ./exec.bash commands
# (when ./exec.bash is called through ssh -X, each ssh connection will have its own DISPLAY)
# this also needs --net=host (auto-added) to reach the xserver via localhost
export USE_XSERVER_VIA_SSH=false
# If you need to start docker containers from your workspace (e.g. for launch tools), set this option to true.
# It will add "--privileged -v /var/run/docker.sock:/var/run/docker.sock" to the DOCKER_RUN_ARGS.
# The host's docker daemon can then be used from within the container. Setting this to true will also add the dockeruser
# to the docker group inside the container and set the docker group's id in the container to the same gid as the host docker group.
# The docker.io (apt) package has to be installed manually (e.g. through the 02_devel_image/Dockerfile).
# Also, add "RUN adduser dockeruser docker" to that file if you want to use it with the default user
export NEEDS_DOCKER_IN_CONTAINER=false
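# Sketch for 02_devel_image/Dockerfile (per the notes above; the user line is
# only needed for the default user):
#   RUN apt-get update && apt-get install -y docker.io
#   RUN adduser dockeruser docker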
# To allow profiling with perf tools on the host, mount the container root dir to a local folder (./container_root).
# Then run profiling tools on the host, but provide the root folder location to search for libs:
# sudo hotspot --sysroot ./container_root
# sudo perf record -o perf.data --call-graph dwarf --aio -z --pid $(pidof my_program) && perf report --symfs ./container_root
# needs bindfs installed on the host
export MOUNT_CONTAINER_ROOT=false
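# Optionally mount a local clone of the coyote log data into the container
# (only applied if the host path below exists)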
PATH_TO_COYOTE_LOGS_IN_HOST="/media/rdominguez/SSDRD1/Datasets_Logs/minio_clones/dfki_coyote_minio"
#PATH_TO_COYOTE_LOGS_IN_HOST=""
if [ -d "$PATH_TO_COYOTE_LOGS_IN_HOST" ]; then
    # echo "Adding mount of SLAM LUNA logs from host (not minio): $PATH_TO_COYOTE_LOGS_IN_HOST"
    export ADDITIONAL_DOCKER_RUN_ARGS="$ADDITIONAL_DOCKER_RUN_ARGS -v $PATH_TO_COYOTE_LOGS_IN_HOST:/dfki_coyote_minio"
fi