Skip to content

Commit 463483b

Browse files
committed
chore: apply shellcheck
1 parent 9b668f5 commit 463483b

File tree

9 files changed

+70
-63
lines changed

9 files changed

+70
-63
lines changed

docker/cluster-cleaner/scripts/clean-failed-namespaces.sh

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,15 +4,15 @@ touch error.log
44
tail -F error.log &
55

66
delete_resources_safely() {
7-
resource_type="$1"
8-
namespace="$2"
7+
resource_type="${1}"
8+
namespace="${2}"
99

10-
echo "Attempting normal deletion of $resource_type in $namespace..."
10+
echo "Attempting normal deletion of ${resource_type} in ${namespace}..."
1111
kubectl delete "${resource_type}" --all -n "${namespace}" --wait=true --timeout=10s 2>error.log|| true
1212

1313
# Check if any resources are still stuck
1414
# Let's not fail here and continue deletion
15-
resources=$(kubectl get "$resource_type" -n "${namespace}" --no-headers -o custom-columns=":metadata.name" 2>error.log || true)
15+
resources=$(kubectl get "${resource_type}" -n "${namespace}" --no-headers -o custom-columns=":metadata.name" 2>error.log || true)
1616

1717
for resource in ${resources}; do
1818
echo "${resource_type}/${resource} is still present, force deleting..."
@@ -39,7 +39,7 @@ kubectl get namespace -l "${LABELS}" -o name
3939
for namespace in $(kubectl get namespace -l "${LABELS}" -o name 2>error.log); do
4040
creation_time=$(kubectl get "${namespace}" -o jsonpath='{.metadata.creationTimestamp}' 2>error.log || echo "")
4141

42-
if [ -z "$creation_time" ]; then
42+
if [ -z "${creation_time}" ]; then
4343
echo "Namespace ${namespace} does not exist or has no creation timestamp, skipping."
4444
continue
4545
fi

docker/delve-sidecar/attach-delve.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,6 @@ while ! pgrep -f "mongodb-mms-automation-agent" > /dev/null; do
88
done
99

1010
APP_PID=$(pgrep -f "mongodb-mms-automation-agent")
11-
echo "Found app with PID: $APP_PID, attaching Delve..."
11+
echo "Found app with PID: ${APP_PID}, attaching Delve..."
1212

13-
dlv attach "$APP_PID" --headless --listen=:2345 --api-version=2 --accept-multiclient --continue
13+
dlv attach "${APP_PID}" --headless --listen=:2345 --api-version=2 --accept-multiclient --continue

docker/mongodb-agent/agent-launcher-shim.sh

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,13 +9,13 @@ SCRIPTS_DIR="/opt/scripts"
99
# Function to start the agent launcher
1010
start_agent_launcher() {
1111
echo "Starting agent launcher..."
12-
echo "Final contents of $SCRIPTS_DIR:"
13-
ls -la "$SCRIPTS_DIR"
12+
echo "Final contents of ${SCRIPTS_DIR}:"
13+
ls -la "${SCRIPTS_DIR}"
1414

15-
if [[ -f "$SCRIPTS_DIR/agent-launcher.sh" ]]; then
15+
if [[ -f "${SCRIPTS_DIR}/agent-launcher.sh" ]]; then
1616
echo "Found agent-launcher.sh, executing..."
1717
echo "Note: agent-launcher.sh will become PID 1 and handle all signal processing including cleanup"
18-
exec "$SCRIPTS_DIR/agent-launcher.sh"
18+
exec "${SCRIPTS_DIR}/agent-launcher.sh"
1919
else
2020
echo "ERROR: agent-launcher.sh not found"
2121
exit 1

docker/mongodb-agent/dummy-probe.sh

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -4,34 +4,34 @@
44
find_init_container() {
55
local pid
66
pid=$(pgrep -f "agent-utilities-holder_marker" | head -n1)
7-
if [[ -n "$pid" && -d "/proc/$pid/root/probes" ]]; then
8-
echo "$pid"
7+
if [[ -n "${pid}" && -d "/proc/${pid}/root/probes" ]]; then
8+
echo "${pid}"
99
return 0
1010
fi
1111
return 1
1212
}
1313

1414
execute_liveness_probe() {
15-
local init_pid="$1"
16-
local init_probe_path="/proc/$init_pid/root/probes/probe.sh"
15+
local init_pid="${1}"
16+
local init_probe_path="/proc/${init_pid}/root/probes/probe.sh"
1717

18-
if [[ ! -f "$init_probe_path" ]]; then
19-
echo "ERROR: Liveness probe script not found at $init_probe_path"
18+
if [[ ! -f "${init_probe_path}" ]]; then
19+
echo "ERROR: Liveness probe script not found at ${init_probe_path}"
2020
exit 1
21-
elif [[ ! -x "$init_probe_path" ]]; then
22-
echo "ERROR: Liveness probe script not executable at $init_probe_path"
21+
elif [[ ! -x "${init_probe_path}" ]]; then
22+
echo "ERROR: Liveness probe script not executable at ${init_probe_path}"
2323
exit 1
2424
else
2525
# Execute the actual probe script from the init-database container
2626
# This works because of shared process namespace - the probe can see all processes
27-
exec "$init_probe_path"
27+
exec "${init_probe_path}"
2828
fi
2929
}
3030

3131
# Main execution
3232
if init_pid=$(find_init_container); then
33-
echo "Found init container with PID: $init_pid, executing liveness probe..."
34-
execute_liveness_probe "$init_pid"
33+
echo "Found init container with PID: ${init_pid}, executing liveness probe..."
34+
execute_liveness_probe "${init_pid}"
3535
else
3636
echo "WARNING: Init container not found, falling back to basic liveness check"
3737
# Fallback: if we can't find the init container, just check if this container is alive

docker/mongodb-agent/setup-agent-files.sh

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,8 @@ SCRIPTS_DIR="/opt/scripts"
88
# readiness always returns failure
99
setup_dummy_probes() {
1010
echo "Setting up dummy probe scripts..."
11-
cp --remove-destination /usr/local/bin/dummy-probe.sh "$SCRIPTS_DIR/probe.sh"
12-
cp --remove-destination /usr/local/bin/dummy-readinessprobe "$SCRIPTS_DIR/readinessprobe"
11+
cp --remove-destination /usr/local/bin/dummy-probe.sh "${SCRIPTS_DIR}/probe.sh"
12+
cp --remove-destination /usr/local/bin/dummy-readinessprobe "${SCRIPTS_DIR}/readinessprobe"
1313
echo "Dummy probe scripts ready"
1414
}
1515

@@ -18,23 +18,23 @@ find_init_container() {
1818
for i in {1..150}; do
1919
local pid
2020
pid=$(pgrep -f "agent-utilities-holder_marker" | head -n1)
21-
if [[ -n "$pid" && -d "/proc/$pid/root/scripts" ]]; then
22-
echo "$pid"
21+
if [[ -n "${pid}" && -d "/proc/${pid}/root/scripts" ]]; then
22+
echo "${pid}"
2323
return 0
2424
fi
25-
echo "Waiting for init container... (attempt $i)" >&2
25+
echo "Waiting for init container... (attempt ${i})" >&2
2626
sleep 2
2727
done
2828
return 1
2929
}
3030

3131
link_agent_scripts() {
32-
local init_scripts_dir="$1"
32+
local init_scripts_dir="${1}"
3333

3434
echo "Linking agent launcher scripts..."
3535
for script in agent-launcher.sh agent-launcher-lib.sh; do
36-
ln -sf "$init_scripts_dir/$script" "$SCRIPTS_DIR/$script"
37-
echo "Linked $script"
36+
ln -sf "${init_scripts_dir}/${script}" "${SCRIPTS_DIR}/${script}"
37+
echo "Linked ${script}"
3838
done
3939
}
4040

@@ -44,26 +44,26 @@ main() {
4444
setup_dummy_probes
4545

4646
if init_pid=$(find_init_container); then
47-
echo "Found init container with PID: $init_pid"
47+
echo "Found init container with PID: ${init_pid}"
4848

49-
init_root="/proc/$init_pid/root"
50-
init_scripts="$init_root/scripts"
51-
init_probes="$init_root/probes"
49+
init_root="/proc/${init_pid}/root"
50+
init_scripts="${init_root}/scripts"
51+
init_probes="${init_root}/probes"
5252

5353
# Verify scripts directory exists
54-
if [[ ! -d "$init_scripts" ]]; then
55-
echo "ERROR: Scripts directory $init_scripts not found"
54+
if [[ ! -d "${init_scripts}" ]]; then
55+
echo "ERROR: Scripts directory ${init_scripts} not found"
5656
exit 1
5757
fi
5858

5959
# Verify probes directory exists
60-
if [[ ! -d "$init_probes" ]]; then
61-
echo "ERROR: Probes directory $init_probes not found"
60+
if [[ ! -d "${init_probes}" ]]; then
61+
echo "ERROR: Probes directory ${init_probes} not found"
6262
exit 1
6363
fi
6464

6565
# Link scripts from init container
66-
link_agent_scripts "$init_scripts"
66+
link_agent_scripts "${init_scripts}"
6767

6868
echo "File setup completed successfully"
6969
exit 0

docker/mongodb-kubernetes-init-database/content/probe.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,9 +2,9 @@
22
set -Eeou pipefail
33

44
check_process() {
5-
local check_process=$1
5+
local check_process=${1}
66
# shellcheck disable=SC2009
7-
ps -ax | grep -v " grep " | grep -v jq | grep -v tail | grep "$check_process"
7+
ps -ax | grep -v " grep " | grep -v jq | grep -v tail | grep "${check_process}"
88
return $?
99
}
1010

mongodb-community-operator/scripts/dev/setup_sa.sh

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
service_accounts=$(kubectl get serviceaccounts -n "${NAMESPACE}" -o jsonpath='{.items[*].metadata.name}')
44

5-
for service_account in $service_accounts; do
6-
kubectl patch serviceaccount "$service_account" -n "${NAMESPACE}" -p "{\"imagePullSecrets\": [{\"name\": \"image-registries-secret\"}]}"
7-
done
5+
# shellcheck disable=SC2086 # service_accounts is intentionally unquoted for word splitting
6+
for service_account in ${service_accounts}; do
7+
kubectl patch serviceaccount "${service_account}" -n "${NAMESPACE}" -p "{\"imagePullSecrets\": [{\"name\": \"image-registries-secret\"}]}"
8+
done

multi_cluster/tools/install_istio.sh

Lines changed: 24 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -8,14 +8,14 @@ export CTX_CLUSTER3=${CTX_CLUSTER3:-e2e.cluster3.mongokubernetes.com}
88
export VERSION=${VERSION:-1.12.8}
99

1010
IS_KIND="false"
11-
if [[ $CTX_CLUSTER1 = kind* ]]; then
11+
if [[ ${CTX_CLUSTER1} = kind* ]]; then
1212
IS_KIND="true"
1313
fi
1414

1515
source multi_cluster/tools/download_istio.sh
1616

1717
#
18-
cd istio-${VERSION}
18+
cd "istio-${VERSION}"
1919
## perform cleanup prior to install
2020
bin/istioctl x uninstall --context="${CTX_CLUSTER1}" --purge --skip-confirmation &
2121
bin/istioctl x uninstall --context="${CTX_CLUSTER2}" --purge --skip-confirmation &
@@ -40,28 +40,28 @@ kubectl --context="${CTX_CLUSTER1}" delete ns istio-system || true
4040
kubectl --context="${CTX_CLUSTER1}" create ns istio-system
4141
kubectl --context="${CTX_CLUSTER1}" label --overwrite ns istio-system pod-security.kubernetes.io/enforce=privileged
4242
kubectl --context="${CTX_CLUSTER1}" create secret generic cacerts -n istio-system \
43-
--from-file=${CTX_CLUSTER1}/ca-cert.pem \
44-
--from-file=${CTX_CLUSTER1}/ca-key.pem \
45-
--from-file=${CTX_CLUSTER1}/root-cert.pem \
46-
--from-file=${CTX_CLUSTER1}/cert-chain.pem
43+
--from-file="${CTX_CLUSTER1}/ca-cert.pem" \
44+
--from-file="${CTX_CLUSTER1}/ca-key.pem" \
45+
--from-file="${CTX_CLUSTER1}/root-cert.pem" \
46+
--from-file="${CTX_CLUSTER1}/cert-chain.pem"
4747

4848
kubectl --context="${CTX_CLUSTER2}" delete ns istio-system || true
4949
kubectl --context="${CTX_CLUSTER2}" create ns istio-system
5050
kubectl --context="${CTX_CLUSTER2}" label --overwrite ns istio-system pod-security.kubernetes.io/enforce=privileged
5151
kubectl --context="${CTX_CLUSTER2}" create secret generic cacerts -n istio-system \
52-
--from-file=${CTX_CLUSTER2}/ca-cert.pem \
53-
--from-file=${CTX_CLUSTER2}/ca-key.pem \
54-
--from-file=${CTX_CLUSTER2}/root-cert.pem \
55-
--from-file=${CTX_CLUSTER2}/cert-chain.pem
52+
--from-file="${CTX_CLUSTER2}/ca-cert.pem" \
53+
--from-file="${CTX_CLUSTER2}/ca-key.pem" \
54+
--from-file="${CTX_CLUSTER2}/root-cert.pem" \
55+
--from-file="${CTX_CLUSTER2}/cert-chain.pem"
5656

5757
kubectl --context="${CTX_CLUSTER3}" delete ns istio-system || true
5858
kubectl --context="${CTX_CLUSTER3}" create ns istio-system
5959
kubectl --context="${CTX_CLUSTER3}" label --overwrite ns istio-system pod-security.kubernetes.io/enforce=privileged
6060
kubectl --context="${CTX_CLUSTER3}" create secret generic cacerts -n istio-system \
61-
--from-file=${CTX_CLUSTER3}/ca-cert.pem \
62-
--from-file=${CTX_CLUSTER3}/ca-key.pem \
63-
--from-file=${CTX_CLUSTER3}/root-cert.pem \
64-
--from-file=${CTX_CLUSTER3}/cert-chain.pem
61+
--from-file="${CTX_CLUSTER3}/ca-cert.pem" \
62+
--from-file="${CTX_CLUSTER3}/ca-key.pem" \
63+
--from-file="${CTX_CLUSTER3}/root-cert.pem" \
64+
--from-file="${CTX_CLUSTER3}/cert-chain.pem"
6565
popd
6666

6767
# install IstioOperator in clusters
@@ -145,43 +145,49 @@ wait
145145
CLUSTER_1_ADDITIONAL_OPTS=""
146146
CLUSTER_2_ADDITIONAL_OPTS=""
147147
CLUSTER_3_ADDITIONAL_OPTS=""
148-
if [[ $IS_KIND == "true" ]]; then
149-
CLUSTER_1_ADDITIONAL_OPTS="--server https://$(kubectl --context=${CTX_CLUSTER1} get node e2e-cluster-1-control-plane -o=jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}'):6443"
150-
CLUSTER_2_ADDITIONAL_OPTS="--server https://$(kubectl --context=${CTX_CLUSTER2} get node e2e-cluster-2-control-plane -o=jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}'):6443"
151-
CLUSTER_3_ADDITIONAL_OPTS="--server https://$(kubectl --context=${CTX_CLUSTER3} get node e2e-cluster-3-control-plane -o=jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}'):6443"
148+
if [[ ${IS_KIND} == "true" ]]; then
149+
CLUSTER_1_ADDITIONAL_OPTS="--server https://$(kubectl --context="${CTX_CLUSTER1}" get node e2e-cluster-1-control-plane -o=jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}'):6443"
150+
CLUSTER_2_ADDITIONAL_OPTS="--server https://$(kubectl --context="${CTX_CLUSTER2}" get node e2e-cluster-2-control-plane -o=jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}'):6443"
151+
CLUSTER_3_ADDITIONAL_OPTS="--server https://$(kubectl --context="${CTX_CLUSTER3}" get node e2e-cluster-3-control-plane -o=jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}'):6443"
152152
fi
153153

154154
# enable endpoint discovery
155+
# shellcheck disable=SC2086 # CLUSTER_X_ADDITIONAL_OPTS must not be quoted - empty string breaks istioctl
155156
bin/istioctl x create-remote-secret \
156157
--context="${CTX_CLUSTER1}" \
157158
-n istio-system \
158159
--name=cluster1 ${CLUSTER_1_ADDITIONAL_OPTS} |
159160
kubectl apply -f - --context="${CTX_CLUSTER2}"
160161

162+
# shellcheck disable=SC2086
161163
bin/istioctl x create-remote-secret \
162164
--context="${CTX_CLUSTER1}" \
163165
-n istio-system \
164166
--name=cluster1 ${CLUSTER_1_ADDITIONAL_OPTS} |
165167
kubectl apply -f - --context="${CTX_CLUSTER3}"
166168

169+
# shellcheck disable=SC2086
167170
bin/istioctl x create-remote-secret \
168171
--context="${CTX_CLUSTER2}" \
169172
-n istio-system \
170173
--name=cluster2 ${CLUSTER_2_ADDITIONAL_OPTS} |
171174
kubectl apply -f - --context="${CTX_CLUSTER1}"
172175

176+
# shellcheck disable=SC2086
173177
bin/istioctl x create-remote-secret \
174178
--context="${CTX_CLUSTER2}" \
175179
-n istio-system \
176180
--name=cluster2 ${CLUSTER_2_ADDITIONAL_OPTS} |
177181
kubectl apply -f - --context="${CTX_CLUSTER3}"
178182

183+
# shellcheck disable=SC2086
179184
bin/istioctl x create-remote-secret \
180185
--context="${CTX_CLUSTER3}" \
181186
-n istio-system \
182187
--name=cluster3 ${CLUSTER_3_ADDITIONAL_OPTS} |
183188
kubectl apply -f - --context="${CTX_CLUSTER1}"
184189

190+
# shellcheck disable=SC2086
185191
bin/istioctl x create-remote-secret \
186192
--context="${CTX_CLUSTER3}" \
187193
-n istio-system \

multi_cluster/tools/install_istio_central.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ export VERSION=${VERSION:-1.14.2}
77
export CTX_CLUSTER=${CTX_CLUSTER:-e2e.operator.mongokubernetes.com}
88

99
source multi_cluster/tools/download_istio.sh
10-
cd istio-${VERSION}
10+
cd "istio-${VERSION}"
1111

1212
bin/istioctl x uninstall --context="${CTX_CLUSTER}" --purge --skip-confirmation
1313
bin/istioctl install --context="${CTX_CLUSTER}" --set components.cni.enabled=true --set profile=default --set meshConfig.outboundTrafficPolicy.mode=REGISTRY_ONLY --skip-confirmation

0 commit comments

Comments (0)