@@ -34,7 +34,7 @@ clean_kind=no
api_group_suffix="pinniped.dev" # same default as in the values.yaml ytt file
dockerfile_path=""
get_active_directory_vars="" # specify a filename for a script to get AD related env variables
-get_github_vars="" # specify a filename for a script to get GitHub related env variables
+get_github_vars="" # specify a filename for a script to get GitHub related env variables
alternate_deploy="undefined"
pre_install="undefined"
@@ -319,6 +319,15 @@ service_https_nodeport_nodeport: $service_https_nodeport_nodeport
service_https_clusterip_port: $service_https_clusterip_port
EOF
+if [[ "${FIREWALL_IDPS:-no}" == "yes" ]]; then
+  # Configure the web proxy on the Supervisor pods. Note that .svc and .cluster.local are not included
+  # in no_proxy, so requests for things like dex.tools.svc.cluster.local will go through the web proxy.
+  cat << EOF >>"$data_values_file"
+https_proxy: "http://proxy.tools.svc.cluster.local:3128"
+no_proxy: "\$(KUBERNETES_SERVICE_HOST),169.254.169.254,127.0.0.1,localhost"
+EOF
+fi
+
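+# Note on the escaping above: the backslash in "\$(KUBERNETES_SERVICE_HOST)" keeps the shell from
+# expanding it inside the heredoc, so the literal text "$(KUBERNETES_SERVICE_HOST)" is written to the
+# values file. This assumes the ytt templates pass no_proxy through to the container env, where
+# Kubernetes itself expands $(KUBERNETES_SERVICE_HOST) from the pod's environment at startup.
+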
if [ " $alternate_deploy " != " undefined" ]; then
323
332
log_note " The Pinniped Supervisor will be deployed with $alternate_deploy pinniped-supervisor $tag $registry_with_port $repo $data_values_file ..."
324
333
$alternate_deploy pinniped-supervisor " $tag " $registry_with_port $repo $data_values_file
@@ -338,7 +347,7 @@ manifest=/tmp/pinniped-concierge.yaml
data_values_file=/tmp/concierge-values.yml
concierge_app_name="pinniped-concierge"
concierge_namespace="concierge"
-webhook_url="https://local-user-authenticator.local-user-authenticator.svc/authenticate"
+webhook_url="https://local-user-authenticator.local-user-authenticator.svc.cluster.local/authenticate"
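+# The fully-qualified .svc.cluster.local form is used here, presumably so this hostname still
+# resolves and routes correctly when FIREWALL_IDPS=yes sends the request through the Squid proxy
+# (the proxy comments below call out this exact hostname).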
342
351
discovery_url=" $( TERM=dumb kubectl cluster-info | awk ' /master|control plane/ {print $NF}' ) "
343
352
concierge_custom_labels=" {myConciergeCustomLabelName: myConciergeCustomLabelValue}"
344
353
log_level=" debug"
@@ -354,6 +363,16 @@ image_tag: $tag
discovery_url: $discovery_url
EOF
+if [[ "${FIREWALL_IDPS:-no}" == "yes" ]]; then
+  # Configure the web proxy on the Concierge pods. Note that .svc and .cluster.local are not included
+  # in no_proxy, so requests for things like pinniped-supervisor-clusterip.supervisor.svc.cluster.local and
+  # local-user-authenticator.local-user-authenticator.svc.cluster.local will go through the web proxy.
+  cat << EOF >>"$data_values_file"
+https_proxy: "http://proxy.tools.svc.cluster.local:3128"
+no_proxy: "\$(KUBERNETES_SERVICE_HOST),169.254.169.254,127.0.0.1,localhost"
+EOF
+fi
+
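+# (To exercise this path, run the script with FIREWALL_IDPS=yes exported; it defaults to "no",
+# which skips the proxy settings here and the NetworkPolicy setup below.)
+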
if [ "$alternate_deploy" != "undefined" ]; then
  log_note "The Pinniped Concierge will be deployed with $alternate_deploy pinniped-concierge $tag $registry_with_port $repo $data_values_file ..."
  $alternate_deploy pinniped-concierge "$tag" $registry_with_port $repo $data_values_file
@@ -366,6 +385,77 @@ else
  popd > /dev/null
fi
+
+#
+# Now that everything is deployed, optionally firewall the Dex server, the local user authenticator server,
+# and the GitHub API so that the Supervisor and Concierge cannot reach them directly. However, the Squid
+# proxy server can reach them all, so the Supervisor and Concierge can reach them through the proxy.
+#
+if [[ "${FIREWALL_IDPS:-no}" == "yes" ]]; then
+  log_note "Setting up firewalls for the Supervisor and Concierge's outgoing TCP/UDP network traffic..."
+  cat << EOF | kubectl apply --wait -f -
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: supervisor-cannot-make-external-requests
+  namespace: ${supervisor_namespace}
+spec:
+  # An empty podSelector matches all pods in this namespace.
+  podSelector: {}
+  policyTypes:
+    - Egress
+  # This is an allow list. Everything else is disallowed.
+  # Especially note that it cannot access Dex or the GitHub API directly.
+  egress:
+    - to:
+        # Allowed to make requests to all pods in kube-system for DNS and Kube API.
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: kube-system
+        # Allowed to make requests to the LDAP server in tools, because we cannot use
+        # an HTTP proxy for the LDAP protocol, since LDAP is not over HTTP.
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: tools
+          podSelector:
+            matchLabels:
+              app: ldap
+        # Allowed to make requests to the Squid proxy server in the tools namespace.
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: tools
+          podSelector:
+            matchLabels:
+              app: proxy
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: concierge-cannot-make-external-requests
+  namespace: ${concierge_namespace}
+spec:
+  # An empty podSelector matches all pods in this namespace.
+  podSelector: {}
+  policyTypes:
+    - Egress
+  # This is an allow list. Everything else is disallowed.
+  # Especially note that it cannot access the local user authenticator or Supervisor directly.
+  egress:
+    - to:
+        # Allowed to make requests to all pods in kube-system for DNS and Kube API.
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: kube-system
+        # Allowed to make requests to the Squid proxy server in the tools namespace.
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: tools
+          podSelector:
+            matchLabels:
+              app: proxy
+EOF
+fi
+
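+# As a quick sanity check, the policies created above can be listed by the names given in the
+# manifest, e.g.:
+#   kubectl get networkpolicy -n "$supervisor_namespace" supervisor-cannot-make-external-requests
+#   kubectl get networkpolicy -n "$concierge_namespace" concierge-cannot-make-external-requests
+# Direct egress from those pods to Dex, GitHub, or the local user authenticator should now fail,
+# while the same requests made via http://proxy.tools.svc.cluster.local:3128 should succeed.
+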
#
# Create a test user in the local-user-authenticator and get its CA bundle.
#