diff --git a/Dockerfile b/Dockerfile
index 7b61303..249da3e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,6 @@
ARG UBUNTU_DISTRO=jammy
-ARG ROS_DISTRO=rolling
+# "rolling" is an alternative ROS distro.
+ARG ROS_DISTRO=humble
FROM ubuntu:${UBUNTU_DISTRO}
# Set up install, set tzdata
diff --git a/fogros2/fogros2/__init__.py b/fogros2/fogros2/__init__.py
index af6f673..3babf44 100755
--- a/fogros2/fogros2/__init__.py
+++ b/fogros2/fogros2/__init__.py
@@ -32,5 +32,8 @@
# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
from .aws_cloud_instance import AWSCloudInstance # noqa: F401
+from .gcp_cloud_instance import GCPCloudInstance  # noqa: F401
+from .azure_cloud_instance import AzureCloudInstance  # noqa: F401
+from .kubernetes.generic import KubeInstance  # noqa: F401
from .cloud_node import CloudNode # noqa: F401
from .launch_description import FogROSLaunchDescription # noqa: F401
diff --git a/fogros2/fogros2/aws_cloud_instance.py b/fogros2/fogros2/aws_cloud_instance.py
old mode 100755
new mode 100644
index b28de41..243fac1
--- a/fogros2/fogros2/aws_cloud_instance.py
+++ b/fogros2/fogros2/aws_cloud_instance.py
@@ -116,9 +116,9 @@ def create(self):
self.info(flush_to_disk=True)
self.connect()
-        # Uncomment out the next three lines if you are not using a custom AMI
+        # Comment out the next three lines if you are using a custom AMI
- #self.install_ros()
- #self.install_colcon()
- #self.install_cloud_dependencies()
+ self.install_ros()
+ self.install_cloud_dependencies()
+ self.install_colcon()
self.push_ros_workspace()
self.info(flush_to_disk=True)
self._is_created = True
diff --git a/fogros2/fogros2/azure_cloud_instance.py b/fogros2/fogros2/azure_cloud_instance.py
new file mode 100644
index 0000000..397af18
--- /dev/null
+++ b/fogros2/fogros2/azure_cloud_instance.py
@@ -0,0 +1,135 @@
+# Copyright 2022 The Regents of the University of California (Regents)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Copyright ©2022. The Regents of the University of California (Regents).
+# All Rights Reserved. Permission to use, copy, modify, and distribute this
+# software and its documentation for educational, research, and not-for-profit
+# purposes, without fee and without a signed licensing agreement, is hereby
+# granted, provided that the above copyright notice, this paragraph and the
+# following two paragraphs appear in all copies, modifications, and
+# distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
+# Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
+# otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial
+# licensing opportunities. IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY
+# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
+# INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+# DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE. REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY,
+# PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+import json
+import os
+import subprocess
+import time
+import uuid
+
+from .cloud_instance import CloudInstance
+
+
+class AzureCloudInstance(CloudInstance):
+ """Azure Implementation of CloudInstance."""
+
+ def __init__(
+ self,
+ resource_group,
+ ami_image='UbuntuLTS',
+ zone="eastus",
+ machine_type="Standard_DS2_v2",
+ disk_size=10,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.cloud_service_provider = "Azure"
+
+ id_ = str(uuid.uuid4())[0:8]
+ self._name = f'fog-{id_}-{self._name}'
+
+ self.zone = zone
+ self.type = machine_type
+ self.compute_instance_disk_size = disk_size # GB
+ self.azure_ami_image = ami_image
+
+ self._working_dir = os.path.join(self._working_dir_base, self._name)
+ os.makedirs(self._working_dir, exist_ok=True)
+
+ self.resource_group = resource_group
+
+ # after config
+ self._ssh_key = None
+
+ self.create()
+
+ def create(self):
+ self.logger.info(f"Creating new Azure Compute Engine instance with name {self._name}")
+ self.create_compute_engine_instance()
+ self.info(flush_to_disk=True)
+ self.connect()
+ self.install_ros()
+ self.install_colcon()
+ self.install_cloud_dependencies()
+ self.push_ros_workspace()
+ self.info(flush_to_disk=True)
+ self._is_created = True
+
+ def info(self, flush_to_disk=True):
+ info_dict = super().info(flush_to_disk)
+ info_dict["compute_region"] = self.zone
+ info_dict["compute_instance_type"] = self.type
+ info_dict["disk_size"] = self.compute_instance_disk_size
+ info_dict["compute_instance_id"] = self._name
+ if flush_to_disk:
+ with open(os.path.join(self._working_dir, "info"), "w+") as f:
+ json.dump(info_dict, f)
+ return info_dict
+
+ def create_compute_engine_instance(self):
+ os.system(f'az group create --name {self.resource_group} --location {self.zone} ')
+ time.sleep(1)
+
+        # az uses --size (not --machine-type) to select the VM SKU.
+        cmd = (f'az vm create --resource-group {self.resource_group} '
+               f'--name={self._name} --image={self.azure_ami_image} '
+               f'--size={self.type} '
+               '--admin-username ubuntu '
+               '--generate-ssh-keys')
+        self.logger.debug(cmd)
+        result = subprocess.check_output(cmd, shell=True).decode()
+        self.logger.debug(result)
+
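+        # `az vm create` prints a JSON blob on success; abridged example
+        # (values illustrative):
+        #   {"publicIpAddress": "20.119.41.7", "powerState": "VM running", ...}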
+ # Grab external IP
+        ip = json.loads(result)["publicIpAddress"]
+
+        # Verify the response is an IPv4 address
+ if len(ip.split('.')) != 4:
+ raise Exception(f'Error creating instance: {ip}')
+
+ self._ip = ip
+
+        # --generate-ssh-keys reuses (or creates) the caller's default key pair.
+        user = subprocess.check_output('whoami', shell=True).decode().strip()
+        self._ssh_key_path = f'/home/{user}/.ssh/id_rsa'
+ self._is_created = True
+
+ self.logger.info(
+ f"Created {self.type} instance named {self._name} "
+ f"with id {self._name} and public IP address {self._ip}"
+ )
diff --git a/fogros2/fogros2/cloud_instance.py b/fogros2/fogros2/cloud_instance.py
index f2a5602..821255f 100644
--- a/fogros2/fogros2/cloud_instance.py
+++ b/fogros2/fogros2/cloud_instance.py
@@ -72,6 +72,7 @@ def __init__(
self.cyclone_builder = None
self.scp = None
self._ip = None
+ self._vpn_ip = None
self.ros_workspace = ros_workspace
self.ros_distro = os.getenv("ROS_DISTRO")
self.logger.debug(f"Using ROS workspace: {self.ros_workspace}")
@@ -84,6 +85,7 @@ def __init__(
self.cloud_service_provider = None
self.dockers = []
self.launch_foxglove = launch_foxglove
+ self._username = 'ubuntu'
@abc.abstractmethod
def create(self):
@@ -102,15 +104,23 @@ def info(self, flush_to_disk=True):
with open(os.path.join(self._working_dir, "info"), "w+") as f:
json.dump(info_dict, f)
return info_dict
+
+    def force_start_vpn(self):
+        """Whether the robot-side VPN (wg0) must be brought up for this instance."""
+        return True
def connect(self):
- self.scp = SCPClient(self._ip, self._ssh_key_path)
+ self.scp = SCPClient(self._ip, self._ssh_key_path, username=self._username)
self.scp.connect()
@property
def ip(self):
return self._ip
+ @property
+ def vpn_ip(self):
+        # Dedicated VPN endpoint; None means callers should fall back to self.ip.
+ return self._vpn_ip
+
@property
def is_created(self):
return self._is_created
@@ -125,10 +135,14 @@ def apt_install(self, args):
)
def pip_install(self, args):
- self.scp.execute_cmd(f"sudo pip3 install {args}")
+ self.scp.execute_cmd(f"python3 -m pip install {args}")
def install_cloud_dependencies(self):
self.apt_install("wireguard unzip docker.io python3-pip ros-humble-rmw-cyclonedds-cpp")
+ self.pip_install("boto3")
+ self.pip_install("paramiko")
+ self.pip_install("scp")
+ self.pip_install("wgconfig")
def install_ros(self):
# setup sources
@@ -159,7 +173,7 @@ def install_ros(self):
self.scp.execute_cmd("export LANG=en_US.UTF-8")
# install ros2 packages
- # self.apt_install(f"ros-{self.ros_distro}-desktop")
+ self.apt_install(f"ros-{self.ros_distro}-desktop")
# source environment
self.scp.execute_cmd(f"source /opt/ros/{self.ros_distro}/setup.bash")
@@ -173,7 +187,7 @@ def configure_rosbridge(self):
rosbridge_launch_script = (
"ssh -o StrictHostKeyChecking=no -i "
f"{self._ssh_key_path}"
- " ubuntu@"
+ f" {self._username}@"
f"{self._ip}"
f' "source /opt/ros/{self.ros_distro}/setup.bash && '
'ros2 launch rosbridge_server rosbridge_websocket_launch.xml &"'
@@ -203,8 +217,10 @@ def push_ros_workspace(self):
make_zip_file(workspace_path, zip_dst)
self.scp.execute_cmd("echo removing old workspace")
self.scp.execute_cmd("rm -rf ros_workspace.zip ros2_ws fog_ws")
- self.scp.send_file(f"{zip_dst}.zip", "/home/ubuntu/")
- self.scp.execute_cmd("unzip -q /home/ubuntu/ros_workspace.zip")
+        self.scp.send_file(f"{zip_dst}.tar", "/home/ubuntu/")
+        self.scp.execute_cmd("tar -xf /home/ubuntu/ros_workspace.tar")
self.scp.execute_cmd("echo successfully extracted new workspace")
def push_to_cloud_nodes(self):
@@ -223,7 +239,7 @@ def push_and_setup_vpn(self):
def configure_DDS(self):
# configure DDS
- self.cyclone_builder = CycloneConfigBuilder(["10.0.0.1"])
+ self.cyclone_builder = CycloneConfigBuilder(["10.0.0.1"], username=self._username)
self.cyclone_builder.generate_config_file()
self.scp.send_file("/tmp/cyclonedds.xml", "~/cyclonedds.xml")
@@ -231,9 +247,9 @@ def launch_cloud_node(self):
cmd_builder = BashBuilder()
cmd_builder.append(f"source /opt/ros/{self.ros_distro}/setup.bash")
cmd_builder.append(
- "cd /home/ubuntu/fog_ws && colcon build --cmake-clean-cache"
+ f"cd /home/{self._username}/fog_ws && colcon build --cmake-clean-cache"
)
- cmd_builder.append(". /home/ubuntu/fog_ws/install/setup.bash")
+ cmd_builder.append(f". /home/{self._username}/fog_ws/install/setup.bash")
cmd_builder.append(self.cyclone_builder.env_cmd)
ros_domain_id = os.environ.get("ROS_DOMAIN_ID")
if not ros_domain_id:
diff --git a/fogros2/fogros2/dds_config_builder.py b/fogros2/fogros2/dds_config_builder.py
index f88d00f..7e2dd15 100644
--- a/fogros2/fogros2/dds_config_builder.py
+++ b/fogros2/fogros2/dds_config_builder.py
@@ -53,24 +53,18 @@ def generate_config_file(self):
class CycloneConfigBuilder(DDSConfigBuilder):
- def __init__(self, ip_addresses):
+ def __init__(self, ip_addresses, username='ubuntu'):
super().__init__(ip_addresses)
self.config_save_path = "/tmp/cyclonedds.xml"
self.env_cmd = (
"export RMW_IMPLEMENTATION=rmw_cyclonedds_cpp && "
- "export CYCLONEDDS_URI=file:///home/ubuntu/cyclonedds.xml"
+ f"export CYCLONEDDS_URI=file:///home/{username}/cyclonedds.xml"
)
-    def generate_config_file(self):
-        if ubuntu_release == "20.04":
-            interfaces = """
-            <NetworkInterfaceAddress>wg0</NetworkInterfaceAddress>
-            <AllowMulticast>false</AllowMulticast>
-            """
-        else:
-            interfaces = """
+    def generate_config_file(self, extra_peers=[]):
+        interfaces = """
-            <Interfaces><NetworkInterface address="wg0"/></Interfaces>
+            <Interfaces><NetworkInterface name="wg0"/></Interfaces>
            """
@@ -82,6 +76,8 @@ def generate_config_file(self):
'cyclonedds/master/etc/cyclonedds.xsd"'
)
+        peer_xml = "".join(f'<Peer address="{peer}"/>\n' for peer in extra_peers)
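+        # e.g. extra_peers=['10.0.0.3'] -> '<Peer address="10.0.0.3"/>\n'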
+
template = f"""
@@ -91,6 +87,7 @@ def generate_config_file(self):
+                    {peer_xml}
                    <ParticipantIndex>auto</ParticipantIndex>
diff --git a/fogros2/fogros2/gcp_cloud_instance.py b/fogros2/fogros2/gcp_cloud_instance.py
new file mode 100644
index 0000000..1178278
--- /dev/null
+++ b/fogros2/fogros2/gcp_cloud_instance.py
@@ -0,0 +1,150 @@
+# Copyright 2022 The Regents of the University of California (Regents)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Copyright ©2022. The Regents of the University of California (Regents).
+# All Rights Reserved. Permission to use, copy, modify, and distribute this
+# software and its documentation for educational, research, and not-for-profit
+# purposes, without fee and without a signed licensing agreement, is hereby
+# granted, provided that the above copyright notice, this paragraph and the
+# following two paragraphs appear in all copies, modifications, and
+# distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
+# Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
+# otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial
+# licensing opportunities. IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY
+# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
+# INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+# DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE. REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY,
+# PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+import json
+import os
+
+import subprocess
+import uuid
+
+from .cloud_instance import CloudInstance
+
+from .util import extract_bash_column
+
+
+class GCPCloudInstance(CloudInstance):
+ """GCP Implementation of CloudInstance."""
+
+ def __init__(
+ self,
+ project_id,
+ ami_image='projects/ubuntu-os-cloud/global/images/ubuntu-2204-jammy-v20220712a',
+ zone="us-central1-a",
+ machine_type="e2-medium",
+ disk_size=10,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.cloud_service_provider = "GCP"
+
+ id_ = str(uuid.uuid4())[0:8]
+ self._name = f'fog-{id_}-{self._name}'
+
+ self.zone = zone
+ self.type = machine_type
+ self.compute_instance_disk_size = disk_size # GB
+ self.gcp_ami_image = ami_image
+
+ self._working_dir = os.path.join(self._working_dir_base, self._name)
+ os.makedirs(self._working_dir, exist_ok=True)
+
+ self._project_id = project_id
+
+ # after config
+ self._ssh_key = None
+
+ self.create()
+
+ def create(self):
+ self.logger.info(f"Creating new GCP Compute Engine instance with name {self._name}")
+ self.create_compute_engine_instance()
+ self.info(flush_to_disk=True)
+ self.connect()
+ self.install_ros()
+ self.install_colcon()
+ self.install_cloud_dependencies()
+ self.push_ros_workspace()
+ self.info(flush_to_disk=True)
+ self._is_created = True
+
+ def info(self, flush_to_disk=True):
+ info_dict = super().info(flush_to_disk)
+ info_dict["compute_region"] = self.zone
+ info_dict["compute_instance_type"] = self.type
+ info_dict["disk_size"] = self.compute_instance_disk_size
+ info_dict["compute_instance_id"] = self._name
+ if flush_to_disk:
+ with open(os.path.join(self._working_dir, "info"), "w+") as f:
+ json.dump(info_dict, f)
+ return info_dict
+
+ def create_compute_engine_instance(self):
+ os.system(f'gcloud config set project {self._project_id}')
+
+ result = subprocess.check_output(f'gcloud compute instances create {self._name} '
+ f'--project={self._project_id} --zone={self.zone} --machine-type={self.type} '
+ '--network-interface=network-tier=PREMIUM,subnet=default '
+ '--maintenance-policy=MIGRATE --provisioning-model=STANDARD '
+ '--scopes=https://www.googleapis.com/auth/devstorage.read_only,'
+ 'https://www.googleapis.com/auth/logging.write,'
+ 'https://www.googleapis.com/auth/monitoring.write,'
+ 'https://www.googleapis.com/auth/servicecontrol,'
+ 'https://www.googleapis.com/auth/service.management.readonly,'
+ 'https://www.googleapis.com/auth/trace.append '
+ '--create-disk=auto-delete=yes,'
+ 'boot=yes,'
+ f'device-name={self._name},'
+ f'image={self.gcp_ami_image},'
+ 'mode=rw,'
+ f'size={self.compute_instance_disk_size},'
+ f'type=projects/{self._project_id}/zones/{self.zone}/diskTypes/pd-balanced '
+ '--no-shielded-secure-boot '
+ '--shielded-vtpm '
+ '--shielded-integrity-monitoring '
+ '--reservation-affinity=any', shell=True).decode()
+
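+        # On success gcloud prints a fixed-width status table, e.g.:
+        #   NAME      ZONE           MACHINE_TYPE  EXTERNAL_IP  STATUS
+        #   fog-1234  us-central1-a  e2-medium     34.72.10.9   RUNNING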
+ # Grab external IP
+ ip = extract_bash_column(result, 'EXTERNAL_IP')
+
+        # Verify the response is an IPv4 address
+ if len(ip.split('.')) != 4:
+ raise Exception(f'Error creating instance: {ip}')
+
+ self._ip = ip
+
+ # Generate SSH keys
+ os.system(f"printf '\n\n' | gcloud compute ssh {self._name} --zone {self.zone}")
+
+ user = subprocess.check_output('whoami', shell=True).decode().strip()
+
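+        # The trailing comment of an OpenSSH public key has the form
+        # "<user>@<host>"; gcloud puts the GCP login username there.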
+        # Username
+        with open(f'/home/{user}/.ssh/google_compute_engine.pub') as f:
+            self._username = f.read().split(' ')[-1].strip().split('@')[0]
+
+ self._ssh_key_path = f'/home/{user}/.ssh/google_compute_engine'
+ self._is_created = True
+
+ self.logger.info(
+ f"Created {self.type} instance named {self._name} "
+ f"with id {self._name} and public IP address {self._ip}"
+ )
diff --git a/fogros2/fogros2/kubernetes/__init__.py b/fogros2/fogros2/kubernetes/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/fogros2/fogros2/kubernetes/generic.py b/fogros2/fogros2/kubernetes/generic.py
new file mode 100644
index 0000000..21b48a0
--- /dev/null
+++ b/fogros2/fogros2/kubernetes/generic.py
@@ -0,0 +1,257 @@
+# Copyright 2022 The Regents of the University of California (Regents)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Copyright ©2022. The Regents of the University of California (Regents).
+# All Rights Reserved. Permission to use, copy, modify, and distribute this
+# software and its documentation for educational, research, and not-for-profit
+# purposes, without fee and without a signed licensing agreement, is hereby
+# granted, provided that the above copyright notice, this paragraph and the
+# following two paragraphs appear in all copies, modifications, and
+# distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
+# Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
+# otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial
+# licensing opportunities. IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY
+# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
+# INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+# DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE. REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY,
+# PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+import json
+import os
+
+import subprocess
+import time
+import uuid
+import tempfile
+import textwrap
+
+from ..util import extract_bash_column
+
+from ..cloud_instance import CloudInstance
+
+class KubeInstance(CloudInstance):
+ """Generic Kubernetes CloudInstance"""
+
+ def __init__(
+ self,
+ container_image="docker.io/njha/fogros2_base",
+ zone="us-central1-a",
+ mcpu=0,
+ mb=0,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.cloud_service_provider = "ONPREM"
+
+ id_ = str(uuid.uuid4())[0:8]
+ self._name = f"fog-{id_}-{self._name}"
+
+ self.zone = zone
+ self.type = f"{mcpu}mx{mb}Mb"
+ self.container_image = container_image
+
+ self._mcpu = mcpu
+ self._mmb = mb
+
+ self._working_dir = os.path.join(self._working_dir_base, self._name)
+ os.makedirs(self._working_dir, exist_ok=True)
+
+ # after config
+ self._ssh_key = None
+
+ self.create()
+
+ def create(self):
+ self.logger.info(f"Creating new ROS node on Kubernetes with name {self._name}")
+ self.create_compute_engine_instance()
+ self.info(flush_to_disk=True)
+ self.connect()
+ self.install_cloud_dependencies()
+ self.push_ros_workspace()
+ self.info(flush_to_disk=True)
+ self._is_created = True
+
+ def info(self, flush_to_disk=True):
+ info_dict = super().info(flush_to_disk)
+ info_dict["compute_region"] = self.zone
+ info_dict["compute_instance_type"] = self.type
+ info_dict["compute_instance_id"] = self._name
+ if flush_to_disk:
+ with open(os.path.join(self._working_dir, "info"), "w+") as f:
+ json.dump(info_dict, f)
+ return info_dict
+
+    def force_start_vpn(self):
+        # This instance exposes its own VPN LoadBalancer service (see
+        # create_service_pair), so the robot-side wg0 is not forced up.
+        return False
+
+ def create_service_pair(self, pub_key_path: str):
+ # Instance Selector
+ selector = {
+ "edu.berkeley.autolab.fogros/instance": self._name,
+ }
+
+ # SSH Service
+ ssh_config: dict = {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {"name": f"{self._name}-ssh"},
+ "spec": {
+ "type": "LoadBalancer",
+ "ports": [
+ {
+ "port": 22,
+ "targetPort": 22,
+ "name": "ssh",
+ "protocol": "TCP",
+ }
+ ],
+ "selector": selector,
+ },
+ }
+ # VPN Service
+ vpn_config: dict = {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "name": f"{self._name}-vpn",
+ },
+ "spec": {
+ "type": "LoadBalancer",
+ "ports": [
+ {
+ "port": 51820,
+ "targetPort": 51820,
+ "name": "wg",
+ "protocol": "UDP",
+ }
+ ],
+ "selector": selector,
+ },
+ }
+
+ # Runner Pod
+ pod_resources = {
+ "memory": f"{self._mmb}Mi",
+ "cpu": f"{self._mcpu}m",
+ }
+ pod_config: dict = {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "name": self._name,
+ "labels": selector,
+ },
+ "spec": {
+ "restartPolicy": "Never",
+ "containers": [
+ {
+ "name": self._name,
+ "image": self.container_image,
+ "imagePullPolicy": "Always",
+ "securityContext": {
+ "capabilities": {
+                                # Kubernetes capability names omit the CAP_ prefix.
+                                "add": ["NET_ADMIN", "SYS_ADMIN"],
+ },
+ "privileged": True,
+ },
+ "resources": {
+ "requests": pod_resources,
+ "limits": pod_resources,
+ },
+ "env": [
+ {
+ "name": "SSH_PUBKEY",
+ "value": open(pub_key_path).read().strip(),
+ },
+ ],
+ "command": ["/bin/bash"],
+ "args": [
+ "-c",
+ textwrap.dedent(
+ """
+ echo $SSH_PUBKEY >> '/home/ubuntu/.ssh/authorized_keys' &&\\
+ chmod -R u=rwX '/home/ubuntu/.ssh' &&\\
+ chown -R 'ubuntu:ubuntu' '/home/ubuntu/.ssh' &&\\
+ service ssh restart &&\\
+ sleep infinity
+ """,
+ ),
+ ],
+ },
+ ],
+ },
+ }
+
+ # TODO: Use the Kubernetes API (pypy/kubernetes) instead of shelling out to kubectl.
+ for config in [vpn_config, ssh_config, pod_config]:
+            file = tempfile.NamedTemporaryFile()
+            with open(file.name, "w") as f:
+                f.write(json.dumps(config))
+ self.logger.debug(
+ f"Creating {config['kind']}/{config['metadata']['name']}..."
+ )
+ os.system(f"kubectl apply -f {file.name}")
+ file.close()
+
+ # Poll until services are live...
+ while True:
+ if (
+ not "Running"
+ in subprocess.check_output(
+ f"kubectl get pod {self._name}", shell=True
+ ).decode()
+ or "pending"
+ in subprocess.check_output(
+ f'kubectl get service {ssh_config["metadata"]["name"]}', shell=True
+ ).decode()
+ or "pending"
+ in subprocess.check_output(
+ f'kubectl get service {vpn_config["metadata"]["name"]}', shell=True
+ ).decode()
+ ):
+ self.logger.info("Some services still creating...")
+ time.sleep(5)
+ else:
+ break
+
+ self.logger.debug("Extracting IPs")
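+        # `kubectl get service` output is a fixed-width table, e.g.:
+        #   NAME          TYPE           CLUSTER-IP  EXTERNAL-IP  PORT(S)        AGE
+        #   fog-1234-ssh  LoadBalancer   10.0.0.15   34.72.10.9   22:32695/TCP   19s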
+ ssh_data = subprocess.check_output(
+ f'kubectl get service {ssh_config["metadata"]["name"]}', shell=True
+ ).decode()
+ vpn_data = subprocess.check_output(
+ f'kubectl get service {vpn_config["metadata"]["name"]}', shell=True
+ ).decode()
+
+ return extract_bash_column(ssh_data, "EXTERNAL-IP"), extract_bash_column(vpn_data, "EXTERNAL-IP")
+
+ def create_compute_engine_instance(self):
+ # Generate SSH keys
+ self._ssh_key_path = os.path.expanduser(f"~/.ssh/{self._name}")
+ os.system(f"ssh-keygen -f {self._ssh_key_path} -q -N ''")
+
+ ssh_ip, vpn_ip = self.create_service_pair(f"{self._ssh_key_path}.pub")
+
+ self._ip = ssh_ip
+ self._vpn_ip = vpn_ip
+
+ self._username = "ubuntu"
+ self._is_created = True
+
+ self.logger.info(
+ f"Created {self.type} instance named {self._name} "
+ f"with id {self._name} and public IP address {self._ip} with VPN IP {self._vpn_ip}"
+ )
diff --git a/fogros2/fogros2/launch_description.py b/fogros2/fogros2/launch_description.py
index e965b8e..167ed06 100644
--- a/fogros2/fogros2/launch_description.py
+++ b/fogros2/fogros2/launch_description.py
@@ -104,7 +104,8 @@ def visit(
]
vpn = VPN()
vpn.generate_wg_config_files(machines)
- vpn.start_robot_vpn()
+        if any(machine.force_start_vpn() for machine in machines):
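+            # Only bring up the robot-side wg0 when at least one machine
+            # requires it; KubeInstance opts out via force_start_vpn().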
+ vpn.start_robot_vpn()
# tell remote machine to push the to cloud nodes and
# wait here until all the nodes are done
diff --git a/fogros2/fogros2/scp.py b/fogros2/fogros2/scp.py
index c74b745..8f82ab5 100755
--- a/fogros2/fogros2/scp.py
+++ b/fogros2/fogros2/scp.py
@@ -45,12 +45,17 @@
class SCPClient:
- def __init__(self, ip, ssh_key_path):
+ def __init__(self, ip, ssh_key_path, username=None):
self.ip = ip
self.ssh_key = paramiko.RSAKey.from_private_key_file(ssh_key_path)
self.ssh_client = paramiko.SSHClient()
self.logger = logging.get_logger(__name__)
+ if username is None:
+ self.username = 'ubuntu'
+ else:
+ self.username = username
+
def connect(self):
self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
connected = False
@@ -58,7 +63,7 @@ def connect(self):
try:
self.ssh_client.connect(
hostname=self.ip,
- username="ubuntu",
+ username=self.username,
pkey=self.ssh_key,
look_for_keys=False,
)
@@ -67,7 +72,7 @@ def connect(self):
# See https://docs.paramiko.org/en/stable/api/client.html
except Exception as e:
self.logger.warn(f"{e}, retrying...")
- sleep(1)
+ sleep(5)
self.logger.info("SCP connected!")
def send_file(self, src_path, dst_path):
diff --git a/fogros2/fogros2/util.py b/fogros2/fogros2/util.py
index f7125c1..03fdf26 100644
--- a/fogros2/fogros2/util.py
+++ b/fogros2/fogros2/util.py
@@ -33,7 +33,10 @@
import errno
import os
import shutil
+import tarfile
+
_work_dir_cache = None
_instance_dir_cache = None
@@ -70,12 +73,58 @@ def instance_dir():
return _instance_dir_cache
-def make_zip_file(dir_name, target_path):
+# Archives with tar instead of zip; the original name is kept so existing
+# callers (e.g. push_ros_workspace) keep working.
+def make_zip_file(dir_name, target_path):
root_dir, workspace_name = os.path.split(dir_name)
print(root_dir, workspace_name)
- return shutil.make_archive(
- base_dir=workspace_name,
- root_dir=root_dir,
- format="zip",
- base_name=target_path,
- )
+    base_name = os.path.abspath(target_path)
+
+    archive_name = base_name + '.tar'
+    archive_dir = os.path.dirname(archive_name)
+
+    # https://stackoverflow.com/questions/16000794/python-tarfile-and-excludes
+    EXCLUDE_FILES = ['.git']
+
+    if archive_dir and not os.path.exists(archive_dir):
+        os.makedirs(archive_dir)
+    tar = tarfile.open(archive_name, 'w')
+    try:
+        # arcname avoids os.chdir(); exclude by basename so '.git' is skipped
+        # at any depth, not just when the member name matches exactly.
+        tar.add(dir_name, arcname=workspace_name,
+                filter=lambda x: None if os.path.basename(x.name) in EXCLUDE_FILES else x)
+    finally:
+        tar.close()
+    return archive_name
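+
+# Example: make_zip_file('/home/user/fog_ws', '/tmp/ros_workspace') writes
+# '/tmp/ros_workspace.tar' containing 'fog_ws/' minus any '.git' entries.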
+
+def extract_bash_column(subprocess_output: str, column_name: str, row_number: int = 0):
+    """
+    Extract one column's value from fixed-width CLI table output such as:
+
+        NAME          TYPE          CLUSTER-IP  EXTERNAL-IP  PORT(S)       AGE
+        ssh-balancer  LoadBalancer  10.0.0.15   <pending>    22:32695/TCP  19s
+
+    e.g. column_name='CLUSTER-IP' -> '10.0.0.15'
+
+    :param subprocess_output: Direct output of subprocess.check_output().decode()
+    :param column_name: The column name to search for, e.g. CLUSTER-IP
+    :param row_number: Defaults to the first data row; row_number=1 is the second data row
+    :return: The column's value as a string
+    """
+    lines = subprocess_output.split('\n')
+    if column_name not in lines[0]:
+        raise LookupError(f"Could not find column {column_name} in {lines[0].strip()}")
+    column_index = lines[0].index(column_name)
+
+    # Read from the column's start offset until whitespace or end of row.
+    output_str = ''
+    row = lines[row_number + 1]
+    while column_index < len(row) and row[column_index] != ' ':
+        output_str += row[column_index]
+        column_index += 1
+
+    return output_str
+
diff --git a/fogros2/fogros2/verb/image.py b/fogros2/fogros2/verb/image.py
new file mode 100644
index 0000000..9b6bdae
--- /dev/null
+++ b/fogros2/fogros2/verb/image.py
@@ -0,0 +1,117 @@
+import boto3
+
+from botocore.exceptions import NoRegionError
+from ros2cli.verb import VerbExtension
+
+
+class ImageVerb(VerbExtension):
+
+ def add_arguments(self, parser, cli_name):
+ parser.add_argument(
+ "name",
+ type=str,
+ nargs="*",
+ help="Set instance name. Can be found when running 'ros2 fog list' command next to ssh_key after "
+ "'FogROS2KEY-'",
+ )
+ parser.add_argument(
+ "--region",
+ nargs="*",
+ help="Set AWS region (overrides config/env settings)",
+ )
+ parser.add_argument(
+ "--dry-run",
+ action="store_true",
+ help="Show what would happen, but do not execute",
+ )
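+
+    # Typical invocation (assuming this verb is registered as "image"):
+    #   ros2 fog image <instance-name> --region us-west-1 --dry-run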
+
+ def query_region(self, region, name):
+ try:
+ client = boto3.client("ec2", region)
+ except NoRegionError:
+ raise RuntimeError(
+ "AWS is not configured! Please run `aws configure` first."
+ )
+ print("Instance name: ", name)
+ ec2_instances = client.describe_instances(
+ Filters=[
+ {
+ "Name": "instance.group-name",
+ "Values": ["FOGROS2_SECURITY_GROUP"],
+ },
+ {"Name": "tag:FogROS2-Name", "Values": name},
+ ]
+ )
+
+ if len(ec2_instances["Reservations"]) == 0:
+ print(
+ "No EC2 instances found with the specified name; "
+ "check list to be sure name is correct!"
+ )
+
+ return [client, ec2_instances]
+
+ def create_ami(self, client, ec2_instances, dry_run):
+ image_count = 0
+ for res in ec2_instances["Reservations"]:
+ for inst in res["Instances"]:
+ tag_map = (
+ {t["Key"]: t["Value"] for t in inst["Tags"]}
+ if "Tags" in inst
+ else {}
+ )
+ print(
+ f"Converting {tag_map.get('FogROS2-Name', '(unknown)')} "
+ f"{inst['InstanceId']} to AMI."
+ )
+ name = tag_map["FogROS2-Name"] + "-image"
+ inst_id = inst['InstanceId']
+
+ if not dry_run:
+ response = client.create_image(InstanceId=inst_id, Name=name)
+ if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
+ raise RuntimeError(
+ f"Could not create image for {inst['KeyName']}!"
+ )
+
+ print("done.")
+ image_count += 1
+
+ return image_count
+
+ def main(self, *, args):
+ regions = args.region
+ if regions is None or len(regions) == 0:
+ regions = [None]
+ elif "*" in regions or "all" in regions:
+ client = boto3.client("ec2")
+ response = client.describe_regions()
+ regions = [r["RegionName"] for r in response["Regions"]]
+
+ if len(regions) == 1:
+ image_count = self.create_ami(
+ *self.query_region(regions[0], args.name), args.dry_run
+ )
+ else:
+ from concurrent.futures import ThreadPoolExecutor
+
+ with ThreadPoolExecutor(max_workers=len(regions)) as executor:
+ futures = [
+ executor.submit(self.query_region, r, args.name)
+ for r in regions
+ ]
+ image_count = sum(
+ [
+ self.create_ami(*f.result(), args.dry_run)
+ for f in futures
+ ]
+ )
+
+ if image_count == 0:
+ print("No image was created")
+
diff --git a/fogros2/fogros2/vpn.py b/fogros2/fogros2/vpn.py
index 212f309..4b7f1e8 100755
--- a/fogros2/fogros2/vpn.py
+++ b/fogros2/fogros2/vpn.py
@@ -33,8 +33,8 @@
import os
-import wgconfig
-import wgconfig.wgexec as wgexec
+from .wgconfig import WGConfig
+from .wgexec import get_publickey, generate_privatekey
class VPN:
@@ -49,8 +49,8 @@ def __init__(
self.cloud_name_to_pub_key_path = dict()
self.cloud_name_to_priv_key_path = dict()
- self.robot_private_key = wgexec.generate_privatekey()
- self.robot_public_key = wgexec.get_publickey(self.robot_private_key)
+ self.robot_private_key = generate_privatekey()
+ self.robot_public_key = get_publickey(self.robot_private_key)
def generate_key_pairs(self, machines):
"""
@@ -60,9 +60,9 @@ def generate_key_pairs(self, machines):
"""
for machine in machines:
name = machine.name
- cloud_private_key = wgexec.generate_privatekey()
+ cloud_private_key = generate_privatekey()
self.cloud_name_to_priv_key_path[name] = cloud_private_key
- cloud_public_key = wgexec.get_publickey(cloud_private_key)
+ cloud_public_key = get_publickey(cloud_private_key)
self.cloud_name_to_pub_key_path[name] = cloud_public_key
def generate_wg_config_files(self, machines):
@@ -74,7 +74,7 @@ def generate_wg_config_files(self, machines):
name = machine.name
machine_config_pwd = self.cloud_key_path + name
machine_priv_key = self.cloud_name_to_priv_key_path[name]
- aws_config = wgconfig.WGConfig(machine_config_pwd)
+ aws_config = WGConfig(machine_config_pwd)
aws_config.add_attr(None, "PrivateKey", machine_priv_key)
aws_config.add_attr(None, "ListenPort", 51820)
aws_config.add_attr(None, "Address", f"10.0.0.{counter:d}/24")
@@ -86,13 +86,16 @@ def generate_wg_config_files(self, machines):
counter += 1
# generate robot configs
- robot_config = wgconfig.WGConfig(self.robot_key_path)
+ robot_config = WGConfig(self.robot_key_path)
robot_config.add_attr(None, "PrivateKey", self.robot_private_key)
robot_config.add_attr(None, "ListenPort", 51820)
robot_config.add_attr(None, "Address", "10.0.0.1/24")
for machine in machines:
name = machine.name
- ip = machine.ip
+ if hasattr(machine, 'vpn_ip') and machine.vpn_ip is not None:
+ ip = machine.vpn_ip
+ else:
+ ip = machine.ip
cloud_pub_key = self.cloud_name_to_pub_key_path[name]
robot_config.add_peer(cloud_pub_key, f"# AWS{name}")
robot_config.add_attr(cloud_pub_key, "AllowedIPs", "10.0.0.2/32")
@@ -102,7 +105,7 @@ def generate_wg_config_files(self, machines):
def start_robot_vpn(self):
# Copy /tmp/fogros-local.conf to /etc/wireguard/wg0.conf locally.
- # TODO: This needs root. Move this to a separate script with setuid.
+        # Do NOT move this to a setuid executable: that would be a trivial
+        # local privilege escalation (LPE) vector.
os.system("sudo cp /tmp/fogros-local.conf /etc/wireguard/wg0.conf")
os.system("sudo chmod 600 /etc/wireguard/wg0.conf")
os.system("sudo wg-quick down wg0")
diff --git a/fogros2/fogros2/wgconfig.py b/fogros2/fogros2/wgconfig.py
new file mode 100644
index 0000000..706c80f
--- /dev/null
+++ b/fogros2/fogros2/wgconfig.py
@@ -0,0 +1,287 @@
+# Copyright 2022 The Regents of the University of California (Regents)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Copyright ©2022. The Regents of the University of California (Regents).
+# All Rights Reserved. Permission to use, copy, modify, and distribute this
+# software and its documentation for educational, research, and not-for-profit
+# purposes, without fee and without a signed licensing agreement, is hereby
+# granted, provided that the above copyright notice, this paragraph and the
+# following two paragraphs appear in all copies, modifications, and
+# distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
+# Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
+# otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial
+# licensing opportunities. IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY
+# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
+# INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+# DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE. REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY,
+# PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+
+from builtins import str
+from builtins import range
+from io import open
+import os
+
+
+class WGConfig():
+ """A class for parsing and writing Wireguard configuration files"""
+ SECTION_FIRSTLINE = '_index_firstline'
+ SECTION_LASTLINE = '_index_lastline'
+ SECTION_RAW = '_rawdata'
+ _interface = None # interface attributes
+ _peers = None # peer data
+
+ def __init__(self, file, keyattr='PublicKey'):
+ """Object initialization"""
+ self.filename = self.file2filename(file)
+ self.keyattr = keyattr
+ self.lines = []
+ self.initialize_file()
+
+ @staticmethod
+ def file2filename(file):
+ """Handle special filenames: 'wg0' and 'wg0.conf' become '/etc/wireguard/wg0.conf' """
+ if os.path.basename(file) == file:
+ if not file.endswith('.conf'):
+ file += '.conf'
+ file = os.path.join('/etc/wireguard', file)
+ return file
+
+ def invalidate_data(self):
+ """Clears the data structs"""
+ self._interface = None
+ self._peers = None
+
+ def read_file(self):
+ """Reads the Wireguard config file into memory"""
+ with open(self.filename, 'r') as wgfile:
+ self.lines = [line.rstrip() for line in wgfile.readlines()]
+ self.invalidate_data()
+
+ def write_file(self, file=None):
+ """Writes a Wireguard config file from memory to file"""
+ if file is None:
+ filename = self.filename
+ else:
+ filename = self.file2filename(file)
+ with os.fdopen(os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o640), 'w') as wgfile:
+ wgfile.writelines(line + '\n' for line in self.lines)
+
+ @staticmethod
+ def parse_line(line):
+ """Splits a single attr/value line into its parts"""
+ attr, _, value = line.partition('=')
+ attr = attr.strip()
+ parts = value.partition('#')
+ value = parts[0].strip() # strip comments and whitespace
+ value = str(value) # this line is for Python2 support only
+ comment = parts[1] + parts[2]
+ if value.isnumeric():
+ value = [int(value)]
+ else:
+ value = [item.strip() for item in value.split(',')] # decompose into list based on commata as separator
+ return attr, value, comment
+
+ def parse_lines(self):
+ """Parses the lines of a Wireguard config file into memory"""
+
+ # There will be two special attributes in the parsed data:
+ #_index_firstline: Line (zero indexed) of the section header (including any leading lines with comments)
+ #_index_lastline: Line (zero indexed) of the last attribute line of the section (including any directly following comments)
+
+ def close_section(section, section_data):
+ section_data = {k: (v if len(v) > 1 else v[0]) for k, v in section_data.items()}
+ if section is None: # nothing to close on first section
+ return
+ elif section == 'interface': # close interface section
+ self._interface = section_data
+ else: # close peer section
+ peername = section_data.get(self.keyattr)
+ self._peers[peername] = section_data
+ section_data[self.SECTION_RAW] = self.lines[section_data[self.SECTION_FIRSTLINE]:(section_data[self.SECTION_LASTLINE] + 1)]
+
+ self._interface = dict()
+ self._peers = dict()
+ section = None
+ section_data = dict()
+ last_empty_line_in_section = -1 # virtual empty line before start of file
+ for i, line in enumerate(self.lines):
+ # Ignore leading whitespace and trailing whitespace
+ line = line.strip()
+ # Ignore empty lines and comments
+ if len(line) == 0:
+ last_empty_line_in_section = i
+ continue
+ if line.startswith('['): # section
+ if last_empty_line_in_section is not None:
+ section_data[self.SECTION_LASTLINE] = [last_empty_line_in_section - 1]
+ close_section(section, section_data)
+ section_data = dict()
+ section = line[1:].partition(']')[0].lower()
+ if last_empty_line_in_section is None:
+ section_data[self.SECTION_FIRSTLINE] = [i]
+ else:
+ section_data[self.SECTION_FIRSTLINE] = [last_empty_line_in_section + 1]
+ last_empty_line_in_section = None
+ section_data[self.SECTION_LASTLINE] = [i]
+                if section not in ['interface', 'peer']:
+ raise ValueError('Unsupported section [{0}] in line {1}'.format(section, i))
+ elif line.startswith('#'):
+ section_data[self.SECTION_LASTLINE] = [i]
+ else: # regular line
+ attr, value, _comment = self.parse_line(line)
+ section_data[attr] = section_data.get(attr, [])
+ section_data[attr].extend(value)
+ section_data[self.SECTION_LASTLINE] = [i]
+ close_section(section, section_data)
+
+ def handle_leading_comment(self, leading_comment):
+ """Appends a leading comment for a section"""
+ if leading_comment is not None:
+ if leading_comment.strip()[0] != '#':
+ raise ValueError('A comment needs to start with a "#"')
+ self.lines.append(leading_comment)
+
+ def initialize_file(self, leading_comment=None):
+ """Empties the file and adds the interface section header"""
+ self.lines = list()
+ self.handle_leading_comment(leading_comment) # add leading comment if needed
+ self.lines.append('[Interface]')
+ self.invalidate_data()
+
+ def add_peer(self, key, leading_comment=None):
+ """Adds a new peer with the given (public) key"""
+ if key in self.peers:
+ raise KeyError('Peer to be added already exists')
+ self.lines.append('') # append an empty line for separation
+ self.handle_leading_comment(leading_comment) # add leading comment if needed
+ # Append peer with key attribute
+ self.lines.append('[Peer]')
+ self.lines.append('{0} = {1}'.format(self.keyattr, key))
+ # Invalidate data cache
+ self.invalidate_data()
+
+ def del_peer(self, key):
+ """Removes the peer with the given (public) key"""
+        if key not in self.peers:
+ raise KeyError('The peer to be deleted does not exist')
+ section_firstline = self.peers[key][self.SECTION_FIRSTLINE]
+ section_lastline = self.peers[key][self.SECTION_LASTLINE]
+ # Remove a blank line directly before the peer section
+ if section_firstline > 0:
+ if len(self.lines[section_firstline - 1]) == 0:
+ section_firstline -= 1
+ # Only keep needed lines
+ result = []
+ if section_firstline > 0:
+ result.extend(self.lines[0:section_firstline])
+ result.extend(self.lines[(section_lastline + 1):])
+ self.lines = result
+ # Invalidate data cache
+ self.invalidate_data()
+
+ def get_sectioninfo(self, key):
+ """Get first and last line of the section identified by the given key ("None" for interface section)"""
+ if key is None: # interface
+ section_firstline = self.interface[self.SECTION_FIRSTLINE]
+ section_lastline = self.interface[self.SECTION_LASTLINE]
+ else: # peer
+            if key not in self.peers:
+ raise KeyError('The specified peer does not exist')
+ section_firstline = self.peers[key][self.SECTION_FIRSTLINE]
+ section_lastline = self.peers[key][self.SECTION_LASTLINE]
+ return section_firstline, section_lastline
+
+ def add_attr(self, key, attr, value, leading_comment=None, append_as_line=False):
+ """Adds an attribute/value pair to the given peer ("None" for adding an interface attribute)"""
+ section_firstline, section_lastline = self.get_sectioninfo(key)
+ if leading_comment is not None:
+ if leading_comment.strip()[0] != '#':
+ raise ValueError('A comment needs to start with a "#"')
+ # Look for line with the attribute
+ line_found = None
+ for i in range(section_firstline + 1, section_lastline + 1):
+ line_attr, line_value, line_comment = self.parse_line(self.lines[i])
+ if attr == line_attr:
+ line_found = i
+ # Add the attribute at the right place
+ if (line_found is None) or append_as_line:
+ line_found = section_lastline if (line_found is None) else line_found
+ line_found += 1
+ self.lines.insert(line_found, '{0} = {1}'.format(attr, value))
+ else:
+ line_attr, line_value, line_comment = self.parse_line(self.lines[line_found])
+ line_value.append(value)
+ if len(line_comment) > 0:
+ line_comment = ' ' + line_comment
+ line_value = [str(item) for item in line_value]
+ self.lines[line_found] = line_attr + ' = ' + ', '.join(line_value) + line_comment
+ # Handle leading comments
+ if leading_comment is not None:
+ self.lines.insert(line_found, leading_comment)
+ # Invalidate data cache
+ self.invalidate_data()
+
+ def del_attr(self, key, attr, value=None, remove_leading_comments=True):
+ """Removes an attribute/value pair from the given peer ("None" for adding an interface attribute); set 'value' to 'None' to remove all values"""
+ section_firstline, section_lastline = self.get_sectioninfo(key)
+ # Find all lines with matching attribute name and (if requested) value
+ line_found = []
+ for i in range(section_firstline + 1, section_lastline + 1):
+ line_attr, line_value, line_comment = self.parse_line(self.lines[i])
+ if attr == line_attr:
+ if (value is None) or (value in line_value):
+ line_found.append(i)
+ if len(line_found) == 0:
+ raise ValueError('The attribute/value to be deleted is not present')
+ # Process all relevant lines
+ for i in reversed(line_found): # reversed so that non-processed indices stay valid
+ if value is None:
+ del(self.lines[i])
+ else:
+ line_attr, line_value, line_comment = self.parse_line(self.lines[i])
+ line_value.remove(value)
+ if len(line_value) > 0: # keep remaining values in that line
+ self.lines[i] = line_attr + ' = ' + ', '.join(line_value) + line_comment
+ else: # otherwise line is no longer needed
+ del(self.lines[i])
+ # Handle leading comments
+ if remove_leading_comments:
+ i = line_found[0] - 1
+ while i > 0:
+ if len(self.lines[i]) and (self.lines[i][0] == '#'):
+ del(self.lines[i])
+ i -= 1
+ else:
+ break
+ # Invalidate data cache
+ self.invalidate_data()
+
+ @property
+ def interface(self):
+ """Dictionary with interface attributes"""
+ if self._interface is None:
+ self.parse_lines()
+ return self._interface
+
+ @property
+ def peers(self):
+ """Dictionary with peer data"""
+ if self._peers is None:
+ self.parse_lines()
+ return self._peers
\ No newline at end of file
diff --git a/fogros2/fogros2/wgexec.py b/fogros2/fogros2/wgexec.py
new file mode 100644
index 0000000..fffe250
--- /dev/null
+++ b/fogros2/fogros2/wgexec.py
@@ -0,0 +1,90 @@
+# Copyright 2022 The Regents of the University of California (Regents)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Copyright ©2022. The Regents of the University of California (Regents).
+# All Rights Reserved. Permission to use, copy, modify, and distribute this
+# software and its documentation for educational, research, and not-for-profit
+# purposes, without fee and without a signed licensing agreement, is hereby
+# granted, provided that the above copyright notice, this paragraph and the
+# following two paragraphs appear in all copies, modifications, and
+# distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
+# Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
+# otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial
+# licensing opportunities. IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY
+# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
+# INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+# DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE. REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY,
+# PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+
+import logging
+import shlex
+import subprocess
+
+
+logger = logging.getLogger(__name__)
+
+
+def execute(command, input=None, suppressoutput=False, suppresserrors=False):
+ """Execute a command"""
+ args = shlex.split(command)
+ stdin = None if input is None else subprocess.PIPE
+ input = None if input is None else input.encode('utf-8')
+ nsp = subprocess.Popen(args, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = nsp.communicate(input=input)
+ if err is not None:
+ err = err.decode('utf8')
+ if not suppresserrors and (len(err) > 0):
+ logger.error(err)
+ out = out.decode('utf8')
+ if not suppressoutput and (len(out) > 0):
+ print(out)
+ nsp.wait()
+ return out, err, nsp.returncode
+
+def generate_privatekey():
+ """Generates a WireGuard private key"""
+ out, err, returncode = execute('wg genkey', suppressoutput=True)
+ if (returncode != 0) or (len(err) > 0):
+ return None
+ out = out.strip() # remove trailing newline
+ return out
+
+def get_publickey(wg_private):
+ """Gets the public key belonging to the given WireGuard private key"""
+ if wg_private is None:
+ return None
+ out, err, returncode = execute('wg pubkey', input=wg_private, suppressoutput=True)
+ if (returncode != 0) or (len(err) > 0):
+ return None
+ out = out.strip() # remove trailing newline
+ return out
+
+def generate_keypair():
+ """Generates a WireGuard key pair (returns tuple of private key and public key)"""
+ wg_private = generate_privatekey()
+ wg_public = get_publickey(wg_private)
+ return wg_private, wg_public
+
+def generate_presharedkey():
+ """Generates a WireGuard preshared key"""
+ out, err, returncode = execute('wg genpsk', suppressoutput=True)
+ if (returncode != 0) or (len(err) > 0):
+ return None
+ out = out.strip() # remove trailing newline
+ return out
\ No newline at end of file
diff --git a/fogros2/get-docker.sh b/fogros2/get-docker.sh
new file mode 100644
index 0000000..563493f
--- /dev/null
+++ b/fogros2/get-docker.sh
@@ -0,0 +1,660 @@
+#!/bin/sh
+set -e
+# Docker CE for Linux installation script
+#
+# See https://docs.docker.com/engine/install/ for the installation steps.
+#
+# This script is meant for quick & easy install via:
+# $ curl -fsSL https://get.docker.com -o get-docker.sh
+# $ sh get-docker.sh
+#
+# For test builds (ie. release candidates):
+# $ curl -fsSL https://test.docker.com -o test-docker.sh
+# $ sh test-docker.sh
+#
+# NOTE: Make sure to verify the contents of the script
+# you downloaded matches the contents of install.sh
+# located at https://github.com/docker/docker-install
+# before executing.
+#
+# Git commit from https://github.com/docker/docker-install when
+# the script was uploaded (Should only be modified by upload job):
+SCRIPT_COMMIT_SHA="b2e29ef7a9a89840d2333637f7d1900a83e7153f"
+
+# strip "v" prefix if present
+VERSION="${VERSION#v}"
+
+# The channel to install from:
+# * nightly
+# * test
+# * stable
+# * edge (deprecated)
+DEFAULT_CHANNEL_VALUE="stable"
+if [ -z "$CHANNEL" ]; then
+ CHANNEL=$DEFAULT_CHANNEL_VALUE
+fi
+
+DEFAULT_DOWNLOAD_URL="https://download.docker.com"
+if [ -z "$DOWNLOAD_URL" ]; then
+ DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL
+fi
+
+DEFAULT_REPO_FILE="docker-ce.repo"
+if [ -z "$REPO_FILE" ]; then
+ REPO_FILE="$DEFAULT_REPO_FILE"
+fi
+
+mirror=''
+DRY_RUN=${DRY_RUN:-}
+while [ $# -gt 0 ]; do
+ case "$1" in
+ --mirror)
+ mirror="$2"
+ shift
+ ;;
+ --dry-run)
+ DRY_RUN=1
+ ;;
+ --*)
+ echo "Illegal option $1"
+ ;;
+ esac
+ shift $(( $# > 0 ? 1 : 0 ))
+done
+
+case "$mirror" in
+ Aliyun)
+ DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce"
+ ;;
+ AzureChinaCloud)
+ DOWNLOAD_URL="https://mirror.azure.cn/docker-ce"
+ ;;
+esac
+
+command_exists() {
+ command -v "$@" > /dev/null 2>&1
+}
+
+# version_gte checks if the version specified in $VERSION is at least
+# the given CalVer (YY.MM) version. returns 0 (success) if $VERSION is either
+# unset (=latest) or newer or equal than the specified version. Returns 1 (fail)
+# otherwise.
+#
+# examples:
+#
+# VERSION=20.10
+# version_gte 20.10 // 0 (success)
+# version_gte 19.03 // 0 (success)
+# version_gte 21.10 // 1 (fail)
+version_gte() {
+ if [ -z "$VERSION" ]; then
+ return 0
+ fi
+ eval calver_compare "$VERSION" "$1"
+}
+
+# calver_compare compares two CalVer (YY.MM) version strings. returns 0 (success)
+# if version A is newer or equal than version B, or 1 (fail) otherwise. Patch
+# releases and pre-release (-alpha/-beta) are not taken into account
+#
+# examples:
+#
+# calver_compare 20.10 19.03 // 0 (success)
+# calver_compare 20.10 20.10 // 0 (success)
+# calver_compare 19.03 20.10 // 1 (fail)
+calver_compare() (
+ set +x
+
+ yy_a="$(echo "$1" | cut -d'.' -f1)"
+ yy_b="$(echo "$2" | cut -d'.' -f1)"
+ if [ "$yy_a" -lt "$yy_b" ]; then
+ return 1
+ fi
+ if [ "$yy_a" -gt "$yy_b" ]; then
+ return 0
+ fi
+ mm_a="$(echo "$1" | cut -d'.' -f2)"
+ mm_b="$(echo "$2" | cut -d'.' -f2)"
+ if [ "${mm_a#0}" -lt "${mm_b#0}" ]; then
+ return 1
+ fi
+
+ return 0
+)
+
+is_dry_run() {
+ if [ -z "$DRY_RUN" ]; then
+ return 1
+ else
+ return 0
+ fi
+}
+
+is_wsl() {
+ case "$(uname -r)" in
+ *microsoft* ) true ;; # WSL 2
+ *Microsoft* ) true ;; # WSL 1
+ * ) false;;
+ esac
+}
+
+is_darwin() {
+ case "$(uname -s)" in
+ *darwin* ) true ;;
+ *Darwin* ) true ;;
+ * ) false;;
+ esac
+}
+
+deprecation_notice() {
+ distro=$1
+ distro_version=$2
+ echo
+ printf "\033[91;1mDEPRECATION WARNING\033[0m\n"
+ printf " This Linux distribution (\033[1m%s %s\033[0m) reached end-of-life and is no longer supported by this script.\n" "$distro" "$distro_version"
+ echo " No updates or security fixes will be released for this distribution, and users are recommended"
+ echo " to upgrade to a currently maintained version of $distro."
+ echo
+ printf "Press \033[1mCtrl+C\033[0m now to abort this script, or wait for the installation to continue."
+ echo
+ sleep 10
+}
+
+get_distribution() {
+ lsb_dist=""
+ # Every system that we officially support has /etc/os-release
+ if [ -r /etc/os-release ]; then
+ lsb_dist="$(. /etc/os-release && echo "$ID")"
+ fi
+ # Returning an empty string here should be alright since the
+ # case statements don't act unless you provide an actual value
+ echo "$lsb_dist"
+}
+
+echo_docker_as_nonroot() {
+ if is_dry_run; then
+ return
+ fi
+ if command_exists docker && [ -e /var/run/docker.sock ]; then
+ (
+ set -x
+ $sh_c 'docker version'
+ ) || true
+ fi
+
+ # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
+ echo
+ echo "================================================================================"
+ echo
+ if version_gte "20.10"; then
+ echo "To run Docker as a non-privileged user, consider setting up the"
+ echo "Docker daemon in rootless mode for your user:"
+ echo
+ echo " dockerd-rootless-setuptool.sh install"
+ echo
+ echo "Visit https://docs.docker.com/go/rootless/ to learn about rootless mode."
+ echo
+ fi
+ echo
+ echo "To run the Docker daemon as a fully privileged service, but granting non-root"
+ echo "users access, refer to https://docs.docker.com/go/daemon-access/"
+ echo
+ echo "WARNING: Access to the remote API on a privileged Docker daemon is equivalent"
+ echo " to root access on the host. Refer to the 'Docker daemon attack surface'"
+ echo " documentation for details: https://docs.docker.com/go/attack-surface/"
+ echo
+ echo "================================================================================"
+ echo
+}
+
+# Check if this is a forked Linux distro
+check_forked() {
+
+ # Check for lsb_release command existence, it usually exists in forked distros
+ if command_exists lsb_release; then
+ # Check if the `-u` option is supported
+ set +e
+ lsb_release -a -u > /dev/null 2>&1
+ lsb_release_exit_code=$?
+ set -e
+
+ # Check if the command has exited successfully, it means we're in a forked distro
+ if [ "$lsb_release_exit_code" = "0" ]; then
+ # Print info about current distro
+ cat <<-EOF
+ You're using '$lsb_dist' version '$dist_version'.
+ EOF
+
+ # Get the upstream release info
+ lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]')
+ dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]')
+
+ # Print info about upstream distro
+ cat <<-EOF
+ Upstream release is '$lsb_dist' version '$dist_version'.
+ EOF
+ else
+ if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
+ if [ "$lsb_dist" = "osmc" ]; then
+ # OSMC runs Raspbian
+ lsb_dist=raspbian
+ else
+ # We're Debian and don't even know it!
+ lsb_dist=debian
+ fi
+ dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
+ case "$dist_version" in
+ 11)
+ dist_version="bullseye"
+ ;;
+ 10)
+ dist_version="buster"
+ ;;
+ 9)
+ dist_version="stretch"
+ ;;
+ 8)
+ dist_version="jessie"
+ ;;
+ esac
+ fi
+ fi
+ fi
+}
+
+do_install() {
+ echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA"
+
+ if command_exists docker; then
+ cat >&2 <<-'EOF'
+ Warning: the "docker" command appears to already exist on this system.
+
+ If you already have Docker installed, this script can cause trouble, which is
+ why we're displaying this warning and providing the opportunity to cancel the
+ installation.
+
+ If you installed the current Docker package using this script and are using it
+ again to update Docker, you can safely ignore this message.
+
+ You may press Ctrl+C now to abort this script.
+ EOF
+ ( set -x; sleep 20 )
+ fi
+
+ user="$(id -un 2>/dev/null || true)"
+
+ sh_c='sh -c'
+ if [ "$user" != 'root' ]; then
+ if command_exists sudo; then
+ sh_c='sudo -E sh -c'
+ elif command_exists su; then
+ sh_c='su -c'
+ else
+ cat >&2 <<-'EOF'
+ Error: this installer needs the ability to run commands as root.
+ We are unable to find either "sudo" or "su" available to make this happen.
+ EOF
+ exit 1
+ fi
+ fi
+
+ if is_dry_run; then
+ sh_c="echo"
+ fi
+
+ # perform some very rudimentary platform detection
+ lsb_dist=$( get_distribution )
+ lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
+
+ if is_wsl; then
+ echo
+ echo "WSL DETECTED: We recommend using Docker Desktop for Windows."
+ echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
+ echo
+ cat >&2 <<-'EOF'
+
+ You may press Ctrl+C now to abort this script.
+ EOF
+ ( set -x; sleep 20 )
+ fi
+
+ case "$lsb_dist" in
+
+ ubuntu)
+ if command_exists lsb_release; then
+ dist_version="$(lsb_release --codename | cut -f2)"
+ fi
+ if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
+ dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
+ fi
+ ;;
+
+ debian|raspbian)
+ dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
+ case "$dist_version" in
+ 11)
+ dist_version="bullseye"
+ ;;
+ 10)
+ dist_version="buster"
+ ;;
+ 9)
+ dist_version="stretch"
+ ;;
+ 8)
+ dist_version="jessie"
+ ;;
+ esac
+ ;;
+
+ centos|rhel|sles)
+ if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
+ dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
+ fi
+ ;;
+
+ *)
+ if command_exists lsb_release; then
+ dist_version="$(lsb_release --release | cut -f2)"
+ fi
+ if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
+ dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
+ fi
+ ;;
+
+ esac
+
+ # Check if this is a forked Linux distro
+ check_forked
+
+ # Print deprecation warnings for distro versions that recently reached EOL,
+ # but may still be commonly used (especially LTS versions).
+ case "$lsb_dist.$dist_version" in
+ debian.stretch|debian.jessie)
+ deprecation_notice "$lsb_dist" "$dist_version"
+ ;;
+ raspbian.stretch|raspbian.jessie)
+ deprecation_notice "$lsb_dist" "$dist_version"
+ ;;
+ ubuntu.xenial|ubuntu.trusty)
+ deprecation_notice "$lsb_dist" "$dist_version"
+ ;;
+ fedora.*)
+ if [ "$dist_version" -lt 33 ]; then
+ deprecation_notice "$lsb_dist" "$dist_version"
+ fi
+ ;;
+ esac
+
+ # Run setup for each distro accordingly
+ case "$lsb_dist" in
+ ubuntu|debian|raspbian)
+ pre_reqs="apt-transport-https ca-certificates curl"
+ if ! command -v gpg > /dev/null; then
+ pre_reqs="$pre_reqs gnupg"
+ fi
+ apt_repo="deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL"
+ (
+ if ! is_dry_run; then
+ set -x
+ fi
+ $sh_c 'apt-get update -qq >/dev/null'
+ $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq $pre_reqs >/dev/null"
+ $sh_c 'mkdir -p /etc/apt/keyrings && chmod -R 0755 /etc/apt/keyrings'
+ $sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" | gpg --dearmor --yes -o /etc/apt/keyrings/docker.gpg"
+ $sh_c "chmod a+r /etc/apt/keyrings/docker.gpg"
+ $sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list"
+ $sh_c 'apt-get update -qq >/dev/null'
+ )
+ pkg_version=""
+ if [ -n "$VERSION" ]; then
+ if is_dry_run; then
+ echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
+ else
+ # This works for incomplete versions (e.g. "17.12"), but may not actually grab the "latest" if in the test channel
+ pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/~ce~.*/g" | sed "s/-/.*/g").*-0~$lsb_dist"
+ search_command="apt-cache madison 'docker-ce' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
+ pkg_version="$($sh_c "$search_command")"
+ echo "INFO: Searching repository for VERSION '$VERSION'"
+ echo "INFO: $search_command"
+ if [ -z "$pkg_version" ]; then
+ echo
+ echo "ERROR: '$VERSION' not found amongst apt-cache madison results"
+ echo
+ exit 1
+ fi
+ if version_gte "18.09"; then
+ search_command="apt-cache madison 'docker-ce-cli' | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
+ echo "INFO: $search_command"
+ cli_pkg_version="=$($sh_c "$search_command")"
+ fi
+ pkg_version="=$pkg_version"
+ fi
+ fi
+ (
+ pkgs="docker-ce${pkg_version%=}"
+ if version_gte "18.09"; then
+ # older versions didn't ship the cli and containerd as separate packages
+ pkgs="$pkgs docker-ce-cli${cli_pkg_version%=} containerd.io"
+ fi
+ if version_gte "20.10"; then
+ pkgs="$pkgs docker-compose-plugin"
+ fi
+ if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then
+ # also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently)
+ pkgs="$pkgs docker-scan-plugin"
+ fi
+ # TODO(thaJeztah) remove the $CHANNEL check once 22.06 and docker-buildx-plugin is published to the "stable" channel
+ if [ "$CHANNEL" = "test" ] && version_gte "22.06"; then
+ pkgs="$pkgs docker-buildx-plugin"
+ fi
+ if ! is_dry_run; then
+ set -x
+ fi
+ $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq --no-install-recommends $pkgs >/dev/null"
+ if version_gte "20.10"; then
+ # Install docker-ce-rootless-extras without "--no-install-recommends", so as to install slirp4netns when available
+ $sh_c "DEBIAN_FRONTEND=noninteractive apt-get install -y -qq docker-ce-rootless-extras${pkg_version%=} >/dev/null"
+ fi
+ )
+ echo_docker_as_nonroot
+ exit 0
+ ;;
+ centos|fedora|rhel)
+ if [ "$(uname -m)" != "s390x" ] && [ "$lsb_dist" = "rhel" ]; then
+ echo "Packages for RHEL are currently only available for s390x."
+ exit 1
+ fi
+ yum_repo="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
+ if ! curl -Ifs "$yum_repo" > /dev/null; then
+ echo "Error: Unable to curl repository file $yum_repo, is it valid?"
+ exit 1
+ fi
+ if [ "$lsb_dist" = "fedora" ]; then
+ pkg_manager="dnf"
+ config_manager="dnf config-manager"
+ enable_channel_flag="--set-enabled"
+ disable_channel_flag="--set-disabled"
+ pre_reqs="dnf-plugins-core"
+ pkg_suffix="fc$dist_version"
+ else
+ pkg_manager="yum"
+ config_manager="yum-config-manager"
+ enable_channel_flag="--enable"
+ disable_channel_flag="--disable"
+ pre_reqs="yum-utils"
+ pkg_suffix="el"
+ fi
+ (
+ if ! is_dry_run; then
+ set -x
+ fi
+ $sh_c "$pkg_manager install -y -q $pre_reqs"
+ $sh_c "$config_manager --add-repo $yum_repo"
+
+ if [ "$CHANNEL" != "stable" ]; then
+ $sh_c "$config_manager $disable_channel_flag docker-ce-*"
+ $sh_c "$config_manager $enable_channel_flag docker-ce-$CHANNEL"
+ fi
+ $sh_c "$pkg_manager makecache"
+ )
+ pkg_version=""
+ if [ -n "$VERSION" ]; then
+ if is_dry_run; then
+ echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
+ else
+ pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g").*$pkg_suffix"
+ search_command="$pkg_manager list --showduplicates 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
+ pkg_version="$($sh_c "$search_command")"
+ echo "INFO: Searching repository for VERSION '$VERSION'"
+ echo "INFO: $search_command"
+ if [ -z "$pkg_version" ]; then
+ echo
+ echo "ERROR: '$VERSION' not found amongst $pkg_manager list results"
+ echo
+ exit 1
+ fi
+ if version_gte "18.09"; then
+ # older versions don't support a cli package
+ search_command="$pkg_manager list --showduplicates 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
+ cli_pkg_version="$($sh_c "$search_command" | cut -d':' -f 2)"
+ fi
+ # Cut out the epoch and prefix with a '-'
+ pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)"
+ fi
+ fi
+ (
+ pkgs="docker-ce$pkg_version"
+ if version_gte "18.09"; then
+ # older versions didn't ship the cli and containerd as separate packages
+ if [ -n "$cli_pkg_version" ]; then
+ pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io"
+ else
+ pkgs="$pkgs docker-ce-cli containerd.io"
+ fi
+ fi
+ if version_gte "20.10" && [ "$(uname -m)" = "x86_64" ]; then
+ # also install the latest version of the "docker scan" cli-plugin (only supported on x86 currently)
+ pkgs="$pkgs docker-scan-plugin"
+ fi
+ if version_gte "20.10"; then
+ pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
+ fi
+ # TODO(thaJeztah) remove the $CHANNEL check once 22.06 and docker-buildx-plugin is published to the "stable" channel
+ if [ "$CHANNEL" = "test" ] && version_gte "22.06"; then
+ pkgs="$pkgs docker-buildx-plugin"
+ fi
+ if ! is_dry_run; then
+ set -x
+ fi
+ $sh_c "$pkg_manager install -y -q $pkgs"
+ )
+ echo_docker_as_nonroot
+ exit 0
+ ;;
+ sles)
+ if [ "$(uname -m)" != "s390x" ]; then
+ echo "Packages for SLES are currently only available for s390x"
+ exit 1
+ fi
+
+ sles_version="${dist_version##*.}"
+ sles_repo="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
+ opensuse_repo="https://download.opensuse.org/repositories/security:SELinux/SLE_15_SP$sles_version/security:SELinux.repo"
+ if ! curl -Ifs "$sles_repo" > /dev/null; then
+ echo "Error: Unable to curl repository file $sles_repo, is it valid?"
+ exit 1
+ fi
+ pre_reqs="ca-certificates curl libseccomp2 awk"
+ (
+ if ! is_dry_run; then
+ set -x
+ fi
+ $sh_c "zypper install -y $pre_reqs"
+ $sh_c "zypper addrepo $sles_repo"
+ if ! is_dry_run; then
+ cat >&2 <<-'EOF'
+ WARNING!!
+ openSUSE repository (https://download.opensuse.org/repositories/security:SELinux) will be enabled now.
+ Do you wish to continue?
+ You may press Ctrl+C now to abort this script.
+ EOF
+ ( set -x; sleep 30 )
+ fi
+ $sh_c "zypper addrepo $opensuse_repo"
+ $sh_c "zypper --gpg-auto-import-keys refresh"
+ $sh_c "zypper lr -d"
+ )
+ pkg_version=""
+ if [ -n "$VERSION" ]; then
+ if is_dry_run; then
+ echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
+ else
+ pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g")"
+ search_command="zypper search -s --match-exact 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'"
+ pkg_version="$($sh_c "$search_command")"
+ echo "INFO: Searching repository for VERSION '$VERSION'"
+ echo "INFO: $search_command"
+ if [ -z "$pkg_version" ]; then
+ echo
+ echo "ERROR: '$VERSION' not found amongst zypper list results"
+ echo
+ exit 1
+ fi
+ search_command="zypper search -s --match-exact 'docker-ce-cli' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'"
+ # It's okay for cli_pkg_version to be blank, since older versions don't support a cli package
+ cli_pkg_version="$($sh_c "$search_command")"
+ pkg_version="-$pkg_version"
+
+ search_command="zypper search -s --match-exact 'docker-ce-rootless-extras' | grep '$pkg_pattern' | tail -1 | awk '{print \$6}'"
+ rootless_pkg_version="$($sh_c "$search_command")"
+ rootless_pkg_version="-$rootless_pkg_version"
+ fi
+ fi
+ (
+ pkgs="docker-ce$pkg_version"
+ if version_gte "18.09"; then
+ if [ -n "$cli_pkg_version" ]; then
+ # older versions didn't ship the cli and containerd as separate packages
+ pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io"
+ else
+ pkgs="$pkgs docker-ce-cli containerd.io"
+ fi
+ fi
+ if version_gte "20.10"; then
+ pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
+ fi
+ # TODO(thaJeztah) remove the $CHANNEL check once 22.06 and docker-buildx-plugin is published to the "stable" channel
+ if [ "$CHANNEL" = "test" ] && version_gte "22.06"; then
+ pkgs="$pkgs docker-buildx-plugin"
+ fi
+ if ! is_dry_run; then
+ set -x
+ fi
+ $sh_c "zypper -q install -y $pkgs"
+ )
+ echo_docker_as_nonroot
+ exit 0
+ ;;
+ *)
+ if [ -z "$lsb_dist" ]; then
+ if is_darwin; then
+ echo
+ echo "ERROR: Unsupported operating system 'macOS'"
+ echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
+ echo
+ exit 1
+ fi
+ fi
+ echo
+ echo "ERROR: Unsupported distribution '$lsb_dist'"
+ echo
+ exit 1
+ ;;
+ esac
+ exit 1
+}
+
+# wrapped up in a function so that we have some protection against only getting
+# half the file during "curl | sh"
+do_install
diff --git a/fogros2/image/Dockerfile b/fogros2/image/Dockerfile
new file mode 100644
index 0000000..bee9937
--- /dev/null
+++ b/fogros2/image/Dockerfile
@@ -0,0 +1,11 @@
+FROM ros:humble
+
+# TODO: every SSH server built from this image shares the same host key; keys should be regenerated per container.
+RUN apt-get update && apt-get install -y \
+ vim software-properties-common gnupg lsb-release locales \
+ ros-humble-rmw-cyclonedds-cpp openssh-server sudo curl \
+ python3-colcon-common-extensions wireguard unzip python3-pip iproute2
+
+# These should already be pulled in as FogROS2 dependencies, but install them
+# explicitly so the image also works standalone.
+RUN python3 -m pip install boto3 paramiko scp wgconfig kubernetes
+
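+# Create an 'ubuntu' user with passwordless sudo for the SSH-based workspace setup.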
+RUN useradd 'ubuntu' -m -s /bin/bash && \
+ mkdir '/home/ubuntu/.ssh' && \
+ echo 'ubuntu ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
+
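+# Keep the container alive indefinitely; FogROS2 attaches to it after startup.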
+CMD ["sleep", "infinity"]
diff --git a/fogros2/setup.py b/fogros2/setup.py
index f948207..72dfa95 100755
--- a/fogros2/setup.py
+++ b/fogros2/setup.py
@@ -62,6 +62,7 @@
"list = fogros2.verb.list:ListVerb",
"delete = fogros2.verb.delete:DeleteVerb",
"connect = fogros2.verb.ssh:SSHVerb",
+ "image = fogros2.verb.image:ImageVerb",
],
},
)
diff --git a/fogros2_examples/launch/talker.auto_aws.launch.py b/fogros2_examples/launch/talker.auto_aws.launch.py
index cf69125..a225384 100644
--- a/fogros2_examples/launch/talker.auto_aws.launch.py
+++ b/fogros2_examples/launch/talker.auto_aws.launch.py
@@ -38,10 +38,13 @@
def generic_ubuntu_ami():
return {
- "us-west-1": { "ami_image": "ami-01154c8b2e9a14885" },
- "us-west-2": { "ami_image": "ami-0ddf424f81ddb0720" },
- "us-east-1": { "ami_image": "ami-08d4ac5b634553e16" },
- "us-east-2": { "ami_image": "ami-0960ab670c8bb45f3" },
+ "us-west-1": { "ami_image": "ami-02ea247e531eb3ce6" },
+ "us-west-2": { "ami_image": "ami-017fecd1353bcc96e" },
+ "us-east-1": { "ami_image": "ami-08c40ec9ead489470" },
+ "us-east-2": { "ami_image": "ami-097a2df4ac947655f" },
+ "ap-northeast-1": { "ami_image": "ami-03f4fa076d2981b45" },
+ "ap-northeast-2": { "ami_image": "ami-0e9bfdb247cc8de84" },
+ "ap-northeast-3": { "ami_image": "ami-08c2ee02329b72f26" },
}
def generate_launch_description():
diff --git a/fogros2_examples/launch/talker.azure.launch.py b/fogros2_examples/launch/talker.azure.launch.py
new file mode 100644
index 0000000..0870818
--- /dev/null
+++ b/fogros2_examples/launch/talker.azure.launch.py
@@ -0,0 +1,60 @@
+# Copyright 2022 The Regents of the University of California (Regents)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Copyright ©2022. The Regents of the University of California (Regents).
+# All Rights Reserved. Permission to use, copy, modify, and distribute this
+# software and its documentation for educational, research, and not-for-profit
+# purposes, without fee and without a signed licensing agreement, is hereby
+# granted, provided that the above copyright notice, this paragraph and the
+# following two paragraphs appear in all copies, modifications, and
+# distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
+# Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
+# otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial
+# licensing opportunities. IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY
+# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
+# INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+# DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE. REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY,
+# PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+from launch_ros.actions import Node
+
+import fogros2
+
+
+def generate_launch_description():
+ """Install and run azure cli by az login. You may follow the link"""
+ """https://learn.microsoft.com/en-us/cli/azure/install-azure-cli-linux"""
+ """Talker example that launches the listener on Azure."""
+ ld = fogros2.FogROSLaunchDescription()
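+ # 'fogros' is a placeholder; use a resource group from your own Azure subscription.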
+ machine1 = fogros2.AzureCloudInstance(
+ resource_group='fogros'
+ )
+
+ listener_node = Node(
+ package="fogros2_examples", executable="listener", output="screen"
+ )
+
+ talker_node = fogros2.CloudNode(
+ package="fogros2_examples",
+ executable="talker",
+ output="screen",
+ machine=machine1,
+ )
+ ld.add_action(talker_node)
+ ld.add_action(listener_node)
+ return ld
diff --git a/fogros2_examples/launch/talker.gcp.launch.py b/fogros2_examples/launch/talker.gcp.launch.py
new file mode 100644
index 0000000..b45faad
--- /dev/null
+++ b/fogros2_examples/launch/talker.gcp.launch.py
@@ -0,0 +1,58 @@
+# Copyright 2022 The Regents of the University of California (Regents)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Copyright ©2022. The Regents of the University of California (Regents).
+# All Rights Reserved. Permission to use, copy, modify, and distribute this
+# software and its documentation for educational, research, and not-for-profit
+# purposes, without fee and without a signed licensing agreement, is hereby
+# granted, provided that the above copyright notice, this paragraph and the
+# following two paragraphs appear in all copies, modifications, and
+# distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
+# Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
+# otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial
+# licensing opportunities. IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY
+# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
+# INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+# DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE. REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY,
+# PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+from launch_ros.actions import Node
+
+import fogros2
+
+
+def generate_launch_description():
+ """Talker example that launches the listener on Google Compute Engine."""
+ ld = fogros2.FogROSLaunchDescription()
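+ # 'shade-prod' is a placeholder; use your own GCP project ID.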
+ machine1 = fogros2.GCPCloudInstance(
+ project_id='shade-prod'
+ )
+
+ listener_node = Node(
+ package="fogros2_examples", executable="listener", output="screen"
+ )
+
+ talker_node = fogros2.CloudNode(
+ package="fogros2_examples",
+ executable="talker",
+ output="screen",
+ machine=machine1,
+ )
+ ld.add_action(talker_node)
+ ld.add_action(listener_node)
+ return ld
diff --git a/fogros2_examples/launch/talker.kube.launch.py b/fogros2_examples/launch/talker.kube.launch.py
new file mode 100644
index 0000000..141c15e
--- /dev/null
+++ b/fogros2_examples/launch/talker.kube.launch.py
@@ -0,0 +1,56 @@
+# Copyright 2022 The Regents of the University of California (Regents)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Copyright ©2022. The Regents of the University of California (Regents).
+# All Rights Reserved. Permission to use, copy, modify, and distribute this
+# software and its documentation for educational, research, and not-for-profit
+# purposes, without fee and without a signed licensing agreement, is hereby
+# granted, provided that the above copyright notice, this paragraph and the
+# following two paragraphs appear in all copies, modifications, and
+# distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150
+# Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201,
+# otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial
+# licensing opportunities. IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY
+# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
+# INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+# DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE. REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY,
+# PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+from launch_ros.actions import Node
+
+import fogros2
+
+
+def generate_launch_description():
+ """Talker example that launches the listener on GCP Kube."""
+ ld = fogros2.FogROSLaunchDescription()
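+ # KubeInstance presumably schedules the pod via the current kubeconfig context.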
+ machine1 = fogros2.KubeInstance()
+
+ listener_node = Node(
+ package="fogros2_examples", executable="listener", output="screen"
+ )
+
+ talker_node = fogros2.CloudNode(
+ package="fogros2_examples",
+ executable="talker",
+ output="screen",
+ machine=machine1,
+ )
+ ld.add_action(talker_node)
+ ld.add_action(listener_node)
+ return ld