diff --git a/src/Cargo.lock b/src/Cargo.lock index 012b9c40..e88665c2 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -2034,6 +2034,7 @@ dependencies = [ "borsh", "qos_core", "qos_hex", + "qos_nsm", "serde", "serde_json", "tokio", diff --git a/src/init/Cargo.lock b/src/init/Cargo.lock index 5dd30d6d..19ac74cc 100644 --- a/src/init/Cargo.lock +++ b/src/init/Cargo.lock @@ -1157,6 +1157,7 @@ dependencies = [ "qos_p256", "serde", "serde_bytes", + "serde_json", "tokio", "tokio-vsock", ] diff --git a/src/init/init.rs b/src/init/init.rs index a46c37df..fa62b089 100644 --- a/src/init/init.rs +++ b/src/init/init.rs @@ -1,8 +1,8 @@ use qos_core::{ handles::Handles, - io::{SocketAddress, StreamPool, VMADDR_NO_FLAGS}, + io::{SocketAddress, VMADDR_NO_FLAGS}, reaper::Reaper, - EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, QUORUM_FILE, SEC_APP_SOCK, + EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, QUORUM_FILE, }; use qos_nsm::Nsm; use qos_system::{dmesg, freopen, get_local_cid, mount, reboot}; @@ -27,8 +27,8 @@ fn init_rootfs() { ]; for (src, target, fstype, flags, data) in args { match mount(src, target, fstype, flags, data) { - Ok(()) => dmesg(format!("Mounted {}", target)), - Err(e) => eprintln!("{}", e), + Ok(()) => dmesg(format!("Mounted {target}")), + Err(e) => eprintln!("{e}"), } } } @@ -43,7 +43,7 @@ fn init_console() { for (filename, mode, file) in args { match freopen(filename, mode, file) { Ok(()) => {} - Err(e) => eprintln!("{}", e), + Err(e) => eprintln!("{e}"), } } } @@ -60,7 +60,7 @@ async fn main() { dmesg("QuorumOS Booted".to_string()); let cid = get_local_cid().unwrap(); - dmesg(format!("CID is {}", cid)); + dmesg(format!("CID is {cid}")); let handles = Handles::new( EPHEMERAL_KEY_FILE.to_string(), @@ -70,20 +70,10 @@ async fn main() { ); const START_PORT: u32 = 3; - const INITIAL_POOL_SIZE: u8 = 1; // start at pool size 1, grow based on manifest/args as necessary (see Reaper) - let core_pool = StreamPool::new( - SocketAddress::new_vsock(cid, START_PORT, 
VMADDR_NO_FLAGS), - INITIAL_POOL_SIZE, - ) - .expect("unable to create core pool"); + let core_socket = + SocketAddress::new_vsock(cid, START_PORT, VMADDR_NO_FLAGS); - let app_pool = StreamPool::new( - SocketAddress::new_unix(SEC_APP_SOCK), - INITIAL_POOL_SIZE, // start at pool size 1, grow based on manifest/args as necessary (see Reaper) - ) - .expect("unable to create app pool"); - - Reaper::execute(&handles, Box::new(Nsm), core_pool, app_pool, None).await; + Reaper::execute(&handles, Box::new(Nsm), core_socket, None).await; reboot(); } diff --git a/src/integration/examples/boot_enclave.rs b/src/integration/examples/boot_enclave.rs index cb248b83..32b4bee5 100644 --- a/src/integration/examples/boot_enclave.rs +++ b/src/integration/examples/boot_enclave.rs @@ -33,7 +33,6 @@ async fn main() { fs::create_dir_all(&*tmp).unwrap(); let usock: PathWrapper = "/tmp/enclave-example/example.sock".into(); - let app_usock: PathWrapper = "/tmp/enclave-example/example-app.sock".into(); let secret_path: PathWrapper = "/tmp/enclave-example/example.secret".into(); let pivot_path: PathWrapper = "/tmp/enclave-example/example.pivot".into(); let manifest_path: PathWrapper = @@ -245,8 +244,6 @@ async fn main() { .args([ "--usock", &*usock, - "--app-usock", - &*app_usock, "--quorum-file", &*secret_path, "--pivot-file", diff --git a/src/integration/tests/enclave_app_client_socket_stress.rs b/src/integration/tests/enclave_app_client_socket_stress.rs index b07506a1..44c74cd5 100644 --- a/src/integration/tests/enclave_app_client_socket_stress.rs +++ b/src/integration/tests/enclave_app_client_socket_stress.rs @@ -5,18 +5,17 @@ use integration::{ wait_for_usock, PivotSocketStressMsg, PIVOT_SOCKET_STRESS_PATH, }; use qos_core::{ - client::SocketClient, + client::{ClientError, SocketClient}, handles::Handles, - io::{SocketAddress, StreamPool}, + io::{IOError, SocketAddress, StreamPool}, protocol::{ - msg::ProtocolMsg, services::boot::{ Manifest, ManifestEnvelope, ManifestSet, Namespace, 
NitroConfig, PivotConfig, RestartPolicy, ShareSet, }, - ProtocolError, ProtocolPhase, INITIAL_CLIENT_TIMEOUT, + ProtocolPhase, }, - reaper::{Reaper, REAPER_RESTART_DELAY}, + reaper::Reaper, }; use qos_nsm::mock::MockNsm; use qos_p256::P256Pair; @@ -77,18 +76,13 @@ async fn enclave_app_client_socket_stress() { handles.put_manifest_envelope(&manifest_envelope).unwrap(); handles.put_quorum_key(&p256_pair).unwrap(); - let enclave_pool = - StreamPool::single(SocketAddress::new_unix(ENCLAVE_SOCK)).unwrap(); - - let app_pool = - StreamPool::single(SocketAddress::new_unix(APP_SOCK)).unwrap(); + let enclave_socket = SocketAddress::new_unix(ENCLAVE_SOCK); tokio::spawn(async move { Reaper::execute( &handles, Box::new(MockNsm), - enclave_pool, - app_pool, + enclave_socket, // Force the phase to quorum key provisioned so message proxy-ing // works Some(ProtocolPhase::QuorumKeyProvisioned), @@ -99,56 +93,37 @@ async fn enclave_app_client_socket_stress() { // Make sure the pivot has some time to start up wait_for_usock(APP_SOCK).await; - let enclave_client_pool = - StreamPool::single(SocketAddress::new_unix(ENCLAVE_SOCK)).unwrap(); - let enclave_client = SocketClient::new( - enclave_client_pool.shared(), - INITIAL_CLIENT_TIMEOUT + Duration::from_secs(3), // needs to be bigger than the slow request below + some time for recovery + let app_client_pool = + StreamPool::single(SocketAddress::new_unix(APP_SOCK)).unwrap(); + let app_client = SocketClient::new( + app_client_pool.shared(), + Duration::from_millis(2000), ); - let app_request = - borsh::to_vec(&PivotSocketStressMsg::PanicRequest).unwrap(); - let request = - borsh::to_vec(&ProtocolMsg::ProxyRequest { data: app_request }) - .unwrap(); - let raw_response = enclave_client.call(&request).await.unwrap(); - let response = ProtocolMsg::try_from_slice(&raw_response).unwrap(); - - assert_eq!( - response, - ProtocolMsg::ProtocolErrorResponse( - ProtocolError::AppClientRecvConnectionClosed - ) - ); + let request = 
borsh::to_vec(&PivotSocketStressMsg::PanicRequest).unwrap(); + let raw_response = app_client.call(&request).await.unwrap_err(); + + match raw_response { + ClientError::IOError(IOError::RecvConnectionClosed) => {} // expected + _ => panic!("unexpected error received: {:?}", raw_response), + } + + // Make sure the pivot has some time to restart + wait_for_usock(APP_SOCK).await; - tokio::time::sleep(REAPER_RESTART_DELAY + Duration::from_secs(1)).await; // The pivot panicked and should have been restarted. - let app_request = - borsh::to_vec(&PivotSocketStressMsg::OkRequest(1)).unwrap(); - let request = - borsh::to_vec(&ProtocolMsg::ProxyRequest { data: app_request }) - .unwrap(); - let raw_response = enclave_client.call(&request).await.unwrap(); - let response = { - let msg = ProtocolMsg::try_from_slice(&raw_response).unwrap(); - let data = match msg { - ProtocolMsg::ProxyResponse { data } => data, - x => panic!("Expected proxy response, got {x:?}"), - }; - PivotSocketStressMsg::try_from_slice(&data).unwrap() - }; + let request = borsh::to_vec(&PivotSocketStressMsg::OkRequest(1)).unwrap(); + let raw_response = app_client.call(&request).await.unwrap(); + let response = PivotSocketStressMsg::try_from_slice(&raw_response).unwrap(); assert_eq!(response, PivotSocketStressMsg::OkResponse(1)); // Send a request that the app will take too long to respond to - let app_request = - borsh::to_vec(&PivotSocketStressMsg::SlowRequest(5500)).unwrap(); let request = - borsh::to_vec(&ProtocolMsg::ProxyRequest { data: app_request }) - .unwrap(); - let raw_response = enclave_client.call(&request).await.unwrap(); - let response = ProtocolMsg::try_from_slice(&raw_response).unwrap(); - assert_eq!( - response, - ProtocolMsg::ProtocolErrorResponse(ProtocolError::AppClientRecvTimeout) - ); + borsh::to_vec(&PivotSocketStressMsg::SlowRequest(2100)).unwrap(); + let raw_response = app_client.call(&request).await.unwrap_err(); + + match raw_response { + ClientError::IOError(IOError::RecvTimeout) => 
{} // expected + _ => panic!("unexpected error received: {:?}", raw_response), + } } diff --git a/src/integration/tests/qos_host.rs b/src/integration/tests/qos_host.rs index d108608d..3b878dce 100644 --- a/src/integration/tests/qos_host.rs +++ b/src/integration/tests/qos_host.rs @@ -3,13 +3,12 @@ use std::{process::Command, time::Duration}; use integration::PIVOT_OK_PATH; use qos_test_primitives::{ChildWrapper, PathWrapper}; -const TEST_ENCLAVE_SOCKET: &str = "/tmp/async_qos_host_test/enclave.sock"; - #[tokio::test] async fn connects_and_gets_info() { // prep sock pool dir - std::fs::create_dir_all("/tmp/async_qos_host_test").unwrap(); + std::fs::create_dir_all("/tmp/qos_host_test").unwrap(); + const TEST_ENCLAVE_SOCKET: &str = "/tmp/qos_host_test/enclave.sock"; let _qos_host: ChildWrapper = Command::new("../target/debug/qos_host") .arg("--usock") .arg(TEST_ENCLAVE_SOCKET) @@ -29,8 +28,8 @@ async fn connects_and_gets_info() { assert!(r.is_err()); // expect 500 here let enclave_socket = format!("{TEST_ENCLAVE_SOCKET}_0"); // manually pick the 1st one - let secret_path: PathWrapper = "/tmp/async_qos_host_test.secret".into(); - let manifest_path: PathWrapper = "/tmp/async_qos_host_test.manifest".into(); + let secret_path: PathWrapper = "/tmp/qos_host_test.secret".into(); + let manifest_path: PathWrapper = "/tmp/qos_host_test.manifest".into(); // For our sanity, ensure the secret does not yet exist drop(std::fs::remove_file(&*secret_path)); diff --git a/src/integration/tests/qos_host_bridge.rs b/src/integration/tests/qos_host_bridge.rs new file mode 100644 index 00000000..37a751f1 --- /dev/null +++ b/src/integration/tests/qos_host_bridge.rs @@ -0,0 +1,501 @@ +use std::{ + fs, + io::{BufRead, BufReader, Write}, + process::{Command, Stdio}, +}; + +use borsh::de::BorshDeserialize; +use integration::{ + PivotSocketStressMsg, LOCAL_HOST, PCR3_PRE_IMAGE_PATH, + PIVOT_SOCKET_STRESS_PATH, QOS_DIST_DIR, +}; +use qos_core::protocol::{ + services::{ + boot::{ + Approval, 
Manifest, ManifestSet, Namespace, PivotConfig, + RestartPolicy, ShareSet, DEFAULT_APP_HOST_PORT, + }, + genesis::{GenesisMemberOutput, GenesisOutput}, + }, + ProtocolPhase, QosHash, +}; +use qos_crypto::sha_256; +use qos_host::EnclaveInfo; +use qos_p256::P256Pair; +use qos_test_primitives::{ChildWrapper, PathWrapper}; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, +}; + +#[tokio::test] +async fn qos_host_bridge_works() { + const PIVOT_HASH_PATH: &str = "/tmp/qos_host_bridge-pivot-hash.txt"; + + let host_port = qos_test_primitives::find_free_port().unwrap(); + let tmp: PathWrapper = "/tmp/qos_host_bridge".into(); + let _: PathWrapper = PIVOT_HASH_PATH.into(); + fs::create_dir_all(&*tmp).unwrap(); + + let usock_path = "/tmp/qos_host_bridge/qos_host_bridge.sock".to_owned(); + let usock: PathWrapper = usock_path.clone().into(); + let secret_path: PathWrapper = + "/tmp/qos_host_bridge/qos_host_bridge.secret".into(); + let pivot_path: PathWrapper = + "/tmp/qos_host_bridge/qos_host_bridge.pivot".into(); + let manifest_path: PathWrapper = + "/tmp/qos_host_bridge/qos_host_bridge.manifest".into(); + let eph_path: PathWrapper = + "/tmp/qos_host_bridge/ephemeral_key.secret".into(); + + let boot_dir: PathWrapper = "/tmp/qos_host_bridge/boot-dir".into(); + fs::create_dir_all(&*boot_dir).unwrap(); + let attestation_dir: PathWrapper = + "/tmp/qos_host_bridge/attestation-dir".into(); + fs::create_dir_all(&*attestation_dir).unwrap(); + let attestation_doc_path = format!("{}/attestation_doc", &*attestation_dir); + + let all_personal_dir = "./mock/boot-e2e/all-personal-dir"; + + let namespace = "quit-coding-to-vape"; + + let personal_dir = |user: &str| format!("{all_personal_dir}/{user}-dir"); + + let user1 = "user1"; + let user2 = "user2"; + let user3 = "user3"; + + // -- Create pivot-build-fingerprints.txt + let pivot = fs::read(PIVOT_SOCKET_STRESS_PATH).unwrap(); + let mock_pivot_hash = sha_256(&pivot); + let pivot_hash = 
qos_hex::encode_to_vec(&mock_pivot_hash); + std::fs::write(PIVOT_HASH_PATH, pivot_hash).unwrap(); + + // -- CLIENT create manifest. + let pivot_app_sock_path = usock_path + ".appsock"; + let pivot_args = format!("[{pivot_app_sock_path}]"); + let cli_manifest_path = format!("{}/manifest", &*boot_dir); + + assert!(Command::new("../target/debug/qos_client") + .args([ + "generate-manifest", + "--nonce", + "2", + "--namespace", + namespace, + "--restart-policy", + "never", + "--pivot-hash-path", + PIVOT_HASH_PATH, + "--qos-release-dir", + QOS_DIST_DIR, + "--pcr3-preimage-path", + PCR3_PRE_IMAGE_PATH, + "--manifest-path", + &cli_manifest_path, + "--pivot-args", + &pivot_args, + "--manifest-set-dir", + "./mock/keys/manifest-set", + "--share-set-dir", + "./mock/keys/share-set", + "--patch-set-dir", + "./mock/keys/manifest-set", + "--quorum-key-path", + "./mock/namespaces/quit-coding-to-vape/quorum_key.pub" + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + // Check the manifest written to file + let manifest: Manifest = + serde_json::from_slice(&fs::read(&cli_manifest_path).unwrap()).unwrap(); + + let genesis_output = { + let contents = + fs::read("./mock/boot-e2e/genesis-dir/genesis_output").unwrap(); + GenesisOutput::try_from_slice(&contents).unwrap() + }; + // For simplicity sake, we use the same keys for the share set and manifest + // set. + let mut members: Vec<_> = genesis_output + .member_outputs + .iter() + .cloned() + .map(|GenesisMemberOutput { share_set_member, .. 
}| share_set_member) + .collect(); + members.sort(); + + let namespace_field = Namespace { + name: namespace.to_string(), + nonce: 2, + quorum_key: genesis_output.quorum_key, + }; + assert_eq!(manifest.namespace, namespace_field); + let pivot = PivotConfig { + hash: mock_pivot_hash, + restart: RestartPolicy::Never, + args: vec![pivot_app_sock_path.to_string()], + }; + assert_eq!(manifest.pivot, pivot); + let manifest_set = ManifestSet { threshold: 2, members: members.clone() }; + assert_eq!(manifest.manifest_set, manifest_set); + let share_set = ShareSet { threshold: 2, members }; + assert_eq!(manifest.share_set, share_set); + + // -- CLIENT make sure each user can run `approve-manifest` + for alias in [user1, user2, user3] { + let approval_path = format!( + "{}/{}-{}-{}.approval", + &*boot_dir, alias, namespace, manifest.namespace.nonce, + ); + + let secret_path = format!("{}/{}.secret", &personal_dir(alias), alias); + + let mut child = Command::new("../target/debug/qos_client") + .args([ + "approve-manifest", + "--secret-path", + &*secret_path, + "--manifest-path", + &cli_manifest_path, + "--manifest-approvals-dir", + &*boot_dir, + "--pcr3-preimage-path", + PCR3_PRE_IMAGE_PATH, + "--pivot-hash-path", + PIVOT_HASH_PATH, + "--qos-release-dir", + QOS_DIST_DIR, + "--manifest-set-dir", + "./mock/keys/manifest-set", + "--share-set-dir", + "./mock/keys/share-set", + "--patch-set-dir", + "./mock/keys/manifest-set", + "--quorum-key-path", + "./mock/namespaces/quit-coding-to-vape/quorum_key.pub", + "--alias", + alias, + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let mut stdin = child.stdin.take().expect("Failed to open stdin"); + + let mut stdout = { + let stdout = child.stdout.as_mut().unwrap(); + let stdout_reader = BufReader::new(stdout); + stdout_reader.lines() + }; + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace name: quit-coding-to-vape? 
(y/n)" + ); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace nonce: 2? (y/n)" + ); + // On purpose, try to input a bad value, neither yes or no + stdin + .write_all("maybe\n".as_bytes()) + .expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Please answer with either \"yes\" (y) or \"no\" (n)" + ); + // Try the longer option ("yes" rather than "y") + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct pivot restart policy: RestartPolicy::Never? (y/n)" + ); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Are these the correct pivot args:" + ); + assert_eq!( + &stdout.next().unwrap().unwrap(), + "[\"/tmp/qos_host_bridge/qos_host_bridge.sock.appsock\"]?" + ); + assert_eq!(&stdout.next().unwrap().unwrap(), "(y/n)"); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct socket pool size:" + ); + assert_eq!(&stdout.next().unwrap().unwrap(), "1?"); + assert_eq!(&stdout.next().unwrap().unwrap(), "(y/n)"); + stdin.write_all("y\n".as_bytes()).expect("Failed to write to stdin"); + + // Wait for the command to write the approval and exit + assert!(child.wait().unwrap().success()); + + // Read in the generated approval to check it was created correctly + let approval: Approval = + serde_json::from_slice(&fs::read(approval_path).unwrap()).unwrap(); + let personal_pair = P256Pair::from_hex_file(format!( + "{}/{}.secret", + personal_dir(alias), + alias, + )) + .unwrap(); + + let signature = personal_pair.sign(&manifest.qos_hash()).unwrap(); + assert_eq!(approval.signature, signature); + + assert_eq!(approval.member.alias, alias); + assert_eq!( + approval.member.pub_key, + 
personal_pair.public_key().to_bytes(), + ); + } + + // -- ENCLAVE start enclave + let mut _enclave_child_process: ChildWrapper = + Command::new("../target/debug/qos_core") + .args([ + "--usock", + &*usock, + "--quorum-file", + &*secret_path, + "--pivot-file", + &*pivot_path, + "--ephemeral-file", + &*eph_path, + "--mock", + "--manifest-file", + &*manifest_path, + ]) + .spawn() + .unwrap() + .into(); + + // -- HOST start host + let mut _host_child_process: ChildWrapper = + Command::new("../target/debug/qos_host") + .args([ + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--usock", + &*usock, + "--enable-host-bridge", + ]) + .spawn() + .unwrap() + .into(); + + // -- Make sure the enclave and host have time to boot + qos_test_primitives::wait_until_port_is_bound(host_port); + + // -- CLIENT generate the manifest envelope + assert!(Command::new("../target/debug/qos_client") + .args([ + "generate-manifest-envelope", + "--manifest-approvals-dir", + &*boot_dir, + "--manifest-path", + &cli_manifest_path, + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + // -- CLIENT broadcast boot standard instruction + let manifest_envelope_path = format!("{}/manifest_envelope", &*boot_dir,); + assert!(Command::new("../target/debug/qos_client") + .args([ + "boot-standard", + "--manifest-envelope-path", + &manifest_envelope_path, + "--pivot-path", + PIVOT_SOCKET_STRESS_PATH, + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--pcr3-preimage-path", + "./mock/pcr3-preimage.txt", + "--unsafe-skip-attestation", + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + // For each user, post a share, + for user in [&user1, &user2] { + // Get attestation doc and manifest + assert!(Command::new("../target/debug/qos_client") + .args([ + "get-attestation-doc", + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--attestation-doc-path", + &*attestation_doc_path, + "--manifest-envelope-path", + 
"/tmp/dont_care" + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + + let share_path = format!("{}/{}.share", &personal_dir(user), user); + let secret_path = format!("{}/{}.secret", &personal_dir(user), user); + let eph_wrapped_share_path: PathWrapper = + format!("{}/{}.eph_wrapped.share", &*tmp, user).into(); + let approval_path: PathWrapper = + format!("{}/{}.attestation.approval", &*tmp, user).into(); + // Encrypt share to ephemeral key + let mut child = Command::new("../target/debug/qos_client") + .args([ + "proxy-re-encrypt-share", + "--share-path", + &share_path, + "--secret-path", + &secret_path, + "--attestation-doc-path", + &*attestation_doc_path, + "--eph-wrapped-share-path", + &eph_wrapped_share_path, + "--approval-path", + &approval_path, + "--manifest-envelope-path", + &manifest_envelope_path, + "--pcr3-preimage-path", + PCR3_PRE_IMAGE_PATH, + "--manifest-set-dir", + "./mock/keys/manifest-set", + "--alias", + user, + "--unsafe-skip-attestation", + "--unsafe-eph-path-override", + &*eph_path, + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let mut stdin = child.stdin.take().expect("Failed to open stdin"); + + let mut stdout = { + let stdout = child.stdout.as_mut().unwrap(); + let stdout_reader = BufReader::new(stdout); + stdout_reader.lines() + }; + + // Skip over a log message + stdout.next(); + + // Answer prompts with yes + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace name: quit-coding-to-vape? (y/n)" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Is this the correct namespace nonce: 2? (y/n)" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "Does this AWS IAM role belong to the intended organization: arn:aws:iam::123456789012:role/Webserver? 
(y/n)" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + assert_eq!( + &stdout.next().unwrap().unwrap(), + "The following manifest set members approved:" + ); + stdin.write_all("yes\n".as_bytes()).expect("Failed to write to stdin"); + + // Check that it finished successfully + assert!(child.wait().unwrap().success()); + + // Post the encrypted share + assert!(Command::new("../target/debug/qos_client") + .args([ + "post-share", + "--host-port", + &host_port.to_string(), + "--host-ip", + LOCAL_HOST, + "--eph-wrapped-share-path", + &eph_wrapped_share_path, + "--approval-path", + &approval_path, + ]) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); + } + + let enclave_info_url = + format!("http://{LOCAL_HOST}:{host_port}/qos/enclave-info"); + let enclave_info: EnclaveInfo = + ureq::get(&enclave_info_url).call().unwrap().into_json().unwrap(); + assert_eq!(enclave_info.phase, ProtocolPhase::QuorumKeyProvisioned); + + // Give the enclave time to start the pivot + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + + // Wait for the qos_host app bridge to run + qos_test_primitives::wait_until_port_is_bound(DEFAULT_APP_HOST_PORT); + + // send a PivotSocketStressMsg to check if the bridge works all the way + let mut tcp_stream = TcpStream::connect("127.0.0.1:3000").await.unwrap(); + + let msg = PivotSocketStressMsg::OkRequest(42); + let msg_bytes = borsh::to_vec(&msg).unwrap(); + let mut header = (msg_bytes.len() as u64).to_le_bytes(); + + // send the header/length + tcp_stream.write(&header).await.unwrap(); + // send the msg + tcp_stream.write(&msg_bytes).await.unwrap(); + + // receive the reply header + assert_eq!(8, tcp_stream.read_exact(&mut header).await.unwrap()); + let reply_size = usize::from_le_bytes(header); + let mut reply_bytes = vec![0u8; reply_size]; + // receive the reply msg + assert_eq!( + reply_size, + tcp_stream.read_exact(&mut reply_bytes).await.unwrap() + ); + // decode the reply msg + let reply: 
PivotSocketStressMsg = borsh::from_slice(&reply_bytes).unwrap(); + + match reply { + PivotSocketStressMsg::OkResponse(val) => assert_eq!(val, 42), + _ => panic!("invalid pivot response"), + } +} diff --git a/src/integration/tests/reaper.rs b/src/integration/tests/reaper.rs index 6b2ee69a..26785823 100644 --- a/src/integration/tests/reaper.rs +++ b/src/integration/tests/reaper.rs @@ -1,17 +1,13 @@ -use std::{fs, time::Duration}; +use std::fs; use integration::{ - wait_for_usock, PivotSocketStressMsg, PIVOT_ABORT_PATH, PIVOT_OK_PATH, - PIVOT_PANIC_PATH, PIVOT_POOL_SIZE_PATH, PIVOT_SOCKET_STRESS_PATH, + wait_for_usock, PIVOT_ABORT_PATH, PIVOT_OK_PATH, PIVOT_PANIC_PATH, + PIVOT_POOL_SIZE_PATH, }; use qos_core::{ - client::SocketClient, handles::Handles, - io::{SocketAddress, StreamPool}, - protocol::{ - msg::ProtocolMsg, services::boot::ManifestEnvelope, ProtocolError, - ProtocolPhase, - }, + io::SocketAddress, + protocol::services::boot::ManifestEnvelope, reaper::{Reaper, REAPER_EXIT_DELAY}, }; use qos_nsm::mock::MockNsm; @@ -43,21 +39,11 @@ async fn reaper_works() { handles.put_manifest_envelope(&manifest_envelope).unwrap(); assert!(handles.pivot_exists()); - let enclave_pool = - StreamPool::single(SocketAddress::new_unix(&usock)).unwrap(); - - let app_pool = - StreamPool::single(SocketAddress::new_unix("./never.sock")).unwrap(); + let enclave_socket = SocketAddress::new_unix(&usock); let reaper_handle = tokio::spawn(async move { - Reaper::execute( - &handles, - Box::new(MockNsm), - enclave_pool, - app_pool, - None, - ) - .await; + Reaper::execute(&handles, Box::new(MockNsm), enclave_socket, None) + .await; }); // Give the enclave server time to bind to the socket @@ -78,94 +64,6 @@ async fn reaper_works() { assert!(fs::remove_file(integration::PIVOT_OK_SUCCESS_FILE).is_ok()); } -#[tokio::test] -async fn reaper_timeout_works() { - let secret_path: PathWrapper = "/tmp/reaper_timeout_works.secret".into(); - let enclave_sock: PathWrapper = 
"/tmp/reaper_timeout_works.sock".into(); - let app_sock: PathWrapper = "/tmp/reaper_timeout_works_app.sock".into(); - let manifest_path: PathWrapper = - "/tmp/reaper_timeout_works.manifest".into(); - - // clean up old manifest if it's left from a panic - drop(std::fs::remove_file(&*manifest_path)); - - // For our sanity, ensure the secret does not yet exist - drop(fs::remove_file(&*secret_path)); - - let handles = Handles::new( - "eph_path".to_string(), - (*secret_path).to_string(), - (*manifest_path).to_string(), - PIVOT_SOCKET_STRESS_PATH.to_string(), - ); - - // Make sure we have written everything necessary to pivot, except the - // quorum key - let mut manifest_envelope = ManifestEnvelope::default(); - // Tell pivot where to open up the server app socket - manifest_envelope.manifest.pivot.args = vec![app_sock.to_string()]; - - // we'll be checking if this is set by passing slow and fast requests - manifest_envelope.manifest.client_timeout_ms = Some(2000); - - handles.put_manifest_envelope(&manifest_envelope).unwrap(); - assert!(handles.pivot_exists()); - - let enclave_pool = - StreamPool::single(SocketAddress::new_unix(&enclave_sock)).unwrap(); - - let app_pool = - StreamPool::single(SocketAddress::new_unix(&app_sock)).unwrap(); - - let reaper_handle = tokio::spawn(async move { - Reaper::execute( - &handles, - Box::new(MockNsm), - enclave_pool, - app_pool, - Some(ProtocolPhase::QuorumKeyProvisioned), - ) - .await; - }); - - // Give the enclave server time to bind to the socket - wait_for_usock(&enclave_sock).await; - - // Check that the reaper is still running, presumably waiting for - // the secret. - assert!(!reaper_handle.is_finished()); - - // Create the file with the secret, which should cause the reaper - // to start executable. 
- fs::write(&*secret_path, b"super dank tank secret tech").unwrap(); - - // Give the app server time to bind to the socket - wait_for_usock(&app_sock).await; - - // create a "slow" app request longer than client timeout from `Manifest`, but longer than 5s timeout on our local client. - let app_request = - borsh::to_vec(&PivotSocketStressMsg::SlowRequest(3000)).unwrap(); - let request = - borsh::to_vec(&ProtocolMsg::ProxyRequest { data: app_request }) - .unwrap(); - - // ensure our client to the enclave has longer timeout than the configured 2s and the slow request 3s - let client = SocketClient::single( - SocketAddress::new_unix(&enclave_sock), - Duration::from_millis(5000), - ) - .unwrap(); - - let response: ProtocolMsg = - borsh::from_slice(&client.call(&request).await.unwrap()).unwrap(); - - // The response should be AppClientRecvTimeout which indicates the enclave short-circuited the timeout - assert_eq!( - response, - ProtocolMsg::ProtocolErrorResponse(ProtocolError::AppClientRecvTimeout) - ); -} - #[tokio::test] async fn reaper_handles_non_zero_exits() { let secret_path: PathWrapper = @@ -189,21 +87,11 @@ async fn reaper_handles_non_zero_exits() { handles.put_manifest_envelope(&Default::default()).unwrap(); assert!(handles.pivot_exists()); - let enclave_pool = - StreamPool::new(SocketAddress::new_unix(&usock), 1).unwrap(); - - let app_pool = - StreamPool::new(SocketAddress::new_unix("./never.sock"), 1).unwrap(); + let enclave_socket = SocketAddress::new_unix(&usock); let reaper_handle = tokio::spawn(async move { - Reaper::execute( - &handles, - Box::new(MockNsm), - enclave_pool, - app_pool, - None, - ) - .await; + Reaper::execute(&handles, Box::new(MockNsm), enclave_socket, None) + .await; }); // Give the enclave server time to bind to the socket @@ -246,21 +134,11 @@ async fn reaper_handles_panic() { handles.put_manifest_envelope(&Default::default()).unwrap(); assert!(handles.pivot_exists()); - let enclave_pool = - 
StreamPool::new(SocketAddress::new_unix(&usock), 1).unwrap(); - - let app_pool = - StreamPool::new(SocketAddress::new_unix("./never.sock"), 1).unwrap(); + let enclave_socket = SocketAddress::new_unix(&usock); let reaper_handle = tokio::spawn(async move { - Reaper::execute( - &handles, - Box::new(MockNsm), - enclave_pool, - app_pool, - None, - ) - .await; + Reaper::execute(&handles, Box::new(MockNsm), enclave_socket, None) + .await; }); // Give the enclave server time to bind to the socket @@ -311,21 +189,11 @@ async fn reaper_handles_pool_size() { handles.put_manifest_envelope(&manifest_envelope).unwrap(); assert!(handles.pivot_exists()); - let enclave_pool = - StreamPool::single(SocketAddress::new_unix(&usock)).unwrap(); - - let app_pool = - StreamPool::single(SocketAddress::new_unix("/tmp/never.sock")).unwrap(); + let enclave_socket = SocketAddress::new_unix(&usock); let reaper_handle = tokio::spawn(async move { - Reaper::execute( - &handles, - Box::new(MockNsm), - enclave_pool, - app_pool, - None, - ) - .await; + Reaper::execute(&handles, Box::new(MockNsm), enclave_socket, None) + .await; }); // wait for enclave to listen diff --git a/src/qos_client/src/cli/mod.rs b/src/qos_client/src/cli/mod.rs index 7fec6570..33a1e05b 100644 --- a/src/qos_client/src/cli/mod.rs +++ b/src/qos_client/src/cli/mod.rs @@ -12,7 +12,10 @@ use std::env; use qos_core::{ parser::{CommandParser, GetParserForCommand, Parser, Token}, - protocol::{msg::ProtocolMsg, services::boot}, + protocol::{ + msg::ProtocolMsg, + services::boot::{self, DEFAULT_APP_HOST_PORT}, + }, }; mod services; @@ -40,6 +43,7 @@ const PATCH_SET_DIR: &str = "patch-set-dir"; const NAMESPACE_DIR: &str = "namespace-dir"; const UNSAFE_AUTO_CONFIRM: &str = "unsafe-auto-confirm"; const PUB_PATH: &str = "pub-path"; +const APP_HOST_PORT: &str = "app-host-port"; const POOL_SIZE: &str = "pool-size"; const CLIENT_TIMEOUT: &str = "client-timeout"; const YUBIKEY: &str = "yubikey"; @@ -1017,6 +1021,22 @@ impl ClientOpts { } } + fn 
app_host_port(&self) -> u16 { + if let Some(port_str) = self.parsed.single(APP_HOST_PORT) { + let val = port_str.parse().expect( + "app-host-port not valid integer in range <1026..65535>", + ); + // ensure we can only use valid ports + if val < 1026 { + panic!("app-host-port not in valid range <1026..65535>"); + } + + val + } else { + DEFAULT_APP_HOST_PORT + } + } + fn pool_size(&self) -> Option { self.parsed.single(POOL_SIZE).map(|s| { s.parse().expect("pool-size not valid integer in range <1..255>") @@ -1561,6 +1581,7 @@ mod handlers { manifest_set_dir: opts.manifest_set_dir(), patch_set_dir: opts.patch_set_dir(), quorum_key_path: opts.quorum_key_path(), + app_host_port: opts.app_host_port(), pool_size: opts.pool_size(), client_timeout_ms: opts.client_timeout_ms(), }) { diff --git a/src/qos_client/src/cli/services.rs b/src/qos_client/src/cli/services.rs index 39104747..e4ed248a 100644 --- a/src/qos_client/src/cli/services.rs +++ b/src/qos_client/src/cli/services.rs @@ -13,7 +13,7 @@ use qos_core::protocol::{ boot::{ Approval, Manifest, ManifestEnvelope, ManifestSet, MemberPubKey, Namespace, NitroConfig, PatchSet, PivotConfig, QuorumMember, - RestartPolicy, ShareSet, + RestartPolicy, ShareSet, DEFAULT_APP_HOST_PORT, }, genesis::{GenesisOutput, GenesisSet}, key::EncryptedQuorumKey, @@ -706,6 +706,7 @@ pub(crate) struct GenerateManifestArgs> { pub quorum_key_path: P, pub manifest_path: P, pub pivot_args: Vec, + pub app_host_port: u16, pub pool_size: Option, pub client_timeout_ms: Option, } @@ -726,6 +727,7 @@ pub(crate) fn generate_manifest>( quorum_key_path, manifest_path, pivot_args, + app_host_port, pool_size, client_timeout_ms, } = args; @@ -758,6 +760,7 @@ pub(crate) fn generate_manifest>( share_set, patch_set, enclave: nitro_config, + app_host_port, pool_size, client_timeout_ms, }; @@ -1665,6 +1668,7 @@ pub(crate) fn dangerous_dev_boot>( members: vec![member.clone()], }, patch_set: PatchSet { threshold: 0, members: vec![] }, + app_host_port: 
DEFAULT_APP_HOST_PORT, pool_size: None, client_timeout_ms: None, }; @@ -2219,7 +2223,7 @@ mod tests { services::boot::{ Approval, Manifest, ManifestEnvelope, ManifestSet, MemberPubKey, Namespace, NitroConfig, PatchSet, PivotConfig, QuorumMember, - RestartPolicy, ShareSet, + RestartPolicy, ShareSet, DEFAULT_APP_HOST_PORT, }, QosHash, }; @@ -2294,6 +2298,7 @@ mod tests { share_set: share_set.clone(), patch_set: patch_set.clone(), enclave: nitro_config.clone(), + app_host_port: DEFAULT_APP_HOST_PORT, pool_size: None, client_timeout_ms: None, }; diff --git a/src/qos_core/src/cli.rs b/src/qos_core/src/cli.rs index e2b9caa1..de73e529 100644 --- a/src/qos_core/src/cli.rs +++ b/src/qos_core/src/cli.rs @@ -9,10 +9,10 @@ use crate::{ io::SocketAddress, parser::{GetParserForOptions, OptionsParser, Parser, Token}, reaper::Reaper, - EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, QUORUM_FILE, SEC_APP_SOCK, + EPHEMERAL_KEY_FILE, MANIFEST_FILE, PIVOT_FILE, QUORUM_FILE, }; -use crate::io::{IOError, StreamPool}; +use crate::io::IOError; /// "cid" pub const CID: &str = "cid"; @@ -29,7 +29,6 @@ pub const PIVOT_FILE_OPT: &str = "pivot-file"; pub const EPHEMERAL_FILE_OPT: &str = "ephemeral-file"; /// Name for the option to specify the manifest file. pub const MANIFEST_FILE_OPT: &str = "manifest-file"; -const APP_USOCK: &str = "app-usock"; /// Name for the option to specify the maximum `StreamPool` size. pub const POOL_SIZE: &str = "pool-size"; @@ -48,25 +47,12 @@ impl EnclaveOpts { Self { parsed } } - /// Create a new `StreamPool` for connecting to the enclave. - fn enclave_pool(&self) -> Result { - self.async_pool(false) - } - - /// Create a new `StreamPool` for connecting to the app. - fn app_pool(&self) -> Result { - self.async_pool(true) - } - - /// Create a new `StreamPool` using the list of `SocketAddress` for the qos host. - /// The `app` parameter specifies if this is a pool meant for the enclave itself, or the enclave app. 
- fn async_pool(&self, app: bool) -> Result { - let usock_param = if app { APP_USOCK } else { USOCK }; - + /// Create a new `StreamSocket` for the qos host. + fn enclave_socket(&self) -> Result { match ( self.parsed.single(CID), self.parsed.single(PORT), - self.parsed.single(usock_param), + self.parsed.single(USOCK), ) { #[cfg(feature = "vm")] (Some(c), Some(p), None) => { @@ -74,15 +60,9 @@ impl EnclaveOpts { c.parse().map_err(|_| IOError::ConnectAddressInvalid)?; let p = p.parse().map_err(|_| IOError::ConnectAddressInvalid)?; - StreamPool::single(SocketAddress::new_vsock( - c, - p, - crate::io::VMADDR_NO_FLAGS, - )) - } - (None, None, Some(u)) => { - StreamPool::single(SocketAddress::new_unix(u)) + Ok(SocketAddress::new_vsock(c, p, crate::io::VMADDR_NO_FLAGS)) } + (None, None, Some(u)) => Ok(SocketAddress::new_unix(u)), _ => panic!("Invalid socket opts"), } } @@ -161,9 +141,8 @@ impl CLI { opts.pivot_file(), ), opts.nsm(), - opts.enclave_pool() - .expect("Unable to create enclave socket pool"), - opts.app_pool().expect("Unable to create enclave app pool"), + opts.enclave_socket() + .expect("Unable to create enclave socket"), None, ) .await; @@ -220,11 +199,6 @@ impl GetParserForOptions for EnclaveParser { .takes_value(true) .default_value(MANIFEST_FILE) ) - .token( - Token::new(APP_USOCK, "the socket the secure app is listening on.") - .takes_value(true) - .default_value(SEC_APP_SOCK) - ) } } @@ -266,40 +240,16 @@ mod test { #[test] fn parse_usock() { - let mut args: Vec<_> = vec![ - "binary", - "--usock", - "/tmp/usock", - "--app-usock", - "/tmp/app_usock", - ] - .into_iter() - .map(String::from) - .collect(); + let mut args: Vec<_> = vec!["binary", "--usock", "/tmp/usock"] + .into_iter() + .map(String::from) + .collect(); let opts = EnclaveOpts::new(&mut args); assert_eq!( *opts.parsed.single(USOCK).unwrap(), "/tmp/usock".to_string() ); - assert_eq!( - *opts.parsed.single(APP_USOCK).unwrap(), - "/tmp/app_usock".to_string() - ); - } - - #[test] - fn 
builds_async_pool() { - let mut args: Vec<_> = vec!["binary", "--usock", "./test.sock"] - .into_iter() - .map(String::from) - .collect(); - let opts = EnclaveOpts::new(&mut args); - - let pool = opts.async_pool(true).unwrap(); - assert_eq!(pool.len(), 1); - let pool = opts.async_pool(false).unwrap(); - assert_eq!(pool.len(), 1); } #[test] diff --git a/src/qos_core/src/handles.rs b/src/qos_core/src/handles.rs index e6d515c9..69f6e423 100644 --- a/src/qos_core/src/handles.rs +++ b/src/qos_core/src/handles.rs @@ -320,7 +320,7 @@ mod test { use super::*; use crate::protocol::services::boot::{ Manifest, ManifestSet, Namespace, NitroConfig, PatchSet, PivotConfig, - RestartPolicy, ShareSet, + RestartPolicy, ShareSet, DEFAULT_APP_HOST_PORT, }; #[test] @@ -451,6 +451,7 @@ mod test { manifest_set: ManifestSet { threshold: 2, members: vec![] }, share_set: ShareSet { threshold: 2, members: vec![] }, patch_set: PatchSet::default(), + app_host_port: DEFAULT_APP_HOST_PORT, pool_size: None, client_timeout_ms: None, }; diff --git a/src/qos_core/src/io/host_bridge.rs b/src/qos_core/src/io/host_bridge.rs new file mode 100644 index 00000000..eacea8f1 --- /dev/null +++ b/src/qos_core/src/io/host_bridge.rs @@ -0,0 +1,181 @@ +use std::{net::SocketAddr, time::Duration}; + +use futures::future::join_all; +use tokio::{ + io::copy_bidirectional, + net::{TcpListener, TcpStream}, + task::JoinHandle, +}; + +use super::{IOError, Listener, Stream, StreamPool}; + +pub struct HostBridge { + enclave_pool: StreamPool, + host_addr: SocketAddr, +} + +impl HostBridge { + /// Create a new `HostBridge` with given `StreamPool` VSOCK connections and target `SocketAddr`. + /// NOTE: bridge operation is decided by run calls e.g. `tcp_to_vsock`. 
+ pub fn new(enclave_pool: StreamPool, host_addr: SocketAddr) -> Self { + // ensure we have ports to spare + assert!( + enclave_pool.len() + usize::from(host_addr.port()) + < u16::MAX.into() + ); + + Self { enclave_pool, host_addr } + } + + /// Create a TCP to VSOCK bridge using the provided `StreamPool` and `SocketAddr` from constructor. + /// This consumes the `HostBridge` instance and starts background tasks that only return on unrecoverable errors. + /// NOTE: this spawns standalone tasks and *DOES NOT WAIT* for completion. + pub async fn tcp_to_vsock(self) { + tokio::spawn(async move { + let streams = self.enclave_pool.to_streams(); + let mut tasks = Vec::new(); + let mut host_addr = self.host_addr; + + for stream in streams { + eprintln!("tcp to vsock bridge listening on tcp:{host_addr}"); + tasks.push(tokio::spawn(tcp_to_vsock(stream, host_addr))); + // bump port by 1 for next listener + host_addr.set_port(host_addr.port() + 1); + } + + await_all(tasks).await; + }); + } + + /// Create a VSOCK to TCP bridge using the provided `StreamPool` and `SocketAddr` from constructor. + /// This consumes the `HostBridge` instance and starts background tasks that only return on unrecoverable errors. + /// NOTE: this spawns standalone tasks and *DOES NOT WAIT* for completion. 
+ pub async fn vsock_to_tcp(self) { + tokio::spawn(async move { + let listeners = self + .enclave_pool + .listen() + .expect("unable to listen on vsock connections"); + + let mut tasks = Vec::new(); + let mut host_addr = self.host_addr; + + for listener in listeners { + eprintln!("vsock to tcp bridge listening on vsock:TODO"); + tasks.push(tokio::spawn(vsock_to_tcp(listener, host_addr))); + // bump port by 1 for next listener + host_addr.set_port(host_addr.port() + 1); + } + + await_all(tasks).await; + }); + } +} + +async fn await_all(tasks: Vec>>) { + let results = join_all(tasks).await; + + for result in results { + match result { + Err(err) => eprintln!("error on task joining: {err:?}"), + Ok(result) => match result { + Ok(()) => eprintln!("tcp to vsock bridge host exit, no errors. This shouldn't happen"), // TODO: error? panic? + Err(err) => eprintln!("error in task: {err:?}"), + }, + } + } +} + +const DEFAULT_RETRY_DELAY: Duration = Duration::from_millis(1000); + +// bridge tcp to vsock in an endless loop with 1s retry on errors +async fn tcp_to_vsock( + mut enclave_stream: Stream, + host_addr: SocketAddr, +) -> Result<(), IOError> { + let mut first_time = true; + + loop { + if !first_time { + tokio::time::sleep(DEFAULT_RETRY_DELAY).await; + } + first_time = false; + + if let Err(err) = enclave_stream.connect().await { + eprintln!("error connecting to VSOCK {err:?}, retrying"); + continue; + } + + let listener = match TcpListener::bind(host_addr).await { + Ok(value) => value, + Err(err) => { + eprintln!( + "error binding tcp addr {host_addr}: {err:?}, retrying" + ); + continue; + } + }; + + let mut tcp_stream = match listener.accept().await { + Ok((value, _)) => value, + Err(err) => { + eprintln!( + "error accepting connection on tcp addr {host_addr}: {err:?}, retrying" + ); + continue; + } + }; + + if let Err(err) = + copy_bidirectional(&mut tcp_stream, &mut enclave_stream).await + { + eprintln!("error on tcp to vsock stream bridge: {err:?}, retrying"); + } 
else { + eprintln!("tcp to vsock stream bridge shutdown, retrying"); + } + } +} + +// bridge vsock to tcp in an endless loop with 1s retry on errors +async fn vsock_to_tcp( + enclave_listener: Listener, + host_addr: SocketAddr, +) -> Result<(), IOError> { + let mut first_time = true; + + loop { + if !first_time { + tokio::time::sleep(DEFAULT_RETRY_DELAY).await; + } + first_time = false; + + let mut enclave_stream = match enclave_listener.accept().await { + Ok(value) => value, + Err(err) => { + eprintln!( + "error accepting connection on vsock: {err:?}, retrying" + ); + continue; + } + }; + + let mut tcp_stream = match TcpStream::connect(host_addr).await { + Ok(value) => value, + Err(err) => { + eprintln!( + "error connecting to tcp addr {host_addr}: {err:?}, retrying" + ); + continue; + } + }; + + if let Err(err) = + copy_bidirectional(&mut enclave_stream, &mut tcp_stream).await + { + eprintln!("error on vsock to tcp stream bridge: {err:?}, retrying"); + } else { + eprintln!("vsock to tcp stream bridge shutdown, retrying"); + } + tokio::time::sleep(DEFAULT_RETRY_DELAY).await; + } +} diff --git a/src/qos_core/src/io/mod.rs b/src/qos_core/src/io/mod.rs index efc6ce1c..e7dc1c42 100644 --- a/src/qos_core/src/io/mod.rs +++ b/src/qos_core/src/io/mod.rs @@ -3,8 +3,10 @@ //! NOTE TO MAINTAINERS: Interaction with any sys calls should be contained //! within this module. 
+mod host_bridge; mod pool; mod stream; +pub use host_bridge::*; pub use pool::*; pub use stream::*; @@ -164,6 +166,34 @@ impl SocketAddress { _ => panic!("invalid socket address requested"), } } + + /// Returns a new `SocketAddress` depending on socket type: + /// If VSOCK, the same CID is used with the provided port + /// If USOCK, the ".appsock" suffix is added and port is unused + #[allow(unused)] + pub fn with_port(&self, port: u16) -> Result { + match self { + #[cfg(feature = "vm")] + Self::Vsock(vsa) => Ok(Self::new_vsock( + vsa.cid(), + port.into(), + vsock_svm_flags(*vsa), + )), + Self::Unix(ua) => { + let mut path = ua + .path() + .ok_or(IOError::ConnectAddressInvalid)? + .as_os_str() + .to_owned(); + + path.push(".appsock"); + + Ok(Self::new_unix( + path.to_str().ok_or(IOError::ConnectAddressInvalid)?, + )) + } + } + } } impl std::fmt::Display for SocketAddress { diff --git a/src/qos_core/src/io/pool.rs b/src/qos_core/src/io/pool.rs index 7017a3a3..6bf38187 100644 --- a/src/qos_core/src/io/pool.rs +++ b/src/qos_core/src/io/pool.rs @@ -143,7 +143,7 @@ impl StreamPool { PoolGuard::new(guard) } - /// Create a new pool by listening new connection on all the addresses + /// Create a new pool by listening for new connections on all the addresses pub fn listen(&self) -> Result, IOError> { let mut listeners = Vec::new(); @@ -196,6 +196,11 @@ Ok(listeners) } + + /// Deconstruct the pool into all contained `Stream` objects. + pub fn to_streams(self) -> Vec { + self.handles.into_iter().map(|m| m.into_inner()).collect() + } } /// Provide the "next" usock path. Given a `"*_X"` where X is a number, this function diff --git a/src/qos_core/src/protocol/mod.rs b/src/qos_core/src/protocol/mod.rs index b3644282..16800299 100644 --- a/src/qos_core/src/protocol/mod.rs +++ b/src/qos_core/src/protocol/mod.rs @@ -1,5 +1,7 @@ //! 
Quorum protocol +use std::{sync::Arc, time::Duration}; + use borsh::BorshSerialize; use qos_crypto::sha_256; @@ -13,7 +15,13 @@ pub use state::ProtocolPhase; pub(crate) use state::ProtocolState; pub(crate) mod processor; -pub use processor::INITIAL_CLIENT_TIMEOUT; +use tokio::sync::RwLock; + +const MEGABYTE: usize = 1024 * 1024; +const MAX_ENCODED_MSG_LEN: usize = 128 * MEGABYTE; + +/// Initial client timeout for the processor until the Manifest says otherwise, see reaper.rs +pub const INITIAL_CLIENT_TIMEOUT: Duration = Duration::from_secs(5); /// 256bit hash pub type Hash256 = [u8; 32]; @@ -28,3 +36,13 @@ pub trait QosHash: BorshSerialize { // Blanket implement QosHash for any type that implements BorshSerialize. impl QosHash for T {} + +/// Helper type to keep `ProtocolState` shared using `Arc>` +type SharedProtocolState = Arc>; + +impl ProtocolState { + /// Wrap this `ProtocolState` into a `Mutex` in an `Arc`. + pub fn shared(self) -> SharedProtocolState { + Arc::new(RwLock::new(self)) + } +} diff --git a/src/qos_core/src/protocol/processor.rs b/src/qos_core/src/protocol/processor.rs index 407f7502..abde78bd 100644 --- a/src/qos_core/src/protocol/processor.rs +++ b/src/qos_core/src/protocol/processor.rs @@ -1,69 +1,26 @@ //! 
Quorum protocol processor -use std::{sync::Arc, time::Duration}; + +use std::sync::Arc; use borsh::BorshDeserialize; use tokio::sync::RwLock; use super::{ - error::ProtocolError, msg::ProtocolMsg, state::ProtocolState, ProtocolPhase, -}; -use crate::{ - client::{ClientError, SocketClient}, - io::SharedStreamPool, - server::RequestProcessor, + error::ProtocolError, msg::ProtocolMsg, SharedProtocolState, + MAX_ENCODED_MSG_LEN, }; - -const MEGABYTE: usize = 1024 * 1024; -const MAX_ENCODED_MSG_LEN: usize = 128 * MEGABYTE; - -/// Initial client timeout for the processor until the Manifest says otherwise, see reaper.rs -pub const INITIAL_CLIENT_TIMEOUT: Duration = Duration::from_secs(5); - -/// Helper type to keep `ProtocolState` shared using `Arc>` -type SharedProtocolState = Arc>; - -impl ProtocolState { - /// Wrap this `ProtocolState` into a `Mutex` in an `Arc`. - pub fn shared(self) -> SharedProtocolState { - Arc::new(RwLock::new(self)) - } -} +use crate::server::{RequestProcessor, SharedProcessor}; /// Enclave state machine that executes when given a `ProtocolMsg`. -#[derive(Clone)] pub struct ProtocolProcessor { - app_client: SocketClient, state: SharedProtocolState, } impl ProtocolProcessor { /// Create a new `Self` inside `Arc` and `Mutex`. 
#[must_use] - pub fn new( - state: SharedProtocolState, - app_pool: SharedStreamPool, - ) -> Arc> { - let app_client = SocketClient::new(app_pool, INITIAL_CLIENT_TIMEOUT); - Arc::new(RwLock::new(Self { app_client, state })) - } - - /// Helper to get phase between locking the shared state - async fn get_phase(&self) -> ProtocolPhase { - self.state.read().await.get_phase() - } - - /// Sets the client timeout value for the `app_client`, maximum allowed value is `u16::MAX` milliseconds - pub fn set_client_timeout(&mut self, timeout: Duration) { - assert!(timeout.as_millis() < u16::MAX.into(), "client timeout > 65s"); - self.app_client.set_timeout(timeout); - } - - /// Expands the app pool to given pool size - pub async fn expand_to( - &mut self, - pool_size: u8, - ) -> Result<(), ClientError> { - self.app_client.expand_to(pool_size).await + pub fn new(state: SharedProtocolState) -> SharedProcessor { + Arc::new(RwLock::new(Self { state })) } } @@ -83,30 +40,6 @@ impl RequestProcessor for ProtocolProcessor { .expect("ProtocolMsg can always be serialized. qed."); }; - // handle Proxy outside of the state - if let ProtocolMsg::ProxyRequest { data } = msg_req { - let phase = self.get_phase().await; - - if phase != ProtocolPhase::QuorumKeyProvisioned { - let err = ProtocolError::NoMatchingRoute(phase); - return borsh::to_vec(&ProtocolMsg::ProtocolErrorResponse(err)) - .expect("ProtocolMsg can always be serialized. qed."); - } - - let result = self - .app_client - .call(&data) - .await - .map(|data| ProtocolMsg::ProxyResponse { data }) - .map_err(|e| ProtocolMsg::ProtocolErrorResponse(e.into())); - - match result { - Ok(msg_resp) | Err(msg_resp) => borsh::to_vec(&msg_resp) - .expect("ProtocolMsg can always be serialized. 
qed."), - } - } else { - // handle all the others here - self.state.write().await.handle_msg(&msg_req) - } + self.state.write().await.handle_msg(&msg_req) } } diff --git a/src/qos_core/src/protocol/services/boot.rs b/src/qos_core/src/protocol/services/boot.rs index 89f37bd8..0d720d65 100644 --- a/src/qos_core/src/protocol/services/boot.rs +++ b/src/qos_core/src/protocol/services/boot.rs @@ -294,6 +294,8 @@ impl fmt::Debug for Namespace { } } +pub const DEFAULT_APP_HOST_PORT: u16 = 3000; + /// The Manifest for the enclave. /// NOTE: we currently use JSON format for storing this value. /// Since we don't have any `HashMap` inside the `Manifest` it works out of the box. @@ -323,6 +325,8 @@ pub struct Manifest { pub enclave: NitroConfig, /// Patch set members and threshold pub patch_set: PatchSet, + /// App host TCP port + pub app_host_port: u16, /// Client timeout for calls via the VSOCK/USOCK, defaults to 5s if not specified pub client_timeout_ms: Option, /// Pool size argument used to set up our socket pipes, defaults to 1 if not specified @@ -357,6 +361,7 @@ impl From for Manifest { share_set: old.share_set, enclave: old.enclave, patch_set: old.patch_set, + app_host_port: DEFAULT_APP_HOST_PORT, pool_size: None, client_timeout_ms: None, } diff --git a/src/qos_core/src/protocol/services/provision.rs b/src/qos_core/src/protocol/services/provision.rs index 84f8c606..6ac96730 100644 --- a/src/qos_core/src/protocol/services/provision.rs +++ b/src/qos_core/src/protocol/services/provision.rs @@ -140,6 +140,7 @@ mod test { Approval, Manifest, ManifestEnvelope, ManifestSet, Namespace, NitroConfig, PatchSet, PivotConfig, QuorumMember, RestartPolicy, ShareSet, + DEFAULT_APP_HOST_PORT, }, provision::provision, }, @@ -211,6 +212,7 @@ mod test { members: members.clone().into_iter().map(|(m, _)| m).collect(), }, patch_set: PatchSet::default(), + app_host_port: DEFAULT_APP_HOST_PORT, pool_size: None, client_timeout_ms: None, }; diff --git a/src/qos_core/src/reaper.rs 
b/src/qos_core/src/reaper.rs index 1cae4d0c..fb0f2988 100644 --- a/src/qos_core/src/reaper.rs +++ b/src/qos_core/src/reaper.rs @@ -14,7 +14,7 @@ use tokio::process::Command; use crate::{ handles::Handles, - io::StreamPool, + io::{SocketAddress, StreamPool}, protocol::{ processor::ProtocolProcessor, services::boot::{PivotConfig, RestartPolicy}, @@ -38,55 +38,23 @@ async fn run_server( server_state: Arc>, handles: Handles, nsm: Box, - pool: StreamPool, - app_pool: StreamPool, + core_socket: SocketAddress, test_only_init_phase_override: Option, ) { let protocol_state = - ProtocolState::new(nsm, handles.clone(), test_only_init_phase_override); + ProtocolState::new(nsm, handles.clone(), test_only_init_phase_override) + .shared(); + let core_pool = StreamPool::single(core_socket) + .expect("unable to create single socket core pool"); // send a shared version of state and the async pool to each processor - let processor = - ProtocolProcessor::new(protocol_state.shared(), app_pool.shared()); - // listen_all will multiplex the processor accross all sockets - let mut server = SocketServer::listen_all(pool, &processor) - .expect("unable to get listen task list"); - - loop { - // see if we got interrupted - if *server_state.read().unwrap() == InterState::Quitting { - return; - } - - if let Ok(envelope) = handles.get_manifest_envelope() { - let pool_size = - envelope.manifest.pool_size.unwrap_or(DEFAULT_POOL_SIZE); - // expand server to pool_size - server - .listen_to(pool_size, &processor) - .expect("unable to listen_to on the running server"); - { - // get the processor writable - let mut p = processor.write().await; - - // expand app connections to pool_size - p.expand_to(pool_size) - .await - .expect("unable to expand_to on the processor app pool"); - if let Some(timeout_ms) = envelope.manifest.client_timeout_ms { - let timeout = Duration::from_millis(timeout_ms.into()); - p.set_client_timeout(timeout); - } - } - - *server_state.write().unwrap() = InterState::PivotReady; - 
eprintln!("Reaper::server manifest is present, breaking out of server check loop"); - break; - } + let protocol_processor = ProtocolProcessor::new(protocol_state); - tokio::time::sleep(REAPER_STATE_CHECK_DELAY).await; - } + // listen on the protocol server + let _protocol_server = + SocketServer::listen_all(core_pool, &protocol_processor) + .expect("unable to get listen task list for protocol server"); - eprintln!("Reaper::server post-expansion, waiting for shutdown"); + eprintln!("Reaper::server running"); while *server_state.read().unwrap() != InterState::Quitting { tokio::time::sleep(REAPER_STATE_CHECK_DELAY).await; } @@ -110,8 +78,7 @@ impl Reaper { pub async fn execute( handles: &Handles, nsm: Box, - pool: StreamPool, - app_pool: StreamPool, + core_socket: SocketAddress, test_only_init_phase_override: Option, ) { // state switch to communicate between pivot run task (here) and run_server task @@ -123,8 +90,7 @@ impl Reaper { server_state, handles.clone(), nsm, - pool, - app_pool, + core_socket, test_only_init_phase_override, )); @@ -139,7 +105,6 @@ impl Reaper { if handles.quorum_key_exists() && handles.pivot_exists() && handles.manifest_envelope_exists() - && server_state == InterState::PivotReady { // The state required to pivot exists, so we can break this // holding pattern and start the pivot. 
@@ -160,10 +125,13 @@ impl Reaper { let mut pivot = Command::new(handles.pivot_path()); // set the pool-size env var for pivots that use it - pivot.env( - "POOL_SIZE", - manifest.pool_size.unwrap_or(DEFAULT_POOL_SIZE).to_string(), - ); + pivot + .env_clear() + .env( + "POOL_SIZE", + manifest.pool_size.unwrap_or(DEFAULT_POOL_SIZE).to_string(), + ) + .env("CID", "3"); // TODO: ales channels pivot.args(&args[..]); match restart { RestartPolicy::Always => loop { @@ -210,8 +178,6 @@ impl Reaper { enum InterState { // We're booting, no pivot yet Booting, - // We've booted and pivot is ready - PivotReady, // We're quitting (ctrl+c for tests and such) Quitting, } diff --git a/src/qos_core/src/server.rs b/src/qos_core/src/server.rs index ba441661..06367c2c 100644 --- a/src/qos_core/src/server.rs +++ b/src/qos_core/src/server.rs @@ -12,6 +12,8 @@ use crate::io::{IOError, Listener, StreamPool}; pub enum SocketServerError { /// `io::IOError` wrapper. IOError(IOError), + /// invalid pool configuration + PoolInvalid, } impl From for SocketServerError { diff --git a/src/qos_host/Cargo.toml b/src/qos_host/Cargo.toml index c42873e9..dad3f90f 100644 --- a/src/qos_host/Cargo.toml +++ b/src/qos_host/Cargo.toml @@ -11,13 +11,14 @@ workspace = true [dependencies] qos_core = { workspace = true, default-features = false } qos_hex = { workspace = true, features = ["serde"], default-features = false } +qos_nsm = { workspace = true, default-features = false } # Third party axum = { workspace = true } -tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } borsh = { workspace = true } serde_json = { workspace = true } serde = { workspace = true } +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } [features] vm = ["qos_core/vm"] diff --git a/src/qos_host/src/cli.rs b/src/qos_host/src/cli.rs index a92fb70f..3eb41326 100644 --- a/src/qos_host/src/cli.rs +++ b/src/qos_host/src/cli.rs @@ -9,7 +9,7 @@ use std::{ use qos_core::{ cli::{CID, PORT, USOCK}, - 
io::{SocketAddress, StreamPool}, + io::SocketAddress, parser::{GetParserForOptions, OptionsParser, Parser, Token}, }; @@ -18,6 +18,7 @@ const HOST_PORT: &str = "host-port"; const ENDPOINT_BASE_PATH: &str = "endpoint-base-path"; const VSOCK_TO_HOST: &str = "vsock-to-host"; const SOCKET_TIMEOUT: &str = "socket-timeout"; +const ENABLE_HOST_BRIDGE: &str = "enable-host-bridge"; struct HostParser; impl GetParserForOptions for HostParser { @@ -38,7 +39,7 @@ impl GetParserForOptions for HostParser { .token( Token::new(USOCK, "name of the socket file (ex: `dev.sock`) (only for unix sockets)") .takes_value(true) - .forbids(vec!["port", "cid"]) + .forbids(vec![PORT, CID]) ) .token( Token::new(HOST_IP, "IP address this server should listen on") @@ -65,6 +66,11 @@ impl GetParserForOptions for HostParser { .required(false) .forbids(vec![USOCK]) ) + .token( + Token::new(ENABLE_HOST_BRIDGE, "whether to enable the app host bridge for tcp -> vsock") + .takes_value(false) + .required(false) + ) } } @@ -121,9 +127,9 @@ impl HostOpts { } /// Create a new `StreamPool` using the list of `SocketAddress` for the qos host. 
- pub(crate) fn enclave_pool( + pub(crate) fn enclave_socket( &self, - ) -> Result { + ) -> Result { match ( self.parsed.single(CID), self.parsed.single(PORT), @@ -138,16 +144,10 @@ impl HostOpts { qos_core::io::IOError::ConnectAddressInvalid })?; - let address = - SocketAddress::new_vsock(c, p, self.to_host_flag()); - - StreamPool::new(address, 1) // qos_host needs only 1 + Ok(SocketAddress::new_vsock(c, p, self.to_host_flag())) } - (None, None, Some(u)) => { - let address = SocketAddress::new_unix(u); + (None, None, Some(u)) => Ok(SocketAddress::new_unix(u)), - StreamPool::new(address, 1) - } _ => panic!("Invalid socket opts"), } } @@ -164,6 +164,10 @@ impl HostOpts { self.parsed.single(ENDPOINT_BASE_PATH).cloned() } + fn enable_host_bridge(&self) -> bool { + self.parsed.flag(ENABLE_HOST_BRIDGE).unwrap_or(false) + } + #[cfg(feature = "vm")] fn to_host_flag(&self) -> u8 { let include = self @@ -202,13 +206,11 @@ impl CLI { println!("{}", options.parsed.info()); } else { crate::host::HostServer::new( - options - .enclave_pool() - .expect("unable to create enclave pool") - .shared(), + options.enclave_socket().expect("invalid enclave socket"), options.socket_timeout(), options.host_addr(), options.base_path(), + options.enable_host_bridge(), ) .serve() .await; diff --git a/src/qos_host/src/host.rs b/src/qos_host/src/host.rs index 5ed566f0..1c10e71c 100644 --- a/src/qos_host/src/host.rs +++ b/src/qos_host/src/host.rs @@ -13,7 +13,11 @@ //! * Response: //! 
* Responding with error: -use std::{net::SocketAddr, sync::Arc, time::Duration}; +use std::{ + net::{Ipv4Addr, SocketAddr}, + sync::Arc, + time::Duration, +}; use axum::{ body::Bytes, @@ -26,9 +30,10 @@ use borsh::BorshDeserialize; use qos_core::{ client::SocketClient, - io::SharedStreamPool, + io::{HostBridge, SocketAddress, StreamPool}, protocol::{msg::ProtocolMsg, ProtocolError, ProtocolPhase}, }; +use qos_nsm::types::NsmResponse; use crate::{ EnclaveInfo, EnclaveVitalStats, Error, ENCLAVE_HEALTH, ENCLAVE_INFO, @@ -38,29 +43,34 @@ /// Resource shared across tasks in the `HostServer`. #[derive(Debug)] struct QosHostState { + enclave_address: SocketAddress, enclave_client: SocketClient, + enable_host_bridge: bool, } /// HTTP server for the host of the enclave; proxies requests to the enclave. #[allow(clippy::module_name_repetitions)] pub struct HostServer { - enclave_pool: SharedStreamPool, + enclave_address: SocketAddress, timeout: Duration, addr: SocketAddr, base_path: Option, + enable_host_bridge: bool, } impl HostServer { - /// Create a new `HostServer`. See `Self::serve` for starting the - /// server. + /// Create a new `HostServer`. See `Self::serve` for starting the server. + /// Uses `enclave_address` to connect to the enclave for commands and to derive the app socket for + /// a tcp to VSOCK bridge after the pivot is accepted if `enable_host_bridge` is set to `true`. #[must_use] pub fn new( - enclave_pool: SharedStreamPool, + enclave_address: SocketAddress, timeout: Duration, addr: SocketAddr, base_path: Option, + enable_host_bridge: bool, ) -> Self { - Self { enclave_pool, timeout, addr, base_path } + Self { enclave_address, timeout, addr, base_path, enable_host_bridge } } fn path(&self, endpoint: &str) -> String { @@ -77,12 +87,15 @@ /// /// Panics if there is an issue starting the server. 
// pub async fn serve(&self) -> Result<(), String> { - pub async fn serve(&self) { + pub async fn serve(self) { let state = Arc::new(QosHostState { - enclave_client: SocketClient::new( - self.enclave_pool.clone(), + enclave_address: self.enclave_address.clone(), + enclave_client: SocketClient::single( + self.enclave_address.clone(), self.timeout, - ), + ) + .expect("unable to create enclave socket client"), + enable_host_bridge: self.enable_host_bridge, }); let app = Router::new() @@ -252,7 +265,17 @@ impl HostServer { } match state.enclave_client.call(&encoded_request).await { - Ok(encoded_response) => (StatusCode::OK, encoded_response), + Ok(encoded_response) => { + if state.enable_host_bridge { + maybe_start_app_host_bridge( + &state.enclave_address, + &encoded_request, + &encoded_response, + ) + .await; + } + (StatusCode::OK, encoded_response) + } Err(e) => { eprintln!("Error while trying to send request over socket to enclave: {e:?}"); @@ -267,3 +290,81 @@ impl HostServer { } } } + +// Start the tcp -> vsock `HostBridge` in case we have successfully processed the manifest +async fn maybe_start_app_host_bridge( + enclave_socket: &SocketAddress, + encoded_request: &Bytes, + encoded_response: &[u8], +) { + if let Ok(decoded_msg) = borsh::from_slice::(encoded_request) { + // if we got the pivot and it was accepted by the enclave, we should start the tcp -> vsock host bridge + match decoded_msg { + ProtocolMsg::BootStandardRequest { + manifest_envelope, + pivot: _, + } + | ProtocolMsg::BootKeyForwardRequest { + manifest_envelope, + pivot: _, + } => { + if let Ok(decoded_msg) = + borsh::from_slice::(encoded_response) + { + // check if we got success + match decoded_msg { + ProtocolMsg::BootStandardResponse { nsm_response } + | ProtocolMsg::BootKeyForwardResponse { + nsm_response, + } => { + if let NsmResponse::Error(_) = nsm_response { + return; // do not run bridge if we got an error back + } + } + _ => { + // this really shouldn't happen + eprintln!("mismatched 
boot response, tcp to vsock bridge might not start"); + return; + } + }; + } else { + eprintln!("error decoding response msg, tcp to vsock bridge might not start"); + return; + } + + let pool_size = + manifest_envelope.manifest.pool_size.unwrap_or(1); + let host_port = manifest_envelope.manifest.app_host_port; + let host_addr = + SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), host_port); + + // derive the app socket, for vsock just use the app host port with same CID as the enclave socket, + // with usock just add ".appsock" suffix + let app_socket = match enclave_socket.with_port(host_port) { + Ok(value) => value, + Err(err) => { + eprintln!("unable to derive app socket from enclave socket: {err:?}, tcp to vsock bridge will not start"); + return; + } + }; + + let app_pool = match StreamPool::new(app_socket, pool_size) { + Ok(value) => value, + Err(err) => { + eprintln!("unable to create new app socket pool: {err:?}, tcp to vsock bridge will not start"); + return; + } + }; + + let bridge = HostBridge::new(app_pool, host_addr); + + bridge.tcp_to_vsock().await; // NOTE: this doesn't await for completion + } + _ => {} + } + } else { + eprintln!( + "error decoding request msg, tcp to vsock bridge might not start" + ); + } +}