diff --git a/.rubocop_todo.yml b/.rubocop_todo.yml index 2a5e34f0e..ee9cf2f1a 100644 --- a/.rubocop_todo.yml +++ b/.rubocop_todo.yml @@ -356,7 +356,6 @@ Lint/MissingSuper: - 'lib/syskit/robot/master_device_instance.rb' - 'lib/syskit/robot/slave_device_instance.rb' - 'lib/syskit/telemetry/agent/server.rb' - - 'lib/syskit/telemetry/ui/name_service.rb' - 'lib/syskit/test/stub_network.rb' # Offense count: 2 diff --git a/Rakefile b/Rakefile index ad3d5f956..882dd5450 100644 --- a/Rakefile +++ b/Rakefile @@ -28,19 +28,37 @@ def minitest_set_options(test_task, name) test_task.options = "#{TESTOPTS} #{minitest_args} -- --simplecov-name=#{name}" end -Rake::TestTask.new("test:core") do |t| +def core(early_deploy: false) + s = ":no-early-deploy" + if early_deploy + s = ":early-deploy" + early_deploy_setup = ["test/features/early_deploy.rb"] + end + + Rake::TestTask.new("test:core#{s}") do |t| + t.libs << "." + t.libs << "lib" + minitest_set_options(t, "core") + test_files = FileList["test/**/test_*.rb", *early_deploy_setup] + test_files = test_files + .exclude("test/ros/**/*.rb") + .exclude("test/gui/**/*.rb") + .exclude("test/live/**/*.rb") + .exclude("test/telemetry/**/*.rb") + t.test_files = test_files + t.warning = false + end +end + +Rake::TestTask.new("test:telemetry") do |t| t.libs << "." t.libs << "lib" - minitest_set_options(t, "core") - test_files = FileList["test/**/test_*.rb"] - test_files = test_files - .exclude("test/ros/**/*.rb") - .exclude("test/gui/**/*.rb") - .exclude("test/live/**/*.rb") - t.test_files = test_files + minitest_set_options(t, "telemetry") + t.test_files = FileList["test/telemetry/**/test_*.rb"] t.warning = false end +desc "Run separate tests that require a live syskit instance" task "test:live" do tests = Dir.enum_for(:glob, "test/live/test_*.rb").to_a unless system(File.join("test", "live", "run"), *tests) @@ -48,6 +66,8 @@ task "test:live" do exit 1 end end + +desc "run gui-only tests" Rake::TestTask.new("test:gui") do |t| t.libs << "." 
t.libs << "lib" @@ -57,7 +77,13 @@ Rake::TestTask.new("test:gui") do |t| t.warning = false end -task "test" => ["test:gui", "test:core", "test:live"] +core early_deploy: true +core +desc "Run core library tests, excluding GUI and live tests" +task "test:core" => ["test:core:no-early-deploy", "test:core:early-deploy"] + +desc "Run all tests" +task "test" => ["test:gui", "test:core", "test:live", "test:telemetry"] task "rubocop" do raise "rubocop failed" unless system(ENV["RUBOCOP_CMD"] || "rubocop") diff --git a/lib/syskit.rb b/lib/syskit.rb index 08a8c1b43..5bc5941c0 100644 --- a/lib/syskit.rb +++ b/lib/syskit.rb @@ -42,7 +42,7 @@ module ProcessManagers require "syskit/roby_app/log_transfer_server" require "syskit/process_managers/process_base" require "syskit/process_managers/status" -require "syskit/process_managers/remote/server/log_upload_state" +require "syskit/roby_app/log_transfer_server/log_upload_state" require "syskit/process_managers/remote/protocol" require "syskit/process_managers/remote/loader" require "syskit/process_managers/remote/manager" @@ -127,6 +127,7 @@ module ProcessManagers require "syskit/actual_data_flow_graph" require "syskit/data_flow" require "syskit/connection_graphs" +require "syskit/network_generation_exception_helpers" require "syskit/exceptions" require "syskit/network_generation" require "syskit/runtime" diff --git a/lib/syskit/base.rb b/lib/syskit/base.rb index 64b6f3c5c..d2d6f40f8 100644 --- a/lib/syskit/base.rb +++ b/lib/syskit/base.rb @@ -2,6 +2,7 @@ require "logger" require "utilrb/logger" +require "syskit/network_generation_exception_helpers" require "syskit/exceptions" require "facets/string/snakecase" diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 66e01a84e..8d47e92f3 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -2,6 +2,7 @@ require "archive/tar/minitar" require "sys/filesystem" +require "syskit/roby_app/log_transfer_server/ftp_upload" module Syskit module CLI @@ -17,16 +18,40 @@ class CompressionFailed < RuntimeError; end class LogRuntimeArchive DEFAULT_MAX_ARCHIVE_SIZE = 10_000_000_000 # 10G + FTPParameters = Struct.new(:host, :port, :certificate, :user, :password, + :implicit_ftps, :max_upload_rate, + keyword_init: true) + + # Initializes the LogRuntimeArchive + # + # @param [Pathname] root_dir the logs directory + # @param [Pathname] target_dir the path to store the file in the archive, + # should be nil in transfer mode, as the logs will be transferred directly + # to the ftp server @see process_root_folder_transfer + # @param [Logger] logger the log structure def initialize( - root_dir, target_dir, - logger: LogRuntimeArchive.null_logger, - max_archive_size: DEFAULT_MAX_ARCHIVE_SIZE + root_dir, target_dir: nil, + logger: LogRuntimeArchive.null_logger ) @last_archive_index = {} @logger = logger @root_dir = root_dir @target_dir = target_dir - @max_archive_size = max_archive_size + end + + # Iterate over all datasets in a Roby log root folder and transfer them + # through FTP server + # + # @param [Params] server_params the FTP server parameters + # @return [Array] + def process_root_folder_transfer(server_params) + candidates = self.class.find_all_dataset_folders(@root_dir) + running = candidates.last + candidates.map do |child| + process_dataset_transfer( + child, server_params, @root_dir, full: child != running + ) + end end # Iterate over all datasets in a Roby log root folder and archive them @@ -37,11 +62,14 @@ def initialize( # @param 
[Pathname] root_dir the log root folder # @param [Pathname] target_dir the folder in which to save the # archived datasets - def process_root_folder + # @param [Integer] max_archive_size the max size of the archive + def process_root_folder(max_archive_size: DEFAULT_MAX_ARCHIVE_SIZE) candidates = self.class.find_all_dataset_folders(@root_dir) running = candidates.last candidates.each do |child| - process_dataset(child, full: child != running) + process_dataset( + child, max_archive_size: max_archive_size, full: child != running + ) end end @@ -54,40 +82,47 @@ def process_root_folder # bytes, at which the archiver starts deleting the oldest log files # @param [integer] free_space_delete_until: post-deletion free space in bytes, # at which the archiver stops deleting the oldest log files - def ensure_free_space(free_space_low_limit, free_space_delete_until) + # + # @return [Boolean] true if successfully ensured free space, meaning there is + # the required free space, false if deleting the files in this directory was + # not enough to free up the required space + def ensure_free_space( + free_space_low_limit, free_space_delete_until, directory: @target_dir + ) if free_space_low_limit > free_space_delete_until raise ArgumentError, "cannot erase files: freed limit is smaller than " \ "low limit space." end - stat = Sys::Filesystem.stat(@target_dir) + stat = Sys::Filesystem.stat(directory) available_space = stat.bytes_available - return if available_space > free_space_low_limit + return true if available_space > free_space_low_limit until available_space >= free_space_delete_until - files = @target_dir.each_child.select(&:file?) + files = directory.each_child.select(&:file?) if files.empty? Roby.warn "Cannot erase files: the folder is empty but the " \ "available space is smaller than the threshold." - break + return false end - removed_file = files.min + removed_file = files.min_by(&:mtime) size_removed_file = removed_file.size removed_file.unlink available_space += size_removed_file end + true end - def process_dataset(child, full:) + def process_dataset(child, full:, max_archive_size: DEFAULT_MAX_ARCHIVE_SIZE) use_existing = true loop do open_archive_for( child.basename.to_s, use_existing: use_existing ) do |io| - if io.tell > @max_archive_size + if io.tell > max_archive_size use_existing = false break end @@ -95,7 +130,7 @@ def process_dataset(child, full:) dataset_complete = self.class.archive_dataset( io, child, logger: @logger, full: full, - max_size: @max_archive_size + max_size: max_archive_size ) return if dataset_complete end @@ -104,6 +139,99 @@ def process_dataset(child, full:) end end + def process_dataset_transfer(child, server, root, full:) + self.class.transfer_dataset( + child, server, root, full: full, logger: @logger + ) + end + + TransferDatasetResult = Struct.new( + :complete, :transfer_results, keyword_init: true + ) do + def success? + transfer_results.all?(&:success?) + end + + def failures + transfer_results.find_all { !_1.success? } + end + end + + # Transfer the given dataset + def self.transfer_dataset( + dataset_path, server, root, + full:, logger: null_logger + ) + logger.info( + "Transfering dataset #{dataset_path} in " \ + "#{full ? 
'full' : 'partial'} mode" + ) + candidates = each_file_from_path(dataset_path).to_a + + complete, candidates = + if full + archive_filter_candidates_full(candidates) + else + archive_filter_candidates_partial(candidates) + end + + transfer_results = candidates.map do |child_path| + result = transfer_file(child_path, server, root) + child_path.unlink if result.success? + + result + end + + result = TransferDatasetResult.new( + complete: complete, transfer_results: transfer_results + ) + log_transfer_results(dataset_path, result, logger: logger) + end + + # Logs the transfer dataset results + # + # @param [String] the dataset path + # @param [TransferDatasetResult] the transfer dataset result + # @param [Logger] optional logger, if unfilled will use null logger + # + # @result [TransferDatasetResult] the received transfer dataset result + def self.log_transfer_results(dataset_path, result, logger: null_logger) + failed_results = result[:transfer_results].reject(&:success) + + if failed_results.empty? + logger.info( + "Transfering of " \ + "#{result[:complete] ? 'complete' : 'incomplete'} " \ + "#{dataset_path} finished" + ) + else + failed_results.each do |failed_result| + failed_message = + if failed_result.message + "with message : #{failed_result.message}" + end + logger.info( + "Failed on file #{failed_result.file} #{failed_message}" + ) + end + end + + result + end + + # Transfer a file to the central log server via FTP + # + # @return [LogUploadState:Result] + def self.transfer_file(file, server, root) + ftp = RobyApp::LogTransferServer::FTPUpload.new( + server.host, server.port, server.certificate, server.user, + server.password, file, + max_upload_rate: server.max_upload_rate || Float::INFINITY, + implicit_ftps: server.implicit_ftps + ) + ftp.open_and_transfer(root: root) + end + # Create or open an archive # # The method will find an archive to open or create, do it and @@ -158,7 +286,7 @@ def self.find_all_dataset_folders(root_dir) child if (child / "info.yml").file? end - candidates.compact.sort_by { _1.basename.to_s } + candidates.compact.sort_by { |a| a.basename.to_s } end # Safely add an entry into an archive, compressing it with zstd diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb old mode 100644 new mode 100755 index aa536b575..4dd1845a2 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -6,6 +6,7 @@ require "pathname" require "thor" require "syskit/cli/log_runtime_archive" +require "syskit/runtime/server/spawn_server" module Syskit module CLI @@ -16,7 +17,6 @@ def self.exit_on_failure? end desc "watch", "watch a dataset root folder and call archiver" - option :period, type: :numeric, default: 600, desc: "polling period in seconds" option :max_size, @@ -52,16 +52,114 @@ def self.exit_on_failure? 
def archive(root_dir, target_dir) root_dir = validate_directory_exists(root_dir) target_dir = validate_directory_exists(target_dir) - archiver = make_archiver(root_dir, target_dir) + archiver = make_archiver(root_dir, target_dir: target_dir) archiver.ensure_free_space( options[:free_space_low_limit] * 1_000_000, options[:free_space_freed_limit] * 1_000_000 ) - archiver.process_root_folder + archiver.process_root_folder( + max_archive_size: options[:max_size] * (1024**2) + ) + end + + desc "watch_transfer", "watches a dataset root folder \ + and periodically performs transfer" + option :period, + type: :numeric, default: 600, desc: "polling period in seconds" + option :max_upload_rate_mbps, + type: :numeric, default: 1_000, desc: "max upload rate in Mbps" + def watch_transfer( # rubocop:disable Metrics/ParameterLists + source_dir, host, port, certificate_path, user, password, implicit_ftps + ) + loop do + begin + transfer(source_dir, host, port, certificate_path, user, password, + implicit_ftps) + rescue Errno::ENOSPC + next + end + + puts "Transferred pending logs, sleeping #{options[:period]}s" + sleep options[:period] + end + end + + desc "transfer", "transfers the datasets" + option :max_upload_rate_mbps, + type: :numeric, default: 1_000, desc: "max upload rate in Mbps" + def transfer( # rubocop:disable Metrics/ParameterLists + source_dir, host, port, certificate_path, user, password, implicit_ftps + ) + source_dir = validate_directory_exists(source_dir) + archiver = make_archiver(source_dir) + + server_params = LogRuntimeArchive::FTPParameters.new( + host: host, port: port, certificate: File.read(certificate_path), + user: user, password: password, + implicit_ftps: implicit_ftps, + max_upload_rate: rate_mbps_to_bps(options[:max_upload_rate_mbps]) + ) + archiver.process_root_folder_transfer(server_params) + end + + desc "transfer_server", "creates the log transfer FTP server \ + that runs on the main computer" + def transfer_server( # rubocop:disable Metrics/ParameterLists + target_dir, host, port, certfile_path, user, password, implicit_ftps + ) + server = create_server(target_dir, host, port, certfile_path, user, + password, implicit_ftps == "true") + server.run end - no_commands do + desc "watch_ensure_free_space", "watches the ensure free space process" + option :period, + type: :numeric, default: 10, desc: "polling period in seconds" + option :free_space_low_limit, + type: :numeric, default: 5_000, desc: "start deleting files if \ + available space is below this threshold (threshold in MB)" + option :free_space_freed_limit, + type: :numeric, default: 25_000, desc: "stop deleting files if \ + available space is above this threshold (threshold in MB)" + def watch_ensure_free_space(source_dir) + loop do + ensure_free_space(source_dir) + + puts "Ensured free space in #{source_dir}, " \ + "sleeping #{options[:period]}s" + sleep options[:period] + end + end + + desc "ensure_free_space", "ensures there is free space, if not, start \ + deleting files" + option :free_space_low_limit, + type: :numeric, default: 5_000, desc: "start deleting files if \ + available space is below this threshold (threshold in MB)" + option :free_space_freed_limit, + type: :numeric, default: 25_000, desc: "stop deleting files if \ + available space is above this threshold (threshold in MB)" + def ensure_free_space(source_dir) + source_dir = validate_directory_exists(source_dir) + + archiver = make_archiver(source_dir) + + source_dir.children.select(&:directory?).sort_by(&:mtime).each do |child| + break if 
archiver.ensure_free_space( + options[:free_space_low_limit] * 1_000_000, + options[:free_space_freed_limit] * 1_000_000, + directory: (source_dir / child) + ) + end + end + + no_commands do # rubocop:disable Metrics/BlockLength + # Converts rate in Mbps to bps + def rate_mbps_to_bps(rate_mbps) + rate_mbps * (10**6) + end + def validate_directory_exists(dir) dir = Pathname.new(dir) unless dir.directory? @@ -72,12 +170,24 @@ def validate_directory_exists(dir) dir end - def make_archiver(root_dir, target_dir) + def make_archiver(root_dir, target_dir: nil) logger = Logger.new($stdout) Syskit::CLI::LogRuntimeArchive.new( - root_dir, target_dir, - logger: logger, max_archive_size: options[:max_size] * (1024**2) + root_dir, + target_dir: target_dir, logger: logger + ) + end + + def create_server( # rubocop:disable Metrics/ParameterLists + target_dir, host, port, certificate, user, password, implicit_ftps + ) + Runtime::Server::SpawnServer.new( + target_dir, user, password, + certificate, + interface: host, + port: port, + implicit_ftps: implicit_ftps ) end end diff --git a/lib/syskit/dynamic_port_binding.rb b/lib/syskit/dynamic_port_binding.rb index b503a28d9..37670ac8f 100644 --- a/lib/syskit/dynamic_port_binding.rb +++ b/lib/syskit/dynamic_port_binding.rb @@ -99,6 +99,10 @@ def attach_to_task(task) # the port was updated, and false otherwise. The tuple's second element # is the new resolved port which may be nil if no ports can be found def update + if @resolved_port && @port_resolver&.current_selection_valid?(@resolved_port) + return false, @resolved_port + end + port = @port_resolver&.update return false, @resolved_port if @resolved_port == port @@ -199,6 +203,7 @@ def initialize( # Method called by {Accessor} to create the accessor object from a # port def create_accessor(port) + @policy = { init: port.model.init_policy? }.merge(@policy) port.reader(**policy) end @@ -294,6 +299,10 @@ def initialize(plan, matcher) @last_provider_task = nil end + def current_selection_valid?(port) + @matcher === port + end + def update port = @matcher.each_in_plan(@plan).first port&.to_actual_port @@ -319,6 +328,10 @@ def initialize(port) @port = port end + def current_selection_valid?(port) + !!port.component.plan + end + def update @port if @port.component.plan end @@ -337,6 +350,10 @@ def initialize(port) @port = port end + def current_selection_valid?(port) + !!port.component.to_task.plan + end + def update @port if @port.component.to_task.plan end diff --git a/lib/syskit/exceptions.rb b/lib/syskit/exceptions.rb index 25085e706..c92324fd6 100644 --- a/lib/syskit/exceptions.rb +++ b/lib/syskit/exceptions.rb @@ -437,6 +437,8 @@ def pretty_print(pp) end class ConflictingDeviceAllocation < SpecError + include Syskit::NetworkGenerationsExceptionHelpers + attr_reader :device, :tasks, :inputs def can_merge? 
@@ -447,38 +449,54 @@ def initialize(device, task0, task1, toplevel_tasks_to_requirements = {}) @device = device @tasks = [task0, task1] - solver = NetworkGeneration::MergeSolver.new(task0.plan) - @merge_result = solver.resolve_merge(task0, task1, {}) @involved_definitions = @tasks.map do |t| find_all_related_syskit_actions(t, toplevel_tasks_to_requirements) end end - def find_all_related_syskit_actions(task, toplevel_tasks_to_requirements) - result = [] - while task - result.concat(toplevel_tasks_to_requirements[task] || []) - task = task.each_parent_task.first - end - result - end - def pretty_print(pp) pp.text "device '#{device.name}' of type #{device.model} is assigned " pp.text "to two tasks that cannot be merged" - pp.breakable - @merge_result.pretty_print_failure(pp) - @involved_definitions.each_with_index do |defs, i| - next if defs.empty? + print_failed_merge_chain(pp, *@tasks) + @tasks.zip(@involved_definitions).each do |t, defs| + print_dependent_definitions(pp, t, defs) + end + end + end - pp.breakable - pp.text "Chain #{i + 1} is needed by the following definitions:" - pp.nest(2) do - defs.each do |d| - pp.breakable - pp.text d.to_s - end + class ConflictingDeploymentAllocation < SpecError + include Syskit::NetworkGenerationsExceptionHelpers + + attr_reader :deployment_to_tasks + + def initialize(deployment_to_tasks, toplevel_tasks_to_requirements = {}) + @deployment_to_tasks = deployment_to_tasks + @toplevel_tasks_to_requirements = toplevel_tasks_to_requirements + @deployment_to_execution_agent = \ + deployment_to_tasks.transform_values do |tasks| + tasks.first.execution_agent + end + end + + def pretty_print(pp) + deployment_to_tasks.each do |orocos_name, tasks| + agent = @deployment_to_execution_agent[orocos_name] + deployment_m = agent.deployed_orogen_model_by_name(orocos_name) + pp.text( + "deployed task '#{orocos_name}' from deployment " \ + "'#{deployment_m.name}' defined in " \ + "'#{deployment_m.project.name}' on '#{agent.process_server_name}' " \ + "is assigned to #{tasks.size} tasks. Below is the list of " \ + "the dependent non-deployed actions. Right after the list " \ + "is a detailed explanation of why the first two tasks are not merged:" + ) + tasks.each do |t| + defs = find_all_related_syskit_actions( + t, @toplevel_tasks_to_requirements + ) + print_dependent_definitions(pp, t, defs) end + print_failed_merge_chain(pp, tasks[0], tasks[1]) end end end diff --git a/lib/syskit/interface/commands.rb b/lib/syskit/interface/commands.rb index b39f6c983..a2bf5336c 100644 --- a/lib/syskit/interface/commands.rb +++ b/lib/syskit/interface/commands.rb @@ -16,6 +16,22 @@ def deployments command :deployments, "returns information about running deployments" + # Return incremental update about deployments + # + # @return [Protocol::Deployment] + def poll_ready_deployments(known: []) + deployments = + plan.find_tasks(Syskit::Deployment).running.find_all(&:ready?) + deployment_ids = deployments.map { _1.droby_id.id } + new_deployments = + deployments.find_all { !known.include?(_1.droby_id.id) } + removed_deployments = + known.find_all { |id| !deployment_ids.include?(id) } + [new_deployments, removed_deployments] + end + command :poll_ready_deployments, + "incremental information about deployments" + # Save the configuration of all running tasks of the given model to disk # # @param [String,nil] name the section name for the new configuration. 
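The `poll_ready_deployments` command added above returns, for a set of already-known droby IDs, the deployments that became ready since the last call together with the IDs that have since disappeared, so a client can track deployments incrementally instead of re-fetching the full list each time. Below is a minimal client-side sketch of such a polling loop. It is an illustration only, not part of this patch: the `client` handle stands in for an already-connected Roby interface client, and reading identifiers back via `#id` and `#pid` on the marshalled deployment structs is an assumption about the protocol-side representation.

    # Hypothetical incremental polling loop built on poll_ready_deployments.
    # `client` is assumed to expose the command; #pid and #id on the returned
    # structs are assumptions about the marshalled Deployment representation.
    known = []
    loop do
      ready, removed = client.poll_ready_deployments(known: known)
      ready.each { |d| puts "deployment #{d.pid} became ready" }
      removed.each { |id| puts "deployment #{id} stopped" }
      known = (known - removed) + ready.map(&:id)
      sleep 1
    end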
diff --git a/lib/syskit/interface/v2/protocol.rb b/lib/syskit/interface/v2/protocol.rb index 36a32bb8e..4d2d335db 100644 --- a/lib/syskit/interface/v2/protocol.rb +++ b/lib/syskit/interface/v2/protocol.rb @@ -9,6 +9,8 @@ module V2 module Protocol ROBY_TASK_MEMBERS = Roby::Interface::V2::Protocol::Task.new.members + DeviceModel = Struct.new(:name, keyword_init: true) + MasterDeviceInstance = Struct.new(:name, :model, keyword_init: true) Deployment = Struct.new( *ROBY_TASK_MEMBERS, :pid, :ready_since, :deployed_tasks, keyword_init: true @@ -36,12 +38,27 @@ def self.register_marshallers(protocol) protocol.add_marshaller( Syskit::Deployment, &method(:marshal_deployment_task) ) + protocol.add_marshaller( + Syskit::Robot::MasterDeviceInstance, + &method(:marshal_master_device_instance) + ) protocol.allow_objects( Orocos::RubyTasks::TaskContext, Orocos::RubyTasks::StubTaskContext ) end + def self.marshal_device_model(model) + DeviceModel.new(name: model.name) + end + + def self.marshal_master_device_instance(_channel, device) + MasterDeviceInstance.new( + name: device.name, + model: marshal_device_model(device.device_model) + ) + end + def self.marshal_remote_task_handle(name, remote_task_handle) ior = remote_task_handle.handle.ior model_name = remote_task_handle.handle.model.name diff --git a/lib/syskit/models/port.rb b/lib/syskit/models/port.rb index 959907074..ceaf1c158 100644 --- a/lib/syskit/models/port.rb +++ b/lib/syskit/models/port.rb @@ -108,16 +108,7 @@ def connect_to(in_port, policy = {}) if out_port == self if in_port.respond_to?(:to_component_port) in_port = in_port.to_component_port - if !out_port.output? - raise WrongPortConnectionDirection.new(self, in_port), "cannot connect #{out_port} to #{in_port}: #{out_port} is not an output port" - elsif !in_port.input? - raise WrongPortConnectionDirection.new(self, in_port), "cannot connect #{out_port} to #{in_port}: #{in_port} is not an input port" - elsif out_port.component_model == in_port.component_model - raise SelfConnection.new(out_port, in_port), "cannot connect #{out_port} to #{in_port}: they are both ports of the same component" - elsif out_port.type != in_port.type - raise WrongPortConnectionTypes.new(self, in_port), "cannot connect #{out_port} to #{in_port}: types mismatch" - end - + validate_connection(out_port, in_port) component_model.connect_ports(in_port.component_model, [out_port.name, in_port.name] => policy) else Syskit.connect self, in_port, policy @@ -128,6 +119,28 @@ def connect_to(in_port, policy = {}) end end + def validate_connection(out_port, in_port) + unless out_port.output? + raise WrongPortConnectionDirection.new(self, in_port), + "cannot connect #{out_port} to #{in_port}: " \ + "#{out_port} is not an output port" + end + unless in_port.input? 
+ raise WrongPortConnectionDirection.new(self, in_port), + "cannot connect #{out_port} to #{in_port}: " \ + "#{in_port} is not an input port" + end + if out_port.component_model == in_port.component_model + raise SelfConnection.new(out_port, in_port), + "cannot connect #{out_port} to #{in_port}: " \ + "they are both ports of the same component" + end + unless out_port.type == in_port.type + raise WrongPortConnectionTypes.new(self, in_port), + "cannot connect #{out_port} to #{in_port}: types mismatch" + end + end + # Tests whether self is connected to the provided port def connected_to?(sink_port) source_port = try_to_component_port diff --git a/lib/syskit/network_generation/dataflow_dynamics.rb b/lib/syskit/network_generation/dataflow_dynamics.rb index 9ce1cfa88..03fd89f91 100644 --- a/lib/syskit/network_generation/dataflow_dynamics.rb +++ b/lib/syskit/network_generation/dataflow_dynamics.rb @@ -553,6 +553,16 @@ def compute_connection_policies policy_graph end + def merge_policy(explicit_policy, computed_policy) + merged_policy = computed_policy.merge(explicit_policy) + + if merged_policy[:type] == :data + merged_policy.delete(:size) + end + + merged_policy + end + # @api private # # Compute the policies for all connections starting from a given task @@ -563,13 +573,10 @@ def compute_policies_from(connection_graph, source_task, policy_graph = {}) mappings.each_with_object({}) do |(port_pair, policy), h| policy = policy.dup fallback_policy = policy.delete(:fallback_policy) - if policy.empty? - h[port_pair] = - policy_for(source_task, *port_pair, sink_task, - fallback_policy) - else - h[port_pair] = policy - end + computed_policy = policy_for( + source_task, *port_pair, sink_task, fallback_policy + ) + h[port_pair] = merge_policy(policy, computed_policy) end policy_graph[[source_task, sink_task]] = computed_policies end @@ -603,23 +610,24 @@ def policy_for( sink_port_m = sink_port.model if sink_port_m.needs_reliable_connection? - compute_reliable_connection_policy( + policy = compute_reliable_connection_policy( source_port, sink_port, fallback_policy ) elsif sink_port_m.required_connection_type == :data policy = Orocos::Port.prepare_policy(type: :data) DataFlowDynamics.debug { " result: #{policy}" } - policy elsif sink_port_m.required_connection_type == :buffer policy = Orocos::Port.prepare_policy(type: :buffer, size: 1) DataFlowDynamics.debug { " result: #{policy}" } - policy else raise UnsupportedConnectionType, "unknown required connection type " \ "#{sink_port_m.required_connection_type} " \ "on #{sink_port}" end + + source_port_m = source_port.model + policy.merge(init: source_port_m.init_policy?) 
end def compute_reliable_connection_policy( diff --git a/lib/syskit/network_generation/engine.rb b/lib/syskit/network_generation/engine.rb index 4d294e535..d6edc0952 100644 --- a/lib/syskit/network_generation/engine.rb +++ b/lib/syskit/network_generation/engine.rb @@ -401,7 +401,7 @@ def finalize_deployed_tasks # This is required to merge the already existing compositions # with the ones in the plan - merge_solver.merge_identical_tasks + merge_solver.merge_compositions log_timepoint "merge" [selected_deployment_tasks, reused_deployed_tasks | newly_deployed_tasks] @@ -703,18 +703,27 @@ def compute_system_network( Engine.discover_requirement_tasks_from_plan(real_plan), garbage_collect: true, validate_abstract_network: true, - validate_generated_network: true + validate_generated_network: true, + default_deployment_group: Syskit.conf.deployment_group, + validate_deployed_network: (true if Syskit.conf.early_deploy?), + early_deploy: Syskit.conf.early_deploy? ) requirement_tasks = requirement_tasks.to_a instance_requirements = requirement_tasks.map(&:requirements) + merge_solver.merge_task_contexts_with_same_agent = early_deploy system_network_generator = SystemNetworkGenerator.new( - work_plan, event_logger: event_logger, merge_solver: merge_solver + work_plan, + event_logger: event_logger, + merge_solver: merge_solver, + default_deployment_group: default_deployment_group, + early_deploy: early_deploy ) toplevel_tasks = system_network_generator.generate( instance_requirements, garbage_collect: garbage_collect, validate_abstract_network: validate_abstract_network, - validate_generated_network: validate_generated_network + validate_generated_network: validate_generated_network, + validate_deployed_network: validate_deployed_network ) Hash[requirement_tasks.zip(toplevel_tasks)] @@ -749,14 +758,19 @@ def resolve_system_network( validate_deployed_network: true, compute_deployments: true, default_deployment_group: Syskit.conf.deployment_group, - compute_policies: true + compute_policies: true, + early_deploy: Syskit.conf.early_deploy? ) + merge_solver.merge_task_contexts_with_same_agent = early_deploy required_instances = compute_system_network( requirement_tasks, garbage_collect: garbage_collect, validate_abstract_network: validate_abstract_network, - validate_generated_network: validate_generated_network + validate_generated_network: validate_generated_network, + default_deployment_group: (default_deployment_group if early_deploy), + validate_deployed_network: validate_deployed_network, + early_deploy: early_deploy && compute_deployments ) if compute_deployments @@ -804,8 +818,10 @@ def resolve( validate_abstract_network: true, validate_generated_network: true, validate_deployed_network: true, - validate_final_network: true + validate_final_network: true, + early_deploy: Syskit.conf.early_deploy? 
) + merge_solver.merge_task_contexts_with_same_agent = early_deploy required_instances = resolve_system_network( requirement_tasks, garbage_collect: garbage_collect, @@ -814,7 +830,8 @@ def resolve( compute_deployments: compute_deployments, default_deployment_group: default_deployment_group, compute_policies: compute_policies, - validate_deployed_network: validate_deployed_network + validate_deployed_network: validate_deployed_network, + early_deploy: early_deploy ) apply_system_network_to_plan( diff --git a/lib/syskit/network_generation/merge_solver.rb b/lib/syskit/network_generation/merge_solver.rb index 5040f4034..c435d361e 100644 --- a/lib/syskit/network_generation/merge_solver.rb +++ b/lib/syskit/network_generation/merge_solver.rb @@ -7,7 +7,7 @@ module NetworkGeneration # # This is the core of the system deployment algorithm implemented in # Engine - class MergeSolver + class MergeSolver # rubocop:disable Metrics/ClassLength extend Logger::Hierarchy include Logger::Hierarchy include Roby::DRoby::EventLogging @@ -34,6 +34,8 @@ class MergeSolver # information attr_reader :event_logger + attr_writer :merge_task_contexts_with_same_agent + def initialize(plan, event_logger: plan.event_logger) @plan = plan @event_logger = event_logger @@ -43,6 +45,7 @@ def initialize(plan, event_logger: plan.event_logger) @task_replacement_graph = Roby::Relations::BidirectionalDirectedAdjacencyGraph.new @resolved_replacements = {} @invalid_merges = Set.new + @merge_task_contexts_with_same_agent = false end def clear @@ -51,6 +54,10 @@ def clear @invalid_merges.clear end + def merge_task_contexts_with_same_agent? + @merge_task_contexts_with_same_agent + end + # Returns the task that is used in place of the given task # # @param [Roby::Task] the task for which we want to know the @@ -138,6 +145,7 @@ def apply_merge_group(merged_task_to_task) merged_task_to_task.each do |merged_task, task| unless merged_task.transaction_proxy? + plan.copy_task_marks(from: merged_task, to: task) plan.remove_task(merged_task) end register_replacement(merged_task, task) @@ -197,16 +205,7 @@ def self.merge_identical_tasks(plan) solver.merge_identical_tasks end - # Tests whether task.merge(target_task) is a valid operation - # - # @param [Syskit::TaskContext] task - # @param [Syskit::TaskContext] target_task - # - # @return [false,true] if false, the merge is not possible. If - # true, it is possible. If nil, the only thing that makes the - # merge impossible are missing inputs, and these tasks might - # therefore be merged if there was a dataflow cycle - def may_merge_task_contexts?(merged_task, task) + def may_merge_components?(merged_task, task) can_merge = log_nest(2) do task.can_merge?(merged_task) end @@ -219,9 +218,22 @@ def may_merge_task_contexts?(merged_task, task) return false end + true + end + + # Tests whether task.merge(target_task) is a valid operation + # + # @param [Syskit::TaskContext] task + # @param [Syskit::TaskContext] target_task + # + # @return [false,true] if false, the merge is not possible. If + # true, it is possible. 
+ def may_merge_task_contexts?(merged_task, task) + return false unless may_merge_components?(merged_task, task) + # Merges involving a deployed task can only involve a # non-deployed task as well - if task.execution_agent && merged_task.execution_agent + unless mergeable_agents?(merged_task, task) info "rejected: deployment attribute mismatches" return false end @@ -229,6 +241,16 @@ def may_merge_task_contexts?(merged_task, task) true end + def mergeable_agents?(merged_task, task) + unless merge_task_contexts_with_same_agent? + return !(task.execution_agent && merged_task.execution_agent) + end + + return false unless task.execution_agent && merged_task.execution_agent + + task.orocos_name == merged_task.orocos_name + end + def each_component_merge_candidate(task) # Get the set of candidates. We are checking if the tasks in # this set can be replaced by +task+ @@ -328,9 +350,7 @@ def composition_children_by_role(task) end def may_merge_compositions?(merged_task, task) - unless may_merge_task_contexts?(merged_task, task) - return false - end + return false unless may_merge_components?(merged_task, task) merged_task_children = composition_children_by_role(merged_task) task_children = composition_children_by_role(task) @@ -445,6 +465,22 @@ def pretty_print_failure(pp) :source_task, :source_port, :policy, :sink_port, :sink_task ) + def may_merge?(merged_task, task) + case merged_task + when TaskContext + may_merge_task_contexts?(merged_task, task) + when Composition + may_merge_compositions?(merged_task, task) + when Placeholder + may_merge_components?(merged_task, task) + else + raise ArgumentError, + "may_merge? called with #{merged_task} of type " \ + "#{merged_task.class}, expected either TaskContext, " \ + "Composition or Placeholder" + end + end + # Resolve merge between N tasks with the given tasks as seeds # # The method will cycle through the task's mismatching inputs (if @@ -461,7 +497,7 @@ def pretty_print_failure(pp) # # @return [MergeResolution] def resolve_merge(merged_task, task, mappings) - unless may_merge_task_contexts?(merged_task, task) + unless may_merge?(merged_task, task) return MergeResolution.new(mappings, merged_task, task, [], []) end diff --git a/lib/syskit/network_generation/system_network_deployer.rb b/lib/syskit/network_generation/system_network_deployer.rb index f4c12f912..1794cc6c7 100644 --- a/lib/syskit/network_generation/system_network_deployer.rb +++ b/lib/syskit/network_generation/system_network_deployer.rb @@ -56,15 +56,15 @@ def initialize(plan, # will run on the generated network # @return [Set] the set of tasks for which the deployer could # not find a deployment - def deploy(validate: true) + def deploy(validate: true, reuse_deployments: false, deployment_tasks: {}) debug "Deploying the system network" all_tasks = plan.find_local_tasks(TaskContext).to_a selected_deployments, missing_deployments = - select_deployments(all_tasks) + select_deployments(all_tasks, reuse: reuse_deployments) log_timepoint "select_deployments" - apply_selected_deployments(selected_deployments) + apply_selected_deployments(selected_deployments, deployment_tasks) log_timepoint "apply_selected_deployments" if validate @@ -75,13 +75,22 @@ def deploy(validate: true) missing_deployments end + def find_all_suitable_deployments_for(task, from: task) + self.class.find_all_suitable_deployments_for( + default_deployment_group, + task, + from: from + ) + end + # Find all candidates, resolved using deployment groups in the task hierarchy # # The method falls back to the default 
deployment group if no # deployments for the task could be found in the plan itself # # @return [Set] - def find_all_suitable_deployments_for(task, from: task) + def self.find_all_suitable_deployments_for(default_deployment_group, + task, from: task) candidates = from.requirements.deployment_group .find_all_suitable_deployments_for(task) return candidates unless candidates.empty? @@ -93,7 +102,9 @@ def find_all_suitable_deployments_for(task, from: task) end parents.each_with_object(Set.new) do |p, s| - s.merge(find_all_suitable_deployments_for(task, from: p)) + s.merge(find_all_suitable_deployments_for(default_deployment_group, + task, + from: p)) end end @@ -132,13 +143,10 @@ def find_suitable_deployment_for(task) # Find which deployments should be used for which tasks # # @param [[Component]] tasks the tasks to be deployed - # @param [Component=>Models::DeploymentGroup] the association - # between a component and the group that should be used to - # deploy it # @return [(Component=>Deployment,[Component])] the association # between components and the deployments that should be used # for them, and the list of components without deployments - def select_deployments(tasks) + def select_deployments(tasks, reuse: false) used_deployments = Set.new missing_deployments = Set.new selected_deployments = {} @@ -150,7 +158,7 @@ def select_deployments(tasks) if !selected missing_deployments << task - elsif used_deployments.include?(selected) + elsif !reuse && used_deployments.include?(selected) debug do machine, configured_deployment, task_name = *selected "#{task} resolves to #{configured_deployment}.#{task_name} " \ @@ -170,8 +178,7 @@ def select_deployments(tasks) # @param [Component=>Deployment] selected_deployments the # component-to-deployment association # @return [void] - def apply_selected_deployments(selected_deployments) - deployment_tasks = {} + def apply_selected_deployments(selected_deployments, deployment_tasks = {}) selected_deployments.each do |task, deployed_task| deployed_task, = deployed_task.instanciate( plan, @@ -205,12 +212,16 @@ def validate_deployed_network verify_all_configurations_exist end + def verify_all_tasks_deployed + self.class.verify_all_tasks_deployed(plan, default_deployment_group) + end + # Verifies that all tasks in the plan are deployed # # @param [Component=>DeploymentGroup] deployment_groups which # deployment groups has been used for which task. This is used # to generate the error messages when needed. 
- def verify_all_tasks_deployed + def self.verify_all_tasks_deployed(plan, default_deployment_group) not_deployed = plan.find_local_tasks(TaskContext) .not_finished.not_abstract .find_all { |t| !t.execution_agent } @@ -219,7 +230,10 @@ def verify_all_tasks_deployed tasks_with_candidates = {} not_deployed.each do |task| - candidates = find_all_suitable_deployments_for(task) + candidates = find_all_suitable_deployments_for( + default_deployment_group, + task + ) candidates = candidates.map do |deployed_task| task_name = deployed_task.mapped_task_name existing_tasks = diff --git a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index 77b15e946..9d701c295 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ b/lib/syskit/network_generation/system_network_generator.rb @@ -11,11 +11,21 @@ class SystemNetworkGenerator include Logger::Hierarchy include Roby::DRoby::EventLogging - attr_reader :plan, :event_logger, :merge_solver + attr_reader :plan, + :event_logger, + :merge_solver, + :default_deployment_group + + # Indicates if deployment stage happens within network generation + def early_deploy? + @early_deploy + end def initialize(plan, event_logger: plan.event_logger, - merge_solver: MergeSolver.new(plan)) + merge_solver: MergeSolver.new(plan), + default_deployment_group: nil, + early_deploy: false) if merge_solver.plan != plan raise ArgumentError, "gave #{merge_solver} as merge solver, which applies on #{merge_solver.plan}. Was expecting #{plan}" end @@ -23,9 +33,13 @@ def initialize(plan, @plan = plan @event_logger = event_logger @merge_solver = merge_solver + @default_deployment_group = default_deployment_group + @early_deploy = early_deploy end # Generate the network in the plan + # param [bool] validate_deployed_network controls whether or not the + # deployed network is validated, when #early_deploy? is true # # @return [HashArray>] the # list of toplevel tasks mapped to the instance requirements it @@ -33,7 +47,8 @@ def initialize(plan, def generate(instance_requirements, garbage_collect: true, validate_abstract_network: true, - validate_generated_network: true) + validate_generated_network: true, + validate_deployed_network: true) # We first generate a non-deployed network that fits all # requirements. 
@@ -41,7 +56,8 @@ def generate(instance_requirements, compute_system_network(instance_requirements, garbage_collect: garbage_collect, validate_abstract_network: validate_abstract_network, - validate_generated_network: validate_generated_network) + validate_generated_network: validate_generated_network, + validate_deployed_network: validate_deployed_network) end end @@ -188,17 +204,36 @@ def self.remove_abstract_composition_optional_children(plan) end end + def deploy(deployment_tasks) + network_deployer = SystemNetworkDeployer.new( + plan, + merge_solver: merge_solver, + default_deployment_group: default_deployment_group + ) + + network_deployer.deploy(validate: false, + reuse_deployments: true, + deployment_tasks: deployment_tasks) + end + # Compute in #plan the network needed to fullfill the requirements # # This network is neither validated nor tied to actual deployments def compute_system_network(instance_requirements, garbage_collect: true, validate_abstract_network: true, - validate_generated_network: true) + validate_generated_network: true, + validate_deployed_network: true) + @toplevel_tasks = log_timepoint_group "instanciate" do instanciate(instance_requirements) end + @toplevel_instance_requirements = instance_requirements + deployment_tasks = {} + + deploy(deployment_tasks) if early_deploy? + merge_solver.merge_identical_tasks log_timepoint "merge" Engine.instanciated_network_postprocessing.each do |block| @@ -207,7 +242,11 @@ def compute_system_network(instance_requirements, garbage_collect: true, end link_to_busses log_timepoint "link_to_busses" + + deploy(deployment_tasks) if early_deploy? + merge_solver.merge_identical_tasks + log_timepoint "merge" self.class.remove_abstract_composition_optional_children(plan) @@ -232,9 +271,9 @@ def compute_system_network(instance_requirements, garbage_collect: true, # And get rid of the 'permanent' marking we use to be able to # run static_garbage_collect - plan.each_task do |task| - plan.unmark_permanent_task(task) - end + plan.permanent_tasks + .find_all { |task| !task.kind_of?(Syskit::Deployment) } + .each { |task| plan.unmark_permanent_task(task) } Engine.system_network_postprocessing.each do |block| block.call(self, plan) @@ -251,6 +290,10 @@ def compute_system_network(instance_requirements, garbage_collect: true, log_timepoint "validate_generated_network" end + if early_deploy? && validate_deployed_network + self.validate_deployed_network + end + @toplevel_tasks end @@ -343,6 +386,24 @@ def self.verify_device_allocation(plan, toplevel_tasks_to_requirements = {}) end end + def self.verify_all_deployments_are_unique( + plan, + toplevel_tasks_to_requirements + ) + deployment_to_task_map = plan.find_local_tasks(Syskit::TaskContext) + .group_by(&:orocos_name) + + using_same_deployment = deployment_to_task_map.select do |_, tasks| + tasks.size > 1 + end + + return if using_same_deployment.empty? + + raise ConflictingDeploymentAllocation.new( + using_same_deployment, toplevel_tasks_to_requirements + ), "there are deployments used multiple times" + end + # Validates the network generated by {#compute_system_network} # # It performs the tests that are only needed on an abstract network, @@ -358,6 +419,21 @@ def validate_generated_network self.class.verify_device_allocation(plan, toplevel_tasks_to_requirements) super if defined? 
super end + + def validate_deployed_network + self.class.verify_all_tasks_deployed(plan, default_deployment_group) + self.class.verify_all_deployments_are_unique( + plan, toplevel_tasks_to_requirements.dup + ) + super if defined? super + end + + def self.verify_all_tasks_deployed(plan, default_deployment_group) + SystemNetworkDeployer.verify_all_tasks_deployed( + plan, + default_deployment_group + ) + end end end end diff --git a/lib/syskit/network_generation_exception_helpers.rb b/lib/syskit/network_generation_exception_helpers.rb new file mode 100644 index 000000000..969cece5a --- /dev/null +++ b/lib/syskit/network_generation_exception_helpers.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +module Syskit + # Common methods for network generation exception messages + module NetworkGenerationsExceptionHelpers + def find_all_related_syskit_actions(task, toplevel_tasks_to_requirements) + result = [] + while task + result.concat(toplevel_tasks_to_requirements[task] || []) + task = task.each_parent_task.first + end + result + end + + def print_dependent_definitions(pp, task, defs) + return if defs.empty? + + pp.breakable + pp.text "#{task} is needed by the following definitions:" + pp.nest(2) do + defs.each do |d| + pp.breakable + pp.text d.to_s + end + end + end + + def print_failed_merge_chain(pp, task0, task1) + solver = NetworkGeneration::MergeSolver.new(task0.plan) + @merge_result = solver.resolve_merge(task0, task1, {}) + pp.breakable + @merge_result.pretty_print_failure(pp) + end + end +end diff --git a/lib/syskit/process_managers/remote/manager.rb b/lib/syskit/process_managers/remote/manager.rb index 0c1d49945..c30596df9 100644 --- a/lib/syskit/process_managers/remote/manager.rb +++ b/lib/syskit/process_managers/remote/manager.rb @@ -16,7 +16,7 @@ module Remote # Defined here to make sure it is actually defined. Otherwise, the log # state reporting would fail at runtime, and unit-testing for this is # very hard. - LogUploadState = Server::LogUploadState + LogUploadState = RobyApp::LogTransferServer::LogUploadState # Syskit-side interface to the remote process server class Manager @@ -238,7 +238,7 @@ def queue_death_announcement def log_upload_file( host, port, certificate, user, password, localfile, max_upload_rate: Float::INFINITY, - implicit_ftps: RobyApp::LogTransferServer.use_implicit_ftps? + implicit_ftps: Runtime::Server.use_implicit_ftps? 
) socket.write(COMMAND_LOG_UPLOAD_FILE) Marshal.dump( diff --git a/lib/syskit/process_managers/remote/server.rb b/lib/syskit/process_managers/remote/server.rb index a5ede01fd..5f4efdf7f 100644 --- a/lib/syskit/process_managers/remote/server.rb +++ b/lib/syskit/process_managers/remote/server.rb @@ -19,7 +19,7 @@ module Server end require "syskit/process_managers/remote/protocol" -require "syskit/process_managers/remote/server/ftp_upload" -require "syskit/process_managers/remote/server/log_upload_state" +require "syskit/roby_app/log_transfer_server/ftp_upload" +require "syskit/roby_app/log_transfer_server/log_upload_state" require "syskit/process_managers/remote/server/process" require "syskit/process_managers/remote/server/server" diff --git a/lib/syskit/process_managers/remote/server/ftp_upload.rb b/lib/syskit/process_managers/remote/server/ftp_upload.rb deleted file mode 100644 index 9f13b5c44..000000000 --- a/lib/syskit/process_managers/remote/server/ftp_upload.rb +++ /dev/null @@ -1,98 +0,0 @@ -# frozen_string_literal: true - -module Syskit - module ProcessManagers - module Remote - module Server - # Encapsulation of the log file upload process - class FTPUpload - def initialize( # rubocop:disable Metrics/ParameterLists - host, port, certificate, user, password, file, - max_upload_rate: Float::INFINITY, - implicit_ftps: false - ) - - @host = host - @port = port - @certificate = certificate - @user = user - @password = password - @file = file - - @max_upload_rate = Float(max_upload_rate) - @implicit_ftps = implicit_ftps - end - - # Create a temporary file with the FTP server's public key, to pass - # to FTP.open - # - # @yieldparam [String] path the certificate path - def with_certificate - Tempfile.create do |cert_io| - cert_io.write @certificate - cert_io.flush - yield(cert_io.path) - end - end - - # Open the FTP connection - # - # @yieldparam [Net::FTP] - def open - with_certificate do |cert_path| - Net::FTP.open( - @host, - private_data_connection: false, port: @port, - implicit_ftps: @implicit_ftps, - ssl: { verify_mode: OpenSSL::SSL::VERIFY_PEER, - ca_file: cert_path } - ) do |ftp| - ftp.login(@user, @password) - yield(ftp) - end - end - end - - # Open the connection and transfer the file - # - # @return [LogUploadState::Result] - def open_and_transfer - open { |ftp| transfer(ftp) } - LogUploadState::Result.new(@file, true, nil) - rescue StandardError => e - LogUploadState::Result.new(@file, false, e.message) - end - - # Do transfer the file through the given connection - # - # @param [Net::FTP] ftp - def transfer(ftp) - last = Time.now - File.open(@file) do |file_io| - ftp.storbinary("STOR #{File.basename(@file)}", - file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| - now = Time.now - rate_limit(buf.size, now, last) - last = Time.now - end - end - end - - # @api private - # - # Sleep when needed to keep the expected transfer rate - def rate_limit(chunk_size, now, last) - duration = now - last - exp_duration = chunk_size / @max_upload_rate - # Do not wait, but do not try to "make up" for the bandwidth - # we did not use. 
The goal is to not affect the rest of the - # system - return if duration > exp_duration - - sleep(exp_duration - duration) - end - end - end - end - end -end diff --git a/lib/syskit/process_managers/remote/server/log_upload_state.rb b/lib/syskit/process_managers/remote/server/log_upload_state.rb deleted file mode 100644 index 5c5e7602a..000000000 --- a/lib/syskit/process_managers/remote/server/log_upload_state.rb +++ /dev/null @@ -1,29 +0,0 @@ -# frozen_string_literal: true - -module Syskit - module ProcessManagers - module Remote - module Server - # State of the asynchronous file transfers managed by {Server} - class LogUploadState - attr_reader :pending_count - - Result = Struct.new :file, :success, :message do - def success? - success - end - end - - def initialize(pending_count, results) - @pending_count = pending_count - @results = results - end - - def each_result(&block) - @results.each(&block) - end - end - end - end - end -end diff --git a/lib/syskit/process_managers/remote/server/server.rb b/lib/syskit/process_managers/remote/server/server.rb index dc62a96d6..3b0afb69e 100644 --- a/lib/syskit/process_managers/remote/server/server.rb +++ b/lib/syskit/process_managers/remote/server/server.rb @@ -337,7 +337,7 @@ def handle_command(socket) # :nodoc: create_log_dir(time_tag, metadata) socket.write(RET_YES) rescue StandardError => e - warn "failed to create log directory #{log_dir}: " \ + warn "failed to create log directory #{app.log_dir}: " \ "#{e.message}" (e.backtrace || []).each do |line| warn " #{line}" @@ -581,13 +581,15 @@ def log_upload_file(socket, parameters) localfile = log_upload_sanitize_path(Pathname(localfile)) rescue Exception => e # rubocop:disable Lint/RescueException @log_upload_results_queue << - LogUploadState::Result.new(localfile, false, e.message) + RobyApp::LogTransferServer::LogUploadState::Result.new( + localfile, false, e.message + ) return end info "queueing upload of #{localfile} to #{host}:#{port}" @log_upload_command_queue << - FTPUpload.new( + RobyApp::LogTransferServer::FTPUpload.new( host, port, certificate, user, password, localfile, max_upload_rate: max_upload_rate || Float::INFINITY, @@ -631,7 +633,9 @@ def log_upload_state end end - LogUploadState.new(@log_upload_pending.value, results) + RobyApp::LogTransferServer::LogUploadState.new( + @log_upload_pending.value, results + ) end end end diff --git a/lib/syskit/process_managers/unmanaged/process.rb b/lib/syskit/process_managers/unmanaged/process.rb index 6b6acfb84..0b071fc79 100644 --- a/lib/syskit/process_managers/unmanaged/process.rb +++ b/lib/syskit/process_managers/unmanaged/process.rb @@ -111,19 +111,29 @@ def spawn(_options = {}) # Calls the name service until all of the tasks are resolved. Ignores # whenever a Orocos::NotFound exception is raised. # + # @param [Float] warning_period period for warning message in seconds + # # @raises RuntimeError # @raises Orocos::CORBA::ComError # @return [Hash] - def name_service_get_all_tasks + def name_service_get_all_tasks(warning_period: 5.0) expected_names = mapped_task_names.dup result = {} + warning_time_deadline = Time.at(0) + until expected_names.empty? expected_names.delete_if do |name| result[name] = name_service.get(name) rescue Orocos::NotFound + if Time.now > warning_time_deadline + ::Robot.warn "could not find unmanaged task #{name}" + warning_time_deadline = Time.now + warning_period + end false end + + sleep 0.1 if expected_names.any? 
end result end diff --git a/lib/syskit/queries/port_matcher.rb b/lib/syskit/queries/port_matcher.rb index e670fc00b..bce3c7c40 100644 --- a/lib/syskit/queries/port_matcher.rb +++ b/lib/syskit/queries/port_matcher.rb @@ -57,9 +57,9 @@ def with_type(type) def ===(port) return unless port.kind_of?(Port) - (@name_filter === object.name) && - (!@type_filter || @type_filter == object.type) && - (@component_matcher === object.component) + (@name_filter === port.name) && + (!@type_filter || @type_filter == port.type) && + (@component_matcher === port.component) end def each_in_plan(plan, &block) diff --git a/lib/syskit/roby_app/configuration.rb b/lib/syskit/roby_app/configuration.rb index ad037210c..c4769c4f4 100644 --- a/lib/syskit/roby_app/configuration.rb +++ b/lib/syskit/roby_app/configuration.rb @@ -121,6 +121,24 @@ class Configuration # likely want this attr_predicate :kill_all_on_process_server_connection?, true + # Indicates where the deployment stage happens + # + # If false, it will happen at the end of the whole network generation + # (the historical behaviour). If true, it will happen just after + # instantiation + # + # The default is false + # + # @see early_deploy= + def early_deploy? + @early_deploy + end + + # Controls where the deployment stage happens + # + # @see early_deploy? + attr_writer :early_deploy + # Controls whether the orogen types should be exported as Ruby # constants # @@ -154,6 +172,7 @@ def initialize(app) @kill_all_on_process_server_connection = false @register_self_on_name_server = (ENV["SYSKIT_REGISTER_SELF_ON_NAME_SERVER"] != "0") @strict_model_for = false + @early_deploy = false @log_rotation_period = nil @log_transfer = LogTransferManager::Configuration.new( @@ -165,7 +184,7 @@ def initialize(app) target_dir: nil, # Use the app's log dir default_max_upload_rate: Float::INFINITY, max_upload_rates: {}, - implicit_ftps: LogTransferServer.use_implicit_ftps? + implicit_ftps: Runtime::Server.use_implicit_ftps? 
) clear diff --git a/lib/syskit/roby_app/log_transfer_manager.rb b/lib/syskit/roby_app/log_transfer_manager.rb index 121a4123d..9d9aa7e56 100644 --- a/lib/syskit/roby_app/log_transfer_manager.rb +++ b/lib/syskit/roby_app/log_transfer_manager.rb @@ -36,7 +36,7 @@ def server_start raise ArgumentError, "log transfer server already running" if @server server_update_self_spawned_conf - @server = LogTransferServer::SpawnServer.new( + @server = Runtime::Server::SpawnServer.new( @conf.target_dir, @conf.user, @conf.password, @self_signed_ca.private_certificate_path, interface: @conf.ip, diff --git a/lib/syskit/roby_app/log_transfer_server.rb b/lib/syskit/roby_app/log_transfer_server.rb index d28a66602..e19d378cd 100644 --- a/lib/syskit/roby_app/log_transfer_server.rb +++ b/lib/syskit/roby_app/log_transfer_server.rb @@ -5,6 +5,6 @@ require "ipaddr" require "pathname" -require "syskit/roby_app/log_transfer_server/write_only_disk_file_system" -require "syskit/roby_app/log_transfer_server/driver" -require "syskit/roby_app/log_transfer_server/spawn_server" +require "syskit/runtime/server/write_only_disk_file_system" +require "syskit/runtime/server/driver" +require "syskit/runtime/server/spawn_server" diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb new file mode 100644 index 000000000..228a98d9a --- /dev/null +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -0,0 +1,116 @@ +# frozen_string_literal: true + +require "syskit/roby_app/log_transfer_server/log_upload_state" + +module Syskit + module RobyApp + module LogTransferServer + # Encapsulation of the log file upload process + class FTPUpload + def initialize( # rubocop:disable Metrics/ParameterLists + host, port, certificate, user, password, file, + max_upload_rate: Float::INFINITY, + implicit_ftps: false + ) + + @host = host + @port = port + @certificate = certificate + @user = user + @password = password + @file = file + + @max_upload_rate = Float(max_upload_rate) + if @max_upload_rate <= 0 + raise ArgumentError, + "invalid value for max_upload_rate: given " \ + "#{@max_upload_rate}, but should be strictly positive" + end + @implicit_ftps = implicit_ftps + end + + # Create a temporary file with the FTP server's public key, to pass + # to FTP.open + # + # @yieldparam [String] path the certificate path + def with_certificate + Tempfile.create do |cert_io| + cert_io.write @certificate + cert_io.flush + yield(cert_io.path) + end + end + + # Open the FTP connection + # + # @yieldparam [Net::FTP] + def open + with_certificate do |cert_path| + Net::FTP.open( + @host, + private_data_connection: false, port: @port, + implicit_ftps: @implicit_ftps, + ssl: { verify_mode: OpenSSL::SSL::VERIFY_PEER, + ca_file: cert_path } + ) do |ftp| + ftp.login(@user, @password) + yield(ftp) + end + end + end + + # Open the connection and transfer the file + # + # @return [LogUploadState::Result] + def open_and_transfer(root: nil) + open { |ftp| transfer(ftp, root) } + LogUploadState::Result.new(@file, true, nil) + rescue StandardError => e + LogUploadState::Result.new(@file, false, e.message) + end + + def chdir_to_file_directory(ftp, root) + dataset_path = @file.relative_path_from(root).dirname + + dataset_path.each_filename do |folder| + ftp.chdir(folder) + rescue Net::FTPPermError => _e + ftp.mkdir(folder) + ftp.chdir(folder) + end + end + + # Do transfer the file through the given connection + # + # @param [Net::FTP] ftp + # @param [Pathname] root the archive root folder + def 
transfer(ftp, root) + last = Time.now + chdir_to_file_directory(ftp, root) if root + File.open(@file) do |file_io| + ftp.storbinary("STOR #{File.basename(@file)}", + file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| + now = Time.now + rate_limit(buf.size, now, last) + last = Time.now + end + end + end + + # @api private + # + # Sleep when needed to keep the expected transfer rate + def rate_limit(chunk_size, now, last) + duration = now - last + exp_duration = chunk_size / @max_upload_rate + # Do not wait, but do not try to "make up" for the bandwidth + # we did not use. The goal is to not affect the rest of the + # system + return if duration > exp_duration + + sleep(exp_duration - duration) + end + end + end + end +end diff --git a/lib/syskit/roby_app/log_transfer_server/log_upload_state.rb b/lib/syskit/roby_app/log_transfer_server/log_upload_state.rb new file mode 100644 index 000000000..709eaab26 --- /dev/null +++ b/lib/syskit/roby_app/log_transfer_server/log_upload_state.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module Syskit + module RobyApp + module LogTransferServer + # State of the asynchronous file transfers managed by {Server} + class LogUploadState + attr_reader :pending_count + + Result = Struct.new :file, :success, :message do + def success? + success + end + end + + def initialize(pending_count, results) + @pending_count = pending_count + @results = results + end + + def each_result(&block) + @results.each(&block) + end + end + end + end +end diff --git a/lib/syskit/roby_app/log_transfer_server/driver.rb b/lib/syskit/runtime/server/driver.rb similarity index 92% rename from lib/syskit/roby_app/log_transfer_server/driver.rb rename to lib/syskit/runtime/server/driver.rb index 5677232b3..7ff0ea380 100644 --- a/lib/syskit/roby_app/log_transfer_server/driver.rb +++ b/lib/syskit/runtime/server/driver.rb @@ -1,8 +1,10 @@ # frozen_string_literal: true +require "syskit/runtime/server/write_only_disk_file_system" + module Syskit - module RobyApp - module LogTransferServer + module Runtime + module Server # Driver for log transfer FTP server class Driver def initialize(user, password, data_dir) diff --git a/lib/syskit/roby_app/log_transfer_server/spawn_server.rb b/lib/syskit/runtime/server/spawn_server.rb similarity index 92% rename from lib/syskit/roby_app/log_transfer_server/spawn_server.rb rename to lib/syskit/runtime/server/spawn_server.rb index 1405d5c57..f69a22933 100644 --- a/lib/syskit/roby_app/log_transfer_server/spawn_server.rb +++ b/lib/syskit/runtime/server/spawn_server.rb @@ -1,10 +1,11 @@ # frozen_string_literal: true require "English" +require "syskit/runtime/server/driver" module Syskit - module RobyApp - module LogTransferServer # :nodoc: + module Runtime + module Server # :nodoc: # Whether we should configure client and server to use implicit FTPs by # default # @@ -20,13 +21,13 @@ class SpawnServer attr_reader :port # tgt_dir must be an absolute path - def initialize( + def initialize( # rubocop:disable Metrics/AbcSize, Metrics/ParameterLists tgt_dir, user, password, certfile_path, interface: "127.0.0.1", - implicit_ftps: LogTransferServer.use_implicit_ftps?, + implicit_ftps: Server.use_implicit_ftps?, port: 0, session_timeout: default_session_timeout, nat_ip: nil, @@ -85,7 +86,7 @@ def wait_until_stopped puts "FTP server started. 
Press ENTER or c-C to stop it" $stdout.flush begin - gets + sleep rescue Interrupt puts "Interrupt" end diff --git a/lib/syskit/roby_app/log_transfer_server/write_only_disk_file_system.rb b/lib/syskit/runtime/server/write_only_disk_file_system.rb similarity index 74% rename from lib/syskit/roby_app/log_transfer_server/write_only_disk_file_system.rb rename to lib/syskit/runtime/server/write_only_disk_file_system.rb index bf05a6152..0c753163c 100644 --- a/lib/syskit/roby_app/log_transfer_server/write_only_disk_file_system.rb +++ b/lib/syskit/runtime/server/write_only_disk_file_system.rb @@ -1,16 +1,23 @@ # frozen_string_literal: true module Syskit - module RobyApp - module LogTransferServer + module Runtime + module Server # Custom write-only file system that detects collision between files class WriteOnlyDiskFileSystem include Ftpd::DiskFileSystem::Base + include Ftpd::DiskFileSystem::Mkdir include Ftpd::DiskFileSystem::FileWriting include Ftpd::TranslateExceptions def initialize(data_dir) - set_data_dir data_dir + # Ftpd base methods expect data_dir to be a string + unless data_dir.respond_to?(:to_s) + raise ArgumentError, + "data_dir should be convertible into string" + end + + set_data_dir data_dir.to_s end # Write a file to disk if it does not already exist. diff --git a/lib/syskit/task_context.rb b/lib/syskit/task_context.rb index ff709859e..d130a617e 100644 --- a/lib/syskit/task_context.rb +++ b/lib/syskit/task_context.rb @@ -822,7 +822,7 @@ def dynamic_input_port_connections(existing_port_names) dynamic_ports.each do |name| if existing_port_names.include?(name) - Syskit.fatal( + fatal( "task #{orocos_task} did not clear #{name}, a dynamic input " \ "port, during cleanup, as it should have. Go fix it." ) @@ -853,7 +853,7 @@ def dynamic_output_port_connections(existing_port_names) dynamic_ports.each do |name| if existing_port_names.include?(name) - Syskit.fatal( + fatal( "task #{orocos_task} did not clear #{name}, a dynamic " \ "output port, during cleanup, as it should have. Go fix it." ) @@ -986,6 +986,9 @@ def setting_up!(promise) # (see Component#setup_failed!)_ def setup_failed!(exception) unless exception.kind_of?(Orocos::StateTransitionFailed) + fatal "#{exception} received while configuring #{orocos_name}, " \ + "expected a StateTransitionFailed error. The component is " \ + "put in quarantine and cannot be reused" execution_agent.register_task_context_in_fatal(orocos_name) end @@ -1036,6 +1039,10 @@ def setup_failed!(exception) start_event.achieve_asynchronously(promise, emit_on_success: false) promise.on_error do |exception| unless exception.kind_of?(Orocos::StateTransitionFailed) + fatal "#{exception} received while configuring " \ + "#{orocos_name}, expected a StateTransitionFailed " \ + "error. 
The component is put in quarantine and " \ + "cannot be reused" execution_agent.register_task_context_in_fatal(orocos_name) end end diff --git a/lib/syskit/telemetry/async.rb b/lib/syskit/telemetry/async.rb new file mode 100644 index 000000000..b7dacc731 --- /dev/null +++ b/lib/syskit/telemetry/async.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +require "syskit/telemetry/async/name_service" +require "syskit/telemetry/async/task_context" +require "syskit/telemetry/async/interface_object" +require "syskit/telemetry/async/readable_interface_object" +require "syskit/telemetry/async/listener" +require "syskit/telemetry/async/attribute" +require "syskit/telemetry/async/property" +require "syskit/telemetry/async/input_port" +require "syskit/telemetry/async/output_port" +require "syskit/telemetry/async/output_port_subfield" +require "syskit/telemetry/async/output_reader" +require "syskit/telemetry/async/port_read_manager" + +module Syskit + module Telemetry + # Asynchronous access to remote state + module Async + end + end +end diff --git a/lib/syskit/telemetry/async/attribute.rb b/lib/syskit/telemetry/async/attribute.rb new file mode 100644 index 000000000..9f4ea2fb8 --- /dev/null +++ b/lib/syskit/telemetry/async/attribute.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Callback-based API for remote task ports + class Attribute < ReadableInterfaceObject + def on_raw_change(&block) + on_raw_data(&block) + end + + def on_change(&block) + on_data(&block) + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/input_port.rb b/lib/syskit/telemetry/async/input_port.rb new file mode 100644 index 000000000..7f7b4c492 --- /dev/null +++ b/lib/syskit/telemetry/async/input_port.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Async interface compatible with the orocos.rb's API + class InputPort < ReadableInterfaceObject + def output? + false + end + + def input? + true + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/interface_object.rb b/lib/syskit/telemetry/async/interface_object.rb new file mode 100644 index 000000000..4424c0b26 --- /dev/null +++ b/lib/syskit/telemetry/async/interface_object.rb @@ -0,0 +1,104 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Class that defines hooks for {InterfaceObjects} + # + # This class is needed so that we can cleanly overload the hook definition + # methods. + class InterfaceObjectHooks + include Roby::Hooks + include Roby::Hooks::InstanceHooks + + define_hooks :on_reachable + define_hooks :on_unreachable + define_hooks :on_error + end + + # Callback-based API to the orocos.rb property API + class InterfaceObject < InterfaceObjectHooks + # @return [TaskContext] the underlying task context + attr_reader :task_context + # @return [String] the property name + attr_reader :name + # @return [Class] the property type + attr_reader :type + + # Hash code + # + # Two interface objects are considered the same from a hash key + # perspective if they are of the same name, type and point to the + # same remote task, even if they are two different objects + attr_reader :hash + + def initialize(task_context, name, type) + super() + + @task_context = task_context + @hash = [task_context, self.class, name].hash + @name = name + @type = type + end + + def eql?(other) + other.task_context.eql?(task_context) && + other.name == name && + other.class == self.class + end + + def reachable? 
+ @raw_object + end + + # Tie this async property with the underlying direct access object + def reachable!(raw_object) + @raw_object = raw_object + run_hook :on_reachable, raw_object + end + + # Tie this async property with the underlying object + def unreachable! + @raw_object = nil + run_hook :on_unreachable + end + + def on_reachable(&block) + disposable = super + + block.call(@raw_object) if @raw_object + disposable + end + + def once_on_reachable(&block) + # on_reachable might call the block right away, in which case + # `listener` will be nil. Use the called flag to allow disposing + # of the listener the second time without causing a double call + # to the block + called = false + listener = on_reachable do + block.call unless called + called = true + listener&.dispose + end + end + + def new_sample + @type.zero + end + + def type_name + @type.name + end + + def to_proxy + self + end + + def full_name + "#{@task_context.name}.#{@name}" + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/listener.rb b/lib/syskit/telemetry/async/listener.rb new file mode 100644 index 000000000..c4f4f966e --- /dev/null +++ b/lib/syskit/telemetry/async/listener.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Adapter object to provide Orocos::Async listener API + # + # Unlike the disposable returned by the hooks, the listener API + # from Orocos::Async allows one to stop and start listening + class Listener + # @param [#call] register_with a callable that will register the callback + def initialize(register_with) + @register_with = register_with + end + + # Register the callback on the configured object and event + # + # Does nothing if the listener is already started + def start + return if @disposable + + @disposable = @register_with.call + end + + # De-registers the callback + # + # Does nothing if the listener is already stopped + def stop + @disposable&.dispose + @disposable = nil + end + + def dispose + stop + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/name_service.rb b/lib/syskit/telemetry/async/name_service.rb new file mode 100644 index 000000000..dd4fd79e6 --- /dev/null +++ b/lib/syskit/telemetry/async/name_service.rb @@ -0,0 +1,352 @@ +# frozen_string_literal: true + +require "orocos/async" + +module Syskit + module Telemetry + module Async + # In-process name service + # + # It is exclusively filled using information that comes from the async + # {Client} + class NameService < Orocos::NameServiceBase + # A new NameService instance + # + # @param [Concurrent::ThreadPoolExecutor] discovery_executor thread pool + # used to resolve remote tasks in the background + # @param [PortReadManager] port_read_manager manager in charge of the data + # readers created on the discovered tasks' ports
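+ #
+ # @example feeding the name service (illustrative sketch; `deployed_tasks`
+ #   is assumed to be an array of objects responding to #name, #ior and
+ #   #orogen_model_name, e.g. deployment information received from the app)
+ #   name_service = Syskit::Telemetry::Async::NameService.new
+ #   # call on every update cycle, discovery runs in the background
+ #   name_service.async_update_tasks(deployed_tasks)
+ #   name_service.find("camera_driver") # => nil until discovery resolves it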
+ # @note The namespace is always "Local" + def initialize( + discovery_executor: + Concurrent::ThreadPoolExecutor.new(max_threads: 10), + port_read_manager: PortReadManager.new + ) + super() + + @iors = Concurrent::AtomicReference.new({}) + @registered_tasks = Concurrent::Hash.new + @task_added_callbacks = Concurrent::Array.new + @task_removed_callbacks = Concurrent::Array.new + @orogen_models = Concurrent::Hash.new + @discovery = {} + @discovery_executor = discovery_executor + @port_read_manager = port_read_manager + end + + def tasks + @registered_tasks.values + end + + def dispose + cleanup + @discovery_executor.shutdown + end + + def names + @registered_tasks.keys + end + + def include?(name) + @registered_tasks.key?(name) + end + + # Asynchronously update the name server given the known set of tasks + # + # After this call, any task not in the tasks parameter will have been + # removed from the name server + # + # @param [#ior,#name] list of IOR and name of remote tasks to resolve + # @return [Array] list of task names that are either known, or + # that are being discovered + def async_update_tasks(tasks) + iors = tasks.each_with_object({}) { |t, h| h[t.name] = t.ior } + @iors.set(iors) + + remove_changed_tasks(iors) + + # Resolve finished futures + resolve_discovered_tasks + + # Then check what tasks need to be discovered, and discover them + # + # We never spawn two futures to resolve the same name. Instead, + # when we get the feature result, we check whether the + # IOR has changed, and act accordingly + queue_new_tasks_discovery(tasks) + end + + # @api private + # + # Filter a list of tasks, queueing futures to discover the new ones + # + # @param [#ior,#name] tasks list of tasks to be discovered + def queue_new_tasks_discovery(tasks) + tasks.each do |t| + next if @discovery[t.name] + next if t.ior == @registered_tasks[t.name]&.identity + + async_discover_task(t) + end + end + + # Deregister and dispose of tasks who disappeared or have a + # different IOR + def remove_changed_tasks(iors) + @registered_tasks.dup.each do |name, task| + new_ior = iors[name] + deregister(name).dispose if !new_ior || task.identity != new_ior + end + end + + class AsyncDiscoveryError < RuntimeError; end + + AsyncDiscovery = Struct.new( + :task, :future, :ior, :async_task, keyword_init: true + ) do + def update_from_result + fulfilled, (ior, async_task), reason = future.result + unless fulfilled + raise AsyncDiscoveryError, + "unexpected error during asynchronous " \ + "task discovery: #{reason}" + end + + self.ior = ior + self.async_task = async_task + end + + def wait + future.result + end + + def resolved? + future.resolved? + end + end + + # @api private + # + # Create a future that discovers a remote task + def async_discover_task(task) + future = Concurrent::Promises.future_on(@discovery_executor) do + ior = @iors.get[task.name] + discover_task(task.name, ior, task.orogen_model_name) + end + @discovery[task.name] = AsyncDiscovery.new(task: task, future: future) + end + + # @api private + # + # Process the tasks that have been (asynchronously) discovered + def resolve_discovered_tasks + while (async_discovery = pop_discovered_task) + register( + async_discovery.async_task, + name: async_discovery.task.name + ) + end + end + + # Whether some discoveries have been queued but not yet resolved + def has_pending_discoveries? + !@discovery.empty? 
+ end + + # Wait for all pending discoveries to finish + def wait_for_task_discovery + @discovery.each_value(&:wait) + end + + # @api private + # + # Find a valid resolved task from the pending discoveries + # + # @return [AsyncDiscovery,nil] a valid resolved task or nil if there are + # none so far + def pop_discovered_task + loop do + return unless (async_discovery = pop_finished_discovery) + next unless finished_discovery_validate_ior(async_discovery) + next unless async_discovery.async_task + + return async_discovery + end + end + + # @api private + # + # Get one async discovery result from the terminated discovery futures + # + # Unlike {pop_discovered_task}, it will not try to find a valid discovered + # task. It only gets one finished result + # + # @return [AsyncDiscovery] + def pop_finished_discovery + async_discovery = @discovery.each_value.find(&:resolved?) + return unless async_discovery + + @discovery.delete(async_discovery.task.name) + async_discovery.update_from_result + async_discovery + end + + # @api private + # + # Validate that an async discovery result matches the expected IOR + # for the task + # + # To guard against race conditions, the name service object maintains + # a hash of the task names to the expected IORs. When we fetch an async + # discovery result, we validate that the found task is actually pointing + # to the expected IOR. If it is not, the result is thrown away and a + # new discovery is initiated + # + # @param [AsyncDiscovery] async_discovery + def finished_discovery_validate_ior(async_discovery) + current_ior = @iors.get[async_discovery.task.name] + return unless current_ior + + return true if async_discovery.ior == current_ior + + # The IOR associated with that name changed since the future + # started processing. Throw away the resolved task and start + # again + async_discovery.async_task&.dispose + async_discover_task(async_discovery.task) + false + end + + # @api private + # + # Discover a single task + # + # @param [String] name + # @param [String] ior + # @param [String] orogen_model_name + # @return [(String,(Orocos::Async::TaskContext,nil))] the IOR used to + # resolve the task, and the async taskcontext that represents it. The + # task is nil if the resolution failed + def discover_task(name, ior, orogen_model_name) + task = Orocos::TaskContext.new( + ior, + name: name, + model: orogen_model_from_name(orogen_model_name) + ) + + async_task = TaskContext.discover( + task, port_read_manager: @port_read_manager + ) + + [ior, async_task] + rescue StandardError => e + warn "Failed discovery of task #{name}: #{e.message}" + [ior, nil] + end + + # Re-create the orogen model from its name + # + # @param [String] name + # @return [OroGen::Spec::TaskContext] + def orogen_model_from_name(name) + @orogen_models[name] ||= Orocos.create_orogen_task_context_model(name) + end + + # (see NameServiceBase#get) + def ior(name) + task = @registered_tasks[name] + if (identity = task&.identity) + return identity + end + + raise Orocos::NotFound, "task context #{name} cannot be found." + end + + # Return a task from its name, or nil if it does not exist + # + # @param [String] name + # @return [TaskContext,nil] + def find(name) + @registered_tasks[name] + end + + # Return a task from its name, or raise if it does not exist + # + # @param [String] name + # @return [TaskContext] + # @raise [Orocos::NotFound] + def get(name, **) + task = find(name) + return task if task + + raise Orocos::NotFound, "task context #{name} cannot be found." 
+ end + + # Registers the given {Orocos::TaskContext} on the name service. + # If a name is provided, it will be used as an alias. If no name is + # provided, the name of the task is used. This is true even if the + # task name is renamed later. + # + # @param [Orocos::TaskContext] task The task. + # @param [String] name Optional name which is used to register the task. + def register(task, name: task.name) + @registered_tasks[name] = task + trigger_task_added(name) + end + + # Deregisters the given name or task from the name service. + # + # @param [String,TaskContext] name The name or task + def deregister(name) + task = @registered_tasks.delete(name) + trigger_task_removed(name) + task + end + + # (see Base#cleanup) + def cleanup + names = @registered_tasks.keys + @registered_tasks.clear + @iors.set({}) + @discovery.clear + @orogen_models.clear + names.each { trigger_task_removed(_1) } + end + + def to_async + self + end + + def on_task_added(&block) + @task_added_callbacks << block + Roby.disposable { @task_added_callbacks.delete(block) } + end + + def trigger_task_added(name) + error = nil + @task_added_callbacks.each do |block| + block.call(name) + rescue RuntimeError => e + error = e + end + + raise error if error + end + + def on_task_removed(&block) + @task_removed_callbacks << block + Roby.disposable { @task_removed_callbacks.delete(block) } + end + + def trigger_task_removed(name) + error = nil + @task_removed_callbacks.each do |block| + block.call(name) + rescue RuntimeError => e + error = e + end + + raise error if error + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/output_port.rb b/lib/syskit/telemetry/async/output_port.rb new file mode 100644 index 000000000..55f88ab6d --- /dev/null +++ b/lib/syskit/telemetry/async/output_port.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Async interface compatible with the orocos.rb's API + class OutputPort < InterfaceObject + def initialize(task_context, name, type, port_read_manager) + super(task_context, name, type) + + @port_read_manager = port_read_manager + end + + def output? + true + end + + def input? + false + end + + def on_raw_data(period: 0.1, init: false, buffer_size: 1) + callback = proc do |value| + yield(value) if value + end + + register_with = proc do + @port_read_manager.register_callback( + self, callback, + period: period, init: init, buffer_size: buffer_size + ) + end + + listener = Listener.new(register_with) + listener.start + listener + end + + def on_data(period: 0.1, init: false, buffer_size: 1) + on_raw_data( + period: period, init: init, buffer_size: buffer_size + ) do |data| + yield Typelib.to_ruby(data) + end + end + + # Asynchronously create a data reader on this port + def reader(connect_on:, disconnect_on:, **policy) + OutputReader.new( + self, policy, + connect_on: connect_on, disconnect_on: disconnect_on + ) + end + + # Create a port-like accessor for a field of the port + def sub_port(subfields) + OutputPortSubfield.new(self, subfields, @port_read_manager) + end + + def type? 
+ true + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/output_port_subfield.rb b/lib/syskit/telemetry/async/output_port_subfield.rb new file mode 100644 index 000000000..336a24512 --- /dev/null +++ b/lib/syskit/telemetry/async/output_port_subfield.rb @@ -0,0 +1,119 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # An API compatible with {OutputPort} but that will give access to a sub-part + # of a data sample + # + # For instance, a field in a struct + class OutputPortSubfield < InterfaceObject + def initialize(port, subfield, port_read_manager) + @path = normalize_subfield_path(subfield) + subname = compute_subname(@path) + subtype = compute_subtype(port.type, @path) + + super(port.task_context, "#{port.name}#{subname.join}", subtype) + + @port_read_manager = port_read_manager + + @orig_port = port + @on_port_reachable = port.on_reachable do |raw| + reachable!(raw) + end + @on_port_unreachable = port.on_unreachable do + unreachable! + end + end + + def output? + true + end + + def input? + false + end + + def dispose + @on_port_reachable.dispose + @on_port_unreachable.dispose + end + + def on_raw_data(period: 0.1, init: false, buffer_size: 1) + callback = proc do |value| + yield(self.class.resolve_subfield(value, @path)) if value + end + + register_with = proc do + @port_read_manager.register_callback( + @orig_port, callback, + period: period, init: init, buffer_size: buffer_size + ) + end + + listener = Listener.new(register_with) + listener.start + listener + end + + def on_data(**policy) + on_raw_data(**policy) do |sample| + sample = Typelib.to_ruby(sample) + yield(sample) + end + end + + def sub_port(subfield) + OutputPortSubfield.new( + @orig_port, + @subfield + Array(subfield) + ) + end + + def self.resolve_subfield(root_sample, path) + path.inject(root_sample) do |sample, f| + break(nil) if f.kind_of?(Integer) && sample.size <= f + + sample.raw_get(f) + end + end + + def compute_subtype(type, path) + path.inject(type) do |t, f| + case f + when Integer + t.deference + else + t[f] + end + end + end + + def normalize_subfield_path(subfield) + subfield.map do |field| + if /^\d+$/.match?(field) + Integer(field) + else + field.to_s + end + end + end + + def compute_subname(path) + path.map do |field| + case field + when Integer + "[#{field}]" + else + ".#{field}" + end + end + end + + def type? 
+ true + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/output_reader.rb b/lib/syskit/telemetry/async/output_reader.rb new file mode 100644 index 000000000..9114ea3c4 --- /dev/null +++ b/lib/syskit/telemetry/async/output_reader.rb @@ -0,0 +1,137 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Holder for a resolved data reader + class OutputReader + # The async port this reader is connected to + attr_reader :port + + # The policy hash used to create this reader + attr_reader :policy + + def initialize(port, policy, connect_on:, disconnect_on:) + @port = port + @policy = policy.dup.freeze + + @cancel_event = Concurrent::Promises.resolvable_event + @connection_future = nil + @last_read_future = Concurrent::Promises.fulfilled_future(nil) + @reader = Concurrent::AtomicReference.new(nil) + + @connection_executor = connect_on + @disconnection_executor = disconnect_on + + @reachability_listener = port.on_reachable do |raw_port| + connect(raw_port, policy) + end + + @unreachability_listener = port.on_unreachable do + disconnect_on_unreachability + end + end + + def raw_reader + @reader.get + end + + def connected? + @reader.get + end + + def poll + resolve_connection unless @reader.get + end + + def resolve_connection + return unless @connection_future&.resolved? + + fulfilled, result, reason = @connection_future.result + if fulfilled + @reader.set(result) + else + warn "failed to create reader on #{@port}: #{reason}" + end + + @connection_future = nil + end + + def raw_read_with_result(executor, sample = nil, copy_old_data = true) # rubocop:disable Style/OptionalBooleanParameter + @last_read_future = @last_read_future.chain_on(executor) do + @reader.get&.raw_read_with_result(sample, copy_old_data) + end + end + + def raw_read(executor, sample = nil, copy_old_data: true) + raw_read_with_result(executor, sample, copy_old_data) + .then do |_, read_sample| + read_sample + end + end + + def raw_read_new(executor, sample = nil) + raw_read_with_result(executor, sample, false) + .then do |result, read_sample| + read_sample if result == Orocos::NEW_DATA + end + end + + # @api private + # + # Connect to the actual port + def connect(raw_port, policy) + if @reader.get + raise StateError, + "#connect called on an already connected reader" + end + + cancel_event = @cancel_event + future = Concurrent::Promises.future_on(@connection_executor) do + raw_port.reader(**policy) unless cancel_event.resolved? + end + @connection_future = future + end + + # @api private + # + # Internal disconnection method, leaving the reader reconnect when the + # port is reachable again + def disconnect_on_unreachability + @cancel_event.resolve + @cancel_event = Concurrent::Promises.resolvable_event + + disconnect_future = + (@connection_future || @last_read_future) + .then_on(@disconnection_executor, @reader.get) do |_, reader| + reader&.disconnect + end + + @reader.set(nil) + @connection_future = nil + @last_read_future = Concurrent::Promises.fulfilled_future(nil) + disconnect_future + end + + # Disconnect from the remote port + # + # Note that this discards any data that is still being read. The + # port will automatically reconnect + def disconnect + dispose + end + + # Disconnect and disable this reader + def dispose + @reachability_listener.dispose + @unreachability_listener.dispose + disconnect_on_unreachability + end + + def disposed? + @reachability_listener.disposed? 
+ end + end + end + end +end diff --git a/lib/syskit/telemetry/async/port_read_manager.rb b/lib/syskit/telemetry/async/port_read_manager.rb new file mode 100644 index 000000000..f07a46eee --- /dev/null +++ b/lib/syskit/telemetry/async/port_read_manager.rb @@ -0,0 +1,301 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # @api private + # + # Central class that manages data readers for ports + class PortReadManager + def initialize( + connection_executor: self.class.default_connection_executor, + disconnection_executor: self.class.default_disconnection_executor, + read_executor: self.class.default_read_executor + ) + @callbacks = {} + @pollers = {} + + @connection_executor = connection_executor + @disconnection_executor = disconnection_executor + @read_executor = read_executor + end + + CONNECTION_DEFAULT_THREADS = 20 + DISCONNECTION_DEFAULT_THREADS = 20 + READ_DEFAULT_THREADS = 5 + + def self.default_connection_executor + @default_connection_executor ||= + Concurrent::ThreadPoolExecutor.new( + max_threads: CONNECTION_DEFAULT_THREADS + ) + end + + def self.default_disconnection_executor + @default_disconnection_executor ||= + Concurrent::ThreadPoolExecutor.new( + max_threads: DISCONNECTION_DEFAULT_THREADS + ) + end + + def self.default_read_executor + @default_read_executor ||= + Concurrent::ThreadPoolExecutor.new( + max_threads: READ_DEFAULT_THREADS + ) + end + + Callback = Struct.new( + :port, :callback, :period, :buffer_size, + :init, :needs_last_received_value, keyword_init: true + ) do + def dispatch(value) + self.needs_last_received_value = false + callback.call(value) + end + end + + Poller = Struct.new( + :port, :reader, :next_time, :period, :read_future, + :propagate_last_received_value, :last_value, + keyword_init: true + ) do + def connected? + reader.connected? + end + + def dispose + reader.dispose + end + + def poll + reader.poll + end + + def scheduled_read? + read_future + end + + def result + read_future&.result + end + + def to_s(relative_to: PortReadManager.monotonic_time) + next_time_delta_ms = (next_time - relative_to) * 1000 if next_time + + format( + "poller %<name>s: connected=%<connected>s " \ + "scheduled=%<scheduled>s " \ + "next_time=%<next_time>.3f (in %<next_time_delta_ms>i ms)", + name: port.full_name, + next_time: next_time || 0, + next_time_delta_ms: next_time_delta_ms || 0, + connected: connected? ? "yes" : "no", + scheduled: read_future ? "yes" : "no" + ) + end + + def schedule_read_if_needed(now, executor) + return if next_time && next_time > now + + self.read_future = reader.raw_read_new(executor) + end + + def prepare_next_read(now) + self.next_time ||= now + delta_in_periods = ((now - next_time) / period).ceil + # delta_in_periods == 0 should be impossible here, but + # guarding against it costs little + self.next_time += [delta_in_periods, 1].max * period + self.read_future = nil + end + + def resolved_read? + read_future&.resolved? + end + + def reset_read_tracking + self.read_future = nil + self.next_time = nil + end + + def policy + reader&.policy + end + end + + # Register a callback for data from a port + # + # @param [Async::OutputPort] port the port whose data is needed + # @param [#call] callback the object that will receive data when + # available + # @param [Numeric] period reading period in seconds + # @param [Integer] buffer_size the size of the sample buffer requested + # by the callback. The actual buffer will be of *at least* that many + # samples.
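+ #
+ # @return [#dispose] an object whose #dispose de-registers the callback
+ #
+ # @example poll a port at 2 Hz and print each new sample (illustrative
+ #   sketch; `manager` is assumed to be a PortReadManager and `port` an
+ #   Async::OutputPort obtained from a discovered task)
+ #   subscription = manager.register_callback(
+ #     port, ->(sample) { puts sample },
+ #     period: 0.5, buffer_size: 10
+ #   )
+ #   manager.poll # call regularly, e.g. from a UI timer
+ #   subscription.dispose # stop listening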
+ def register_callback(port, callback, period:, buffer_size:, init: false) + callback = Callback.new( + port: port, callback: callback, + period: period, buffer_size: buffer_size, init: init, + needs_last_received_value: true + ) + + (@callbacks[port] ||= []) << callback + ensure_reader_uptodate(port) + propagate_last_received_value(port) + Roby.disposable do + deregister_callback(port, callback) + end + end + + # Request that the last received value is sent to the callbacks + def propagate_last_received_value(port) + find_poller_for_port(port)&.propagate_last_received_value = true + end + + # @api private + # + # De-registers a callback + # + # This is not meant to be called directly. Use the disposable + # returned by {#register_callback} instead. + def deregister_callback(port, callback) + return unless (callbacks = @callbacks[port]) + + callbacks.delete(callback) + if callbacks.empty? + remove_poller(port) + else + ensure_reader_uptodate(port) + end + end + + # Reconnect the reader for this port if needed + # + # The main reason is the modification of the buffer policy + def ensure_reader_uptodate(port) + poller = find_poller_for_port(port) || + Poller.new(port: port) + + policy = required_policy_for(port) + if policy != poller.policy + poller.reader&.dispose + poller.reader = port.reader( + connect_on: @connection_executor, + disconnect_on: @disconnection_executor, + **policy + ) + end + + update_poller_period(poller) + @pollers[port] = poller + end + + # @api private + # + # Remove the poller for a given port + def remove_poller(port) + return unless (poller = @pollers.delete(port)) + + poller.dispose + end + + def dispose + @pollers.each_value(&:dispose) + @pollers = {} + end + + # Whether we are currently polling the given port + def polling?(port) + @pollers.key?(port) + end + + # Update a poller's period to match the callbacks currently listening + # to it + def update_poller_period(poller) + poller.period = @callbacks[poller.port].map(&:period).min + end + + # Return the Reader for the given port + # + # @return [Reader,nil] + def find_poller_for_port(port) + @pollers[port] + end + + # Method called regularly to update the asynchronous class state + def poll + now = monotonic_time + @pollers.each_value do |p| + p.poll + + process_poller_state(p, now) + end + end + + # @api private + # + # Helper for {#poll} to process a single poller + def process_poller_state(poller, now) + unless poller.connected? + poller.reset_read_tracking + return + end + + if poller.propagate_last_received_value && poller.last_value && + !poller.resolved_read? + dispatch_last_received_value(poller) + end + + if !poller.scheduled_read? + poller.schedule_read_if_needed(now, @read_executor) + elsif poller.resolved_read? 
+ dispatch_read_result(poller) + poller.prepare_next_read(now) + end + end + + # Time in seconds returned by CLOCK_MONOTONIC + def monotonic_time + self.class.monotonic_time + end + + # Time in seconds returned by CLOCK_MONOTONIC + def self.monotonic_time + Process.clock_gettime(Process::CLOCK_MONOTONIC) + end + + # Send read data to registered callbacks + def dispatch_read_result(poller) + fulfilled, value, reason = poller.result + if fulfilled + @callbacks[poller.port].each { |c| c.dispatch(value) } + poller.last_value = value + poller.propagate_last_received_value = false + else + warn "failed to read #{poller.port}: #{reason}" + end + end + + # Send last received value to the callbacks that require it + def dispatch_last_received_value(poller) + @callbacks[poller.port].each do |c| + c.dispatch(poller.last_value) + end + poller.propagate_last_received_value = false + end + + # Return the buffer size needed by all callbacks of a port, in aggregate + # + # @return [Integer] + def required_policy_for(port) + return unless (callbacks = @callbacks[port]) + + buffer_size = callbacks.map { _1.buffer_size }.max + init = callbacks.map { _1.init }.inject(&:|) + { type: :circular_buffer, size: buffer_size, init: init, pull: true } + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/property.rb b/lib/syskit/telemetry/async/property.rb new file mode 100644 index 000000000..352173650 --- /dev/null +++ b/lib/syskit/telemetry/async/property.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Callback-based API to the orocos.rb property API + class Property < ReadableInterfaceObject + def on_raw_change(&block) + on_raw_data(&block) + end + + def on_change(&block) + on_data(&block) + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/readable_interface_object.rb b/lib/syskit/telemetry/async/readable_interface_object.rb new file mode 100644 index 000000000..da9c640a1 --- /dev/null +++ b/lib/syskit/telemetry/async/readable_interface_object.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Definition of hooks related to reading data + class ReadableInterfaceObjectHooks < InterfaceObject + define_hooks :on_data + define_hooks :on_raw_data + end + + # Base class for interface objects that allow to read data + class ReadableInterfaceObject < ReadableInterfaceObjectHooks + # Callback management object with the same API than orocos.rb's + class Listener + def initialize(object, event, block) + @object = object + @event = event + @block = block + end + + def start + return if @disposable + + @disposable = @object.send(@event, &@block) + end + + def stop + @disposable&.dispose + @disposable = nil + end + + def dispose + stop + end + end + + alias __on_raw_data on_raw_data + def on_raw_data(&block) + listener = Listener.new(self, :__on_raw_data, block) + listener.start + listener + end + + alias __on_data on_data + def on_data(&block) + listener = Listener.new(self, :__on_data, block) + listener.start + listener + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/task_context.rb b/lib/syskit/telemetry/async/task_context.rb new file mode 100644 index 000000000..4e2fe148c --- /dev/null +++ b/lib/syskit/telemetry/async/task_context.rb @@ -0,0 +1,310 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Definition of hooks for the {TaskContext} class + # + # This is made separately to allow overloading them in the main class in 
+ # a natural way + class TaskContextHooks + include Roby::Hooks + include Roby::Hooks::InstanceHooks + + define_hooks :on_state_change + define_hooks :on_reachable + define_hooks :on_unreachable + define_hooks :on_attribute_reachable + define_hooks :on_attribute_unreachable + define_hooks :on_property_reachable + define_hooks :on_property_unreachable + define_hooks :on_port_reachable + define_hooks :on_port_unreachable + end + + # Callback-based API to the orocos.rb task contexts + class TaskContext < TaskContextHooks + # The task context name + # + # @return [String] + attr_reader :name + + # A unique string that allows to identify a remote task + # + # @return [String] + attr_reader :identity + + # The task model + # + # @return [OroGen::Spec::TaskContext] + attr_reader :model + + # Hash code for this task context + # + # Two different TaskContext objects that point to the same remote object + # will be considered the same from the perspective of a hash key + attr_reader :hash + + def states_index_to_symbols + return @states_index_to_symbols if @states_index_to_symbols + + @states_index_to_symbols = [] + @states_index_to_symbols[Orocos::TaskContext::STATE_PRE_OPERATIONAL] = + :PRE_OPERATIONAL + @states_index_to_symbols[Orocos::TaskContext::STATE_STOPPED] = + :STOPPED + @states_index_to_symbols[Orocos::TaskContext::STATE_RUNNING] = + :RUNNING + @states_index_to_symbols[Orocos::TaskContext::STATE_RUNTIME_ERROR] = + :RUNTIME_ERROR + @states_index_to_symbols[Orocos::TaskContext::STATE_EXCEPTION] = + :EXCEPTION + @states_index_to_symbols[Orocos::TaskContext::STATE_FATAL_ERROR] = + :FATAL_ERROR + @states_index_to_symbols + end + + # Discover information about a Orocos::TaskContext and create the + # corresponding {TaskContext} + # + # This is meant to be called in a separate thread + def self.discover(task, port_read_manager:) + async_task = TaskContext.new( + task.name, port_read_manager: port_read_manager + ) + + # Already do an initial discovery of all the task's interface objects + discover_attributes(async_task, task) + discover_properties(async_task, task) + discover_ports(async_task, task) + + # We can do this here ONLY BECAUSE we're populating an initial + # state. Further updates need to call the `discover_` methods in + # the main thread + async_task.reachable!(task) + async_task + end + + # @api private + # + # Discover a remote task's attributes + def self.discover_attributes(async_task, task) + raw_attributes = task.attribute_names.map { task.attribute(_1) } + async_task.discover_attributes(raw_attributes) + end + + # @api private + # + # Discover a remote task's properties + def self.discover_properties(async_task, task) + raw_properties = task.property_names.map { task.property(_1) } + async_task.discover_properties(raw_properties) + end + + # @api private + # + # Discover a remote task's ports + def self.discover_ports(async_task, task) + raw_ports = task.port_names.map { task.port(_1) } + async_task.discover_ports(raw_ports) + end + + def initialize( + name, port_read_manager:, model: self.class.dummy_orogen_model(name) + ) + super() + + @name = name + @model = model + # !!!! DO NOT add the identity to the hash code, or it will change + # the hash whenever the remote task changes. 
From the Async + # perspective, a task's identity is determined by its name + # (we can't have two different tasks with the same name) + @hash = name.hash + + @port_read_manager = port_read_manager + @attributes = {} + @properties = {} + @ports = {} + + @current_state = nil + end + + @dummy_orogen_models = Concurrent::Hash.new + + def self.dummy_orogen_model(name) + @dummy_orogen_models[name] ||= + Orocos.create_orogen_task_context_model(name) + end + + def to_proxy + self + end + + def eql?(other) + name == other.name + end + + # Declare that the remote task is not reachable anymore + # + # Must be called from the main thread. It also dispose of the underlying + # resources + def unreachable! + run_hook :on_unreachable + + run_interface_unreachable_hooks( + @attributes.each_value, :on_attribute_unreachable + ) + run_interface_unreachable_hooks( + @properties.each_value, :on_property_unreachable + ) + run_interface_unreachable_hooks( + @ports.each_value, :on_port_unreachable + ) + + dispose + end + + def run_interface_unreachable_hooks(objects, event) + objects.each do + run_hook event, _1.name + _1.unreachable! + end + end + + def reachable? + @raw_task_context + end + + # Set the underlying task context + # + # Must be called from the main thread + def reachable!(task_context) + @raw_task_context = task_context + @identity = task_context.ior + + run_hook :on_reachable, task_context + @state_reader_callback = + port("state").on_data(init: true, buffer_size: 20) do |new_state| + new_state = states_index_to_symbols[new_state] || new_state + @current_state = new_state + run_hook :on_state_change, new_state + end + end + + def on_reachable(&block) + super + + block.call if reachable? + end + + def on_state_change(&block) + super + + # Explicitly ask to send the last received value + @port_read_manager.propagate_last_received_value(port("state")) + end + + def each_attribute(&block) + @attributes.each_value(&block) + end + + def each_property(&block) + @properties.each_value(&block) + end + + def each_port(&block) + @ports.each_value(&block) + end + + def each_input_port(&block) + @ports.each_value.find_all(&:input?).each(&block) + end + + def each_output_port(&block) + @ports.each_value.find_all { !_1.input? 
}.each(&block) + end + + def on_attribute_reachable(&block) + super + + @attributes.each_key { block.call(_1) } + end + + def attribute(name) + @attributes.fetch(name) + end + + def on_property_reachable(&block) + super + + @properties.each_key { block.call(_1) } + end + + def property(name) + @properties.fetch(name) + end + + def on_port_reachable(&block) + super + + @ports.each_key { block.call(_1) } + end + + def port(name) + @ports.fetch(name) + end + + def discover_attributes(raw_attributes) + @attributes = + raw_attributes.each_with_object({}) do |p, h| + async = Attribute.new(self, p.name, p.type) + async.reachable!(p) + h[p.name] = async + end + + @attributes.each_value { run_hook :on_attribute_reachable, _1 } + end + + def discover_properties(raw_properties) + @properties = + raw_properties.each_with_object({}) do |p, h| + async = Property.new(self, p.name, p.type) + async.reachable!(p) + h[p.name] = async + end + + @properties.each_value { run_hook :on_property_reachable, _1 } + end + + def discover_ports(raw_ports) + @ports = + raw_ports.each_with_object({}) do |p, h| + async = + case p + when Orocos::InputPort + InputPort.new(self, p.name, p.type) + else + OutputPort.new( + self, p.name, p.type, @port_read_manager + ) + end + + async.reachable!(p) + h[p.name] = async + end + + @ports.each_value { run_hook :on_port_reachable, _1 } + end + + def dispose + @raw_task_context = nil + @current_state = nil + + @properties.clear + @state_reader_callback.dispose + end + end + end + end +end diff --git a/lib/syskit/telemetry/cli.rb b/lib/syskit/telemetry/cli.rb index 54bb8f810..30873ea5f 100644 --- a/lib/syskit/telemetry/cli.rb +++ b/lib/syskit/telemetry/cli.rb @@ -27,6 +27,11 @@ def ui require "syskit/telemetry/ui/runtime_state" $qApp.disable_threading # rubocop:disable Style/GlobalVars + @thread_pass_timer = Qt::Timer.new + @thread_pass_timer.connect(SIGNAL("timeout()")) do + sleep 0.01 + end + @thread_pass_timer.start(10) require "syskit/scripts/common" Syskit::Scripts.run do @@ -35,11 +40,16 @@ def ui end no_commands do # rubocop:disable Metrics/BlockLength - def roby_setup + def roby_setup # rubocop:disable Metrics/AbcSize Roby.app.using "syskit" + Roby.app.guess_app_dir + Roby.app.load_config_yaml + Roby.app.require_v2_protocol_extensions + Syskit.conf.only_load_models = true # We don't need the process server, win some startup time Syskit.conf.disables_local_process_server = true + Syskit.conf.export_types = false Roby.app.ignore_all_load_errors = true Roby.app.development_mode = false @@ -71,7 +81,7 @@ def runtime_state(host, port) main.restore_from_settings main.show - Vizkit.exec + Vizkit.exec(global_shortcuts: false) main.save_to_settings main.settings.sync end diff --git a/lib/syskit/telemetry/ui/name_service.rb b/lib/syskit/telemetry/ui/name_service.rb deleted file mode 100644 index 794965eed..000000000 --- a/lib/syskit/telemetry/ui/name_service.rb +++ /dev/null @@ -1,109 +0,0 @@ -# frozen_string_literal: true - -module Syskit - module Telemetry - module UI - # Copy of Runkit's local name service to use with orocos.rb - class NameService < Orocos::NameServiceBase - # A new NameService instance - # - # @param [Hash] tasks The tasks which are - # known by the name service. 
- # @note The namespace is always "Local" - def initialize(tasks = []) - @registered_tasks = Concurrent::Hash.new - @task_added_callbacks = Concurrent::Array.new - @task_removed_callbacks = Concurrent::Array.new - tasks.each { |task| register(task) } - end - - def names - @registered_tasks.keys - end - - def include?(name) - @registered_tasks.key?(name) - end - - # (see NameServiceBase#get) - def ior(name) - task = @registered_tasks[name] - return task.ior if task.respond_to?(:ior) - - raise Orocos::NotFound, "task context #{name} cannot be found." - end - - # (see NameServiceBase#get) - def get(name, **) - task = @registered_tasks[name] - return task if task - - raise Orocos::NotFound, "task context #{name} cannot be found." - end - - # Registers the given {Orocos::TaskContext} on the name service. - # If a name is provided, it will be used as an alias. If no name is - # provided, the name of the task is used. This is true even if the - # task name is renamed later. - # - # @param [Orocos::TaskContext] task The task. - # @param [String] name Optional name which is used to register the task. - def register(task, name: task.name) - @registered_tasks[name] = task - trigger_task_added(name) - end - - # Deregisters the given name or task from the name service. - # - # @param [String,TaskContext] name The name or task - def deregister(name) - @registered_tasks.delete(name) - trigger_task_removed(name) - end - - # (see Base#cleanup) - def cleanup - names = @registered_tasks.keys - @registered_tasks.clear - names.each { trigger_task_removed(name) } - end - - def to_async - self - end - - def on_task_added(&block) - @task_added_callbacks << block - Roby.disposable { @task_added_callbacks.delete(block) } - end - - def trigger_task_added(name) - error = nil - @task_added_callbacks.each do |block| - block.call(name) - rescue RuntimeError => e - error = e - end - - raise error if error - end - - def on_task_removed(&block) - @task_removed_callbacks << block - Roby.disposable { @task_removed_callbacks.delete(block) } - end - - def trigger_task_removed(name) - error = nil - @task_removed_callbacks.each do |block| - block.call(name) - rescue RuntimeError => e - error = e - end - - raise error if error - end - end - end - end -end diff --git a/lib/syskit/telemetry/ui/runtime_state.rb b/lib/syskit/telemetry/ui/runtime_state.rb index 145ef750a..09300882a 100644 --- a/lib/syskit/telemetry/ui/runtime_state.rb +++ b/lib/syskit/telemetry/ui/runtime_state.rb @@ -15,9 +15,13 @@ require "syskit/telemetry/ui/global_state_label" require "syskit/telemetry/ui/app_start_dialog" require "syskit/telemetry/ui/batch_manager" -require "syskit/telemetry/ui/name_service" +require "syskit/telemetry/async" require "syskit/interface/v2" +# Monkey patching from Vizkit +Syskit::Telemetry::Async::OutputPort.include Orocos::QtOrocos +Syskit::Telemetry::Async::OutputPortSubfield.include Orocos::QtOrocos + module Syskit module Telemetry module UI @@ -54,9 +58,6 @@ class RuntimeState < Qt::Widget attr_reader :ui_task_inspector # A logging configuration widget we use to manage logging attr_reader :ui_logging_configuration - # The list of task names of the task currently displayed by the task - # inspector - attr_reader :current_orocos_tasks # Returns a list of actions that can be performed on the Roby # instance @@ -143,8 +144,8 @@ def initialize(parent: nil, create_ui @current_job = nil - @current_orocos_tasks = Set.new - @proxies = {} + @current_job_tasks = [] + @current_tasks = [] syskit.on_ui_event do |event_name, *args| if (w = 
@ui_event_widgets[event_name]) @@ -237,8 +238,13 @@ def reset @call_guards = {} @orogen_models = {} - @name_service = NameService.new - @async_name_service = Orocos::Async::NameService.new(@name_service) + @port_read_manager&.dispose + @name_service&.dispose + + @port_read_manager = Async::PortReadManager.new + @name_service = Async::NameService.new( + port_read_manager: @port_read_manager + ) end def hide_loggers? @@ -417,9 +423,6 @@ def create_ui @ui_task_inspector = Vizkit.default_loader.TaskInspector ) @ui_hide_loggers.checked = false - @ui_hide_loggers.connect SIGNAL("toggled(bool)") do |_checked| - update_tasks_info - end @ui_show_expanded_job.checked = true @ui_show_expanded_job.connect SIGNAL("toggled(bool)") do |checked| job_expanded_status.visible = checked @@ -541,12 +544,10 @@ def create_ui_new_job # Sets up polling on a given syskit interface def poll_syskit_interface if syskit.connected? - begin - display_current_cycle_index_and_time - update_current_deployments - update_current_job_task_names if current_job - rescue Roby::Interface::ComError # rubocop:disable Lint/SuppressedException - end + display_current_cycle_index_and_time + query_deployment_update + update_current_job_task_names if current_job + @port_read_manager.poll else reset_current_deployments reset_current_job @@ -572,20 +573,36 @@ def reset_current_job @current_job = nil @current_job_task_names = [] - update_task_inspector(@name_service.names) + update_task_inspector(@name_service.tasks) end - def update_current_deployments - polling_call ["syskit"], "deployments" do |deployments| - @current_deployments = deployments - update_name_service(deployments) + def process_current_deployments + update_name_service(@current_deployments) - names = @name_service.names - names &= @current_job_task_names if @current_job - update_task_inspector(names) + if @current_job + update_task_inspector(@current_job_tasks) + else + update_task_inspector(@name_service.tasks) end end + def query_deployment_update + polling_call( + ["syskit"], "poll_ready_deployments", + known: @current_deployments.map(&:id) + ) do |updated, removed| + update_current_deployments(updated, removed) + process_current_deployments + end + end + + def update_current_deployments(updated, removed) + @current_deployments.delete_if do |d| + removed.include?(d.id) + end + @current_deployments.concat(updated) + end + def reset_current_deployments @current_deployments = [] reset_task_inspector @@ -593,40 +610,38 @@ def reset_current_deployments def update_current_job_task_names polling_call [], "tasks_of_job", @current_job.job_id do |tasks| - @current_job_task_names = + # TODO: handle asynchronicity, the tasks may not be already + # discovered and/or the + @current_job_tasks = tasks .map { _1.arguments[:orocos_name] } .compact + .map { @name_service.find(_1) } end end - def update_task_inspector(task_names) - orocos_tasks = task_names.to_set - removed = current_orocos_tasks - orocos_tasks - new = orocos_tasks - current_orocos_tasks - removed.each do |task_name| - ui_task_inspector.remove_task(task_name) + def update_task_inspector(tasks) + removed = @current_tasks - tasks + new = tasks - @current_tasks + removed.each do |task| + ui_task_inspector.remove_task(task.name) end - new.each do |task_name| - @proxies[task_name] ||= Orocos::Async::TaskContextProxy.new( - task_name, name_service: @async_name_service - ) - - ui_task_inspector.add_task(@proxies[task_name]) + new.each do |task| + ui_task_inspector.add_task(task) end - @current_orocos_tasks = orocos_tasks.dup + 
@current_tasks = tasks.dup end def reset_task_inspector update_task_inspector([]) end - def polling_call(path, method_name, *args) - key = [path, method_name, args] + def polling_call(path, method_name, *args, **kw) + key = [path, method_name, args, kw] return if @call_guards.key?(key) && @call_guards[key] @call_guards[key] = true - syskit.async_call(path, method_name, *args) do |error, ret| + syskit.async_call(path, method_name, *args, **kw) do |error, ret| @call_guards[key] = false if error report_app_error(error) @@ -654,44 +669,22 @@ def report_app_error(error) end def update_name_service(deployments) - # Now remove all tasks that are not in deployments - existing = @name_service.names - - deployments.each do |d| - d.deployed_tasks.each do |deployed_task| - task_name = deployed_task.name - if existing.include?(task_name) - existing.delete(task_name) - next if deployed_task.ior == @name_service.ior(task_name) - end - - existing.delete(task_name) - task = Orocos::TaskContext.new( - deployed_task.ior, - name: task_name, - model: orogen_model_from_name( - deployed_task.orogen_model_name - ) - ) - - async_task = Orocos::Async::CORBA::TaskContext.new(use: task) - @name_service.register(async_task, name: task_name) + all_deployed_tasks = deployments.flat_map do |d| + d.deployed_tasks.find_all do |deployed_task| + model_name = deployed_task.orogen_model_name + !hide_loggers? || !OROGEN_LOGGER_NAMES.include?(model_name) end end - - existing.each { @name_service.deregister(_1) } - @name_service.names + @name_service.async_update_tasks(all_deployed_tasks) end + OROGEN_LOGGER_NAMES = %w[logger::Logger OroGen.logger.Logger].freeze + def reset_name_service - all = @name_service.names.dup - all.each { @name_service.deregister(_1) } + @name_service.cleanup end def orogen_model_from_name(name) - @orogen_models[name] ||= Orocos.default_loader.task_model_from_name(name) - rescue OroGen::NotFound - Orocos.warn "#{name} is a task context of class #{name}, but I cannot find the description for it, falling back" @orogen_models[name] ||= Orocos.create_orogen_task_context_model(name) end diff --git a/lib/syskit/test/network_manipulation.rb b/lib/syskit/test/network_manipulation.rb index caf9814c1..c45743a7d 100644 --- a/lib/syskit/test/network_manipulation.rb +++ b/lib/syskit/test/network_manipulation.rb @@ -162,7 +162,8 @@ def syskit_generate_network(*to_instanciate, add_missions: true) engine = NetworkGeneration::Engine.new(plan, work_plan: trsc) mapping = engine.compute_system_network( tasks_to_instanciate.map(&:planning_task), - validate_generated_network: false + validate_generated_network: false, + early_deploy: false ) trsc.commit_transaction mapping diff --git a/lib/syskit/test/polling_executor.rb b/lib/syskit/test/polling_executor.rb new file mode 100644 index 000000000..862b55fb9 --- /dev/null +++ b/lib/syskit/test/polling_executor.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +module Syskit + module Test + # Test executor for classes that use concurrent-ruby + # + # This is meant for testing and debugging. 
Each call to process_one will + # process a single queued task in the current thread + class PollingExecutor < Concurrent::ImmediateExecutor + def initialize + super + + @task_queue = Queue.new + end + + def post(*args, &task) + @task_queue << [args, task] + end + + def take_one_task + @task_queue.pop(true) + rescue ThreadError + # queue full + end + + def execute_one + args, task = take_one_task + task&.call(*args) + task + end + + def execute_all + while execute_one + end + end + end + end +end diff --git a/lib/syskit/test/profile_assertions.rb b/lib/syskit/test/profile_assertions.rb index 9606d51e8..9838cdd13 100644 --- a/lib/syskit/test/profile_assertions.rb +++ b/lib/syskit/test/profile_assertions.rb @@ -206,13 +206,10 @@ def assert_is_self_contained( action_or_profile = subject_syskit_model, message: "%s is not self contained", exclude: [], **instanciate_options ) - actions = validate_actions(action_or_profile, exclude: exclude) do |skip| - flunk "could not validate some non-Syskit actions: #{skip}, " \ - "probably because of required arguments. Pass the action to " \ - "the 'exclude' option of #{__method__}, and add a separate " \ - "assertion test with the arguments added explicitly" - end - + actions = validate_assert_actions_argument( + action_or_profile, + exclude: exclude + ) actions.each do |act| syskit_assert_action_is_self_contained( act, message: message, **instanciate_options @@ -308,23 +305,14 @@ def assert_can_instanciate( action_or_profile = subject_syskit_model, exclude: [], together_with: [] ) - actions = validate_actions(action_or_profile, exclude: exclude) do |skip| - flunk "could not validate some non-Syskit actions: #{skip}, " \ - "probably because of required arguments. Pass the action to " \ - "the 'exclude' option of #{__method__}, and add a separate " \ - "assertion test with the arguments added explicitly" - end - - together_with = - validate_actions(together_with, exclude: exclude) do |skip| - flunk "could not validate some non-Syskit actions given to " \ - "`together_with` in #{__method__}: #{skip}, " \ - "probably because of " \ - "missing arguments. If you are passing a profile or " \ - "action interface and do not require to test against " \ - "that particular action, pass it to the 'exclude' argument" - end - + actions = validate_assert_actions_argument( + action_or_profile, + exclude: exclude + ) + together_with = validate_assert_together_with_argument( + together_with, + exclude: exclude + ) actions.each do |action| assert_can_instanciate_together(action, *together_with) end @@ -420,23 +408,14 @@ def assert_can_deploy( action_or_profile = subject_syskit_model, exclude: [], together_with: [] ) - actions = validate_actions(action_or_profile, exclude: exclude) do |skip| - flunk "could not validate some non-Syskit actions: #{skip}, " \ - "probably because of required arguments. Pass the action to " \ - "the 'exclude' option of #{__method__}, and add a separate " \ - "assertion test with the arguments added explicitly" - end - - together_with = - validate_actions(together_with, exclude: exclude) do |skip| - flunk "could not validate some non-Syskit actions given to " \ - "`together_with` in #{__method__}: #{skip}, " \ - "probably because of " \ - "missing arguments. 
If you are passing a profile or action " \ + "interface and do not require to test against that " \ + "particular action, pass it to the 'exclude' argument" - end - + actions = validate_assert_actions_argument( + action_or_profile, + exclude: exclude + ) + together_with = validate_assert_together_with_argument( + together_with, + exclude: exclude + ) actions.each do |action| assert_can_deploy_together(action, *together_with) end @@ -465,6 +444,53 @@ def assert_can_deploy_together(*actions) e.message, e.backtrace end + # Tests that ALL of the given syskit-generated actions can be deployed at the + # same time, that is, that they result in a valid, non-abstract network in + # which all components have a deployment + # + # When resolving actions that are not directly defined from profile + # definitions, the method will attempt to resolve method actions by + # calling them. If there is a problem, pass the action model to the + # `exclude` argument. + # + # In particular, in the presence of action methods with required + # arguments, run one assertion first with the action method excluded and + # another with that action and sample arguments. + # + # @param action_or_profile if an action interface or profile, test all + # definitions that are reachable from it. In the case of action interfaces, + # this means looking into method actions and action state machines. + # @param together_with test that all actions in `action_or_profile` + # can be instanciated when all actions in `together_with` are instanciated + # at the same time. This can be used if the former depend on the presence + # of the latter, or if you want to test against conflicts. + def assert_can_deploy_all( + action_or_profile = subject_syskit_model, + exclude: [], together_with: [] + ) + actions = validate_assert_actions_argument( + action_or_profile, + exclude: exclude + ) + together_with = validate_assert_together_with_argument( + together_with, + exclude: exclude + ) + assert_can_deploy_together(*actions.flatten, *together_with) + end + + # Spec-style call for {#assert_can_deploy_all} + # + # @example verify that each definition of a profile can be deployed + # describe MyBundle::Profiles::MyProfile do + # it { can_deploy_all } + # end + def can_deploy_all( + action_or_profile = subject_syskit_model, together_with: [] + ) + assert_can_deploy_all(action_or_profile, together_with: together_with) + end + def syskit_run_deploy_in_bulk( actions, compute_policies:, compute_deployments: ) @@ -482,6 +508,12 @@ def syskit_run_deploy_in_bulk( end end + # @api private + # + # Yield the cartesian product of a list of lists of actions + # + # Given a list of lists of actions [A, B, C], it yields all possible + # combinations [a, b, c], where a is from A, b is from B and c is from C. def self.each_combination(*arrays) return enum_for(__method__, *arrays) unless block_given? @@ -556,6 +588,10 @@ def can_configure_together(*actions) end # @api private + # Validate actions and yield rejected actions, excluding those included in + # 'exclude'. + # + # @param action_or_profile an action interface or profile def validate_actions(action_or_profile, exclude: []) actions, skipped = BulkAssertAtomicActions(action_or_profile, exclude: exclude) @@ -567,6 +603,37 @@ def validate_actions(action_or_profile, exclude: []) actions end + + # @api private + # Validate the given actions, flunking if some of them could not be validated, + # excluding those included in 'exclude'. 
+ # + # @param action_or_profile an action interface or profile + def validate_assert_actions_argument(action_or_profile, exclude: []) + validate_actions(action_or_profile, exclude: exclude) do |skip| + caller_method = caller_locations(3, 1).first.label + flunk "could not validate some non-Syskit actions: #{skip}, " \ + "probably because of required arguments. Pass the action to " \ + "the 'exclude' option of #{caller_method}, and add a separate " \ + "assertion test with the arguments added explicitly" + end + end + + # @api private + # Validate the together_with actions, flunking if some of them could not be + # validated, excluding those included in 'exclude'. + # + # @param together_with an action interface or profile + def validate_assert_together_with_argument(together_with, exclude: []) + validate_actions(together_with, exclude: exclude) do |skip| + caller_method = caller_locations(3, 1).first.label + flunk "could not validate some non-Syskit actions given to " \ + "`together_with` in #{caller_method}: #{skip}, probably " \ + "because of missing arguments. If you are passing a profile " \ + "or action interface and do not require to test against that " \ + "particular action, pass it to the 'exclude' argument" + end + end end end end diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 7bb32b97e..e55403caf 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -2,6 +2,7 @@ require "syskit/test/self" require "syskit/cli/log_runtime_archive" +require "syskit/runtime/server/spawn_server" module Syskit module CLI @@ -365,7 +366,7 @@ module CLI describe ".process_root_folder" do before do @archive_dir = make_tmppath - @process = LogRuntimeArchive.new(@root, @archive_dir) + @process = LogRuntimeArchive.new(@root, target_dir: @archive_dir) end it "archives all folders, the last one only partially" do @@ -391,9 +392,9 @@ module CLI .write(test1 = Base64.encode64(Random.bytes(1024))) (dataset / "test.2.log").write(Base64.encode64(Random.bytes(1024))) process = LogRuntimeArchive.new( - @root, @archive_dir, max_archive_size: 1024 + @root, target_dir: @archive_dir ) - process.process_root_folder + process.process_root_folder(max_archive_size: 1024) entries = read_archive(path: @archive_dir / "20220434-2023.0.tar") assert_equal 1, entries.size @@ -419,12 +420,12 @@ module CLI (dataset / "test.2.log") .write(test2 = Base64.encode64(Random.bytes(128))) process = LogRuntimeArchive.new( - @root, @archive_dir, max_archive_size: 1024 + @root, target_dir: @archive_dir ) - process.process_root_folder + process.process_root_folder(max_archive_size: 1024) (dataset / "test.3.log").write(Base64.encode64(Random.bytes(1024))) - process.process_root_folder + process.process_root_folder(max_archive_size: 1024) entries = read_archive(path: @archive_dir / "20220434-2023.1.tar") assert_equal 2, entries.size @@ -445,12 +446,12 @@ module CLI test1 = make_random_file "test.1.log", root: dataset test2 = make_random_file "test.2.log", root: dataset process = LogRuntimeArchive.new( - @root, @archive_dir, max_archive_size: 1024 + @root, target_dir: @archive_dir ) - process.process_root_folder + process.process_root_folder(max_archive_size: 1024) make_random_file "test.3.log", root: dataset - process.process_root_folder + process.process_root_folder(max_archive_size: 1024) entries = read_archive(path: @archive_dir / "20220434-2023.1.tar") assert_equal 1, entries.size @@ -516,6 +517,147 @@ def should_archive_dataset(dataset, archive_basename, 
full:) end end + describe "FTP" do + before do + host = "127.0.0.1" + @ca = RobyApp::TmpRootCA.new(host) + params = LogRuntimeArchive::FTPParameters.new( + host: host, port: 0, + certificate: @ca.certificate, + user: "user", password: "password", + implicit_ftps: true, + max_upload_rate: 10_000_000 + ) + + @target_dir = make_tmppath + @server = create_server(params) + params.port = @server.port + @params = params + @process = LogRuntimeArchive.new(@root) + end + + after do + @server.stop + @server.join + @ca.dispose + @ca = nil + @server = nil + end + + def create_server(params) + Runtime::Server::SpawnServer.new( + @target_dir, params.user, params.password, + @ca.private_certificate_path, + interface: params.host, + implicit_ftps: params.implicit_ftps + ) + end + + describe ".process_root_folder_transfer" do + it "transfers all finished dataset files from root folder " \ + "through FTP" do + dataset_a = make_valid_folder("20220434-2023") + dataset_b = make_valid_folder("20220434-2024") + make_random_file "test.0.log", root: dataset_a + make_random_file "test.1.log", root: dataset_a + make_random_file "test.0.log", root: dataset_b + make_random_file "test.1.log", root: dataset_b + + @process.process_root_folder_transfer(@params) + + assert(File.exist?(@target_dir / "20220434-2023" / "test.0.log")) + assert(File.exist?(@target_dir / "20220434-2023" / "test.1.log")) + assert(File.exist?(@target_dir / "20220434-2024" / "test.0.log")) + # log manager considers dataset_b logs as currently running + # Because it isn't finished yet it does not transfer the last log + refute(File.exist?(@target_dir / "20220434-2024" / "test.1.log")) + end + end + + describe ".process_dataset_transfer" do + it "transfers all files from a folder through FTP" do + dataset = make_valid_folder("PATH") + make_random_file "test.0.log", root: dataset + make_random_file "test.1.log", root: dataset + @process.process_dataset_transfer( + dataset, @params, @root, full: true + ) + + assert(File.exist?(@target_dir / "PATH" / "test.0.log")) + assert(File.exist?(@target_dir / "PATH" / "test.1.log")) + end + + it "makes sure hierarchy of dataset folders is created" do + dataset = make_valid_folder("PATH/TO/DATASET") + make_random_file "test.0.log", root: dataset + make_random_file "test.1.log", root: dataset + + @process.process_dataset_transfer( + dataset, @params, @root, full: true + ) + + assert( + File.exist?(@target_dir / "PATH/TO/DATASET" / "test.0.log") + ) + end + end + + describe ".transfer_dataset" do + before do + @dataset = make_valid_folder("PATH") + make_random_file "test.0.log", root: @dataset + end + + it "transfers a dataset through FTP" do + results = LogRuntimeArchive.transfer_dataset( + @dataset, @params, @root, full: true + ) + + assert results.success? + # Datasets that have pocolog files are not complete + refute results.complete + assert(File.exist?(@target_dir / "PATH" / "test.0.log")) + end + + it "removes the source file if the transfer was successful" do + results = LogRuntimeArchive.transfer_dataset( + @dataset, @params, @root, full: true + ) + + assert results.success? + refute((@dataset / "test.0.log").exist?) + end + + it "does not remove the source file if the transfer failed" do + result = RobyApp::LogTransferServer::LogUploadState::Result.new( + "/PATH", false, "message" + ) + flexmock(LogRuntimeArchive) + .should_receive(:transfer_file) + .and_return(result) + results = LogRuntimeArchive.transfer_dataset( + @dataset, @params, @root, full: true + ) + + refute results.success? 
+ assert((@dataset / "test.0.log").exist?) + end + end + + describe ".transfer_file" do + it "transfers a file through FTP" do + dataset = make_valid_folder("PATH") + make_random_file "test.log", root: dataset + result = LogRuntimeArchive.transfer_file( + dataset / "test.log", @params, @root + ) + + assert(File.exist?(@target_dir / "PATH" / "test.log")) + assert result.success?, "transfer failed: #{result.message}" + end + end + end + describe "#ensure_free_space" do before do @archive_dir = make_tmppath @@ -523,12 +665,13 @@ def should_archive_dataset(dataset, archive_basename, full:) 10.times { |i| (@archive_dir / i.to_s).write(i.to_s) } - @archiver = LogRuntimeArchive.new(@root, @archive_dir) + @archiver = LogRuntimeArchive.new(@root, target_dir: @archive_dir) end it "does nothing if there is enough free space" do mock_available_space(2) - @archiver.ensure_free_space(1, 10) + mock_mtime + assert @archiver.ensure_free_space(1, 10) assert_deleted_files([]) end @@ -536,31 +679,73 @@ def should_archive_dataset(dataset, archive_basename, full:) size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] mock_files_size(size_files) mock_available_space(0.5) + mock_mtime - @archiver.ensure_free_space(1, 10) + assert @archiver.ensure_free_space(1, 10) assert_deleted_files([0, 1, 2, 3]) end + it "removes enough files to reach the freed limit in chosen directory" do + different_dir = make_tmppath + size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] + mock_files_size(size_files, directory: different_dir) + mock_available_space(0.5, directory: different_dir) + mock_mtime(directory: different_dir) + + assert @archiver.ensure_free_space(1, 10, directory: different_dir) + assert_deleted_files([0, 1, 2, 3], directory: different_dir) + end + + it "removes files based on modified timestamp" do + different_dir = make_tmppath + size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] + mock_files_size(size_files, directory: different_dir) + mock_available_space(0.5, directory: different_dir) + mock_mtime(directory: different_dir, reverse_alphabetical: true) + + assert @archiver.ensure_free_space(1, 10, directory: different_dir) + assert_deleted_files([8, 9, 10], directory: different_dir) + end + it "stops removing files when there is no file in folder even if freed limit is not achieved" do size_files = Array.new(10, 1) mock_files_size(size_files) mock_available_space(0.5) + mock_mtime - @archiver.ensure_free_space(1, 15) + refute @archiver.ensure_free_space(1, 15) assert_deleted_files([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) end - def mock_files_size(sizes) + def mock_files_size(sizes, directory: @archive_dir) @mocked_files_sizes = sizes @mocked_files_sizes.each_with_index do |size, i| - (@archive_dir / i.to_s).write(" " * size) + (directory / i.to_s).write(" " * size) + end + end + + # Mock the modification time of the files to be alphabetical order + # @param [String] directory the directory to mock the items modification + # time + # @param [Bool] reverse_alphabetical true if use reverse alphabetical + # order + def mock_mtime(directory: @archive_dir, reverse_alphabetical: false) + items = directory.children + .select { |child| child.file? || child.directory? 
} + + items = items.sort_by(&:to_s) + items = items.reverse if reverse_alphabetical + items.each_with_index do |item, i| + File.utime(i, i, item.to_s) end end - def mock_available_space(total_available_disk_space) + def mock_available_space( + total_available_disk_space, directory: @archive_dir + ) flexmock(Sys::Filesystem) - .should_receive(:stat).with(@archive_dir) + .should_receive(:stat).with(directory) .and_return do flexmock( bytes_available: total_available_disk_space @@ -568,17 +753,17 @@ def mock_available_space(total_available_disk_space) end end - def assert_deleted_files(deleted_files) + def assert_deleted_files(deleted_files, directory: @archive_dir) if deleted_files.empty? - files = @archive_dir.each_child.select(&:file?) + files = directory.each_child.select(&:file?) assert_equal 10, files.size else (0..9).each do |i| if deleted_files.include?(i) - refute (@archive_dir / i.to_s).exist?, + refute (directory / i.to_s).exist?, "#{i} was expected to be deleted, but has not been" else - assert (@archive_dir / i.to_s).exist?, + assert (directory / i.to_s).exist?, "#{i} was expected to be present, but got deleted" end end @@ -620,7 +805,7 @@ def decompress_data(data) end def assert_entry_matches(entry, data, name:, content:) - assert entry.file? + assert entry.file?, "expected #{entry} to be a file" assert_equal name, entry.full_name assert_equal content, decompress_data(data) end diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index e30adb9d0..889f38037 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -2,6 +2,7 @@ require "syskit/test/self" require "syskit/cli/log_runtime_archive_main" +require "syskit/roby_app/tmp_root_ca" module Syskit module CLI @@ -128,20 +129,270 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) end end + describe "#transfer_server" do + before do + @server_params = server_params + @server = call_create_server(make_tmppath, @server_params) + end + + after do + @server.stop + @server.join + end + + it "successfully creates an FTP server" do + Net::FTP.open( + @server_params[:host], + port: @server.port, + implicit_ftps: @server_params[:implicit_ftps], + ssl: { verify_mode: OpenSSL::SSL::VERIFY_NONE } + ) do |ftp| + ftp.login(@server_params[:user], @server_params[:password]) + end + end + end + + describe "#watch_transfer" do + before do + @source_dir = make_tmppath + @server_params = server_params + @max_upload_rate = rate_mbps_to_bps(10) + @ftp_params = LogRuntimeArchive::FTPParameters.new( + host: @server_params[:host], port: @server_params[:port], + certificate: File.read(@server_params[:certificate]), + user: @server_params[:user], password: @server_params[:password], + implicit_ftps: @server_params[:implicit_ftps], + max_upload_rate: @max_upload_rate + ) + + @server = call_create_server(make_tmppath, @server_params) + end + + after do + @server.stop + @server.join + end + + it "calls transfer with the specified period" do + quit = Class.new(RuntimeError) + called = 0 + flexmock(LogRuntimeArchive) + .new_instances + .should_receive(:process_root_folder_transfer) + .with( + @ftp_params + ) + .pass_thru do + called += 1 + raise quit if called == 3 + end + + tic = Time.now + assert_raises(quit) do + args = [ + "watch_transfer", + @source_dir, + *@server_params.values, + "--period", 0.5, + "--max_upload_rate_mbps", 10 + ] + LogRuntimeArchiveMain.start(args) + end + + assert called == 3 + assert_operator(Time.now - tic, :>, 0.9) + end + 
+ # Converts rate in Mbps to bps + def rate_mbps_to_bps(rate_mbps) + rate_mbps * (10**6) + end + end + + describe "#transfer" do + before do + @server_params = server_params + end + + it "raises ArgumentError if source_dir does not exist" do + e = assert_raises ArgumentError do + call_transfer("/does/not/exist") + end + assert_equal "/does/not/exist does not exist, or is not a directory", + e.message + end + + it "actually transfer files" do + dataset_tmp_path = make_tmppath + root_tmp_path = make_tmppath + + server = call_create_server(root_tmp_path, @server_params) + + make_dataset(dataset_tmp_path, "19981222-1301") + make_dataset(dataset_tmp_path, "19981222-1302") + + call_transfer(dataset_tmp_path, server_port: server.port) + assert(File.exist?(root_tmp_path / "19981222-1301" / "test.0.log")) + end + + # Call 'transfer' function instead of 'watch' to call transfer once + def call_transfer(source_dir, server_port: nil) + updated_server_params = @server_params + updated_server_params[:port] = server_port if server_port + args = [ + "transfer", + source_dir, + *updated_server_params.values + ] + LogRuntimeArchiveMain.start(args) + end + + def make_dataset(path, name) + dataset = (path / name) + dataset.mkpath + FileUtils.touch(dataset / "info.yml") + make_random_file("test.0.log", root: dataset) + dataset + end + + def make_random_file(name, root: @root, size: 1024) + content = Base64.encode64(Random.bytes(size)) + make_in_file name, content, root: root + content + end + + def make_in_file(name, content, root: @root) + path = (root / name) + path.write(content) + [] << path + path + end + end + + describe "#ensure_free_space" do + before do + @directory = make_tmppath + @sub_directory = Pathname.new(@directory / "subdir") + @sub_directory2 = Pathname.new(@directory / "subdir_2") + @sub_directory.mkdir unless @sub_directory.exist? + @sub_directory2.mkdir unless @sub_directory2.exist? 
+ @mocked_files_sizes = [] + + 10.times { |i| (@sub_directory / i.to_s).write(i.to_s) } + 10.times { |i| (@sub_directory2 / i.to_s).write(i.to_s) } + + @archiver = LogRuntimeArchive.new(@directory) + end + + it "removes enough files to reach the freed limit" do + size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] + mock_files_size(size_files, directory: @sub_directory) + mock_files_size(size_files, directory: @sub_directory2) + mock_available_space(0, directory: @sub_directory) + mock_available_space(100.5, directory: @sub_directory2) + mock_mtime(directory: @sub_directory) + mock_mtime(directory: @sub_directory2) + mock_mtime(directory: @directory) + + call_ensure_free_space(@directory, 101, 110) + assert_deleted_files( + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], directory: @sub_directory + ) + assert_deleted_files([0, 1, 2, 3], directory: @sub_directory2) + end + + it "removes from directories based on modification time" do + size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] + mock_files_size(size_files, directory: @sub_directory) + mock_files_size(size_files, directory: @sub_directory2) + mock_available_space(0.5, directory: @sub_directory) + mock_available_space(0.5, directory: @sub_directory2) + mock_mtime(directory: @sub_directory) + mock_mtime(directory: @sub_directory2) + mock_mtime(directory: @directory, reverse_alphabetical: true) + + call_ensure_free_space(@directory, 1, 10) + assert_deleted_files([0, 1, 2, 3], directory: @sub_directory2) + # Does not delete any file from newest directory + assert_equal 10, @sub_directory.each_child.select(&:file?).size + end + + def call_ensure_free_space(source_dir, low_limit, freed_limit) + args = [ + "ensure_free_space", + source_dir, + "--free-space-low-limit", low_limit, + "--free-space-freed-limit", freed_limit + ] + LogRuntimeArchiveMain.start(args) + end + end + + describe "#watch_ensure_free_space" do + before do + @directory = make_tmppath + @sub_directory = Pathname.new(@directory / "subdir") + @sub_directory.mkdir unless @sub_directory.exist? 
+ + @mocked_files_sizes = [] + 5.times { |i| (@sub_directory / i.to_s).write(i.to_s) } + end + + it "calls ensure free space with the specified period" do + mock_files_size([], directory: @sub_directory) + mock_available_space(200, directory: @sub_directory) # 70 MB + + quit = Class.new(RuntimeError) + called = 0 + flexmock(LogRuntimeArchive) + .new_instances + .should_receive(:ensure_free_space) + .pass_thru do + called += 1 + raise quit if called == 3 + end + + tic = Time.now + assert_raises(quit) do + LogRuntimeArchiveMain.start( + ["watch_ensure_free_space", @directory, "--period", 0.5] + ) + end + + assert called == 3 + assert_operator(Time.now - tic, :>, 0.9) + end + end + + def call_create_server(tgt_dir, server_params) + cli = LogRuntimeArchiveMain.new + cli.create_server(tgt_dir, *server_params.values) + end + + def server_params + interface = "127.0.0.1" + ca = RobyApp::TmpRootCA.new(interface) + + { host: interface, port: 0, + certificate: ca.private_certificate_path, + user: "nilvo", password: "nilvo123", + implicit_ftps: true } + end + # Mock files sizes in bytes # @param [Array] size of files in MB - def mock_files_size(sizes) + def mock_files_size(sizes, directory: @archive_dir) @mocked_files_sizes = sizes @mocked_files_sizes.each_with_index do |size, i| - (@archive_dir / i.to_s).write(" " * size * 1e6) + (directory / i.to_s).write(" " * size * 1e6) end end # Mock total disk available space in bytes # @param [Float] total_available_disk_space total available space in MB - def mock_available_space(total_available_disk_space) + def mock_available_space(total_available_disk_space, directory: @archive_dir) flexmock(Sys::Filesystem) - .should_receive(:stat).with(@archive_dir) + .should_receive(:stat).with(directory) .and_return do flexmock( bytes_available: total_available_disk_space * 1e6 @@ -149,17 +400,31 @@ def mock_available_space(total_available_disk_space) end end - def assert_deleted_files(deleted_files) + # Mock the modification time of the files to be alphabetical order + # @param [String] directory the directory to mock the items modification time + # @param [Bool] reverse_alphabetical true if use reverse alphabetical order + def mock_mtime(directory: @archive_dir, reverse_alphabetical: false) + items = directory.children + .select { |child| child.file? || child.directory? } + + items = items.sort_by(&:to_s) + items = items.reverse if reverse_alphabetical + items.each_with_index do |item, i| + File.utime(i, i, item.to_s) + end + end + + def assert_deleted_files(deleted_files, directory: @archive_dir) if deleted_files.empty? - files = @archive_dir.each_child.select(&:file?) + files = directory.each_child.select(&:file?) 
assert_equal 5, files.size else (0..4).each do |i| if deleted_files.include?(i) - refute (@archive_dir / i.to_s).exist?, + refute (directory / i.to_s).exist?, "#{i} was expected to be deleted, but has not been" else - assert (@archive_dir / i.to_s).exist?, + assert (directory / i.to_s).exist?, "#{i} was expected to be present, but got deleted" end end diff --git a/test/coordination/test_task_script.rb b/test/coordination/test_task_script.rb index af2d5ac8f..5091e2930 100644 --- a/test/coordination/test_task_script.rb +++ b/test/coordination/test_task_script.rb @@ -320,7 +320,8 @@ def start it "does port mapping if necessary" do composition_m = Syskit::Composition.new_submodel composition_m.add srv_m, as: "test" - composition = syskit_deploy_and_configure(composition_m.use("test" => component)) + syskit_stub_configured_deployment task_m + composition = syskit_deploy_and_configure(composition_m.use("test" => task_m)) reader = nil composition.script do @@ -328,7 +329,7 @@ def start end syskit_start(composition) - component.orocos_task.local_ruby_task.out.write(10) + composition.test_child.orocos_task.local_ruby_task.out.write(10) assert_equal 10, reader.read end diff --git a/test/features/early_deploy.rb b/test/features/early_deploy.rb new file mode 100644 index 000000000..fd48a6f6b --- /dev/null +++ b/test/features/early_deploy.rb @@ -0,0 +1,3 @@ +# frozen_string_literal: true + +Syskit.conf.early_deploy = true diff --git a/test/interface/test_commands.rb b/test/interface/test_commands.rb index 10de835b8..dbb40e321 100644 --- a/test/interface/test_commands.rb +++ b/test/interface/test_commands.rb @@ -47,6 +47,56 @@ module Interface end end + describe "#poll_ready_deployments" do + attr_reader :task_m, :task + + before do + @task_m = TaskContext.new_submodel + @task = syskit_stub_deploy_configure_and_start( + syskit_stub_requirements(task_m).with_conf("default") + ) + plan.add_mission_task(task) + end + + it "returns a deployment that is ready" do + new_deployments, old_deployments = subject.poll_ready_deployments + assert_equal [], old_deployments + assert_equal 1, new_deployments.size + deployment = new_deployments.first + assert_equal @task.execution_agent, deployment + end + + it "ignores a deployment that is not ready yet" do + flexmock(@task.execution_agent).should_receive(ready?: false) + new_deployments, old_deployments = subject.poll_ready_deployments + assert_equal [], new_deployments + assert_equal [], old_deployments + end + + it "does not return a deployment that is already known" do + new_deployments, old_deployments = + subject.poll_ready_deployments( + known: [@task.execution_agent.droby_id.id] + ) + + assert_equal [], new_deployments + assert_equal [], old_deployments + end + + it "lists deployments that have been removed" do + droby_id = @task.execution_agent.droby_id.id + expect_execution do + plan.unmark_mission_task(task) + plan.unmark_permanent_task(task.execution_agent) + end.garbage_collect(true).to { emit task.execution_agent.stop_event } + + new_deployments, old_deployments = + subject.poll_ready_deployments(known: [droby_id]) + assert_equal [], new_deployments + assert_equal [droby_id], old_deployments + end + end + describe "#restart_deployments" do attr_reader :task_m, :task diff --git a/test/interface/v2/test_protocol.rb b/test/interface/v2/test_protocol.rb index 26f8d67dc..15116376b 100644 --- a/test/interface/v2/test_protocol.rb +++ b/test/interface/v2/test_protocol.rb @@ -51,6 +51,34 @@ module Protocol marshalled.deployed_tasks.map(&:to_h) end end + + describe 
"Device support" do + before do + @channel = Roby::Interface::V2::Channel.new( + IO.pipe.last, flexmock + ) + Protocol.register_marshallers(@channel) + + @device_m = Syskit::Device.new_submodel(name: "Dev") + @driver_m = Syskit::TaskContext.new_submodel + @driver_m.driver_for @device_m, as: "driver" + + profile = Actions::Profile.new("Test") + @robot = profile.robot + end + + it "marshals a master device" do + @robot.device @device_m, as: "master_device" + marshalled = @channel.marshal_filter_object( + @robot.master_device_dev + ) + + assert_kind_of MasterDeviceInstance, marshalled + assert_equal "master_device", marshalled.name + assert_kind_of DeviceModel, marshalled.model + assert_equal "Dev", marshalled.model.name + end + end end end end diff --git a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index 5dfd15e8e..db65349d6 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -240,6 +240,46 @@ module NetworkGeneration policy_graph[[task0, task1]][%w[out in]]) end + it "adds init: true policy if available and saves it " \ + "in the graph's policy_graph" do + plan.add(task0 = @task_m.new) + plan.add(task1 = @task_m.new) + + add_agents(tasks = [task0, task1]) + flexmock(@dynamics).should_receive(:propagate).with(tasks) + + task0.out_port.model.init_policy(true) + task0.out_port.connect_to(task1.in_port) + + @dynamics.should_receive(:policy_for) + .with(task0, "out", "in", task1, nil) + .and_return(type: :buffer, size: 42, init: true) + policy_graph = @dynamics.compute_connection_policies + + assert_equal({ type: :buffer, size: 42, init: true }, + policy_graph[[task0, task1]][%w[out in]]) + end + + it "adds init: false policy if available and saves it " \ + "in the graph's policy_graph" do + plan.add(task0 = @task_m.new) + plan.add(task1 = @task_m.new) + + add_agents(tasks = [task0, task1]) + flexmock(@dynamics).should_receive(:propagate).with(tasks) + + task0.out_port.model.init_policy(false) + task0.out_port.connect_to(task1.in_port) + + @dynamics.should_receive(:policy_for) + .with(task0, "out", "in", task1, nil) + .and_return(type: :buffer, size: 42, init: false) + policy_graph = @dynamics.compute_connection_policies + + assert_equal({ type: :buffer, size: 42, init: false }, + policy_graph[[task0, task1]][%w[out in]]) + end + it "computes the policies on the concrete connections" do plan.add(task = @task_m.new) cmp = @cmp_m.instanciate(plan) @@ -258,7 +298,7 @@ module NetworkGeneration policy_graph[[cmp.c_child, task]][%w[out in]]) end - it "uses in-graph policies over the computed ones" do + it "merges in-graph policies with the computed ones" do plan.add(task0 = @task_m.new) plan.add(task1 = @task_m.new) @@ -267,10 +307,13 @@ module NetworkGeneration task0.out_port.connect_to(task1.in_port, type: :buffer, size: 42) - @dynamics.should_receive(:policy_for).never + @dynamics + .should_receive(:policy_for) + .with(task0, "out", "in", task1, nil) + .and_return(type: :buffer, size: 10, init: nil) policy_graph = @dynamics.compute_connection_policies - assert_equal({ type: :buffer, size: 42 }, + assert_equal({ type: :buffer, size: 42, init: nil }, policy_graph[[task0, task1]][%w[out in]]) end @@ -320,6 +363,64 @@ module NetworkGeneration add_agents(tasks[0, 2]) flexmock(@dynamics).should_receive(:propagate).with(tasks[0, 2]) end + + it "handles the case where the explicit policy sets the type to :data" do + plan.add(task0 = @task_m.new) + plan.add(task1 = @task_m.new) + 
+ add_agents(tasks = [task0, task1]) + flexmock(@dynamics).should_receive(:propagate).with(tasks) + + task0.out_port.connect_to task1.in_port, type: :data + + flexmock(@dynamics) + .should_receive(:policy_for) + .with(task0, "out", "in", task1, nil) + .and_return(type: :buffer, size: 10, init: true) + + policy_graph = @dynamics.compute_connection_policies + expected_policy = { type: :data, init: true } + assert_equal(expected_policy, + policy_graph[[task0, task1]][%w[out in]]) + end + end + + describe "merge_policy" do + before do + @dynamics = NetworkGeneration::DataFlowDynamics.new(plan) + end + + it "merges policies by preferring explicit values over " \ + "computed values" do + explicit_policy = { type: :buffer, size: 20, init: true } + computed_policy = { type: :buffer, size: 10, init: true } + + merged_policy = + @dynamics.merge_policy(explicit_policy, computed_policy) + + assert_equal({ type: :buffer, size: 20, init: true }, merged_policy) + end + + it "removes the size value when the type is set to :data" do + explicit_policy = { type: :data, init: true } + computed_policy = { type: :buffer, size: 10, init: true } + + merged_policy = + @dynamics.merge_policy(explicit_policy, computed_policy) + + assert_equal({ type: :data, init: true }, merged_policy) + end + + it "falls back to computed values when explicit values " \ + "are not provided" do + explicit_policy = {} + computed_policy = { type: :buffer, size: 10, init: true } + + merged_policy = + @dynamics.merge_policy(explicit_policy, computed_policy) + + assert_equal({ type: :buffer, size: 10, init: true }, merged_policy) + end end describe "#policy_for" do @@ -380,15 +481,51 @@ module NetworkGeneration "the sink port is marked as needs_reliable_connection" do @sink_task_m.in_port.needs_reliable_connection fallback_policy = flexmock + expected_policy = flexmock + + expected_policy + .should_receive(:merge) + .and_return(expected_policy) + flexmock(@dynamics) .should_receive(:compute_reliable_connection_policy) .with(@source_t.out_port, @sink_t.in_port, fallback_policy) - .once.and_return(expected_policy = flexmock) + .once.and_return(expected_policy) policy = @dynamics.policy_for( @source_t, "out", "in", @sink_t, fallback_policy ) assert_equal expected_policy, policy end + + it "merges init policy when sink requires reliable connection" do + @sink_task_m.in_port.needs_reliable_connection + @source_t.out_port.model.init_policy(true) + fallback_policy = flexmock + + flexmock(@dynamics) + .should_receive(:compute_reliable_connection_policy) + .with(@source_t.out_port, @sink_t.in_port, fallback_policy) + .once.and_return({}) + + policy = @dynamics.policy_for( + @source_t, "out", "in", @sink_t, fallback_policy + ) + + assert policy[:init] + end + + it "merges init policy when sink requires 'buffer' connection type" do + @sink_task_m.in_port.needs_buffered_connection + + flexmock(@source_t.out_port.model) + .should_receive(:init_policy?).explicitly + .and_return(true) + + @source_t.out_port.model.init_policy(true) + policy = @dynamics.policy_for(@source_t, "out", "in", @sink_t, nil) + + assert policy[:init] + end end describe "#compute_reliable_connection_policy" do diff --git a/test/network_generation/test_engine.rb b/test/network_generation/test_engine.rb index ef9ff6ada..2fe037db7 100644 --- a/test/network_generation/test_engine.rb +++ b/test/network_generation/test_engine.rb @@ -104,7 +104,11 @@ def work_plan it "saves the mapping from requirement task in real_plan to instanciated task in work_plan" do 
flexmock(requirements).should_receive(:instanciate) .and_return(instanciated_task = simple_component_model.new) - mapping = syskit_engine.compute_system_network([planning_task]) + syskit_stub_configured_deployment(simple_component_model) + mapping = syskit_engine.compute_system_network( + [planning_task], + default_deployment_group: default_deployment_group + ) assert_equal instanciated_task, mapping[planning_task] end end @@ -859,7 +863,7 @@ def deploy_dev_and_bus syskit_configure(cmp) assert_equal( - { %w[out in] => { type: :buffer, size: 4 } }, + { %w[out in] => { type: :buffer, size: 4, init: nil } }, RequiredDataFlow.edge_info(cmp.source_child, cmp.sink_child) ) end diff --git a/test/network_generation/test_merge_solver.rb b/test/network_generation/test_merge_solver.rb index f0414eb29..3a0b43742 100644 --- a/test/network_generation/test_merge_solver.rb +++ b/test/network_generation/test_merge_solver.rb @@ -48,12 +48,32 @@ target_task.should_receive(:can_merge?).with(task).and_return(false).once assert !solver.may_merge_task_contexts?(task, target_task) end - it "returns false for tasks that have execution agents" do - plan.add(t1 = simple_component_model.new) - plan.add(t2 = simple_composition_model.new) - flexmock(t1).should_receive(:execution_agent).and_return(true) - assert !solver.may_merge_task_contexts?(t1, t2) - assert !solver.may_merge_task_contexts?(t2, t1) + it "returns false if both tasks have execution agents and " \ + "merge_when_identical_agents is false" do + plan.add(task1 = simple_component_model.new) + plan.add(task2 = simple_composition_model.new) + [task1, task2].permutation.each do |t1, t2| + flexmock(t1).should_receive(:execution_agent).and_return(true) + t1.should_receive(:can_merge?).with(t2).and_return(true).once + end + refute solver.may_merge_task_contexts?(task1, task2) + refute solver.may_merge_task_contexts?(task2, task1) + end + it "returns false for tasks that do not have execution agents when " \ + "merge_when_identical_agents is true" do + plan.add(task1 = simple_component_model.new) + plan.add(task2 = simple_composition_model.new) + + [task1, task2].permutation.each do |t1, t2| + flexmock(t1).should_receive(:execution_agent).and_return(false) + t1.should_receive(:can_merge?).with(t2).and_return(true).once + end + + local_solver = Syskit::NetworkGeneration::MergeSolver.new(plan) + local_solver.merge_task_contexts_with_same_agent = true + + refute local_solver.may_merge_task_contexts?(task1, task2) + refute local_solver.may_merge_task_contexts?(task2, task1) end end @@ -358,6 +378,31 @@ def mock_merged_task_with_concrete_input_connections(*connections) end end + describe "#apply_merge_group" do + attr_reader :local_plan, :solver + + before do + @local_plan = Roby::Plan.new + @solver = Syskit::NetworkGeneration::MergeSolver.new(@local_plan) + end + + it "applies merged task plan marks to the destination task" do + task1 = Roby::Task.new + task2 = Roby::Task.new + + local_plan.add_permanent_task task1 + local_plan.add_mission_task task1 + + refute local_plan.permanent_task? task2 + refute local_plan.mission_task? task2 + + solver.apply_merge_group({ task1 => task2 }) + + assert local_plan.permanent_task? task2 + assert local_plan.mission_task? 
task2 + end + end + describe "functional tests" do describe "merging compositions" do attr_reader :plan, :srv_m, :task_m, :cmp_m diff --git a/test/network_generation/test_system_network_generator.rb b/test/network_generation/test_system_network_generator.rb index d0f672254..7a4037108 100644 --- a/test/network_generation/test_system_network_generator.rb +++ b/test/network_generation/test_system_network_generator.rb @@ -111,6 +111,117 @@ def arg=(value) flexmock(generator).should_receive(:validate_generated_network).once generator.compute_system_network([], validate_generated_network: true) end + + describe "early deploy" do + attr_reader :net_gen, :device_m, :cmp_m, :task_m, :net_gen_plan + + before do # rubocop:disable Metrics/BlockLength + @device_m = Device.new_submodel(name: "D") do + output_port "out", "/double" + end + device_m = @device_m + driver_m = TaskContext.new_submodel(name: "Driver") do + output_port "out", "/double" + driver_for device_m, as: "test" + end + + @task_m = TaskContext.new_submodel(name: "Task") do + argument :arg + input_port "in", "/double" + end + task_m = @task_m + + @cmp_m = Syskit::Composition.new_submodel + cmp_m = @cmp_m + cmp_m.add device_m, as: "device" + cmp_m.add task_m, as: "task" + cmp_m.device_child.connect_to cmp_m.task_child + + syskit_stub_configured_deployment(driver_m) + syskit_stub_configured_deployment(task_m, "task1") + + @net_gen = SystemNetworkGenerator.new( + @net_gen_plan = Roby::Plan.new, + default_deployment_group: default_deployment_group, + early_deploy: true + ) + @net_gen.merge_solver.merge_task_contexts_with_same_agent = true + end + + it "can merge tasks with same execution agent" do + d = robot.device(device_m, as: "d") + assert net_gen.compute_system_network( + [cmp_m.use("device" => d), cmp_m.use("device" => d)], + validate_generated_network: true + ) + end + + it "raises when a deployment is used more than once" do + d = robot.device(device_m, as: "d") + assert_raises(ConflictingDeploymentAllocation) do + net_gen.compute_system_network( + [cmp_m.use("task" => task_m.with_arguments(arg: 1), + "device" => d), + cmp_m.use("task" => task_m.with_arguments(arg: 2), + "device" => d)] + ) + end + end + + it "early resolves deployments with hints" do + syskit_stub_configured_deployment(task_m, "task2") + local_net_gen = SystemNetworkGenerator.new( + local_net_gen_plan = Roby::Plan.new, + default_deployment_group: default_deployment_group, + early_deploy: true + ) + local_net_gen.merge_solver + .merge_task_contexts_with_same_agent = true + + d = robot.device(device_m, as: "d") + assert local_net_gen.compute_system_network( + [1, 2].map do |x| + cmp_m.use("task" => task_m.prefer_deployed_tasks(/task#{x}/), + "device" => d) + end + ) + + [1, 2].each do |x| + tasks = local_net_gen_plan.find_local_tasks(TaskContext) + .select do |t| + t.respond_to?(:orocos_name) && t.orocos_name == "task#{x}" + end + assert tasks.size == 1 + end + end + + it "does not merge a task without deployments " \ + "with a compatible task that has one" do + local_net_gen = SystemNetworkGenerator.new( + local_net_gen_plan = Roby::Plan.new, + default_deployment_group: Models::DeploymentGroup.new, + early_deploy: true + ) + + task_m = self.task_m + deployment_m = Syskit::Deployment.new_submodel do + task "task", task_m + end + + local_net_gen.merge_solver + .merge_task_contexts_with_same_agent = true + e = assert_raises(MissingDeployments) do + local_net_gen.compute_system_network( + [task_m.to_instance_requirements, + task_m.to_instance_requirements + 
.use_deployment(deployment_m)], + validate_deployed_network: true + ) + end + + assert_equal 1, e.tasks.size + end + end end describe "#generate" do diff --git a/test/process_managers/test_remote.rb b/test/process_managers/test_remote.rb index de6ac680b..867e7d530 100644 --- a/test/process_managers/test_remote.rb +++ b/test/process_managers/test_remote.rb @@ -506,7 +506,7 @@ def assert_upload_succeeds(timeout: 1) end end - class TestLogTransferServer < Syskit::RobyApp::LogTransferServer::SpawnServer + class TestLogTransferServer < Syskit::Runtime::Server::SpawnServer attr_reader :certfile_path def initialize(target_dir, user, password) diff --git a/test/queries/test_port_matcher.rb b/test/queries/test_port_matcher.rb index ab22e9221..f34148e07 100644 --- a/test/queries/test_port_matcher.rb +++ b/test/queries/test_port_matcher.rb @@ -30,6 +30,13 @@ module Queries PortMatcher.new(@task_m).with_name("out_d") end + it "can find ports with ===" do + plan.add(task = @task_m.new) + matcher = PortMatcher.new(@task_m).with_name("out_d") + assert matcher === task.out_d_port + refute matcher === task.out_f_port + end + it "optionally allows to filter with a name pattern" do plan.add(task = @task_m.new) assert_matcher_finds [task.out_d_port, task.out_f_port], diff --git a/test/roby_app/test_log_transfer_manager.rb b/test/roby_app/test_log_transfer_manager.rb index 55f2e3edf..fe4427b62 100644 --- a/test/roby_app/test_log_transfer_manager.rb +++ b/test/roby_app/test_log_transfer_manager.rb @@ -16,7 +16,7 @@ module RobyApp ip: "127.0.0.1", self_spawned: true, max_upload_rates: {}, - implicit_ftps: LogTransferServer.use_implicit_ftps? + implicit_ftps: Runtime::Server.use_implicit_ftps? ) @conf.target_dir = make_tmpdir @manager = nil @@ -75,7 +75,7 @@ module RobyApp @conf.target_dir = target_path.to_s ca = TmpRootCA.new("127.0.0.1") @conf.certificate = ca.certificate - server = LogTransferServer::SpawnServer.new( + server = Runtime::Server::SpawnServer.new( target_path.to_s, "user", "password", ca.private_certificate_path ) diff --git a/test/roby_app/spawn_server/test_spawn_server.rb b/test/runtime/server/test_spawn_server.rb similarity index 97% rename from test/roby_app/spawn_server/test_spawn_server.rb rename to test/runtime/server/test_spawn_server.rb index 38e4fef78..92c848eb5 100644 --- a/test/roby_app/spawn_server/test_spawn_server.rb +++ b/test/runtime/server/test_spawn_server.rb @@ -5,8 +5,8 @@ require "net/ftp" module Syskit - module RobyApp - module LogTransferServer + module Runtime + module Server describe SpawnServer do ### AUXILIARY FUNCTIONS ### def spawn_server @@ -19,7 +19,7 @@ def spawn_server File.join(__dir__, "..", "..", "process_managers", "cert-private.crt") - @implicit_ftps = LogTransferServer.use_implicit_ftps? + @implicit_ftps = Server.use_implicit_ftps? 
@server = SpawnServer.new( @temp_serverdir, @user, @password, private_key_path, diff --git a/test/telemetry/async/test_interface_object.rb b/test/telemetry/async/test_interface_object.rb new file mode 100644 index 000000000..b6c8df9be --- /dev/null +++ b/test/telemetry/async/test_interface_object.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +require "syskit/test/self" +require "syskit/telemetry/async" + +module Syskit + module Telemetry + module Async + describe InterfaceObject do + before do + @object = InterfaceObject.new(flexmock, "something", flexmock) + end + + describe "on_reachable" do + it "registers a callback called when the object " \ + "becomes reachable" do + recorder = flexmock + recorder.should_receive(:called).with(raw = flexmock).once + @object.on_reachable do + recorder.called(_1) + end + @object.reachable!(raw) + end + + it "calls the callback right away if the object is already " \ + "reachable" do + recorder = flexmock + recorder.should_receive(:called).with(raw = flexmock).once + @object.reachable!(raw) + @object.on_reachable do + recorder.called(_1) + end + end + + it "stops calling if the value returned on registration was " \ + "disposed" do + recorder = flexmock + recorder.should_receive(:called).never + disposable = @object.on_reachable do + recorder.called(_1) + end + disposable.dispose + @object.reachable!(flexmock) + end + end + end + end + end +end diff --git a/test/telemetry/async/test_name_service.rb b/test/telemetry/async/test_name_service.rb new file mode 100644 index 000000000..1f94d9ae0 --- /dev/null +++ b/test/telemetry/async/test_name_service.rb @@ -0,0 +1,295 @@ +# frozen_string_literal: true + +require "syskit/test/self" +require "syskit/telemetry/async" + +module Syskit + module Telemetry + module Async + describe NameService do + before do + @ns = NameService.new + @ruby_tasks = [] + end + + after do + @ruby_tasks.each(&:dispose) + end + + describe "asynchronous update" do + it "asynchronously resolves a task from name and IOR" do + deployed_task, task = make_deployed_task("test", "something") + + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + @ns.resolve_discovered_tasks + assert_equal task.ior, @ns.get("test").identity + end + + it "does not re-resolve a registered task if the IOR matches" do + deployed_task, = make_deployed_task("test", "something") + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + @ns.async_update_tasks([deployed_task]) + refute @ns.has_pending_discoveries? + end + + it "re-resolves a registered task if the IOR differs" do + deployed_task, = make_deployed_task("test", "something") + deployed_task2, task2 = make_deployed_task("test", "something") + + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + @ns.resolve_discovered_tasks + + @ns.async_update_tasks([deployed_task2]) + assert @ns.has_pending_discoveries? 
+ @ns.wait_for_task_discovery + @ns.resolve_discovered_tasks + assert_equal task2.ior, @ns.get("test").identity + end + + it "requeues the discovery if a task's IOR changed" do + deployed_task, = make_deployed_task("test", "something") + deployed_task2, task2 = make_deployed_task("test", "something") + + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + @ns.async_update_tasks([deployed_task2]) + @ns.wait_for_task_discovery + @ns.resolve_discovered_tasks + assert_equal task2.ior, @ns.get("test").identity + end + + it "does not register a task if it has been removed while it was " \ + "being discovered" do + deployed_task, = make_deployed_task("test", "something") + + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + # async_update_tasks resolves the discovered tasks + @ns.async_update_tasks([]) + refute @ns.include?("test") + refute @ns.has_pending_discoveries? + end + + it "deregisters tasks that are not in the set of known tasks" do + deployed_task, = make_deployed_task("test", "something") + + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + @ns.resolve_discovered_tasks + + @ns.async_update_tasks([]) + refute @ns.has_pending_discoveries? + refute @ns.include?("test") + end + + it "stops the discovery of an IOR if its resolution failed" do + deployed_task, = make_deployed_task("test", "something") + + flexmock(Orocos::TaskContext) + .should_receive(:new) + .once.and_raise(RuntimeError.new("some reason")) + + @ns.async_update_tasks([deployed_task]) + assert @ns.has_pending_discoveries? + @ns.wait_for_task_discovery + @ns.resolve_discovered_tasks + refute @ns.has_pending_discoveries? + refute @ns.include?("test") + end + + it "raises in resolve_discovered_tasks if an unexpected exceptions " \ + "was raised by discover_task" do + error_m = Class.new(RuntimeError) + flexmock(@ns).should_receive(:discover_task).and_raise(error_m) + deployed_task, = make_deployed_task("test", "something") + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + assert_raises(NameService::AsyncDiscoveryError) do + @ns.resolve_discovered_tasks + end + end + end + + describe "on_task_added" do + it "calls the block when a new task is registered" do + mock = flexmock + mock.should_receive(:registered).with("test").once + @ns.on_task_added { |name| mock.registered(name) } + @ns.register(flexmock, name: "test") + end + + it "already has registered the task when the callback is called" do + test_task = flexmock + mock = flexmock + mock.should_receive(:registered).with(test_task).once + @ns.on_task_added do |name| + mock.registered(@ns.get(name)) + end + + @ns.register(test_task, name: "test") + end + + it "accepts more than one callback" do + mock = flexmock + mock.should_receive(:registered).with("test", 1).once + mock.should_receive(:registered).with("test", 2).once + @ns.on_task_added { |name| mock.registered(name, 1) } + @ns.on_task_added { |name| mock.registered(name, 2) } + + @ns.register(flexmock, name: "test") + end + + it "processes all callbacks even if one raises" do + mock = flexmock + mock.should_receive(:registered).with("test", 1).once + mock.should_receive(:registered).with("test", 2).once + error_m = Class.new(RuntimeError) + @ns.on_task_added do |name| + mock.registered(name, 1) + raise error_m + end + @ns.on_task_added { |name| mock.registered(name, 2) } + + assert_raises(error_m) do + @ns.register(flexmock, name: "test") + end + end + + it "stops calling after the callback is disposed" do + mock = flexmock + 
mock.should_receive(:registered).never + @ns.on_task_added { |name| mock.registered(name) } + .dispose + + @ns.register(flexmock, name: "test") + end + end + + describe "on_task_removed" do + before do + @ns.register(@test_task = flexmock, name: "test") + end + + it "calls the block when a task is removed" do + mock = flexmock + mock.should_receive(:removed).with("test").once + @ns.on_task_removed { |name| mock.removed(name) } + @ns.deregister("test") + end + + it "already has removed the task when the callback is called" do + @ns.on_task_removed do |name| + refute @ns.include?(name) + end + + @ns.deregister("test") + end + + it "accepts more than one callback" do + mock = flexmock + mock.should_receive(:removed).with("test", 1).once + mock.should_receive(:removed).with("test", 2).once + @ns.on_task_removed { |name| mock.removed(name, 1) } + @ns.on_task_removed { |name| mock.removed(name, 2) } + + @ns.deregister("test") + end + + it "processes all callbacks even if one raises" do + mock = flexmock + mock.should_receive(:removed).with("test", 1).once + mock.should_receive(:removed).with("test", 2).once + error_m = Class.new(RuntimeError) + @ns.on_task_removed do |name| + mock.removed(name, 1) + raise error_m + end + @ns.on_task_removed { |name| mock.removed(name, 2) } + + assert_raises(error_m) { @ns.deregister("test") } + end + + it "stops calling after the callback is disposed" do + mock = flexmock + mock.should_receive(:removed).never + @ns.on_task_removed { |name| mock.removed(name) } + .dispose + + @ns.deregister("test") + end + + it "is called for all tasks when the name service is cleared, " \ + "after the items have been removed" do + mock = flexmock + mock.should_receive(:removed).with("test", false).once + @ns.on_task_removed do |name| + mock.removed(name, @ns.include?("test")) + end + + @ns.cleanup + end + end + + describe "#get" do + it "raises if the task is not registered" do + assert_raises(Orocos::NotFound) do + @ns.get("does_not_exist") + end + end + end + + describe "#ior" do + it "returns the IOR of a registered task" do + _, task = make_deployed_task("test", "some") + async_task = Orocos.allow_blocking_calls do + TaskContext.discover( + task, port_read_manager: PortReadManager.new + ) + end + assert_equal "test", async_task.name + @ns.register(async_task) + assert_equal task.ior, @ns.ior("test") + end + + it "does not return the IOR of a task being discovered" do + deployed_task, = make_deployed_task("test", "some") + @ns.async_update_tasks([deployed_task]) + assert_raises(Orocos::NotFound) do + @ns.ior("test") + end + end + + it "raises if the given name is not registered" do + assert_raises(Orocos::NotFound) do + @ns.ior("test") + end + end + end + + def deployed_task_s + @deployed_task_s ||= + Struct.new(:name, :ior, :orogen_model_name, keyword_init: true) + end + + def make_deployed_task(name, orogen_model_name) + task = make_ruby_task(name) + deployed_task = deployed_task_s.new( + name: name, ior: task.ior, orogen_model_name: orogen_model_name + ) + [deployed_task, task] + end + + def make_ruby_task(name) + t = Orocos.allow_blocking_calls do + Orocos::RubyTasks::TaskContext.new(name) + end + @ruby_tasks << t + t + end + end + end + end +end diff --git a/test/telemetry/async/test_output_port_subfield.rb b/test/telemetry/async/test_output_port_subfield.rb new file mode 100644 index 000000000..a321f5419 --- /dev/null +++ b/test/telemetry/async/test_output_port_subfield.rb @@ -0,0 +1,137 @@ +# frozen_string_literal: true + +require "syskit/test/self" +require 
"syskit/telemetry/async" + +module Syskit + module Telemetry + module Async + describe OutputPortSubfield do + before do + @ruby_tasks = [] + @port_read_manager = PortReadManager.new + end + + after do + @ruby_tasks.each(&:dispose) + @port_read_manager.dispose + end + + it "computes the subfield name and type for compound types" do + _, async = make_async_task("test") + port = async.port("rbs").sub_port(%w[time microseconds]) + assert_equal "/int64_t", port.type.name + assert_equal "rbs.time.microseconds", port.name + end + + it "computes the subfield name and type for container types" do + _, async = make_async_task("test") + port = async.port("joints").sub_port(%w[elements 10 effort]) + assert_equal "/float", port.type.name + assert_equal "joints.elements[10].effort", port.name + end + + it "is reachable" do + _, async = make_async_task("test") + port = async.port("joints").sub_port(%w[elements 10 effort]) + assert port.reachable? + end + + it "becomes unreachable when the underlying port is" do + _, async = make_async_task("test") + port = async.port("joints").sub_port(%w[elements 10 effort]) + + mock = flexmock + mock.should_receive(:unreachable).once + port.on_unreachable { mock.unreachable } + async.port("joints").unreachable! + refute port.reachable? + end + + describe "#subfield" do + before do + @task = make_ruby_task("test") + end + + it "resolves a subfield in a compound type" do + rbs = @task.rbs.new_sample + rbs.raw_get(:time).microseconds = 42 + assert_equal 42, OutputPortSubfield.resolve_subfield( + rbs, %w[time microseconds] + ) + end + + it "resolves a subfield in a container type" do + joints = @task.joints.new_sample + joints.elements = 11.times.map { { effort: _1 } } + assert_equal 10, OutputPortSubfield.resolve_subfield( + joints, ["elements", 10, "effort"] + ) + end + + it "returns nil if the path refers to a container element that " \ + "does not exist" do + joints = @task.joints.new_sample + joints.elements = 10.times.map { { effort: _1 } } + assert_nil OutputPortSubfield.resolve_subfield( + joints, ["elements", 10, "effort"] + ) + end + end + + it "yields the subfield's data when available" do + task, async = make_async_task("test") + full_port = async.port("joints") + port = full_port.sub_port(%w[elements 10 effort]) + + received = [] + port.on_raw_data do |value| + received << value + end + + assert_polling_eventually do + @port_read_manager.find_poller_for_port(full_port).connected? 
+ end + + Orocos.allow_blocking_calls do + joint_states = 11.times.map { |i| { effort: i } } + task.joints.write({ elements: joint_states }) + end + + assert_polling_eventually { received == [10] } + end + + def make_ruby_task(name) + ruby_task = Orocos.allow_blocking_calls do + t = Orocos::RubyTasks::TaskContext.new(name) + t.create_output_port "rbs", "/base/samples/RigidBodyState" + t.create_output_port "joints", "/base/samples/Joints" + t + end + @ruby_tasks << ruby_task + ruby_task + end + + def make_async_task(name) + t = make_ruby_task name + async = Orocos.allow_blocking_calls do + TaskContext.discover(t, port_read_manager: @port_read_manager) + end + [t, async] + end + + def assert_polling_eventually(period: 0.01, timeout: 2, &block) + deadline = Time.now + timeout + while Time.now < deadline + @port_read_manager.poll + return if block.call + + sleep(period) + end + + flunk("condition not reached in #{timeout} seconds") + end + end + end + end +end diff --git a/test/telemetry/async/test_output_reader.rb b/test/telemetry/async/test_output_reader.rb new file mode 100644 index 000000000..6050bb6a3 --- /dev/null +++ b/test/telemetry/async/test_output_reader.rb @@ -0,0 +1,246 @@ +# frozen_string_literal: true + +require "syskit/test/self" +require "syskit/telemetry/async" +require "syskit/test/polling_executor" + +module Syskit + module Telemetry + module Async + describe OutputReader do + before do + @connection_executor = Test::PollingExecutor.new + @disconnection_executor = Test::PollingExecutor.new + @read_executor = Test::PollingExecutor.new + @ruby_tasks = [] + + @port_read_manager = PortReadManager.new + end + + after do + @ruby_tasks.each(&:dispose) + @port_read_manager.dispose + end + + it "asynchronously connects to the port" do + _, async = make_async_task("test") + reader = make_reader(async.port("out")) + + execute_all(@connection_executor) + reader.poll + assert reader.connected? + end + + describe "#disconnect" do + before do + @task, @async = make_async_task("test") + end + + it "asynchronously disconnects" do + reader = make_connected_reader(@async.port("out")) + flexmock(reader.raw_reader).should_receive(:disconnect).once + future = reader.disconnect + execute_all(@disconnection_executor) + future.value! + end + + it "synchronizes the disconnection on the last read" do + reader = make_connected_reader(@async.port("out")) + executed = [] + 3.times do |i| + reader.raw_read_new(@read_executor).then { executed << i } + end + future = reader.disconnect.then { executed << 3 } + + execute_all(@disconnection_executor) + execute_all(@read_executor) + execute_all(@disconnection_executor) + future.value! + + assert_equal (0...4).to_a, executed + end + + it "synchronizes the disconnection on the connection" do + reader = make_reader(@async.port("out")) + executed = [] + flexmock(Orocos::OutputReader) + .new_instances.should_receive(:disconnect).once + future = reader.disconnect.then { executed << 1 } + + execute_all(@disconnection_executor) + execute_all(@connection_executor) + execute_all(@disconnection_executor) + future.value! + end + end + + describe "#raw_read_with_result" do + before do + @task, @async = make_async_task("test") + end + + it "makes all reads sequential" do + reader = make_reader(@async.port("out")) + @task.out.write(42) + executor0 = Test::PollingExecutor.new + executor1 = Test::PollingExecutor.new + future0 = reader.raw_read_with_result(executor0) + future1 = reader.raw_read_with_result(executor1) + + execute_all(executor1) + refute future1.resolved? 
+ execute_all(executor0) + assert future0.resolved? + execute_all(executor1) + assert future1.resolved? + end + + it "returns nil if the reader is disconnected, and it does not " \ + "attempt to read the old reader object" do + reader = make_connected_reader(@async.port("out")) + flexmock(reader.raw_reader) + .should_receive(:raw_read_with_result) + .never + disconnect_reader(reader) + + future = reader.raw_read_with_result(@read_executor) + execute_all(@read_executor) + assert_nil future.value! + end + end + + describe "#raw_read" do + before do + @task, @async = make_async_task("test") + end + + it "returns nil if the reader is not connected" do + reader = make_reader(@async.port("out")) + @task.out.write(42) + future = reader.raw_read(@read_executor) + execute_all(@read_executor) + assert_nil future.value! + end + + it "returns nil if the reader is connected but " \ + "there has never been any samples" do + reader = make_connected_reader(@async.port("out")) + future = reader.raw_read(@read_executor) + execute_all(@read_executor) + assert_nil future.value! + end + + it "reads a new sample once the reader is connected" do + reader = make_connected_reader(@async.port("out")) + @task.out.write(42) + future = reader.raw_read(@read_executor) + execute_all(@read_executor) + assert_equal 42, Typelib.to_ruby(future.value!) + end + + it "returns the old sample if there are no new samples" do + reader = make_connected_reader(@async.port("out")) + @task.out.write(42) + future = reader.raw_read(@read_executor) + execute_all(@read_executor) + assert_equal 42, Typelib.to_ruby(future.value!) + + future = reader.raw_read(@read_executor) + execute_all(@read_executor) + assert_equal 42, Typelib.to_ruby(future.value!) + end + end + + describe "#raw_read_new" do + before do + @task, @async = make_async_task("test") + end + + it "returns nil if the reader is not connected" do + reader = make_reader(@async.port("out")) + @task.out.write(42) + future = reader.raw_read_new(@read_executor) + execute_all(@read_executor) + assert_nil future.value! + end + + it "returns nil if the reader is connected but " \ + "there has never been any samples" do + reader = make_connected_reader(@async.port("out")) + future = reader.raw_read_new(@read_executor) + execute_all(@read_executor) + assert_nil future.value! + end + + it "reads a new sample once the reader is connected" do + reader = make_connected_reader(@async.port("out")) + @task.out.write(42) + future = reader.raw_read_new(@read_executor) + execute_all(@read_executor) + assert_equal 42, Typelib.to_ruby(future.value!) + end + + it "returns nil if there are no new samples" do + reader = make_connected_reader(@async.port("out")) + @task.out.write(42) + future = reader.raw_read_new(@read_executor) + execute_all(@read_executor) + assert_equal 42, Typelib.to_ruby(future.value!) + + future = reader.raw_read_new(@read_executor) + execute_all(@read_executor) + assert_nil future.value! 
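+ # the first read consumed the only sample, so the second read_new has nothing new to return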
+ end + end + + def make_ruby_task(name) + ruby_task = Orocos.allow_blocking_calls do + t = Orocos::RubyTasks::TaskContext.new(name) + t.create_output_port "out", "/double" + t + end + @ruby_tasks << ruby_task + ruby_task + end + + def make_async_task(name) + t = make_ruby_task name + async = Orocos.allow_blocking_calls do + TaskContext.discover(t, port_read_manager: @port_read_manager) + end + [t, async] + end + + def make_reader(port) + OutputReader.new( + port, {}, + connect_on: @connection_executor, + disconnect_on: @disconnection_executor + ) + end + + def make_connected_reader(port) + reader = make_reader(port) + execute_all(@connection_executor) + reader.poll + assert reader.connected? + reader + end + + def disconnect_reader(reader) + future = reader.disconnect + execute_all(@disconnection_executor) + future.value! + end + + def execute_all(executor) + Orocos.allow_blocking_calls { executor.execute_all } + end + + def execute_one(executor) + Orocos.allow_blocking_calls { executor.execute_one } + end + end + end + end +end diff --git a/test/telemetry/async/test_port_read_manager.rb b/test/telemetry/async/test_port_read_manager.rb new file mode 100644 index 000000000..6645d38a6 --- /dev/null +++ b/test/telemetry/async/test_port_read_manager.rb @@ -0,0 +1,249 @@ +# frozen_string_literal: true + +require "syskit/test/self" +require "syskit/telemetry/async" +require "syskit/test/polling_executor" + +module Syskit + module Telemetry + module Async + describe PortReadManager do + before do + @connection_executor = Test::PollingExecutor.new + @disconnection_executor = Test::PollingExecutor.new + @read_executor = Test::PollingExecutor.new + @manager = PortReadManager.new( + connection_executor: @connection_executor, + disconnection_executor: @disconnection_executor, + read_executor: @read_executor + ) + @ruby_tasks = [] + end + + after do + @manager.dispose + @ruby_tasks.each(&:dispose) + end + + describe "#register_callback" do + it "creates a poller when a callback is first registered" do + _, async = make_async_task("test") + @manager.register_callback( + async.port("out"), proc {}, period: 0.1, buffer_size: 1 + ) + assert @manager.polling?(async.port("out")) + end + + it "keeps the current reader if the buffer size is compatible" do + _, async = make_async_task("test") + out_p = async.port("out") + @manager.register_callback( + out_p, proc {}, period: 0.1, buffer_size: 1 + ) + reader = @manager.find_poller_for_port(out_p).reader + + @manager.register_callback( + out_p, proc {}, period: 0.1, buffer_size: 1 + ) + assert_same reader, @manager.find_poller_for_port(out_p).reader + end + + it "creates a new reader if the buffer size is greater " \ + "than the actual" do + _, async = make_async_task("test") + out_p = async.port("out") + @manager.register_callback( + out_p, proc {}, period: 0.1, buffer_size: 1 + ) + orig_reader = @manager.find_poller_for_port(out_p).reader + + @manager.register_callback( + out_p, proc {}, period: 0.1, buffer_size: 5 + ) + reader = @manager.find_poller_for_port(out_p).reader + refute_same orig_reader, reader + assert_equal 5, reader.policy[:size] + assert orig_reader.disposed? 
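+ # the original, smaller reader is disposed once the larger one replaces it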
+ end + + it "updates the poller period at each new callback " \ + "(reusing poller)" do + _, async = make_async_task("test") + out_p = async.port("out") + @manager.register_callback( + out_p, proc {}, period: 0.1, buffer_size: 1 + ) + assert_equal 0.1, @manager.find_poller_for_port(out_p).period + + @manager.register_callback( + out_p, proc {}, period: 0.05, buffer_size: 1 + ) + assert_equal 0.05, @manager.find_poller_for_port(out_p).period + end + + it "updates the poller period at each new callback " \ + "(new poller)" do + _, async = make_async_task("test") + out_p = async.port("out") + @manager.register_callback( + out_p, proc {}, period: 0.1, buffer_size: 1 + ) + assert_equal 0.1, @manager.find_poller_for_port(out_p).period + + @manager.register_callback( + out_p, proc {}, period: 0.05, buffer_size: 5 + ) + assert_equal 0.05, @manager.find_poller_for_port(out_p).period + end + + it "returns a disposable that will clear the callback" do + _, async = make_async_task("test") + port = async.port("out") + disposable = @manager.register_callback( + port, proc {}, period: 0.1, buffer_size: 1 + ) + poller = @manager.find_poller_for_port(port) + flexmock(poller).should_receive(:dispose).once.pass_thru + disposable.dispose + refute @manager.polling?(async.port("out")) + end + + it "keeps the poller if there are callbacks remaining" do + _, async = make_async_task("test") + port = async.port("out") + disposable = @manager.register_callback( + port, proc {}, period: 0.1, buffer_size: 1 + ) + disposable2 = @manager.register_callback( + port, proc {}, period: 0.1, buffer_size: 1 + ) + poller = @manager.find_poller_for_port(port) + flexmock(poller).should_receive(:dispose).once.pass_thru + disposable.dispose + assert @manager.polling?(async.port("out")) + disposable2.dispose + refute @manager.polling?(async.port("out")) + end + end + + describe "#poll" do + before do + @task, @async = make_async_task("test") + @received_samples = [] + @out_p = @async.port("out") + @manager.register_callback( + @out_p, + proc { @received_samples << _1 }, + period: 0.1, buffer_size: 1 + ) + @poller = @manager.find_poller_for_port(@out_p) + end + + it "does nothing if the reader is not connected" do + @manager.poll + end + + it "immediately schedules the next read once connected" do + execute_all(@connection_executor) + @manager.poll + + assert @poller.reader.connected? 
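+ # poll queues a read as soon as the connection is established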
+ assert @poller.read_future + + Orocos.allow_blocking_calls { @task.out.write 42 } + execute_all(@read_executor) + assert_equal 42, @poller.read_future.value + @manager.poll + + assert_equal [42], @received_samples + end + + it "reschedules the second read based on the end of the first" do + execute_all(@connection_executor) + @manager.poll + + execute_all(@read_executor) + @poller.read_future.wait + time = freeze_monotonic_time + + @manager.poll + next_t = @poller.next_time + assert_in_delta time + 0.1, next_t, 1e-6 + end + + it "reschedules reads > 2 based on the period" do + execute_all(@connection_executor) + @manager.poll + execute_all(@read_executor) + @poller.read_future.wait + @manager.poll + sleep 0.2 + @manager.poll + execute_all(@read_executor) + @poller.read_future.wait + + current_t = @poller.next_time + time = freeze_monotonic_time + + @manager.poll + next_t = @poller.next_time + delta_in_periods = (next_t - current_t) / 0.1 + assert_in_delta delta_in_periods, delta_in_periods.round, 1e-6 + assert_operator next_t, :>, time + assert_operator next_t - time, :<, 0.1 + end + + it "does not do anything for pollers whose next time " \ + "has not been reached" do + execute_all(@connection_executor) + @manager.poll + + execute_all(@read_executor) + @poller.read_future.wait + time = freeze_monotonic_time + @manager.poll + @manager.poll + refute @poller.read_future + + freeze_monotonic_time(time + 0.1) + @manager.poll + assert @poller.read_future + end + + def freeze_monotonic_time(time = @manager.monotonic_time) + @frozen_time = time + flexmock(@manager) + .should_receive(:monotonic_time) + .and_return { @frozen_time } + time + end + end + + def make_ruby_task(name) + ruby_task = Orocos.allow_blocking_calls do + t = Orocos::RubyTasks::TaskContext.new(name) + t.create_attribute "attr", "/int16_t" + t.create_property "prop", "/int32_t" + t.create_input_port "in", "/float" + t.create_output_port "out", "/double" + t + end + @ruby_tasks << ruby_task + ruby_task + end + + def make_async_task(name) + t = make_ruby_task name + async = Orocos.allow_blocking_calls do + TaskContext.discover(t, port_read_manager: @manager) + end + [t, async] + end + + def execute_all(executor) + Orocos.allow_blocking_calls { executor.execute_all } + end + end + end + end +end diff --git a/test/telemetry/async/test_task_context.rb b/test/telemetry/async/test_task_context.rb new file mode 100644 index 000000000..1585b81bb --- /dev/null +++ b/test/telemetry/async/test_task_context.rb @@ -0,0 +1,352 @@ +# frozen_string_literal: true + +require "syskit/test/self" +require "syskit/telemetry/async" + +module Syskit + module Telemetry + module Async + describe TaskContext do + before do + @ns = NameService.new + @port_read_manager = PortReadManager.new + @ruby_tasks = [] + end + + after do + @port_read_manager.dispose + @ruby_tasks.each(&:dispose) + end + + it "is the same as another async task with the same remote task when " \ + "used as hash key" do + t, async = make_async_task "test" + async2 = discover_task(t) + + _, async3 = make_async_task "test2" + + hash = { async => 42 } + assert_equal 42, hash[async2] + assert_nil hash[async3] + assert_nil hash[42] + assert_nil hash["test"] + end + + describe ".discover" do + it "creates an async task already initialized with the remote " \ + "task's interface" do + t, async = make_async_task "test" + assert async.reachable? 
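+ # the discovered async task mirrors the remote task's identity and interface right away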
+ assert_equal t.ior, async.identity + assert_equal Set["state", "in", "out"], + async.each_port.to_set(&:name) + assert_equal ["prop"], async.each_property.map(&:name) + assert_includes async.each_attribute.map(&:name), "attr" + end + + it "registers attributes" do + _, async = make_async_task "test" + attr = async.attribute("attr") + assert_includes async.each_attribute.to_a, attr + assert_kind_of Attribute, attr + assert_equal "attr", attr.name + end + + it "registers properties" do + _, async = make_async_task "test" + prop = async.property("prop") + assert_equal [prop], async.each_property.to_a + assert_kind_of Property, prop + assert_equal "prop", prop.name + end + + it "registers input ports" do + _, async = make_async_task "test" + in_p = async.port("in") + assert_equal [in_p], async.each_input_port.to_a + assert_includes async.each_port.to_a, in_p + assert_kind_of InputPort, in_p + assert_equal "in", in_p.name + end + + it "registers output ports" do + _, async = make_async_task "test" + out_p = async.port("out") + assert_equal Set[async.port("state"), out_p], + async.each_output_port.to_set + assert_includes async.each_port.to_a, out_p + assert_kind_of OutputPort, out_p + assert_equal "out", out_p.name + end + end + + describe "state change notifications" do + it "adds a callback that is called when a state change " \ + "is received by #poll" do + task, async = make_async_task "test" + + states = [] + async.on_state_change { states << _1 } + assert_polling_eventually { states == [:PRE_OPERATIONAL] } + + Orocos.allow_blocking_calls do + task.configure + task.start + end + assert_polling_eventually do + states == %I[PRE_OPERATIONAL STOPPED RUNNING] + end + end + it "calls the block with the currently known state" do + task, async = make_async_task "test" + Orocos.allow_blocking_calls do + task.configure + task.start + end + states = [] + async.on_state_change { states << _1 } + assert_polling_eventually do + states[-1] == :RUNNING + end + + states2 = [] + async.on_state_change { states2 << _1 } + assert_polling_eventually do + states2[-1] == :RUNNING + end + assert_equal [:RUNNING], states2 + end + + it "does not call the block is no state is known" do + _, async = make_async_task "test" + record = flexmock + record.should_receive(:called).never + async.on_state_change { record.called } + end + end + + describe "reachability" do + it "is reachable right after .discover" do + _, async = make_async_task "test" + assert async.reachable? + end + + it "calls the reachability callback on registration" do + _, async = make_async_task "test" + record = flexmock + record.should_receive(:called).once + async.on_reachable { record.called } + end + + it "calls on_unreachable when unreachable! is called" do + _, async = make_async_task "test" + record = flexmock + record.should_receive(:called).once + async.on_unreachable { record.called } + async.unreachable! + end + + it "does not call new reachable callbacks " \ + "if the task is not reachable" do + _, async = make_async_task "test" + async.unreachable! 
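+ # callbacks registered after unreachable! must never fire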
+ record = flexmock + record.should_receive(:called).never + async.on_reachable { record.called } + end + end + + describe "attributes" do + it "calls the on_attribute_reachable hooks on registration" do + _, async = make_async_task "test" + attributes = [] + async.on_attribute_reachable { attributes << _1 } + assert_includes attributes, "attr" + end + + it "calls the attribute's on_reachable hook on registration" do + _, async = make_async_task "test" + m = flexmock + m.should_receive(:called).once + async.attribute("attr").on_reachable { m.called } + end + + it "calls the on_attribute_unreachable hooks when " \ + "the task becomes unreachable" do + _, async = make_async_task "test" + attributes = [] + async.on_attribute_unreachable { attributes << _1 } + async.unreachable! + assert_includes attributes, "attr" + end + + it "calls the attribute's on_unreachable hooks when " \ + "the task becomes unreachable" do + _, async = make_async_task "test" + m = flexmock + m.should_receive(:called).once + async.attribute("attr").on_unreachable { m.called } + async.unreachable! + end + + it "is usable as a hash key" do + t, async = make_async_task "test" + attr = async.attribute("attr") + + async2 = discover_task(t) + attr2 = async2.attribute("attr") + + _, async3 = make_async_task "test2" + attr3 = async3.attribute("attr") + + hash = { attr => 42 } + assert_equal 42, hash[attr2] + assert_nil hash[attr3] + assert_nil hash[42] + assert_nil hash["test"] + end + end + + describe "properties" do + it "calls the on_property_reachable hooks on registration" do + _, async = make_async_task "test" + properties = [] + async.on_property_reachable { properties << _1 } + assert_equal ["prop"], properties + end + + it "calls the property's on_reachable hook on registration" do + _, async = make_async_task "test" + m = flexmock + m.should_receive(:called).once + async.property("prop").on_reachable { m.called } + end + + it "calls the on_property_unreachable hooks when " \ + "the task becomes unreachable" do + _, async = make_async_task "test" + properties = [] + async.on_property_unreachable { properties << _1 } + async.unreachable! + assert_equal ["prop"], properties + end + + it "calls the propertie's on_unreachable hooks when " \ + "the task becomes unreachable" do + _, async = make_async_task "test" + properties = [] + async.on_property_reachable { properties << _1 } + m = flexmock + m.should_receive(:called).once + async.property("prop").on_unreachable { m.called } + async.unreachable! + end + + it "is usable as a hash key" do + t, async = make_async_task "test" + prop = async.property("prop") + + async2 = discover_task(t) + prop2 = async2.property("prop") + + _, async3 = make_async_task "test2" + prop3 = async3.property("prop") + + hash = { prop => 42 } + assert_equal 42, hash[prop2] + assert_nil hash[prop3] + assert_nil hash[42] + assert_nil hash["test"] + end + end + + describe "ports" do + it "calls the on_port_reachable hooks on registration" do + _, async = make_async_task "test" + ports = [] + async.on_port_reachable { ports << _1 } + assert_equal Set["state", "in", "out"], ports.to_set + end + + it "calls the attribute's on_reachable hook on registration" do + _, async = make_async_task "test" + m = flexmock + m.should_receive(:called).once + async.port("in").on_reachable { m.called } + end + + it "calls the on_port_unreachable hooks when " \ + "the task becomes unreachable" do + _, async = make_async_task "test" + ports = [] + async.on_port_unreachable { ports << _1 } + async.unreachable! 
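+ # every known port, including the implicit state port, is reported as unreachable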
+ assert_equal Set["in", "out", "state"], ports.to_set + end + + it "calls the port's on_unreachable hooks when " \ + "the task becomes unreachable" do + _, async = make_async_task "test" + m = flexmock + m.should_receive(:called).once + async.port("in").on_unreachable { m.called } + async.unreachable! + end + + it "is usable as a hash key" do + t, async = make_async_task "test" + port = async.port("out") + + async2 = discover_task(t) + port2 = async2.port("out") + + _, async3 = make_async_task "test2" + port3 = async3.port("out") + + hash = { port => 42 } + assert_equal 42, hash[port2] + assert_nil hash[port3] + assert_nil hash[42] + assert_nil hash["test"] + end + end + + def make_ruby_task(name) + ruby_task = Orocos.allow_blocking_calls do + t = Orocos::RubyTasks::TaskContext.new(name) + t.create_attribute "attr", "/int16_t" + t.create_property "prop", "/int32_t" + t.create_input_port "in", "/float" + t.create_output_port "out", "/double" + t + end + @ruby_tasks << ruby_task + ruby_task + end + + def make_async_task(name) + t = make_ruby_task name + [t, discover_task(t)] + end + + def discover_task(task) + Orocos.allow_blocking_calls do + TaskContext.discover(task, port_read_manager: @port_read_manager) + end + end + + def assert_polling_eventually(period: 0.01, timeout: 2, &block) + deadline = Time.now + timeout + while Time.now < deadline + @port_read_manager.poll + return if block.call + + sleep(period) + end + + flunk("condition not reached in #{timeout} seconds") + end + end + end + end +end diff --git a/test/telemetry/ui/test_name_service.rb b/test/telemetry/ui/test_name_service.rb deleted file mode 100644 index 8dee62362..000000000 --- a/test/telemetry/ui/test_name_service.rb +++ /dev/null @@ -1,125 +0,0 @@ -# frozen_string_literal: true - -require "syskit/test/self" -require "syskit/telemetry/ui/name_service" - -module Syskit - module Telemetry - module UI - describe NameService do - before do - @name_service = NameService.new - end - - describe "on_task_added" do - it "calls the block when a new task is registered" do - mock = flexmock - mock.should_receive(:registered).with("test").once - @name_service.on_task_added { |name| mock.registered(name) } - @name_service.register(flexmock, name: "test") - end - - it "already has registered the task when the callback is called" do - test_task = flexmock - mock = flexmock - mock.should_receive(:registered).with(test_task).once - @name_service.on_task_added do |name| - mock.registered(@name_service.get(name)) - end - - @name_service.register(test_task, name: "test") - end - - it "accepts more than one callback" do - mock = flexmock - mock.should_receive(:registered).with("test", 1).once - mock.should_receive(:registered).with("test", 2).once - @name_service.on_task_added { |name| mock.registered(name, 1) } - @name_service.on_task_added { |name| mock.registered(name, 2) } - - @name_service.register(flexmock, name: "test") - end - - it "processes all callbacks even if one raises" do - mock = flexmock - mock.should_receive(:registered).with("test", 1).once - mock.should_receive(:registered).with("test", 2).once - error_m = Class.new(RuntimeError) - @name_service.on_task_added do |name| - mock.registered(name, 1) - raise error_m - end - @name_service.on_task_added { |name| mock.registered(name, 2) } - - assert_raises(error_m) do - @name_service.register(flexmock, name: "test") - end - end - - it "stops calling after the callback is disposed" do - mock = flexmock - mock.should_receive(:registered).never - @name_service.on_task_added { |name| 
mock.registered(name) } - .dispose - - @name_service.register(flexmock, name: "test") - end - end - - describe "on_task_removed" do - before do - @name_service.register(@test_task = flexmock, name: "test") - end - - it "calls the block when a task is removed" do - mock = flexmock - mock.should_receive(:removed).with("test").once - @name_service.on_task_removed { |name| mock.removed(name) } - @name_service.deregister("test") - end - - it "already has removed the task when the callback is called" do - @name_service.on_task_removed do |name| - refute @name_service.include?(name) - end - - @name_service.deregister("test") - end - - it "accepts more than one callback" do - mock = flexmock - mock.should_receive(:removed).with("test", 1).once - mock.should_receive(:removed).with("test", 2).once - @name_service.on_task_removed { |name| mock.removed(name, 1) } - @name_service.on_task_removed { |name| mock.removed(name, 2) } - - @name_service.deregister("test") - end - - it "processes all callbacks even if one raises" do - mock = flexmock - mock.should_receive(:removed).with("test", 1).once - mock.should_receive(:removed).with("test", 2).once - error_m = Class.new(RuntimeError) - @name_service.on_task_removed do |name| - mock.removed(name, 1) - raise error_m - end - @name_service.on_task_removed { |name| mock.removed(name, 2) } - - assert_raises(error_m) { @name_service.deregister("test") } - end - - it "stops calling after the callback is disposed" do - mock = flexmock - mock.should_receive(:removed).never - @name_service.on_task_removed { |name| mock.removed(name) } - .dispose - - @name_service.deregister("test") - end - end - end - end - end -end diff --git a/test/test/test_profile_assertions.rb b/test/test/test_profile_assertions.rb index 6d7ed0134..b83b58615 100644 --- a/test/test/test_profile_assertions.rb +++ b/test/test/test_profile_assertions.rb @@ -440,7 +440,7 @@ module Test assert_can_instanciate(@cmp_m.use(@srv_m => @task_m)) end - it "allows deploying together with the actions or profile" do + it "allows instantiating together with the actions or profile" do @task_m.argument :bla @test_profile.define "test", @cmp_m.use(@srv_m => @task_m) assert_can_instanciate( @@ -582,7 +582,9 @@ module Test end it "allows deploying together with the actions or profile" do - @test_profile.define("test", @cmp_m.use(@srv_m => @task_m)) + @test_profile.define("test", @cmp_m.use(@srv_m => \ + @task_m.to_instance_requirements + .use_deployment(@deployment_m))) assert_can_deploy( @test_profile.test_def, together_with: @task_m.to_instance_requirements @@ -636,6 +638,170 @@ module Test end end + describe "assert_can_deploy_all" do + include ProfileAssertions + + # Needed by ProfileAssertions + attr_reader :subject_syskit_model + + before do + @test_profile = Actions::Profile.new("TestProfile") + @deployment_m = syskit_stub_deployment_model(@task_m) + @subject_syskit_model = @test_profile + end + + it "passes for definitions that refer to deployed tasks" do + @test_profile.use_deployment @deployment_m + @test_profile.define( + "test", @cmp_m.use(@srv_m => @task_m) + ) + assert_can_deploy_all(@test_profile) + end + + it "fails for definitions that have tasks that are not deployed" do + @test_profile.define "test", @cmp_m.use(@srv_m => @task_m) + e = assert_raises(ProfileAssertions::ProfileAssertionFailed) do + assert_can_deploy_all(@test_profile) + end + + assert_match( + /cannot deploy the following tasks.*Task.*child test of Cmp/m, + PP.pp(e.each_original_exception.first, +"") + ) + end + + it "fails for 
definitions whose services are represented by tags" do + @test_profile.tag "test", @srv_m + @test_profile.define( + "test", @cmp_m.use(@srv_m => @test_profile.test_tag) + ) + e = assert_raises(ProfileAssertions::ProfileAssertionFailed) do + assert_can_deploy_all(@test_profile) + end + assert_match( + /cannot\ find\ a\ concrete\ implementation.* + TestProfile.test_tag/mx, + PP.pp(e.each_original_exception.first, +"") + ) + end + + it "fails for definitions with abstract elements that are not tags" do + @test_profile.define "test", @cmp_m + e = assert_raises(ProfileAssertions::ProfileAssertionFailed) do + assert_can_deploy_all(@test_profile) + end + assert_match( + /cannot\ find\ a\ concrete\ implementation.* + Models::Placeholder/mx, + PP.pp(e.each_original_exception.first, +"") + ) + end + + it "fails for definitions that use tags from other profiles" do + other_profile = Actions::Profile.new("Other") + other_profile.tag "test", @srv_m + @test_profile.define( + "test", @cmp_m.use(@srv_m => other_profile.test_tag) + ) + + e = assert_raises(ProfileAssertions::ProfileAssertionFailed) do + assert_can_deploy_all(@test_profile) + end + assert_match( + /cannot find a concrete implementation.*Other.test_tag/m, + PP.pp(e.each_original_exception.first, +"") + ) + end + + it "handles plain instance requirements" do + assert_can_deploy_all( + @cmp_m + .to_instance_requirements + .use_deployment(@deployment_m) + .use(@srv_m => @task_m) + ) + end + + it "allows deploying together with the actions or profile" do + @test_profile.define("test", @cmp_m.use(@srv_m => \ + @task_m.to_instance_requirements + .use_deployment(@deployment_m))) + assert_can_deploy_all( + @test_profile.test_def, + together_with: @task_m.to_instance_requirements + .use_deployment(@deployment_m) + ) + end + + it "fails if some actions are not resolvable" do + flexmock(self) + .should_receive(:BulkAssertAtomicActions) + .with(action = flexmock, exclude: (excluded = flexmock)) + .and_return([[], + [flexmock(name: "some"), flexmock(name: "action")]]) + + e = assert_raises(Minitest::Assertion) do + assert_can_deploy_all(action, exclude: excluded) + end + message = "could not validate some non-Syskit actions: 'action', " \ + "'some', probably because of required arguments. Pass " \ + "the action to the 'exclude' option of " \ + "assert_can_deploy_all, and add a separate assertion " \ + "test with the arguments added explicitly" + assert_equal message, e.message + end + + it "fails if some actions in together_with are not resolvable" do + action, together_with, exclude = 3.times.map { flexmock } + flexmock(self) + .should_receive(:BulkAssertAtomicActions) + .with(action, exclude: exclude) + .and_return([[], []]) + flexmock(self) + .should_receive(:BulkAssertAtomicActions) + .with(together_with, exclude: exclude) + .and_return([[], + [flexmock(name: "some"), flexmock(name: "action")]]) + + e = assert_raises(Minitest::Assertion) do + assert_can_deploy_all( + action, exclude: exclude, together_with: together_with + ) + end + message = + "could not validate some non-Syskit actions given " \ + "to `together_with` in assert_can_deploy_all: 'action', " \ + "'some', probably because of " \ + "missing arguments. 
If you are passing a profile or action " \ + "interface and do not require to test against that particular " \ + "action, pass it to the 'exclude' argument" + assert_equal message, e.message + end + + it "runs syskit_run_deploy_in_bulk with all actions" do + @test_profile.define("test", @cmp_m.use(@srv_m => @task_m)) + @test_profile.define("test_42", @cmp_m.use(@srv_m => @task_m)) + + actions, skipped = BulkAssertAtomicActions( + [ + @test_profile.test_def, + @test_profile.test_42_def, + @task_m.to_instance_requirements + .use_deployment(@deployment_m) + ] + ) + flexmock(self) + .should_receive(:syskit_run_deploy_in_bulk) + .with(actions, compute_policies: true, compute_deployments: true) + + assert skipped.empty? + assert_can_deploy_all( + together_with: @task_m.to_instance_requirements + .use_deployment(@deployment_m) + ) + end + end + describe ".each_combination" do it "calculates and yields each possible combination of its arguments" do result = ProfileAssertions.each_combination( diff --git a/test/test_dynamic_port_binding.rb b/test/test_dynamic_port_binding.rb index 759225df9..3fafc3e20 100644 --- a/test/test_dynamic_port_binding.rb +++ b/test/test_dynamic_port_binding.rb @@ -382,6 +382,45 @@ module Syskit @task = syskit_stub_deploy_configure_and_start(@task_m) end + describe "init policy" do + attr_reader :task, :port_binding + + before do + @port_binding = flexmock + @accessor = DynamicPortBinding::Accessor.new(@port_binding) + flexmock(@accessor) + .should_receive(:create_accessor) + .explicitly + .with(@task.out_port).and_return { @task.out_port.reader } + end + + it "expects no policy if init_policy is not called" do + flexmock(@task.out_port) + .should_receive(:reader) + .with({}) + + @accessor.create_accessor(@task.out_port) + end + + it "expects init: true policy if init_policy(true) is called" do + @task.out_port.model.init_policy(true) + flexmock(@task.out_port) + .should_receive(:reader) + .with({ init: true }) + + @accessor.create_accessor(@task.out_port) + end + + it "expects init: false policy if init_policy(false) is called" do + @task.out_port.model.init_policy(false) + flexmock(@task.out_port) + .should_receive(:reader) + .with({ init: false }) + + @accessor.create_accessor(@task.out_port) + end + end + describe "#update" do attr_reader :task, :port_binding @@ -520,7 +559,21 @@ def wait_until_connected(accessor) reader.attach_to_task(task) reader.update - assert_equal({ type: :buffer, size: 20 }, reader.resolved_accessor.policy) + assert_equal({ type: :buffer, size: 20, init: nil }, + reader.resolved_accessor.policy) + end + + it "does not override existing :init value in policy" do + reader = Models::DynamicPortBinding + .create(@task_m.out_port) + .instanciate + .to_data_accessor(type: :buffer, size: 20, init: true) + task = syskit_stub_deploy_and_configure(@task_m) + reader.attach_to_task(task) + reader.update + + assert_equal({ type: :buffer, size: 20, init: true }, + reader.resolved_accessor.policy) end describe "#read_new" do diff --git a/test/test_exceptions.rb b/test/test_exceptions.rb index af3b71b08..08f4f1d42 100644 --- a/test/test_exceptions.rb +++ b/test/test_exceptions.rb @@ -95,9 +95,11 @@ module Syskit arg: 1, conf: ["default"], read_only: false - Chain 1 is needed by the following definitions: + T(arg: 2, conf: ["default"], read_only: false, \ + test_dev: device(D, as: test)) is needed by the following definitions: Test.test2_def - Chain 2 is needed by the following definitions: + T(arg: 1, conf: ["default"], read_only: false, \ + test_dev: device(D, 
as: test)) is needed by the following definitions: Test.test1_def PP assert_equal expected, formatted.gsub(//, "").chomp @@ -166,4 +168,83 @@ module Syskit assert_equal expected, formatted.gsub(//, "").chomp end end + + describe ConflictingDeploymentAllocation do + # This exception appears only in early_deploy context + + attr_reader :net_gen, :profile + + before do + Roby.app.using_task_library "orogen_syskit_tests" + + task_m = OroGen.orogen_syskit_tests.Empty + cmp_m = Syskit::Composition.new_submodel + cmp_m.add task_m, as: "task" + + @net_gen = NetworkGeneration::SystemNetworkGenerator.new( + @net_gen_plan = Roby::Plan.new, + default_deployment_group: default_deployment_group, + early_deploy: true + ) + @net_gen.default_deployment_group.use_deployment( + OroGen::Deployments.syskit_tests_empty => "test_" + ) + @net_gen.merge_solver.merge_task_contexts_with_same_agent = true + + @profile = Actions::Profile.new("Test") + @profile.define("test1", cmp_m) + .use("task" => task_m.with_arguments(arg: 1)) + @profile.define("test2", cmp_m) + .use("task" => task_m.with_arguments(arg: 2)) + + @old_early_deply = Syskit.conf.early_deploy? + Syskit.conf.early_deploy = true + end + + after do + Syskit.conf.early_deploy = @old_early_deply + end + + it "displays deployment allocation conflicts, depicts one failed merge chain " \ + "and list non deployed toplevel definitions" do + e = assert_raises(ConflictingDeploymentAllocation) do + net_gen.compute_system_network( + [profile.test1_def, profile.test2_def] + ) + end + formatted = PP.pp(e, +"") + + expected = <<~PP.chomp + deployed task 'test_syskit_tests_empty' from deployment \ + 'syskit_tests_empty' defined in 'orogen_syskit_tests' on 'localhost' is \ + assigned to 2 tasks. Below is the list of \ + the dependent non-deployed actions. Right after the list is \ + a detailed explanation of why the first two tasks are not merged: + OroGen.orogen_syskit_tests.Empty(arg: 1, conf: ["default"], \ + orocos_name: test_syskit_tests_empty, read_only: false) is needed by the following definitions: + Test.test1_def + OroGen.orogen_syskit_tests.Empty(arg: 2, conf: ["default"], \ + orocos_name: test_syskit_tests_empty, read_only: false) is needed by the following definitions: + Test.test2_def + Chain 1 cannot be merged in chain 2: + Chain 1: + OroGen.orogen_syskit_tests.Empty + no owners + arguments: + orocos_name: "test_syskit_tests_empty", + read_only: false, + conf: ["default"], + arg: 1 + Chain 2: + OroGen.orogen_syskit_tests.Empty + no owners + arguments: + orocos_name: "test_syskit_tests_empty", + read_only: false, + conf: ["default"], + arg: 2 + PP + assert_equal expected, formatted.gsub(//, "").chomp + end + end end