From 78c753eabaac204835fb816a6c5594d4520e96c0 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Thu, 5 Sep 2024 19:08:31 -0300 Subject: [PATCH 001/158] chore: use app.log_dir log_dir does not exist within this context --- lib/syskit/process_managers/remote/server/server.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/syskit/process_managers/remote/server/server.rb b/lib/syskit/process_managers/remote/server/server.rb index 8d550fe38..42361e8cf 100644 --- a/lib/syskit/process_managers/remote/server/server.rb +++ b/lib/syskit/process_managers/remote/server/server.rb @@ -337,7 +337,7 @@ def handle_command(socket) # :nodoc: create_log_dir(time_tag, metadata) socket.write(RET_YES) rescue StandardError => e - warn "failed to create log directory #{log_dir}: "\ + warn "failed to create log directory #{app.log_dir}: "\ "#{e.message}" (e.backtrace || []).each do |line| warn " #{line}" From ff5831453b91d0212da5f335088932cd413a77a0 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Wed, 4 Dec 2024 09:17:35 -0300 Subject: [PATCH 002/158] chore: move log transfer to log_runtime_archive --- lib/syskit/cli/log_runtime_archive.rb | 22 +++++++++++ lib/syskit/cli/log_runtime_archive_main.rb | 44 +++++++++++++++++++++- 2 files changed, 65 insertions(+), 1 deletion(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 66e01a84e..f1224d93a 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -2,6 +2,7 @@ require "archive/tar/minitar" require "sys/filesystem" +require "syskit/process_managers/remote/protocol" module Syskit module CLI @@ -45,6 +46,27 @@ def process_root_folder end end + # Transfer logs from a process server to the main computer server + # + # @param [Pathname] src_dir the log folder on the process server + # @param [Params] server_params the FTP server parameters: + # { host, port, certificate, user, password } + def process_transfer(src_dir, 
server_params) + host, port = server_params[:host], server_params[:port] + socket = + begin TCPSocket.new(host, port) + rescue Errno::ECONNREFUSED => e + raise e.class, "cannot contact process server at " \ + "'#{host}:#{port}': #{e.message}" + end + socket.write(ProcessManagers::Remote::COMMAND_LOG_UPLOAD_FILE) + + candidates = self.class.find_all_dataset_folders(src_dir) + candidates.each do |child| + Marshal.dump([server_params, Pathname(child)], socket) + end + end + # Manages folder available space # # The method will check if there is enough space to save more log files diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index aa536b575..84fa4a49d 100644 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -6,6 +6,7 @@ require "pathname" require "thor" require "syskit/cli/log_runtime_archive" +require "lib/syskit/roby_app/log_transfer_server/spawn_server" module Syskit module CLI @@ -16,7 +17,6 @@ def self.exit_on_failure? 
end desc "watch", "watch a dataset root folder and call archiver" - option :period, type: :numeric, default: 600, desc: "polling period in seconds" option :max_size, @@ -61,6 +61,42 @@ def archive(root_dir, target_dir) archiver.process_root_folder end + desc "watch_transfer", "watches a dataset root folder \ + and periodically performs transfer" + option :period, + type: :numeric, default: 600, desc: "polling period in seconds" + option :max_size, + type: :numeric, default: 10_000, desc: "max log size in MB" + default_task def watch_transfer(src_dir, tgt_dir, server_params) + loop do + begin + transfer(src_dir, tgt_dir, server_params) + rescue Errno::ENOSPC + next + end + + puts "Transferred pending logs, sleeping #{options[:period]}s" + sleep options[:period] + end + end + + desc "transfer", "transfers the datasets" + option :max_size, + type: :numeric, default: 10_000, desc: "max log size in MB" + def transfer(src_dir, tgt_dir, server_params) + src_dir = validate_directory_exists(src_dir) + tgt_dir = validate_directory_exists(tgt_dir) + archiver = make_archiver(src_dir, tgt_dir) + + archiver.process_transfer(src_dir, server_params) + end + + desc "transfer_server", "creates the log transfer FTP server \ + that runs on the main computer" + def transfer_server(tgt_dir, user, password, certfile) + create_server(tgt_dir, user, password, certfile) + end + no_commands do def validate_directory_exists(dir) dir = Pathname.new(dir) @@ -80,6 +116,12 @@ def make_archiver(root_dir, target_dir) logger: logger, max_archive_size: options[:max_size] * (1024**2) ) end + + def create_server(tgt_dir, user, password, certfile) + RobyApp::LogTransferServer::SpawnServer.new( + tgt_dir, user, password, certfile + ) + end end end end From 593289e45919b56b76318c82c264de6dec6fa36b Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 5 Dec 2024 18:30:29 -0300 Subject: [PATCH 003/158] test: add tests for transfer and watch_transfer --- lib/syskit/cli/log_runtime_archive.rb | 3 +- 
test/cli/test_log_runtime_archive_main.rb | 80 +++++++++++++++++++++++ 2 files changed, 82 insertions(+), 1 deletion(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index f1224d93a..6ba025189 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -52,7 +52,8 @@ def process_root_folder # @param [Params] server_params the FTP server parameters: # { host, port, certificate, user, password } def process_transfer(src_dir, server_params) - host, port = server_params[:host], server_params[:port] + host = server_params[:host] + port = server_params[:port] socket = begin TCPSocket.new(host, port) rescue Errno::ECONNREFUSED => e diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index e30adb9d0..178ec754f 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -2,6 +2,7 @@ require "syskit/test/self" require "syskit/cli/log_runtime_archive_main" +require "syskit/roby_app/tmp_root_ca" module Syskit module CLI @@ -128,6 +129,78 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) end end + describe "#watch_transfer" do + before do + @src_dir = make_tmppath + @tgt_dir = make_tmppath + host = "127.0.0.1" + ca = RobyApp::TmpRootCA.new(host) + user = "nilvo" + password = "nilvo123" + + server = spawn_server(@tgt_dir, user, password, ca) + port = server.port + + @server_params = { + host: host, port: port, certificate: "", + user: user, password: password + } + end + + it "calls transfer with the specified period" do + quit = Class.new(RuntimeError) + called = 0 + flexmock(LogRuntimeArchive) + .new_instances + .should_receive(:process_transfer) + .pass_thru do + called += 1 + raise quit if called == 3 + end + + tic = Time.now + assert_raises(quit) do + LogRuntimeArchiveMain.start( + ["watch_transfer", + @src_dir, @tgt_dir, @server_params, "--period", 0.5] + ) + end + + assert called 
== 3 + assert_operator(Time.now - tic, :>, 0.9) + end + end + + describe "#transfer" do + before do + @src_dir = make_tmppath + @tgt_dir = make_tmppath + end + + it "raises ArgumentError if src_dir does not exist" do + e = assert_raises ArgumentError do + call_transfer("/does/not/exist", @tgt_dir, {}) + end + assert_equal "/does/not/exist does not exist, or is not a directory", + e.message + end + + it "raises ArgumentError if tgt_dir does not exist" do + e = assert_raises ArgumentError do + call_transfer(@src_dir, "/does/not/exist", {}) + end + assert_equal "/does/not/exist does not exist, or is not a directory", + e.message + end + + # Call 'transfer' function instead of 'watch' to call transfer once + def call_transfer(src_dir, tgt_dir, params) + LogRuntimeArchiveMain.start( + ["transfer", src_dir, tgt_dir, params] + ) + end + end + # Mock files sizes in bytes # @param [Array] size of files in MB def mock_files_size(sizes) @@ -149,6 +222,13 @@ def mock_available_space(total_available_disk_space) end end + def spawn_server(tgt_dir, user, password, cert) + LogRuntimeArchiveMain.start( + ["transfer_server", + tgt_dir, user, password, cert.private_certificate_path] + ) + end + def assert_deleted_files(deleted_files) if deleted_files.empty? files = @archive_dir.each_child.select(&:file?) 
From 6739824f71528a0124633e3fbbae17da717f7fac Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Fri, 6 Dec 2024 11:05:37 -0300 Subject: [PATCH 004/158] chore: general fixes and variable renaming --- lib/syskit/cli/log_runtime_archive.rb | 54 ++++++++++------ lib/syskit/cli/log_runtime_archive_main.rb | 51 ++++++++++----- test/cli/test_log_runtime_archive.rb | 69 ++++++++++++++++++-- test/cli/test_log_runtime_archive_main.rb | 75 ++++++++++------------ 4 files changed, 168 insertions(+), 81 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 6ba025189..e612d1b74 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -3,6 +3,7 @@ require "archive/tar/minitar" require "sys/filesystem" require "syskit/process_managers/remote/protocol" +require 'net/ftp' module Syskit module CLI @@ -19,7 +20,7 @@ class LogRuntimeArchive DEFAULT_MAX_ARCHIVE_SIZE = 10_000_000_000 # 10G def initialize( - root_dir, target_dir, + root_dir, target_dir: nil, logger: LogRuntimeArchive.null_logger, max_archive_size: DEFAULT_MAX_ARCHIVE_SIZE ) @@ -46,26 +47,18 @@ def process_root_folder end end - # Transfer logs from a process server to the main computer server + # Creates a FTP server and decides which logs to transfer # - # @param [Pathname] src_dir the log folder on the process server + # @param [Pathname] root_dir the log folder on the process server # @param [Params] server_params the FTP server parameters: - # { host, port, certificate, user, password } - def process_transfer(src_dir, server_params) - host = server_params[:host] - port = server_params[:port] - socket = - begin TCPSocket.new(host, port) - rescue Errno::ECONNREFUSED => e - raise e.class, "cannot contact process server at " \ - "'#{host}:#{port}': #{e.message}" - end - socket.write(ProcessManagers::Remote::COMMAND_LOG_UPLOAD_FILE) - - candidates = self.class.find_all_dataset_folders(src_dir) + # { user, password, certfile_path, 
interface, port } + def process_root_folder_transfer(server_params) + ftp = self.class.connect_to_remote_server(server_params) + candidates = self.class.find_all_dataset_folders(@root_dir) candidates.each do |child| - Marshal.dump([server_params, Pathname(child)], socket) + process_dataset_transfer(child, ftp) end + self.class.disconnect_from_remote_server(ftp) end # Manages folder available space @@ -106,9 +99,10 @@ def ensure_free_space(free_space_low_limit, free_space_delete_until) def process_dataset(child, full:) use_existing = true + basename = child.basename.to_s loop do open_archive_for( - child.basename.to_s, use_existing: use_existing + basename, use_existing: use_existing ) do |io| if io.tell > @max_archive_size use_existing = false @@ -127,6 +121,15 @@ def process_dataset(child, full:) end end + def process_dataset_transfer(child, ftp) + basename = child.basename.to_s + self.class.transfer_dataset( + @root_dir / basename, + basename, + ftp + ) + end + # Create or open an archive # # The method will find an archive to open or create, do it and @@ -170,6 +173,17 @@ def find_last_archive_index(basename) end end + def self.connect_to_remote_server(server_params) + ftp = Net::FTP.new + ftp.connect(server_params[:interface], server_params[:port]) + ftp.login(server_params[:user], server_params[:password]) + ftp + end + + def self.disconnect_from_remote_server(ftp) + ftp.close if ftp + end + # Find all dataset-looking folders within a root log folder def self.find_all_dataset_folders(root_dir) candidates = root_dir.enum_for(:each_entry).map do |child| @@ -278,6 +292,10 @@ def self.null_logger logger end + def self.transfer_dataset(local_path, remote_path, ftp) + ftp.putbinaryfile(local_path, remote_path) + end + # Archive the given dataset # # @param [IO] archive_io the IO of the target archive diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index 84fa4a49d..c8dff5b9f 100644 --- 
a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -6,7 +6,7 @@ require "pathname" require "thor" require "syskit/cli/log_runtime_archive" -require "lib/syskit/roby_app/log_transfer_server/spawn_server" +require "syskit/roby_app/log_transfer_server/spawn_server" module Syskit module CLI @@ -52,7 +52,7 @@ def self.exit_on_failure? def archive(root_dir, target_dir) root_dir = validate_directory_exists(root_dir) target_dir = validate_directory_exists(target_dir) - archiver = make_archiver(root_dir, target_dir) + archiver = make_archiver(root_dir, target_dir: target_dir) archiver.ensure_free_space( options[:free_space_low_limit] * 1_000_000, @@ -67,10 +67,13 @@ def archive(root_dir, target_dir) type: :numeric, default: 600, desc: "polling period in seconds" option :max_size, type: :numeric, default: 10_000, desc: "max log size in MB" - default_task def watch_transfer(src_dir, tgt_dir, server_params) + def watch_transfer( # rubocop:disable Metrics/ParameterLists + base_log_dir, user, password, certfile_path, interface, port + ) loop do begin - transfer(src_dir, tgt_dir, server_params) + transfer(base_log_dir, user, password, certfile_path, + interface, port) rescue Errno::ENOSPC next end @@ -83,18 +86,27 @@ def archive(root_dir, target_dir) desc "transfer", "transfers the datasets" option :max_size, type: :numeric, default: 10_000, desc: "max log size in MB" - def transfer(src_dir, tgt_dir, server_params) - src_dir = validate_directory_exists(src_dir) - tgt_dir = validate_directory_exists(tgt_dir) - archiver = make_archiver(src_dir, tgt_dir) - - archiver.process_transfer(src_dir, server_params) + def transfer( # rubocop:disable Metrics/ParameterLists + base_log_dir, user, password, certfile_path, interface, port + ) + server_params = { + user: user, password: password, certfile_path: certfile_path, + interface: interface, port: port + } + base_log_dir = validate_directory_exists(base_log_dir) + archiver = 
make_archiver(base_log_dir) + + archiver.process_root_folder_transfer(server_params) end desc "transfer_server", "creates the log transfer FTP server \ that runs on the main computer" - def transfer_server(tgt_dir, user, password, certfile) - create_server(tgt_dir, user, password, certfile) + def transfer_server( # rubocop:disable Metrics/ParameterLists + tgt_log_dir, user, password, certfile_path, interface, port + ) + create_server( + tgt_log_dir, user, password, certfile_path, interface, port + ) end no_commands do @@ -108,19 +120,24 @@ def validate_directory_exists(dir) dir end - def make_archiver(root_dir, target_dir) + def make_archiver(root_dir, target_dir: nil) logger = Logger.new($stdout) Syskit::CLI::LogRuntimeArchive.new( - root_dir, target_dir, + root_dir, target_dir: target_dir, logger: logger, max_archive_size: options[:max_size] * (1024**2) ) end - def create_server(tgt_dir, user, password, certfile) - RobyApp::LogTransferServer::SpawnServer.new( - tgt_dir, user, password, certfile + def create_server( # rubocop:disable Metrics/ParameterLists + tgt_log_dir, user, password, certfile_path, + interface, port + ) + server = RobyApp::LogTransferServer::SpawnServer.new( + tgt_log_dir, user, password, certfile_path, + interface: interface, port: port ) + server.run end end end diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 7bb32b97e..b268b7f81 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -365,7 +365,7 @@ module CLI describe ".process_root_folder" do before do @archive_dir = make_tmppath - @process = LogRuntimeArchive.new(@root, @archive_dir) + @process = LogRuntimeArchive.new(@root, target_dir: @archive_dir) end it "archives all folders, the last one only partially" do @@ -391,7 +391,7 @@ module CLI .write(test1 = Base64.encode64(Random.bytes(1024))) (dataset / "test.2.log").write(Base64.encode64(Random.bytes(1024))) process = LogRuntimeArchive.new( - @root, 
@archive_dir, max_archive_size: 1024 + @root, target_dir: @archive_dir, max_archive_size: 1024 ) process.process_root_folder @@ -419,7 +419,7 @@ module CLI (dataset / "test.2.log") .write(test2 = Base64.encode64(Random.bytes(128))) process = LogRuntimeArchive.new( - @root, @archive_dir, max_archive_size: 1024 + @root, target_dir: @archive_dir, max_archive_size: 1024 ) process.process_root_folder @@ -445,7 +445,7 @@ module CLI test1 = make_random_file "test.1.log", root: dataset test2 = make_random_file "test.2.log", root: dataset process = LogRuntimeArchive.new( - @root, @archive_dir, max_archive_size: 1024 + @root, target_dir: @archive_dir, max_archive_size: 1024 ) process.process_root_folder @@ -516,6 +516,65 @@ def should_archive_dataset(dataset, archive_basename, full:) end end + describe ".process_transfer" do + before do + @process = LogRuntimeArchive.new(@root) + interface = "127.0.0.1" + ca = RobyApp::TmpRootCA.new(interface) + @params = { + interface: interface, port: 0, + certfile_path: ca.private_certificate_path, + user: "nilvo", password: "nilvo123" + } + @target_dir = make_tmppath + @threads = [] + + create_server + end + + it "transfers datasets" do + ftp = connect_to_server + + datasets = [ + make_valid_folder("20220434-2023"), + make_valid_folder("20220434-2024"), + make_valid_folder("20220434-2025") + ] + + datasets.map do |dataset| + transfer_dataset(ftp, @root / dataset, @target_dir / dataset) + end + + datasets.each do |dataset| + assert (@target_dir / dataset).file? 
+ end + end + + def create_server + thread = Thread.new do + server = RobyApp::LogTransferServer::SpawnServer.new( + @target_dir, @params[:user], @params[:password], + @params[:certfile_path], interface: @params[:interface], + port: @params[:port] + ) + server.run + end + thread.join + end + + def transfer_dataset(ftp, src_path, tgt_path) + ftp.putbinaryfile(src_path, tgt_path) + end + + def connect_to_server + ftp = Net::FTP.new + ftp.connect(@params[:interface], @params[:port]) + ftp.login(@params[:user], @params[:password]) + ftp.passive = true + ftp + end + end + describe "#ensure_free_space" do before do @archive_dir = make_tmppath @@ -523,7 +582,7 @@ def should_archive_dataset(dataset, archive_basename, full:) 10.times { |i| (@archive_dir / i.to_s).write(i.to_s) } - @archiver = LogRuntimeArchive.new(@root, @archive_dir) + @archiver = LogRuntimeArchive.new(@root, target_dir: @archive_dir) end it "does nothing if there is enough free space" do diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index 178ec754f..6d9375253 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -131,20 +131,20 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) describe "#watch_transfer" do before do - @src_dir = make_tmppath - @tgt_dir = make_tmppath - host = "127.0.0.1" - ca = RobyApp::TmpRootCA.new(host) + @base_log_dir = make_tmppath + @tgt_log_dir = make_tmppath + interface = "127.0.0.1" + port = 0 + ca = RobyApp::TmpRootCA.new(interface) user = "nilvo" password = "nilvo123" - server = spawn_server(@tgt_dir, user, password, ca) - port = server.port - @server_params = { - host: host, port: port, certificate: "", - user: user, password: password + user: user, password: password, + certfile_path: ca.private_certificate_path, + interface: interface, port: port } + server = spawn_server end it "calls transfer with the specified period" do @@ -152,7 +152,8 @@ def 
call_archive(root_path, archive_path, low_limit, freed_limit) called = 0 flexmock(LogRuntimeArchive) .new_instances - .should_receive(:process_transfer) + .should_receive(:process_root_folder_transfer) + .with(@server_params) .pass_thru do called += 1 raise quit if called == 3 @@ -160,10 +161,14 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) tic = Time.now assert_raises(quit) do - LogRuntimeArchiveMain.start( - ["watch_transfer", - @src_dir, @tgt_dir, @server_params, "--period", 0.5] - ) + args = [ + "watch_transfer", + @base_log_dir, + *@server_params.values, + "--period", 0.5 + ] + pp "***", args + LogRuntimeArchiveMain.start(args) end assert called == 3 @@ -173,31 +178,17 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) describe "#transfer" do before do - @src_dir = make_tmppath - @tgt_dir = make_tmppath - end - - it "raises ArgumentError if src_dir does not exist" do - e = assert_raises ArgumentError do - call_transfer("/does/not/exist", @tgt_dir, {}) - end - assert_equal "/does/not/exist does not exist, or is not a directory", - e.message - end - - it "raises ArgumentError if tgt_dir does not exist" do - e = assert_raises ArgumentError do - call_transfer(@src_dir, "/does/not/exist", {}) - end - assert_equal "/does/not/exist does not exist, or is not a directory", - e.message + @base_log_dir = make_tmppath end # Call 'transfer' function instead of 'watch' to call transfer once - def call_transfer(src_dir, tgt_dir, params) - LogRuntimeArchiveMain.start( - ["transfer", src_dir, tgt_dir, params] - ) + def call_transfer(src_dir, params) + args = [ + "transfer", + src_dir, + *params.values + ] + LogRuntimeArchiveMain.start(args) end end @@ -222,11 +213,13 @@ def mock_available_space(total_available_disk_space) end end - def spawn_server(tgt_dir, user, password, cert) - LogRuntimeArchiveMain.start( - ["transfer_server", - tgt_dir, user, password, cert.private_certificate_path] - ) + def spawn_server + args = [ + 
"transfer_server", + @tgt_log_dir, + *@server_params.values + ] + LogRuntimeArchiveMain.start(args) end def assert_deleted_files(deleted_files) From daf2442aa114cd5d22f9878ac42d6d3d00f74f03 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Mon, 9 Dec 2024 14:43:11 -0300 Subject: [PATCH 005/158] chore: fix rubocop offenses --- lib/syskit/cli/log_runtime_archive.rb | 5 +- lib/syskit/cli/log_runtime_archive_main.rb | 8 +-- .../log_transfer_server/spawn_server.rb | 2 +- test/cli/test_log_runtime_archive.rb | 6 +-- test/cli/test_log_runtime_archive_main.rb | 53 +++++++++++++------ 5 files changed, 49 insertions(+), 25 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index e612d1b74..ed279e4dd 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -3,7 +3,7 @@ require "archive/tar/minitar" require "sys/filesystem" require "syskit/process_managers/remote/protocol" -require 'net/ftp' +require "net/ftp" module Syskit module CLI @@ -177,11 +177,12 @@ def self.connect_to_remote_server(server_params) ftp = Net::FTP.new ftp.connect(server_params[:interface], server_params[:port]) ftp.login(server_params[:user], server_params[:password]) + ftp.passive = true ftp end def self.disconnect_from_remote_server(ftp) - ftp.close if ftp + ftp&.close end # Find all dataset-looking folders within a root log folder diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index c8dff5b9f..d997c8456 100644 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -109,7 +109,7 @@ def transfer_server( # rubocop:disable Metrics/ParameterLists ) end - no_commands do + no_commands do # rubocop:disable Metrics/BlockLength def validate_directory_exists(dir) dir = Pathname.new(dir) unless dir.directory? 
@@ -124,8 +124,9 @@ def make_archiver(root_dir, target_dir: nil) logger = Logger.new($stdout) Syskit::CLI::LogRuntimeArchive.new( - root_dir, target_dir: target_dir, - logger: logger, max_archive_size: options[:max_size] * (1024**2) + root_dir, + target_dir: target_dir, logger: logger, + max_archive_size: options[:max_size] * (1024**2) ) end @@ -138,6 +139,7 @@ def create_server( # rubocop:disable Metrics/ParameterLists interface: interface, port: port ) server.run + server end end end diff --git a/lib/syskit/roby_app/log_transfer_server/spawn_server.rb b/lib/syskit/roby_app/log_transfer_server/spawn_server.rb index 1405d5c57..7b9260c45 100644 --- a/lib/syskit/roby_app/log_transfer_server/spawn_server.rb +++ b/lib/syskit/roby_app/log_transfer_server/spawn_server.rb @@ -85,7 +85,7 @@ def wait_until_stopped puts "FTP server started. Press ENTER or c-C to stop it" $stdout.flush begin - gets + $stdin.readline rescue Interrupt puts "Interrupt" end diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index b268b7f81..86e6ddb5b 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -534,7 +534,7 @@ def should_archive_dataset(dataset, archive_basename, full:) it "transfers datasets" do ftp = connect_to_server - + datasets = [ make_valid_folder("20220434-2023"), make_valid_folder("20220434-2024"), @@ -554,8 +554,8 @@ def create_server thread = Thread.new do server = RobyApp::LogTransferServer::SpawnServer.new( @target_dir, @params[:user], @params[:password], - @params[:certfile_path], interface: @params[:interface], - port: @params[:port] + @params[:certfile_path], + interface: @params[:interface], port: @params[:port] ) server.run end diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index 6d9375253..7658a7d52 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -129,22 +129,48 @@ def 
call_archive(root_path, archive_path, low_limit, freed_limit) end end + describe "#transfer_server" do + before do + @tgt_log_dir = make_tmppath + interface = "127.0.0.1" + ca = RobyApp::TmpRootCA.new(interface) + + server_params = { + user: "nilvo", password: "nilvo123", + certfile_path: ca.private_certificate_path, + interface: interface, port: 0 + } + end + end + describe "#watch_transfer" do before do @base_log_dir = make_tmppath @tgt_log_dir = make_tmppath interface = "127.0.0.1" - port = 0 ca = RobyApp::TmpRootCA.new(interface) - user = "nilvo" - password = "nilvo123" @server_params = { - user: user, password: password, + user: "nilvo", password: "nilvo123", certfile_path: ca.private_certificate_path, - interface: interface, port: port + interface: interface, port: 0 } - server = spawn_server + @threads = [] + server = nil + flexmock(RobyApp::LogTransferServer::SpawnServer) + .should_receive(:new) + .with_any_args + .pass_thru do |arg| + server = arg + end + call_create_server + @server = server + end + + after do + @server.stop + @server.join + @threads.each(&:kill) end it "calls transfer with the specified period" do @@ -167,13 +193,17 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) *@server_params.values, "--period", 0.5 ] - pp "***", args LogRuntimeArchiveMain.start(args) end assert called == 3 assert_operator(Time.now - tic, :>, 0.9) end + + def call_create_server + cli = LogRuntimeArchiveMain.new + cli.create_server(@tgt_log_dir, *@server_params.values) + end end describe "#transfer" do @@ -213,15 +243,6 @@ def mock_available_space(total_available_disk_space) end end - def spawn_server - args = [ - "transfer_server", - @tgt_log_dir, - *@server_params.values - ] - LogRuntimeArchiveMain.start(args) - end - def assert_deleted_files(deleted_files) if deleted_files.empty? files = @archive_dir.each_child.select(&:file?) 
From 3d19f545f6f26dafa250ae23e600ff601c430117 Mon Sep 17 00:00:00 2001 From: kapeps Date: Tue, 17 Dec 2024 17:17:34 -0300 Subject: [PATCH 006/158] feat: can_deploy_all method in profile assertions --- lib/syskit/test/profile_assertions.rb | 138 ++++++++++++++++------ test/test/test_profile_assertions.rb | 162 ++++++++++++++++++++++++++ 2 files changed, 264 insertions(+), 36 deletions(-) diff --git a/lib/syskit/test/profile_assertions.rb b/lib/syskit/test/profile_assertions.rb index 9606d51e8..d67aa3170 100644 --- a/lib/syskit/test/profile_assertions.rb +++ b/lib/syskit/test/profile_assertions.rb @@ -206,12 +206,7 @@ def assert_is_self_contained( action_or_profile = subject_syskit_model, message: "%s is not self contained", exclude: [], **instanciate_options ) - actions = validate_actions(action_or_profile, exclude: exclude) do |skip| - flunk "could not validate some non-Syskit actions: #{skip}, " \ - "probably because of required arguments. Pass the action to " \ - "the 'exclude' option of #{__method__}, and add a separate " \ - "assertion test with the arguments added explicitly" - end + actions = assert_actions(action_or_profile, exclude: exclude) actions.each do |act| syskit_assert_action_is_self_contained( @@ -308,22 +303,9 @@ def assert_can_instanciate( action_or_profile = subject_syskit_model, exclude: [], together_with: [] ) - actions = validate_actions(action_or_profile, exclude: exclude) do |skip| - flunk "could not validate some non-Syskit actions: #{skip}, " \ - "probably because of required arguments. Pass the action to " \ - "the 'exclude' option of #{__method__}, and add a separate " \ - "assertion test with the arguments added explicitly" - end + actions = assert_actions(action_or_profile, exclude: exclude) - together_with = - validate_actions(together_with, exclude: exclude) do |skip| - flunk "could not validate some non-Syskit actions given to " \ - "`together_with` in #{__method__}: #{skip}, " \ - "probably because of " \ - "missing arguments. 
If you are passing a profile or " \ - "action interface and do not require to test against " \ - "that particular action, pass it to the 'exclude' argument" - end + together_with = assert_together_with(together_with, exclude: exclude) actions.each do |action| assert_can_instanciate_together(action, *together_with) @@ -420,22 +402,9 @@ def assert_can_deploy( action_or_profile = subject_syskit_model, exclude: [], together_with: [] ) - actions = validate_actions(action_or_profile, exclude: exclude) do |skip| - flunk "could not validate some non-Syskit actions: #{skip}, " \ - "probably because of required arguments. Pass the action to " \ - "the 'exclude' option of #{__method__}, and add a separate " \ - "assertion test with the arguments added explicitly" - end + actions = assert_actions(action_or_profile, exclude: exclude) - together_with = - validate_actions(together_with, exclude: exclude) do |skip| - flunk "could not validate some non-Syskit actions given to " \ - "`together_with` in #{__method__}: #{skip}, " \ - "probably because of " \ - "missing arguments. If you are passing a profile or action " \ - "interface and do not require to test against that " \ - "particular action, pass it to the 'exclude' argument" - end + together_with = assert_together_with(together_with, exclude: exclude) actions.each do |action| assert_can_deploy_together(action, *together_with) @@ -465,6 +434,62 @@ def assert_can_deploy_together(*actions) e.message, e.backtrace end + # Tests that the following syskit-generated actions can be ALL deployed at the + # same time, that is they result in a valid, non-abstract network whose all + # components have a deployment + # + # When resolving actions that are not directly defined from profile + # definitions, the method will attempte to resolve method action by + # calling them. If there is a problem, pass the action model to the + # `exclude` argument. 
+ # + # In particular, in the presence of action methods with required + # arguments, run one assert first with the action method excluded and + # another with that action and sample arguments. + # + # @param action_or_profile if an action interface or profile, test all + # definitions that are reachable from it. In the case of action interfaces, + # this means looking into method actions and action state machines. + # @param together_with test that all actions in `action_or_profile` + # can be instanciated when all actions in `together_with` are instanciated + # at the same time. This can be used if the former depend on the presence + # of the latter, or if you want to test against conflicts. + def assert_can_deploy_all( + action_or_profile = subject_syskit_model, + exclude: [], together_with: [] + ) + actions = assert_actions(action_or_profile, exclude: exclude) + + together_with = assert_together_with(together_with, exclude: exclude) + + assert_can_deploy_all_together(actions, *together_with) + end + + # Spec-style call for {#assert_can_deploy_all} + # + # @example verify that each definition of a profile can be deployed + # describe MyBundle::Profiles::MyProfile do + # it { can_deploy_all } + # end + def can_deploy_all( + action_or_profile = subject_syskit_model, together_with: [] + ) + assert_can_deploy_all(action_or_profile, together_with: together_with) + end + + # Tests that the given syskit-generated actions can be ALL deployed together + # + # It is stronger (and therefore includes) + # {assert_can_deploy_together} + def assert_can_deploy_all_together(*actions) + syskit_run_deploy_in_bulk( + actions.flatten, compute_policies: true, compute_deployments: true + ) + rescue Minitest::Assertion, StandardError => e + raise ProfileAssertionFailed.new("deploy all together", actions, e), + e.message, e.backtrace + end + def syskit_run_deploy_in_bulk( actions, compute_policies:, compute_deployments: ) @@ -482,6 +507,12 @@ def syskit_run_deploy_in_bulk( end end + # 
@api private + # + # Yield the cartesian product of a list of list of actions + # + # Given a list of list of actions [A, B, C], it yields all possible + # combinations [a, b, c], where a is from A, b is from B and c from C. def self.each_combination(*arrays) return enum_for(__method__, *arrays) unless block_given? @@ -556,6 +587,10 @@ def can_configure_together(*actions) end # @api private + # Validate actions, and yields rejected actions, excluding those included in + # 'exclude'. + # + # @param action_or_profile an action interface or profile def validate_actions(action_or_profile, exclude: []) actions, skipped = BulkAssertAtomicActions(action_or_profile, exclude: exclude) @@ -567,6 +602,37 @@ def validate_actions(action_or_profile, exclude: []) actions end + + # @api private + # Assert actions, flunking in case actions could not be validated, excluding + # those included in 'exclude'. + # + # @param action_or_profile an action interface or profile + def assert_actions(action_or_profile, exclude: []) + validate_actions(action_or_profile, exclude: exclude) do |skip| + caller_method = caller[2].split("`").last.split("'").first + flunk "could not validate some non-Syskit actions: #{skip}, " \ + "probably because of required arguments. Pass the action to " \ + "the 'exclude' option of #{caller_method}, and add a separate " \ + "assertion test with the arguments added explicitly" + end + end + + # @api private + # Assert together_with actions, flunking in case actions could not be + # validated,excluding those included in 'exclude'. + # + # @param together_with an action interface or profile + def assert_together_with(together_with, exclude: []) + validate_actions(together_with, exclude: exclude) do |skip| + caller_method = caller[2].split("`").last.split("'").first + flunk "could not validate some non-Syskit actions given to " \ + "`together_with` in #{caller_method}: #{skip}, probably " \ + "because of missing arguments. 
If you are passing a profile " \ + "or action interface and do not require to test against that " \ + "particular action, pass it to the 'exclude' argument" + end + end end end end diff --git a/test/test/test_profile_assertions.rb b/test/test/test_profile_assertions.rb index 6d7ed0134..bd80dca83 100644 --- a/test/test/test_profile_assertions.rb +++ b/test/test/test_profile_assertions.rb @@ -636,6 +636,168 @@ module Test end end + describe "assert_can_deploy_all" do + include ProfileAssertions + + # Needed by ProfileAssertions + attr_reader :subject_syskit_model + + before do + @test_profile = Actions::Profile.new("TestProfile") + @deployment_m = syskit_stub_deployment_model(@task_m) + @subject_syskit_model = @test_profile + end + + it "passes for definitions that refer to deployed tasks" do + @test_profile.use_deployment @deployment_m + @test_profile.define( + "test", @cmp_m.use(@srv_m => @task_m) + ) + assert_can_deploy_all(@test_profile) + end + + it "fails for definitions that have tasks that are not deployed" do + @test_profile.define "test", @cmp_m.use(@srv_m => @task_m) + e = assert_raises(ProfileAssertions::ProfileAssertionFailed) do + assert_can_deploy_all(@test_profile) + end + + assert_match( + /cannot deploy the following tasks.*Task.*child test of Cmp/m, + PP.pp(e.each_original_exception.first, +"") + ) + end + + it "fails for definitions whose services are represented by tags" do + @test_profile.tag "test", @srv_m + @test_profile.define( + "test", @cmp_m.use(@srv_m => @test_profile.test_tag) + ) + e = assert_raises(ProfileAssertions::ProfileAssertionFailed) do + assert_can_deploy_all(@test_profile) + end + assert_match( + /cannot\ find\ a\ concrete\ implementation.* + TestProfile.test_tag/mx, + PP.pp(e.each_original_exception.first, +"") + ) + end + + it "fails for definitions with abstract elements that are not tags" do + @test_profile.define "test", @cmp_m + e = assert_raises(ProfileAssertions::ProfileAssertionFailed) do + 
assert_can_deploy_all(@test_profile) + end + assert_match( + /cannot\ find\ a\ concrete\ implementation.* + Models::Placeholder/mx, + PP.pp(e.each_original_exception.first, +"") + ) + end + + it "fails for definitions that use tags from other profiles" do + other_profile = Actions::Profile.new("Other") + other_profile.tag "test", @srv_m + @test_profile.define( + "test", @cmp_m.use(@srv_m => other_profile.test_tag) + ) + + e = assert_raises(ProfileAssertions::ProfileAssertionFailed) do + assert_can_deploy_all(@test_profile) + end + assert_match( + /cannot find a concrete implementation.*Other.test_tag/m, + PP.pp(e.each_original_exception.first, +"") + ) + end + + it "handles plain instance requirements" do + assert_can_deploy_all( + @cmp_m + .to_instance_requirements + .use_deployment(@deployment_m) + .use(@srv_m => @task_m) + ) + end + + it "allows deploying together with the actions or profile" do + @test_profile.define("test", @cmp_m.use(@srv_m => @task_m)) + assert_can_deploy_all( + @test_profile.test_def, + together_with: @task_m.to_instance_requirements + .use_deployment(@deployment_m) + ) + end + + it "fails if some actions are not resolvable" do + flexmock(self) + .should_receive(:BulkAssertAtomicActions) + .with(action = flexmock, exclude: (excluded = flexmock)) + .and_return([[], + [flexmock(name: "some"), flexmock(name: "action")]]) + + e = assert_raises(Minitest::Assertion) do + assert_can_deploy_all(action, exclude: excluded) + end + message = "could not validate some non-Syskit actions: 'action', " \ + "'some', probably because of required arguments. 
Pass " \ + "the action to the 'exclude' option of " \ + "assert_can_deploy_all, and add a separate assertion " \ + "test with the arguments added explicitly" + assert_equal message, e.message + end + + it "fails if some actions in together_with are not resolvable" do + action, together_with, exclude = 3.times.map { flexmock } + flexmock(self) + .should_receive(:BulkAssertAtomicActions) + .with(action, exclude: exclude) + .and_return([[], []]) + flexmock(self) + .should_receive(:BulkAssertAtomicActions) + .with(together_with, exclude: exclude) + .and_return([[], + [flexmock(name: "some"), flexmock(name: "action")]]) + + e = assert_raises(Minitest::Assertion) do + assert_can_deploy_all( + action, exclude: exclude, together_with: together_with + ) + end + message = + "could not validate some non-Syskit actions given " \ + "to `together_with` in assert_can_deploy_all: 'action', " \ + "'some', probably because of " \ + "missing arguments. If you are passing a profile or action " \ + "interface and do not require to test against that particular " \ + "action, pass it to the 'exclude' argument" + assert_equal message, e.message + end + + it "runs syskit_run_deploy_in_bulk with all actions" do + @test_profile.define("test", @cmp_m.use(@srv_m => @task_m)) + @test_profile.define("test_42", @cmp_m.use(@srv_m => @task_m)) + + actions, skipped = BulkAssertAtomicActions( + [ + @test_profile.test_def, + @test_profile.test_42_def, + @task_m.to_instance_requirements + .use_deployment(@deployment_m) + ] + ) + flexmock(self) + .should_receive(:syskit_run_deploy_in_bulk) + .with(actions, compute_policies: true, compute_deployments: true) + + assert skipped.empty? 
+ assert_can_deploy_all( + together_with: @task_m.to_instance_requirements + .use_deployment(@deployment_m) + ) + end + end + describe ".each_combination" do it "calculates and yields each possible combination of its arguments" do result = ProfileAssertions.each_combination( From d0d9c60f165106556b0b6101706de80b5e5b8e63 Mon Sep 17 00:00:00 2001 From: kapeps Date: Wed, 18 Dec 2024 10:32:44 -0300 Subject: [PATCH 007/158] fix: flatten and splat the actions arguments instead of creating the assert_can_deploy_all_together method --- lib/syskit/test/profile_assertions.rb | 65 ++++++++++++++------------- 1 file changed, 33 insertions(+), 32 deletions(-) diff --git a/lib/syskit/test/profile_assertions.rb b/lib/syskit/test/profile_assertions.rb index d67aa3170..9838cdd13 100644 --- a/lib/syskit/test/profile_assertions.rb +++ b/lib/syskit/test/profile_assertions.rb @@ -206,8 +206,10 @@ def assert_is_self_contained( action_or_profile = subject_syskit_model, message: "%s is not self contained", exclude: [], **instanciate_options ) - actions = assert_actions(action_or_profile, exclude: exclude) - + actions = validate_assert_actions_argument( + action_or_profile, + exclude: exclude + ) actions.each do |act| syskit_assert_action_is_self_contained( act, message: message, **instanciate_options @@ -303,10 +305,14 @@ def assert_can_instanciate( action_or_profile = subject_syskit_model, exclude: [], together_with: [] ) - actions = assert_actions(action_or_profile, exclude: exclude) - - together_with = assert_together_with(together_with, exclude: exclude) - + actions = validate_assert_actions_argument( + action_or_profile, + exclude: exclude + ) + together_with = validate_assert_together_with_argument( + together_with, + exclude: exclude + ) actions.each do |action| assert_can_instanciate_together(action, *together_with) end @@ -402,10 +408,14 @@ def assert_can_deploy( action_or_profile = subject_syskit_model, exclude: [], together_with: [] ) - actions = 
assert_actions(action_or_profile, exclude: exclude) - - together_with = assert_together_with(together_with, exclude: exclude) - + actions = validate_assert_actions_argument( + action_or_profile, + exclude: exclude + ) + together_with = validate_assert_together_with_argument( + together_with, + exclude: exclude + ) actions.each do |action| assert_can_deploy_together(action, *together_with) end @@ -458,11 +468,15 @@ def assert_can_deploy_all( action_or_profile = subject_syskit_model, exclude: [], together_with: [] ) - actions = assert_actions(action_or_profile, exclude: exclude) - - together_with = assert_together_with(together_with, exclude: exclude) - - assert_can_deploy_all_together(actions, *together_with) + actions = validate_assert_actions_argument( + action_or_profile, + exclude: exclude + ) + together_with = validate_assert_together_with_argument( + together_with, + exclude: exclude + ) + assert_can_deploy_together(*actions.flatten, *together_with) end # Spec-style call for {#assert_can_deploy_all} @@ -477,19 +491,6 @@ def can_deploy_all( assert_can_deploy_all(action_or_profile, together_with: together_with) end - # Tests that the given syskit-generated actions can be ALL deployed together - # - # It is stronger (and therefore includes) - # {assert_can_deploy_together} - def assert_can_deploy_all_together(*actions) - syskit_run_deploy_in_bulk( - actions.flatten, compute_policies: true, compute_deployments: true - ) - rescue Minitest::Assertion, StandardError => e - raise ProfileAssertionFailed.new("deploy all together", actions, e), - e.message, e.backtrace - end - def syskit_run_deploy_in_bulk( actions, compute_policies:, compute_deployments: ) @@ -608,9 +609,9 @@ def validate_actions(action_or_profile, exclude: []) # those included in 'exclude'. 
# # @param action_or_profile an action interface or profile - def assert_actions(action_or_profile, exclude: []) + def validate_assert_actions_argument(action_or_profile, exclude: []) validate_actions(action_or_profile, exclude: exclude) do |skip| - caller_method = caller[2].split("`").last.split("'").first + caller_method = caller_locations(3, 1).first.label flunk "could not validate some non-Syskit actions: #{skip}, " \ "probably because of required arguments. Pass the action to " \ "the 'exclude' option of #{caller_method}, and add a separate " \ @@ -623,9 +624,9 @@ def assert_actions(action_or_profile, exclude: []) # validated,excluding those included in 'exclude'. # # @param together_with an action interface or profile - def assert_together_with(together_with, exclude: []) + def validate_assert_together_with_argument(together_with, exclude: []) validate_actions(together_with, exclude: exclude) do |skip| - caller_method = caller[2].split("`").last.split("'").first + caller_method = caller_locations(3, 1).first.label flunk "could not validate some non-Syskit actions given to " \ "`together_with` in #{caller_method}: #{skip}, probably " \ "because of missing arguments. 
If you are passing a profile " \ From c321268d3e19112d7142d868c3af24e9fcefd00a Mon Sep 17 00:00:00 2001 From: Debora Date: Wed, 18 Dec 2024 17:36:46 -0300 Subject: [PATCH 008/158] fix: Moving the FTPUpload to syskit/runtime/remote/server diretory --- .../runtime/remote/server/ftp_upload.rb | 98 +++++++++++++++++++ .../runtime/remote/server/log_upload_state.rb | 29 ++++++ 2 files changed, 127 insertions(+) create mode 100644 lib/syskit/runtime/remote/server/ftp_upload.rb create mode 100644 lib/syskit/runtime/remote/server/log_upload_state.rb diff --git a/lib/syskit/runtime/remote/server/ftp_upload.rb b/lib/syskit/runtime/remote/server/ftp_upload.rb new file mode 100644 index 000000000..61d3b2e42 --- /dev/null +++ b/lib/syskit/runtime/remote/server/ftp_upload.rb @@ -0,0 +1,98 @@ +# frozen_string_literal: true + +module Syskit + module Runtime + module Remote + module Server + # Encapsulation of the log file upload process + class FTPUpload + def initialize( # rubocop:disable Metrics/ParameterLists + host, port, certificate, user, password, file, + max_upload_rate: Float::INFINITY, + implicit_ftps: false + ) + + @host = host + @port = port + @certificate = certificate + @user = user + @password = password + @file = file + + @max_upload_rate = Float(max_upload_rate) + @implicit_ftps = implicit_ftps + end + + # Create a temporary file with the FTP server's public key, to pass + # to FTP.open + # + # @yieldparam [String] path the certificate path + def with_certificate + Tempfile.create do |cert_io| + cert_io.write @certificate + cert_io.flush + yield(cert_io.path) + end + end + + # Open the FTP connection + # + # @yieldparam [Net::FTP] + def open + with_certificate do |cert_path| + Net::FTP.open( + @host, + private_data_connection: false, port: @port, + implicit_ftps: @implicit_ftps, + ssl: { verify_mode: OpenSSL::SSL::VERIFY_PEER, + ca_file: cert_path } + ) do |ftp| + ftp.login(@user, @password) + yield(ftp) + end + end + end + + # Open the connection and transfer the 
file + # + # @return [LogUploadState::Result] + def open_and_transfer + open { |ftp| transfer(ftp) } + LogUploadState::Result.new(@file, true, nil) + rescue StandardError => e + LogUploadState::Result.new(@file, false, e.message) + end + + # Do transfer the file through the given connection + # + # @param [Net::FTP] ftp + def transfer(ftp) + last = Time.now + File.open(@file) do |file_io| + ftp.storbinary("STOR #{File.basename(@file)}", + file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| + now = Time.now + rate_limit(buf.size, now, last) + last = Time.now + end + end + end + + # @api private + # + # Sleep when needed to keep the expected transfer rate + def rate_limit(chunk_size, now, last) + duration = now - last + exp_duration = chunk_size / @max_upload_rate + # Do not wait, but do not try to "make up" for the bandwidth + # we did not use. The goal is to not affect the rest of the + # system + return if duration > exp_duration + + sleep(exp_duration - duration) + end + end + end + end + end +end diff --git a/lib/syskit/runtime/remote/server/log_upload_state.rb b/lib/syskit/runtime/remote/server/log_upload_state.rb new file mode 100644 index 000000000..be6b60e0d --- /dev/null +++ b/lib/syskit/runtime/remote/server/log_upload_state.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +module Syskit + module Runtime + module Remote + module Server + # State of the asynchronous file transfers managed by {Server} + class LogUploadState + attr_reader :pending_count + + Result = Struct.new :file, :success, :message do + def success? 
+ success + end + end + + def initialize(pending_count, results) + @pending_count = pending_count + @results = results + end + + def each_result(&block) + @results.each(&block) + end + end + end + end + end +end From 7ff3ef1136bb5410ec64768ee8f2190b1be8992f Mon Sep 17 00:00:00 2001 From: Debora Date: Wed, 18 Dec 2024 17:40:04 -0300 Subject: [PATCH 009/158] fix: refactoring the process_dataset_transfer to use the FTPUpload --- lib/syskit/cli/log_runtime_archive.rb | 57 +++++++------------ lib/syskit/cli/log_runtime_archive_main.rb | 49 ++++++++-------- .../runtime/remote/server/ftp_upload.rb | 4 +- 3 files changed, 48 insertions(+), 62 deletions(-) mode change 100644 => 100755 lib/syskit/cli/log_runtime_archive_main.rb diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index ed279e4dd..365841bae 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -31,6 +31,19 @@ def initialize( @max_archive_size = max_archive_size end + # Iterate over all datasets in a Roby log root folder and transfer them + # through FTP server + # + # @param [Params] server_params the FTP server parameters: + # { host, port, certificate, user, password, implicit_ftps, max_upload_rate } + def process_root_folder_transfer(server_params) + candidates = self.class.find_all_dataset_folders(@root_dir) + candidates.each do |child| + basename = child.basename.to_s + process_dataset_transfer(basename, server_params) + end + end + # Iterate over all datasets in a Roby log root folder and archive them # # The method assumes the last dataset is the current one (i.e. 
the running @@ -47,20 +60,6 @@ def process_root_folder end end - # Creates a FTP server and decides which logs to transfer - # - # @param [Pathname] root_dir the log folder on the process server - # @param [Params] server_params the FTP server parameters: - # { user, password, certfile_path, interface, port } - def process_root_folder_transfer(server_params) - ftp = self.class.connect_to_remote_server(server_params) - candidates = self.class.find_all_dataset_folders(@root_dir) - candidates.each do |child| - process_dataset_transfer(child, ftp) - end - self.class.disconnect_from_remote_server(ftp) - end - # Manages folder available space # # The method will check if there is enough space to save more log files @@ -121,13 +120,15 @@ def process_dataset(child, full:) end end - def process_dataset_transfer(child, ftp) - basename = child.basename.to_s - self.class.transfer_dataset( - @root_dir / basename, - basename, - ftp + def process_dataset_transfer(file, server_params) + ftp = Runtime::Remote::Server::FTPUpload.new( + server_params[:host], server_params[:port], + server_params[:certificate], server_params[:user], + server_params[:password], @root_dir / file, + max_upload_rate: server_params[:max_upload_rate] || Float::INFINITY, + implicit_ftps: server_params[:implicit_ftps] ) + ftp.open_and_transfer end # Create or open an archive @@ -173,18 +174,6 @@ def find_last_archive_index(basename) end end - def self.connect_to_remote_server(server_params) - ftp = Net::FTP.new - ftp.connect(server_params[:interface], server_params[:port]) - ftp.login(server_params[:user], server_params[:password]) - ftp.passive = true - ftp - end - - def self.disconnect_from_remote_server(ftp) - ftp&.close - end - # Find all dataset-looking folders within a root log folder def self.find_all_dataset_folders(root_dir) candidates = root_dir.enum_for(:each_entry).map do |child| @@ -293,10 +282,6 @@ def self.null_logger logger end - def self.transfer_dataset(local_path, remote_path, ftp) - 
ftp.putbinaryfile(local_path, remote_path) - end - # Archive the given dataset # # @param [IO] archive_io the IO of the target archive diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb old mode 100644 new mode 100755 index d997c8456..8b0e8752b --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -65,15 +65,15 @@ def archive(root_dir, target_dir) and periodically performs transfer" option :period, type: :numeric, default: 600, desc: "polling period in seconds" - option :max_size, - type: :numeric, default: 10_000, desc: "max log size in MB" + option :max_upload_rate, + type: :numeric, default: 10, desc: "max upload rate in Mbps" def watch_transfer( # rubocop:disable Metrics/ParameterLists - base_log_dir, user, password, certfile_path, interface, port + source_dir, user, password, certificate, host, port, implicit_ftps ) loop do begin - transfer(base_log_dir, user, password, certfile_path, - interface, port) + transfer(source_dir, user, password, certificate, host, port, + implicit_ftps) rescue Errno::ENOSPC next end @@ -84,29 +84,30 @@ def watch_transfer( # rubocop:disable Metrics/ParameterLists end desc "transfer", "transfers the datasets" - option :max_size, - type: :numeric, default: 10_000, desc: "max log size in MB" + option :max_upload_rate, + type: :numeric, default: 10, desc: "max upload rate in Mbps" def transfer( # rubocop:disable Metrics/ParameterLists - base_log_dir, user, password, certfile_path, interface, port + source_dir, user, password, certificate, host, port, implicit_ftps ) + source_dir = validate_directory_exists(source_dir) + archiver = make_archiver(source_dir) + server_params = { - user: user, password: password, certfile_path: certfile_path, - interface: interface, port: port + host: host, port: port, certificate: certificate, + user: user, password: password, + max_upload_rate: options[:max_upload_rate], + implicit_ftps: implicit_ftps } - 
base_log_dir = validate_directory_exists(base_log_dir) - archiver = make_archiver(base_log_dir) - archiver.process_root_folder_transfer(server_params) end desc "transfer_server", "creates the log transfer FTP server \ that runs on the main computer" def transfer_server( # rubocop:disable Metrics/ParameterLists - tgt_log_dir, user, password, certfile_path, interface, port + target_dir, user, password, certificate, host, implicit_ftps ) - create_server( - tgt_log_dir, user, password, certfile_path, interface, port - ) + create_server(target_dir, user, password, certificate, host, port, + implicit_ftps) end no_commands do # rubocop:disable Metrics/BlockLength @@ -131,15 +132,15 @@ def make_archiver(root_dir, target_dir: nil) end def create_server( # rubocop:disable Metrics/ParameterLists - tgt_log_dir, user, password, certfile_path, - interface, port + target_dir, user, password, certificate, host, port, implicit_ftps ) - server = RobyApp::LogTransferServer::SpawnServer.new( - tgt_log_dir, user, password, certfile_path, - interface: interface, port: port + RobyApp::LogTransferServer::SpawnServer.new( + target_dir, user, password, + certificate, + interface: host, + port: port, + implicit_ftps: implicit_ftps ) - server.run - server end end end diff --git a/lib/syskit/runtime/remote/server/ftp_upload.rb b/lib/syskit/runtime/remote/server/ftp_upload.rb index 61d3b2e42..169086264 100644 --- a/lib/syskit/runtime/remote/server/ftp_upload.rb +++ b/lib/syskit/runtime/remote/server/ftp_upload.rb @@ -45,7 +45,7 @@ def open private_data_connection: false, port: @port, implicit_ftps: @implicit_ftps, ssl: { verify_mode: OpenSSL::SSL::VERIFY_PEER, - ca_file: cert_path } + ca_file: cert_path } ) do |ftp| ftp.login(@user, @password) yield(ftp) @@ -70,7 +70,7 @@ def transfer(ftp) last = Time.now File.open(@file) do |file_io| ftp.storbinary("STOR #{File.basename(@file)}", - file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| + file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| now = Time.now 
rate_limit(buf.size, now, last) last = Time.now From 6b0d92aa0c8bc62a79ce6d58dffb330abdfcceb9 Mon Sep 17 00:00:00 2001 From: kapeps Date: Mon, 16 Dec 2024 15:08:03 -0300 Subject: [PATCH 010/158] chore: avoid re-resolving a query in dynamic port bindings if the current selection matches --- lib/syskit/dynamic_port_binding.rb | 16 ++++++++++++++++ lib/syskit/queries/port_matcher.rb | 6 +++--- test/queries/test_port_matcher.rb | 7 +++++++ 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/lib/syskit/dynamic_port_binding.rb b/lib/syskit/dynamic_port_binding.rb index b503a28d9..4c443f6ae 100644 --- a/lib/syskit/dynamic_port_binding.rb +++ b/lib/syskit/dynamic_port_binding.rb @@ -99,6 +99,10 @@ def attach_to_task(task) # the port was updated, and false otherwise. The tuple's second element # is the new resolved port which may be nil if no ports can be found def update + if @resolved_port && @port_resolver&.current_selection_valid?(@resolved_port) + return false, @resolved_port + end + port = @port_resolver&.update return false, @resolved_port if @resolved_port == port @@ -294,6 +298,10 @@ def initialize(plan, matcher) @last_provider_task = nil end + def current_selection_valid?(port) + @matcher === port + end + def update port = @matcher.each_in_plan(@plan).first port&.to_actual_port @@ -319,6 +327,10 @@ def initialize(port) @port = port end + def current_selection_valid?(port) + !!port.component.plan + end + def update @port if @port.component.plan end @@ -337,6 +349,10 @@ def initialize(port) @port = port end + def current_selection_valid?(port) + !!port.component.to_task.plan + end + def update @port if @port.component.to_task.plan end diff --git a/lib/syskit/queries/port_matcher.rb b/lib/syskit/queries/port_matcher.rb index e670fc00b..bce3c7c40 100644 --- a/lib/syskit/queries/port_matcher.rb +++ b/lib/syskit/queries/port_matcher.rb @@ -57,9 +57,9 @@ def with_type(type) def ===(port) return unless port.kind_of?(Port) - (@name_filter === object.name) && - 
(!@type_filter || @type_filter == object.type) && - (@component_matcher === object.component) + (@name_filter === port.name) && + (!@type_filter || @type_filter == port.type) && + (@component_matcher === port.component) end def each_in_plan(plan, &block) diff --git a/test/queries/test_port_matcher.rb b/test/queries/test_port_matcher.rb index ab22e9221..f34148e07 100644 --- a/test/queries/test_port_matcher.rb +++ b/test/queries/test_port_matcher.rb @@ -30,6 +30,13 @@ module Queries PortMatcher.new(@task_m).with_name("out_d") end + it "can find ports with ===" do + plan.add(task = @task_m.new) + matcher = PortMatcher.new(@task_m).with_name("out_d") + assert matcher === task.out_d_port + refute matcher === task.out_f_port + end + it "optionally allows to filter with a name pattern" do plan.add(task = @task_m.new) assert_matcher_finds [task.out_d_port, task.out_f_port], From 5bd512bb404957a7550e5156ee5c2fb85996f0c3 Mon Sep 17 00:00:00 2001 From: Debora Date: Mon, 23 Dec 2024 10:52:43 -0300 Subject: [PATCH 011/158] chore: Reorganize files --- .../log_transfer_server/ftp_upload.rb | 102 +++++++++++++++++ .../log_transfer_server/log_upload_state.rb | 27 +++++ .../runtime/remote/server/ftp_upload.rb | 98 ----------------- .../runtime/remote/server/log_upload_state.rb | 29 ----- lib/syskit/runtime/server/driver.rb | 41 +++++++ lib/syskit/runtime/server/spawn_server.rb | 104 ++++++++++++++++++ .../server/write_only_disk_file_system.rb | 38 +++++++ 7 files changed, 312 insertions(+), 127 deletions(-) create mode 100644 lib/syskit/roby_app/log_transfer_server/ftp_upload.rb create mode 100644 lib/syskit/roby_app/log_transfer_server/log_upload_state.rb delete mode 100644 lib/syskit/runtime/remote/server/ftp_upload.rb delete mode 100644 lib/syskit/runtime/remote/server/log_upload_state.rb create mode 100644 lib/syskit/runtime/server/driver.rb create mode 100644 lib/syskit/runtime/server/spawn_server.rb create mode 100644 lib/syskit/runtime/server/write_only_disk_file_system.rb 
diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb new file mode 100644 index 000000000..25aab5731 --- /dev/null +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -0,0 +1,102 @@ +# frozen_string_literal: true + +require "syskit/roby_app/log_transfer_server/log_upload_state" + +module Syskit + module RobyApp + module LogTransferServer + # Encapsulation of the log file upload process + class FTPUpload + def initialize( # rubocop:disable Metrics/ParameterLists + host, port, certificate, user, password, file, + max_upload_rate: Float::INFINITY, + implicit_ftps: false + ) + + @host = host + @port = port + @certificate = certificate + @user = user + @password = password + @file = file + + @max_upload_rate = Float(max_upload_rate) + @implicit_ftps = implicit_ftps + end + + # Create a temporary file with the FTP server's public key, to pass + # to FTP.open + # + # @yieldparam [String] path the certificate path + def with_certificate + Tempfile.create do |cert_io| + cert_io.write @certificate + cert_io.flush + yield(cert_io.path) + end + end + + # Open the FTP connection + # + # @yieldparam [Net::FTP] + def open + with_certificate do |cert_path| + Net::FTP.open( + @host, + private_data_connection: false, port: @port, + implicit_ftps: @implicit_ftps, + ssl: { verify_mode: OpenSSL::SSL::VERIFY_PEER, + ca_file: cert_path } + ) do |ftp| + ftp.login(@user, @password) + pp "login" + yield(ftp) + end + end + end + + # Open the connection and transfer the file + # + # @return [LogUploadState::Result] + def open_and_transfer + pp "open and transfer" + open { |ftp| transfer(ftp) } + LogUploadState::Result.new(@file, true, nil) + rescue StandardError => e + LogUploadState::Result.new(@file, false, e.message) + end + + # Do transfer the file through the given connection + # + # @param [Net::FTP] ftp + def transfer(ftp) + last = Time.now + pp "transfer file:", @file + File.open(@file) do |file_io| + pp 
"File io:", file_io + ftp.storbinary("STOR #{File.basename(@file)}", + file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| + now = Time.now + rate_limit(buf.size, now, last) + last = Time.now + end + end + end + + # @api private + # + # Sleep when needed to keep the expected transfer rate + def rate_limit(chunk_size, now, last) + duration = now - last + exp_duration = chunk_size / @max_upload_rate + # Do not wait, but do not try to "make up" for the bandwidth + # we did not use. The goal is to not affect the rest of the + # system + return if duration > exp_duration + + sleep(exp_duration - duration) + end + end + end + end +end diff --git a/lib/syskit/roby_app/log_transfer_server/log_upload_state.rb b/lib/syskit/roby_app/log_transfer_server/log_upload_state.rb new file mode 100644 index 000000000..709eaab26 --- /dev/null +++ b/lib/syskit/roby_app/log_transfer_server/log_upload_state.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module Syskit + module RobyApp + module LogTransferServer + # State of the asynchronous file transfers managed by {Server} + class LogUploadState + attr_reader :pending_count + + Result = Struct.new :file, :success, :message do + def success? 
+ success + end + end + + def initialize(pending_count, results) + @pending_count = pending_count + @results = results + end + + def each_result(&block) + @results.each(&block) + end + end + end + end +end diff --git a/lib/syskit/runtime/remote/server/ftp_upload.rb b/lib/syskit/runtime/remote/server/ftp_upload.rb deleted file mode 100644 index 169086264..000000000 --- a/lib/syskit/runtime/remote/server/ftp_upload.rb +++ /dev/null @@ -1,98 +0,0 @@ -# frozen_string_literal: true - -module Syskit - module Runtime - module Remote - module Server - # Encapsulation of the log file upload process - class FTPUpload - def initialize( # rubocop:disable Metrics/ParameterLists - host, port, certificate, user, password, file, - max_upload_rate: Float::INFINITY, - implicit_ftps: false - ) - - @host = host - @port = port - @certificate = certificate - @user = user - @password = password - @file = file - - @max_upload_rate = Float(max_upload_rate) - @implicit_ftps = implicit_ftps - end - - # Create a temporary file with the FTP server's public key, to pass - # to FTP.open - # - # @yieldparam [String] path the certificate path - def with_certificate - Tempfile.create do |cert_io| - cert_io.write @certificate - cert_io.flush - yield(cert_io.path) - end - end - - # Open the FTP connection - # - # @yieldparam [Net::FTP] - def open - with_certificate do |cert_path| - Net::FTP.open( - @host, - private_data_connection: false, port: @port, - implicit_ftps: @implicit_ftps, - ssl: { verify_mode: OpenSSL::SSL::VERIFY_PEER, - ca_file: cert_path } - ) do |ftp| - ftp.login(@user, @password) - yield(ftp) - end - end - end - - # Open the connection and transfer the file - # - # @return [LogUploadState::Result] - def open_and_transfer - open { |ftp| transfer(ftp) } - LogUploadState::Result.new(@file, true, nil) - rescue StandardError => e - LogUploadState::Result.new(@file, false, e.message) - end - - # Do transfer the file through the given connection - # - # @param [Net::FTP] ftp - def 
transfer(ftp) - last = Time.now - File.open(@file) do |file_io| - ftp.storbinary("STOR #{File.basename(@file)}", - file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| - now = Time.now - rate_limit(buf.size, now, last) - last = Time.now - end - end - end - - # @api private - # - # Sleep when needed to keep the expected transfer rate - def rate_limit(chunk_size, now, last) - duration = now - last - exp_duration = chunk_size / @max_upload_rate - # Do not wait, but do not try to "make up" for the bandwidth - # we did not use. The goal is to not affect the rest of the - # system - return if duration > exp_duration - - sleep(exp_duration - duration) - end - end - end - end - end -end diff --git a/lib/syskit/runtime/remote/server/log_upload_state.rb b/lib/syskit/runtime/remote/server/log_upload_state.rb deleted file mode 100644 index be6b60e0d..000000000 --- a/lib/syskit/runtime/remote/server/log_upload_state.rb +++ /dev/null @@ -1,29 +0,0 @@ -# frozen_string_literal: true - -module Syskit - module Runtime - module Remote - module Server - # State of the asynchronous file transfers managed by {Server} - class LogUploadState - attr_reader :pending_count - - Result = Struct.new :file, :success, :message do - def success? - success - end - end - - def initialize(pending_count, results) - @pending_count = pending_count - @results = results - end - - def each_result(&block) - @results.each(&block) - end - end - end - end - end -end diff --git a/lib/syskit/runtime/server/driver.rb b/lib/syskit/runtime/server/driver.rb new file mode 100644 index 000000000..eba0825ad --- /dev/null +++ b/lib/syskit/runtime/server/driver.rb @@ -0,0 +1,41 @@ +# frozen_string_literal: true + +module Syskit + module Runtime + module Server + # Driver for log transfer FTP server + class Driver + def initialize(user, password, data_dir) + @user = user + @password = password + @data_dir = data_dir + end + + # Return true if the user should be allowed to log in. 
+ # @param user [String] + # @param password [String] + # @return [Boolean] + # + # Depending upon the server's auth_level, some of these parameters + # may be nil. A parameter with a nil value is not required for + # authentication. Here are the parameters that are non-nil for + # each auth_level: + # * :user (user) + # * :password (user, password) + + def authenticate(user, password) + user == @user && + (password.nil? || password == @password) + end + + # Return the file system to use for a user. + # @param user [String] + # @return A file system driver + + def file_system(_user) + WriteOnlyDiskFileSystem.new(@data_dir) + end + end + end + end +end diff --git a/lib/syskit/runtime/server/spawn_server.rb b/lib/syskit/runtime/server/spawn_server.rb new file mode 100644 index 000000000..edbd32ca8 --- /dev/null +++ b/lib/syskit/runtime/server/spawn_server.rb @@ -0,0 +1,104 @@ +# frozen_string_literal: true + +require "English" + +module Syskit + module Runtime + module Server # :nodoc: + # Whether we should configure client and server to use implicit FTPs by + # default + # + # This workarounds some incompatibility between net-ftp and ftpd. They + # don't manage connecting properly in implicit mode before 2.7.0, and + # don't manage connecting properly in explicit mode afterwards + def self.use_implicit_ftps? + RUBY_VERSION >= "2.7.0" + end + + # Class responsible for spawning an FTP server for transfering logs + class SpawnServer + attr_reader :port + + # tgt_dir must be an absolute path + def initialize( + tgt_dir, + user, + password, + certfile_path, + interface: "127.0.0.1", + implicit_ftps: Server.use_implicit_ftps?, + port: 0, + session_timeout: default_session_timeout, + nat_ip: nil, + passive_ports: nil, + debug: false, + verbose: false + ) + @debug = debug + driver = Driver.new(user, password, tgt_dir) + server = Ftpd::FtpServer.new(driver) + server.interface = interface + server.port = port + server.tls = implicit_ftps ? 
:implicit : :explicit + server.passive_ports = passive_ports + server.certfile_path = certfile_path + server.auth_level = Ftpd.const_get("AUTH_PASSWORD") + server.session_timeout = session_timeout + server.log = make_log + server.nat_ip = nat_ip + @server = server + Thread.abort_on_exception = false + @server.start + sleep 0.1 until Thread.abort_on_exception + Thread.abort_on_exception = false + @port = @server.bound_port + display_connection_info if verbose + end + + # The user should call this function in order to spawn the server + def run + wait_until_stopped + end + + def stop + dispose + end + + def dispose + @server.stop + end + + def join + @server.join + end + + private + + def display_connection_info + puts "Interface: #{@server.interface}" + puts "Port: #{@server.bound_port}" + puts "TLS: #{@server.tls}" + puts "PID: #{$PROCESS_ID}" + end + + def wait_until_stopped + puts "FTP server started. Press ENTER or c-C to stop it" + $stdout.flush + begin + $stdin.readline + rescue Interrupt + puts "Interrupt" + end + end + + def make_log + @debug && Logger.new($stdout) + end + + def default_session_timeout + Ftpd::FtpServer::DEFAULT_SESSION_TIMEOUT + end + end + end + end +end diff --git a/lib/syskit/runtime/server/write_only_disk_file_system.rb b/lib/syskit/runtime/server/write_only_disk_file_system.rb new file mode 100644 index 000000000..8775214f2 --- /dev/null +++ b/lib/syskit/runtime/server/write_only_disk_file_system.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +module Syskit + module Runtime + module Server + # Custom write-only file system that detects collision between files + class WriteOnlyDiskFileSystem + include Ftpd::DiskFileSystem::Base + include Ftpd::DiskFileSystem::FileWriting + include Ftpd::TranslateExceptions + + def initialize(data_dir) + set_data_dir data_dir + end + + # Write a file to disk if it does not already exist. 
+ # @param ftp_path [String] The virtual path + # @param stream [Ftpd::Stream] Stream that contains the data to write + # + # Called for: + # * STOR + # * STOU + # + # If missing, then these commands are not supported. + + def write(ftp_path, stream) + if Pathname.new(@data_dir + ftp_path).exist? + raise Ftpd::PermanentFileSystemError, + "Can't upload: File already exists" + end + + write_file ftp_path, stream, "wb" + end + translate_exceptions :write + end + end + end +end From a2daf26bcac5456aaf0fa27a198bbefe9a245c95 Mon Sep 17 00:00:00 2001 From: Debora Date: Mon, 23 Dec 2024 10:54:19 -0300 Subject: [PATCH 012/158] fix: Create FTPParameters struct --- lib/syskit/cli/log_runtime_archive.rb | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 365841bae..730dba7ee 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -3,6 +3,7 @@ require "archive/tar/minitar" require "sys/filesystem" require "syskit/process_managers/remote/protocol" +require "syskit/roby_app/log_transfer_server/ftp_upload" require "net/ftp" module Syskit @@ -19,6 +20,9 @@ class CompressionFailed < RuntimeError; end class LogRuntimeArchive DEFAULT_MAX_ARCHIVE_SIZE = 10_000_000_000 # 10G + FTPParameters = Struct.new(:host, :port, :certificate, :user, :password, + :implicit_ftps, :max_upload_rate, keyword_init: true) + def initialize( root_dir, target_dir: nil, logger: LogRuntimeArchive.null_logger, @@ -34,8 +38,7 @@ def initialize( # Iterate over all datasets in a Roby log root folder and transfer them # through FTP server # - # @param [Params] server_params the FTP server parameters: - # { host, port, certificate, user, password, implicit_ftps, max_upload_rate } + # @param [Params] server_params the FTP server parameters def process_root_folder_transfer(server_params) candidates = self.class.find_all_dataset_folders(@root_dir) 
candidates.each do |child| @@ -120,13 +123,12 @@ def process_dataset(child, full:) end end - def process_dataset_transfer(file, server_params) - ftp = Runtime::Remote::Server::FTPUpload.new( - server_params[:host], server_params[:port], - server_params[:certificate], server_params[:user], - server_params[:password], @root_dir / file, - max_upload_rate: server_params[:max_upload_rate] || Float::INFINITY, - implicit_ftps: server_params[:implicit_ftps] + def process_dataset_transfer(file, server) + ftp = RobyApp::LogTransferServer::FTPUpload.new( + server.host, server.port, server.certificate, server.user, + server.password, file, + max_upload_rate: server.max_upload_rate || Float::INFINITY, + implicit_ftps: server.implicit_ftps ) ftp.open_and_transfer end From 7e1f84c4a9cbd38d0257f383fb01983ed568e8ae Mon Sep 17 00:00:00 2001 From: Debora Date: Mon, 23 Dec 2024 10:55:40 -0300 Subject: [PATCH 013/158] fix: Change the spawn_server path required --- lib/syskit/cli/log_runtime_archive_main.rb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index 8b0e8752b..4988f888f 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -6,7 +6,7 @@ require "pathname" require "thor" require "syskit/cli/log_runtime_archive" -require "syskit/roby_app/log_transfer_server/spawn_server" +require "syskit/runtime/server/spawn_server" module Syskit module CLI @@ -104,9 +104,9 @@ def transfer( # rubocop:disable Metrics/ParameterLists desc "transfer_server", "creates the log transfer FTP server \ that runs on the main computer" def transfer_server( # rubocop:disable Metrics/ParameterLists - target_dir, user, password, certificate, host, implicit_ftps + target_dir, user, password, certfile_path, host, implicit_ftps ) - create_server(target_dir, user, password, certificate, host, port, + create_server(target_dir, user, password, 
certfile_path, host, port, implicit_ftps) end @@ -132,11 +132,11 @@ def make_archiver(root_dir, target_dir: nil) end def create_server( # rubocop:disable Metrics/ParameterLists - target_dir, user, password, certificate, host, port, implicit_ftps + target_dir, user, password, certfile_path, host, port, implicit_ftps ) - RobyApp::LogTransferServer::SpawnServer.new( + Runtime::Server::SpawnServer.new( target_dir, user, password, - certificate, + certfile_path, interface: host, port: port, implicit_ftps: implicit_ftps From d1f4cb6f8a34d75c237842cec4f72e88c5c44e98 Mon Sep 17 00:00:00 2001 From: Debora Date: Thu, 26 Dec 2024 15:23:19 -0300 Subject: [PATCH 014/158] fix: Refactor the process_root_folder_transfer --- lib/syskit/cli/log_runtime_archive.rb | 38 ++++++++++++++++++++--- lib/syskit/runtime/server/spawn_server.rb | 1 + 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 730dba7ee..6a3cf59c6 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -2,9 +2,7 @@ require "archive/tar/minitar" require "sys/filesystem" -require "syskit/process_managers/remote/protocol" require "syskit/roby_app/log_transfer_server/ftp_upload" -require "net/ftp" module Syskit module CLI @@ -41,9 +39,9 @@ def initialize( # @param [Params] server_params the FTP server parameters def process_root_folder_transfer(server_params) candidates = self.class.find_all_dataset_folders(@root_dir) + running = candidates.last candidates.each do |child| - basename = child.basename.to_s - process_dataset_transfer(basename, server_params) + process_dataset_transfer(child, server_params, full: child != running) end end @@ -123,13 +121,43 @@ def process_dataset(child, full:) end end - def process_dataset_transfer(file, server) + def process_dataset_transfer(child, server, full:) + # TODO: Create a folder if it does not exist open_dir_for + self.class.transfer_dataset(child, 
server, full: full, logger: @logger) + end + + # Transfer the given dataset + def self.transfer_dataset( + dataset_path, server, + full:, logger: null_logger + ) + logger.info( + "Transfering dataset #{dataset_path} in #{full ? 'full' : 'partial'} mode" + ) + candidates = each_file_from_path(dataset_path).to_a + + complete, candidates = + if full + archive_filter_candidates_full(candidates) + else + archive_filter_candidates_partial(candidates) + end + + candidates.each_with_index do |child_path, i| + transfer_file(child_path, server, logger: logger) + end + + complete + end + + def self.transfer_file(file, server, logger: null_logger) ftp = RobyApp::LogTransferServer::FTPUpload.new( server.host, server.port, server.certificate, server.user, server.password, file, max_upload_rate: server.max_upload_rate || Float::INFINITY, implicit_ftps: server.implicit_ftps ) + logger.info "Transfering #{file}" ftp.open_and_transfer end diff --git a/lib/syskit/runtime/server/spawn_server.rb b/lib/syskit/runtime/server/spawn_server.rb index edbd32ca8..3aafcb409 100644 --- a/lib/syskit/runtime/server/spawn_server.rb +++ b/lib/syskit/runtime/server/spawn_server.rb @@ -1,6 +1,7 @@ # frozen_string_literal: true require "English" +require "syskit/runtime/server/driver" module Syskit module Runtime From fc4836c6e8f70090d406e0042780079c9c0bd4ac Mon Sep 17 00:00:00 2001 From: Debora Date: Fri, 3 Jan 2025 14:32:21 -0300 Subject: [PATCH 015/158] fix: Reorganizing files --- lib/syskit.rb | 2 +- lib/syskit/cli/log_runtime_archive.rb | 9 +- lib/syskit/process_managers/remote/manager.rb | 4 +- lib/syskit/process_managers/remote/server.rb | 4 +- .../remote/server/ftp_upload.rb | 98 ----------------- .../remote/server/log_upload_state.rb | 29 ----- .../process_managers/remote/server/server.rb | 10 +- lib/syskit/roby_app/configuration.rb | 2 +- lib/syskit/roby_app/log_transfer_manager.rb | 2 +- lib/syskit/roby_app/log_transfer_server.rb | 6 +- .../roby_app/log_transfer_server/driver.rb | 41 ------- 
.../log_transfer_server/ftp_upload.rb | 4 +- .../log_transfer_server/spawn_server.rb | 104 ------------------ .../write_only_disk_file_system.rb | 38 ------- lib/syskit/runtime/server/driver.rb | 2 + test/process_managers/test_remote.rb | 2 +- test/roby_app/test_log_transfer_manager.rb | 4 +- .../server}/test_spawn_server.rb | 6 +- 18 files changed, 32 insertions(+), 335 deletions(-) delete mode 100644 lib/syskit/process_managers/remote/server/ftp_upload.rb delete mode 100644 lib/syskit/process_managers/remote/server/log_upload_state.rb delete mode 100644 lib/syskit/roby_app/log_transfer_server/driver.rb delete mode 100644 lib/syskit/roby_app/log_transfer_server/spawn_server.rb delete mode 100644 lib/syskit/roby_app/log_transfer_server/write_only_disk_file_system.rb rename test/{roby_app/spawn_server => runtime/server}/test_spawn_server.rb (97%) diff --git a/lib/syskit.rb b/lib/syskit.rb index 08a8c1b43..dc047072d 100644 --- a/lib/syskit.rb +++ b/lib/syskit.rb @@ -42,7 +42,7 @@ module ProcessManagers require "syskit/roby_app/log_transfer_server" require "syskit/process_managers/process_base" require "syskit/process_managers/status" -require "syskit/process_managers/remote/server/log_upload_state" +require "syskit/roby_app/log_transfer_server/log_upload_state" require "syskit/process_managers/remote/protocol" require "syskit/process_managers/remote/loader" require "syskit/process_managers/remote/manager" diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 6a3cf59c6..5d180dd77 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -19,7 +19,8 @@ class LogRuntimeArchive DEFAULT_MAX_ARCHIVE_SIZE = 10_000_000_000 # 10G FTPParameters = Struct.new(:host, :port, :certificate, :user, :password, - :implicit_ftps, :max_upload_rate, keyword_init: true) + :implicit_ftps, :max_upload_rate, + keyword_init: true) def initialize( root_dir, target_dir: nil, @@ -122,7 +123,6 @@ def 
process_dataset(child, full:) end def process_dataset_transfer(child, server, full:) - # TODO: Create a folder if it does not exist open_dir_for self.class.transfer_dataset(child, server, full: full, logger: @logger) end @@ -132,7 +132,8 @@ def self.transfer_dataset( full:, logger: null_logger ) logger.info( - "Transfering dataset #{dataset_path} in #{full ? 'full' : 'partial'} mode" + "Transfering dataset #{dataset_path} in " \ + "#{full ? 'full' : 'partial'} mode" ) candidates = each_file_from_path(dataset_path).to_a @@ -143,7 +144,7 @@ def self.transfer_dataset( archive_filter_candidates_partial(candidates) end - candidates.each_with_index do |child_path, i| + candidates.each do |child_path| transfer_file(child_path, server, logger: logger) end diff --git a/lib/syskit/process_managers/remote/manager.rb b/lib/syskit/process_managers/remote/manager.rb index 0c1d49945..c30596df9 100644 --- a/lib/syskit/process_managers/remote/manager.rb +++ b/lib/syskit/process_managers/remote/manager.rb @@ -16,7 +16,7 @@ module Remote # Defined here to make sure it is actually defined. Otherwise, the log # state reporting would fail at runtime, and unit-testing for this is # very hard. - LogUploadState = Server::LogUploadState + LogUploadState = RobyApp::LogTransferServer::LogUploadState # Syskit-side interface to the remote process server class Manager @@ -238,7 +238,7 @@ def queue_death_announcement def log_upload_file( host, port, certificate, user, password, localfile, max_upload_rate: Float::INFINITY, - implicit_ftps: RobyApp::LogTransferServer.use_implicit_ftps? + implicit_ftps: Runtime::Server.use_implicit_ftps? 
) socket.write(COMMAND_LOG_UPLOAD_FILE) Marshal.dump( diff --git a/lib/syskit/process_managers/remote/server.rb b/lib/syskit/process_managers/remote/server.rb index a5ede01fd..5f4efdf7f 100644 --- a/lib/syskit/process_managers/remote/server.rb +++ b/lib/syskit/process_managers/remote/server.rb @@ -19,7 +19,7 @@ module Server end require "syskit/process_managers/remote/protocol" -require "syskit/process_managers/remote/server/ftp_upload" -require "syskit/process_managers/remote/server/log_upload_state" +require "syskit/roby_app/log_transfer_server/ftp_upload" +require "syskit/roby_app/log_transfer_server/log_upload_state" require "syskit/process_managers/remote/server/process" require "syskit/process_managers/remote/server/server" diff --git a/lib/syskit/process_managers/remote/server/ftp_upload.rb b/lib/syskit/process_managers/remote/server/ftp_upload.rb deleted file mode 100644 index 9f13b5c44..000000000 --- a/lib/syskit/process_managers/remote/server/ftp_upload.rb +++ /dev/null @@ -1,98 +0,0 @@ -# frozen_string_literal: true - -module Syskit - module ProcessManagers - module Remote - module Server - # Encapsulation of the log file upload process - class FTPUpload - def initialize( # rubocop:disable Metrics/ParameterLists - host, port, certificate, user, password, file, - max_upload_rate: Float::INFINITY, - implicit_ftps: false - ) - - @host = host - @port = port - @certificate = certificate - @user = user - @password = password - @file = file - - @max_upload_rate = Float(max_upload_rate) - @implicit_ftps = implicit_ftps - end - - # Create a temporary file with the FTP server's public key, to pass - # to FTP.open - # - # @yieldparam [String] path the certificate path - def with_certificate - Tempfile.create do |cert_io| - cert_io.write @certificate - cert_io.flush - yield(cert_io.path) - end - end - - # Open the FTP connection - # - # @yieldparam [Net::FTP] - def open - with_certificate do |cert_path| - Net::FTP.open( - @host, - private_data_connection: false, 
port: @port, - implicit_ftps: @implicit_ftps, - ssl: { verify_mode: OpenSSL::SSL::VERIFY_PEER, - ca_file: cert_path } - ) do |ftp| - ftp.login(@user, @password) - yield(ftp) - end - end - end - - # Open the connection and transfer the file - # - # @return [LogUploadState::Result] - def open_and_transfer - open { |ftp| transfer(ftp) } - LogUploadState::Result.new(@file, true, nil) - rescue StandardError => e - LogUploadState::Result.new(@file, false, e.message) - end - - # Do transfer the file through the given connection - # - # @param [Net::FTP] ftp - def transfer(ftp) - last = Time.now - File.open(@file) do |file_io| - ftp.storbinary("STOR #{File.basename(@file)}", - file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| - now = Time.now - rate_limit(buf.size, now, last) - last = Time.now - end - end - end - - # @api private - # - # Sleep when needed to keep the expected transfer rate - def rate_limit(chunk_size, now, last) - duration = now - last - exp_duration = chunk_size / @max_upload_rate - # Do not wait, but do not try to "make up" for the bandwidth - # we did not use. The goal is to not affect the rest of the - # system - return if duration > exp_duration - - sleep(exp_duration - duration) - end - end - end - end - end -end diff --git a/lib/syskit/process_managers/remote/server/log_upload_state.rb b/lib/syskit/process_managers/remote/server/log_upload_state.rb deleted file mode 100644 index 5c5e7602a..000000000 --- a/lib/syskit/process_managers/remote/server/log_upload_state.rb +++ /dev/null @@ -1,29 +0,0 @@ -# frozen_string_literal: true - -module Syskit - module ProcessManagers - module Remote - module Server - # State of the asynchronous file transfers managed by {Server} - class LogUploadState - attr_reader :pending_count - - Result = Struct.new :file, :success, :message do - def success? 
- success - end - end - - def initialize(pending_count, results) - @pending_count = pending_count - @results = results - end - - def each_result(&block) - @results.each(&block) - end - end - end - end - end -end diff --git a/lib/syskit/process_managers/remote/server/server.rb b/lib/syskit/process_managers/remote/server/server.rb index dc62a96d6..716ec69c5 100644 --- a/lib/syskit/process_managers/remote/server/server.rb +++ b/lib/syskit/process_managers/remote/server/server.rb @@ -581,13 +581,15 @@ def log_upload_file(socket, parameters) localfile = log_upload_sanitize_path(Pathname(localfile)) rescue Exception => e # rubocop:disable Lint/RescueException @log_upload_results_queue << - LogUploadState::Result.new(localfile, false, e.message) + RobyApp::LogTransferServer::LogUploadState::Result.new( + localfile, false, e.message + ) return end info "queueing upload of #{localfile} to #{host}:#{port}" @log_upload_command_queue << - FTPUpload.new( + RobyApp::LogTransferServer::FTPUpload.new( host, port, certificate, user, password, localfile, max_upload_rate: max_upload_rate || Float::INFINITY, @@ -631,7 +633,9 @@ def log_upload_state end end - LogUploadState.new(@log_upload_pending.value, results) + RobyApp::LogTransferServer::LogUploadState.new( + @log_upload_pending.value, results + ) end end end diff --git a/lib/syskit/roby_app/configuration.rb b/lib/syskit/roby_app/configuration.rb index ad037210c..045f66ef6 100644 --- a/lib/syskit/roby_app/configuration.rb +++ b/lib/syskit/roby_app/configuration.rb @@ -165,7 +165,7 @@ def initialize(app) target_dir: nil, # Use the app's log dir default_max_upload_rate: Float::INFINITY, max_upload_rates: {}, - implicit_ftps: LogTransferServer.use_implicit_ftps? + implicit_ftps: Runtime::Server.use_implicit_ftps? 
) clear diff --git a/lib/syskit/roby_app/log_transfer_manager.rb b/lib/syskit/roby_app/log_transfer_manager.rb index 121a4123d..9d9aa7e56 100644 --- a/lib/syskit/roby_app/log_transfer_manager.rb +++ b/lib/syskit/roby_app/log_transfer_manager.rb @@ -36,7 +36,7 @@ def server_start raise ArgumentError, "log transfer server already running" if @server server_update_self_spawned_conf - @server = LogTransferServer::SpawnServer.new( + @server = Runtime::Server::SpawnServer.new( @conf.target_dir, @conf.user, @conf.password, @self_signed_ca.private_certificate_path, interface: @conf.ip, diff --git a/lib/syskit/roby_app/log_transfer_server.rb b/lib/syskit/roby_app/log_transfer_server.rb index d28a66602..e19d378cd 100644 --- a/lib/syskit/roby_app/log_transfer_server.rb +++ b/lib/syskit/roby_app/log_transfer_server.rb @@ -5,6 +5,6 @@ require "ipaddr" require "pathname" -require "syskit/roby_app/log_transfer_server/write_only_disk_file_system" -require "syskit/roby_app/log_transfer_server/driver" -require "syskit/roby_app/log_transfer_server/spawn_server" +require "syskit/runtime/server/write_only_disk_file_system" +require "syskit/runtime/server/driver" +require "syskit/runtime/server/spawn_server" diff --git a/lib/syskit/roby_app/log_transfer_server/driver.rb b/lib/syskit/roby_app/log_transfer_server/driver.rb deleted file mode 100644 index 5677232b3..000000000 --- a/lib/syskit/roby_app/log_transfer_server/driver.rb +++ /dev/null @@ -1,41 +0,0 @@ -# frozen_string_literal: true - -module Syskit - module RobyApp - module LogTransferServer - # Driver for log transfer FTP server - class Driver - def initialize(user, password, data_dir) - @user = user - @password = password - @data_dir = data_dir - end - - # Return true if the user should be allowed to log in. - # @param user [String] - # @param password [String] - # @return [Boolean] - # - # Depending upon the server's auth_level, some of these parameters - # may be nil. 
A parameter with a nil value is not required for - # authentication. Here are the parameters that are non-nil for - # each auth_level: - # * :user (user) - # * :password (user, password) - - def authenticate(user, password) - user == @user && - (password.nil? || password == @password) - end - - # Return the file system to use for a user. - # @param user [String] - # @return A file system driver - - def file_system(_user) - WriteOnlyDiskFileSystem.new(@data_dir) - end - end - end - end -end diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb index 25aab5731..dfefa5e14 100644 --- a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -46,7 +46,7 @@ def open private_data_connection: false, port: @port, implicit_ftps: @implicit_ftps, ssl: { verify_mode: OpenSSL::SSL::VERIFY_PEER, - ca_file: cert_path } + ca_file: cert_path } ) do |ftp| ftp.login(@user, @password) pp "login" @@ -75,7 +75,7 @@ def transfer(ftp) File.open(@file) do |file_io| pp "File io:", file_io ftp.storbinary("STOR #{File.basename(@file)}", - file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| + file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| now = Time.now rate_limit(buf.size, now, last) last = Time.now diff --git a/lib/syskit/roby_app/log_transfer_server/spawn_server.rb b/lib/syskit/roby_app/log_transfer_server/spawn_server.rb deleted file mode 100644 index 7b9260c45..000000000 --- a/lib/syskit/roby_app/log_transfer_server/spawn_server.rb +++ /dev/null @@ -1,104 +0,0 @@ -# frozen_string_literal: true - -require "English" - -module Syskit - module RobyApp - module LogTransferServer # :nodoc: - # Whether we should configure client and server to use implicit FTPs by - # default - # - # This workarounds some incompatibility between net-ftp and ftpd. 
They - # don't manage connecting properly in implicit mode before 2.7.0, and - # don't manage connecting properly in explicit mode afterwards - def self.use_implicit_ftps? - RUBY_VERSION >= "2.7.0" - end - - # Class responsible for spawning an FTP server for transfering logs - class SpawnServer - attr_reader :port - - # tgt_dir must be an absolute path - def initialize( - tgt_dir, - user, - password, - certfile_path, - interface: "127.0.0.1", - implicit_ftps: LogTransferServer.use_implicit_ftps?, - port: 0, - session_timeout: default_session_timeout, - nat_ip: nil, - passive_ports: nil, - debug: false, - verbose: false - ) - @debug = debug - driver = Driver.new(user, password, tgt_dir) - server = Ftpd::FtpServer.new(driver) - server.interface = interface - server.port = port - server.tls = implicit_ftps ? :implicit : :explicit - server.passive_ports = passive_ports - server.certfile_path = certfile_path - server.auth_level = Ftpd.const_get("AUTH_PASSWORD") - server.session_timeout = session_timeout - server.log = make_log - server.nat_ip = nat_ip - @server = server - Thread.abort_on_exception = false - @server.start - sleep 0.1 until Thread.abort_on_exception - Thread.abort_on_exception = false - @port = @server.bound_port - display_connection_info if verbose - end - - # The user should call this function in order to spawn the server - def run - wait_until_stopped - end - - def stop - dispose - end - - def dispose - @server.stop - end - - def join - @server.join - end - - private - - def display_connection_info - puts "Interface: #{@server.interface}" - puts "Port: #{@server.bound_port}" - puts "TLS: #{@server.tls}" - puts "PID: #{$PROCESS_ID}" - end - - def wait_until_stopped - puts "FTP server started. 
Press ENTER or c-C to stop it" - $stdout.flush - begin - $stdin.readline - rescue Interrupt - puts "Interrupt" - end - end - - def make_log - @debug && Logger.new($stdout) - end - - def default_session_timeout - Ftpd::FtpServer::DEFAULT_SESSION_TIMEOUT - end - end - end - end -end diff --git a/lib/syskit/roby_app/log_transfer_server/write_only_disk_file_system.rb b/lib/syskit/roby_app/log_transfer_server/write_only_disk_file_system.rb deleted file mode 100644 index bf05a6152..000000000 --- a/lib/syskit/roby_app/log_transfer_server/write_only_disk_file_system.rb +++ /dev/null @@ -1,38 +0,0 @@ -# frozen_string_literal: true - -module Syskit - module RobyApp - module LogTransferServer - # Custom write-only file system that detects collision between files - class WriteOnlyDiskFileSystem - include Ftpd::DiskFileSystem::Base - include Ftpd::DiskFileSystem::FileWriting - include Ftpd::TranslateExceptions - - def initialize(data_dir) - set_data_dir data_dir - end - - # Write a file to disk if it does not already exist. - # @param ftp_path [String] The virtual path - # @param stream [Ftpd::Stream] Stream that contains the data to write - # - # Called for: - # * STOR - # * STOU - # - # If missing, then these commands are not supported. - - def write(ftp_path, stream) - if Pathname.new(@data_dir + ftp_path).exist? 
- raise Ftpd::PermanentFileSystemError, - "Can't upload: File already exists" - end - - write_file ftp_path, stream, "wb" - end - translate_exceptions :write - end - end - end -end diff --git a/lib/syskit/runtime/server/driver.rb b/lib/syskit/runtime/server/driver.rb index eba0825ad..7ff0ea380 100644 --- a/lib/syskit/runtime/server/driver.rb +++ b/lib/syskit/runtime/server/driver.rb @@ -1,5 +1,7 @@ # frozen_string_literal: true +require "syskit/runtime/server/write_only_disk_file_system" + module Syskit module Runtime module Server diff --git a/test/process_managers/test_remote.rb b/test/process_managers/test_remote.rb index de6ac680b..867e7d530 100644 --- a/test/process_managers/test_remote.rb +++ b/test/process_managers/test_remote.rb @@ -506,7 +506,7 @@ def assert_upload_succeeds(timeout: 1) end end - class TestLogTransferServer < Syskit::RobyApp::LogTransferServer::SpawnServer + class TestLogTransferServer < Syskit::Runtime::Server::SpawnServer attr_reader :certfile_path def initialize(target_dir, user, password) diff --git a/test/roby_app/test_log_transfer_manager.rb b/test/roby_app/test_log_transfer_manager.rb index 55f2e3edf..fe4427b62 100644 --- a/test/roby_app/test_log_transfer_manager.rb +++ b/test/roby_app/test_log_transfer_manager.rb @@ -16,7 +16,7 @@ module RobyApp ip: "127.0.0.1", self_spawned: true, max_upload_rates: {}, - implicit_ftps: LogTransferServer.use_implicit_ftps? + implicit_ftps: Runtime::Server.use_implicit_ftps? 
) @conf.target_dir = make_tmpdir @manager = nil @@ -75,7 +75,7 @@ module RobyApp @conf.target_dir = target_path.to_s ca = TmpRootCA.new("127.0.0.1") @conf.certificate = ca.certificate - server = LogTransferServer::SpawnServer.new( + server = Runtime::Server::SpawnServer.new( target_path.to_s, "user", "password", ca.private_certificate_path ) diff --git a/test/roby_app/spawn_server/test_spawn_server.rb b/test/runtime/server/test_spawn_server.rb similarity index 97% rename from test/roby_app/spawn_server/test_spawn_server.rb rename to test/runtime/server/test_spawn_server.rb index 38e4fef78..92c848eb5 100644 --- a/test/roby_app/spawn_server/test_spawn_server.rb +++ b/test/runtime/server/test_spawn_server.rb @@ -5,8 +5,8 @@ require "net/ftp" module Syskit - module RobyApp - module LogTransferServer + module Runtime + module Server describe SpawnServer do ### AUXILIARY FUNCTIONS ### def spawn_server @@ -19,7 +19,7 @@ def spawn_server File.join(__dir__, "..", "..", "process_managers", "cert-private.crt") - @implicit_ftps = LogTransferServer.use_implicit_ftps? + @implicit_ftps = Server.use_implicit_ftps? 
@server = SpawnServer.new( @temp_serverdir, @user, @password, private_key_path, From fbded710b94525820d4f207ba0bdf628c70934ed Mon Sep 17 00:00:00 2001 From: Debora Date: Fri, 3 Jan 2025 16:58:12 -0300 Subject: [PATCH 016/158] fix: Creating unit tests for FTP --- test/cli/test_log_runtime_archive.rb | 105 +++++++++++++++++---------- 1 file changed, 65 insertions(+), 40 deletions(-) diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 86e6ddb5b..2e6c79f33 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -2,6 +2,7 @@ require "syskit/test/self" require "syskit/cli/log_runtime_archive" +require "syskit/runtime/server/spawn_server" module Syskit module CLI @@ -516,62 +517,86 @@ def should_archive_dataset(dataset, archive_basename, full:) end end - describe ".process_transfer" do + describe "FTP" do before do - @process = LogRuntimeArchive.new(@root) - interface = "127.0.0.1" - ca = RobyApp::TmpRootCA.new(interface) - @params = { - interface: interface, port: 0, - certfile_path: ca.private_certificate_path, - user: "nilvo", password: "nilvo123" - } + host = "127.0.0.1" + @ca = RobyApp::TmpRootCA.new(host) + @params = LogRuntimeArchive::FTPParameters.new(host: host, port: 21, + certificate:@ca.certificate, + user: "user", password: "password", + implicit_ftps: true, max_upload_rate: 10) + @target_dir = make_tmppath - @threads = [] + @server = create_server + @process = LogRuntimeArchive.new(@root) + end - create_server + after do + @server.stop + @server.join + @ca.dispose + @ca = nil + @server = nil end - it "transfers datasets" do - ftp = connect_to_server + def create_server + server = Runtime::Server::SpawnServer.new( + @target_dir, @params.user, @params.password, + @ca.private_certificate_path, + interface: @params.host, + implicit_ftps: @params.implicit_ftps, + debug: true, verbose: true + ) + @params.port = server.port + server + end - datasets = [ - 
make_valid_folder("20220434-2023"), - make_valid_folder("20220434-2024"), - make_valid_folder("20220434-2025") - ] + describe ".process_root_folder_transfer" do + it "transfers all files from root folder through FTP" do + dataset_A = make_valid_folder("PATH_A") + dataset_B = make_valid_folder("PATH_B") + make_random_file "test.0.log", root: dataset_A + make_random_file "test.1.log", root: dataset_A + make_random_file "test.log", root: dataset_B - datasets.map do |dataset| - transfer_dataset(ftp, @root / dataset, @target_dir / dataset) - end + @process.process_root_folder_transfer(@params) - datasets.each do |dataset| - assert (@target_dir / dataset).file? + assert(File.exist?(@target_dir / "PATH_A" / "test.0.log")) + assert(File.exist?(@target_dir / "PATH_A" / "test.1.log")) + assert(File.exist?(@target_dir / "PATH_B" / "test.log")) end end - def create_server - thread = Thread.new do - server = RobyApp::LogTransferServer::SpawnServer.new( - @target_dir, @params[:user], @params[:password], - @params[:certfile_path], - interface: @params[:interface], port: @params[:port] - ) - server.run + describe ".process_dataset_transfer" do + it "transfers all files from a folder through FTP" do + dataset = make_valid_folder("PATH") + make_random_file "test.0.log", root: dataset + make_random_file "test.1.log", root: dataset + @process.process_dataset_transfer(dataset, @params, full: true) + + assert(File.exist?(@target_dir / "PATH" / "test.0.log")) + assert(File.exist?(@target_dir / "PATH" / "test.1.log")) end - thread.join end - def transfer_dataset(ftp, src_path, tgt_path) - ftp.putbinaryfile(src_path, tgt_path) + describe ".transfer_dataset" do + it "transfers a dataset through FTP" do + dataset = make_valid_folder("PATH") + make_random_file "test.0.log", root: dataset + @process.transfer_dataset(dataset, @params, full: true) + + assert(File.exist?(@target_dir / "PATH" / "test.0.log")) + end end - def connect_to_server - ftp = Net::FTP.new - ftp.connect(@params[:interface], 
@params[:port]) - ftp.login(@params[:user], @params[:password]) - ftp.passive = true - ftp + describe ".transfer_file" do + it "transfers a file through FTP" do + dataset = make_valid_folder("PATH") + make_random_file "test.log", root: dataset + @process.transfer_file(dataset / "test.log", @params, full: true) + + assert(File.exist?(@target_dir / "PATH" / "test.log")) + end end end From 9bebe99b9b2c1ef8e13088d3b050090b4203816e Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Mon, 6 Jan 2025 15:52:04 -0300 Subject: [PATCH 017/158] chore: minor fixes and rubocop fixes --- lib/syskit/cli/log_runtime_archive_main.rb | 19 ++++++++++------ lib/syskit/runtime/server/spawn_server.rb | 2 +- test/cli/test_log_runtime_archive.rb | 22 +++++++++--------- test/cli/test_log_runtime_archive_main.rb | 26 +++++++--------------- 4 files changed, 33 insertions(+), 36 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index 4988f888f..5d0f35078 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -65,14 +65,16 @@ def archive(root_dir, target_dir) and periodically performs transfer" option :period, type: :numeric, default: 600, desc: "polling period in seconds" + option :max_size, + type: :numeric, default: 10_000, desc: "max log size in MB" option :max_upload_rate, type: :numeric, default: 10, desc: "max upload rate in Mbps" def watch_transfer( # rubocop:disable Metrics/ParameterLists - source_dir, user, password, certificate, host, port, implicit_ftps + source_dir, host, port, certfile, user, password, implicit_ftps ) loop do begin - transfer(source_dir, user, password, certificate, host, port, + transfer(source_dir, host, port, certfile, user, password, implicit_ftps) rescue Errno::ENOSPC next @@ -84,29 +86,32 @@ def watch_transfer( # rubocop:disable Metrics/ParameterLists end desc "transfer", "transfers the datasets" + option :max_size, + type: :numeric, default: 
10_000, desc: "max log size in MB" option :max_upload_rate, type: :numeric, default: 10, desc: "max upload rate in Mbps" def transfer( # rubocop:disable Metrics/ParameterLists - source_dir, user, password, certificate, host, port, implicit_ftps + source_dir, host, port, certfile, user, password, implicit_ftps ) source_dir = validate_directory_exists(source_dir) archiver = make_archiver(source_dir) server_params = { - host: host, port: port, certificate: certificate, + host: host, port: port, certfile: certfile, user: user, password: password, max_upload_rate: options[:max_upload_rate], implicit_ftps: implicit_ftps } + pp "*** *** ***", server_params archiver.process_root_folder_transfer(server_params) end desc "transfer_server", "creates the log transfer FTP server \ that runs on the main computer" def transfer_server( # rubocop:disable Metrics/ParameterLists - target_dir, user, password, certfile_path, host, implicit_ftps + target_dir, host, port, certfile, user, password, implicit_ftps ) - create_server(target_dir, user, password, certfile_path, host, port, + create_server(target_dir, host, port, certfile, user, password, implicit_ftps) end @@ -132,7 +137,7 @@ def make_archiver(root_dir, target_dir: nil) end def create_server( # rubocop:disable Metrics/ParameterLists - target_dir, user, password, certfile_path, host, port, implicit_ftps + target_dir, host, port, certfile_path, user, password, implicit_ftps ) Runtime::Server::SpawnServer.new( target_dir, user, password, diff --git a/lib/syskit/runtime/server/spawn_server.rb b/lib/syskit/runtime/server/spawn_server.rb index 3aafcb409..9169b8d1b 100644 --- a/lib/syskit/runtime/server/spawn_server.rb +++ b/lib/syskit/runtime/server/spawn_server.rb @@ -21,7 +21,7 @@ class SpawnServer attr_reader :port # tgt_dir must be an absolute path - def initialize( + def initialize( # rubocop:disable Metrics/AbcSize, Metrics/ParameterLists tgt_dir, user, password, diff --git a/test/cli/test_log_runtime_archive.rb 
b/test/cli/test_log_runtime_archive.rb index 2e6c79f33..cb3cc5cd4 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -521,10 +521,12 @@ def should_archive_dataset(dataset, archive_basename, full:) before do host = "127.0.0.1" @ca = RobyApp::TmpRootCA.new(host) - @params = LogRuntimeArchive::FTPParameters.new(host: host, port: 21, - certificate:@ca.certificate, + @params = LogRuntimeArchive::FTPParameters.new( + host: host, port: 21, + certificate: @ca.certificate, user: "user", password: "password", - implicit_ftps: true, max_upload_rate: 10) + implicit_ftps: true, max_upload_rate: 10 + ) @target_dir = make_tmppath @server = create_server @@ -553,11 +555,11 @@ def create_server describe ".process_root_folder_transfer" do it "transfers all files from root folder through FTP" do - dataset_A = make_valid_folder("PATH_A") - dataset_B = make_valid_folder("PATH_B") - make_random_file "test.0.log", root: dataset_A - make_random_file "test.1.log", root: dataset_A - make_random_file "test.log", root: dataset_B + dataset_a = make_valid_folder("PATH_A") + dataset_b = make_valid_folder("PATH_B") + make_random_file "test.0.log", root: dataset_a + make_random_file "test.1.log", root: dataset_a + make_random_file "test.log", root: dataset_b @process.process_root_folder_transfer(@params) @@ -583,7 +585,7 @@ def create_server it "transfers a dataset through FTP" do dataset = make_valid_folder("PATH") make_random_file "test.0.log", root: dataset - @process.transfer_dataset(dataset, @params, full: true) + LogRuntimeArchive.transfer_dataset(dataset, @params, full: true) assert(File.exist?(@target_dir / "PATH" / "test.0.log")) end @@ -593,7 +595,7 @@ def create_server it "transfers a file through FTP" do dataset = make_valid_folder("PATH") make_random_file "test.log", root: dataset - @process.transfer_file(dataset / "test.log", @params, full: true) + LogRuntimeArchive.transfer_file(dataset / "test.log", @params) 
assert(File.exist?(@target_dir / "PATH" / "test.log")) end diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index 7658a7d52..cb793efdb 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -129,20 +129,6 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) end end - describe "#transfer_server" do - before do - @tgt_log_dir = make_tmppath - interface = "127.0.0.1" - ca = RobyApp::TmpRootCA.new(interface) - - server_params = { - user: "nilvo", password: "nilvo123", - certfile_path: ca.private_certificate_path, - interface: interface, port: 0 - } - end - end - describe "#watch_transfer" do before do @base_log_dir = make_tmppath @@ -151,13 +137,15 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) ca = RobyApp::TmpRootCA.new(interface) @server_params = { + host: interface, port: 0, + certficate: ca.private_certificate_path, user: "nilvo", password: "nilvo123", - certfile_path: ca.private_certificate_path, - interface: interface, port: 0 + max_upload_rate: 10, + implicit_ftps: true } @threads = [] server = nil - flexmock(RobyApp::LogTransferServer::SpawnServer) + flexmock(Runtime::Server::SpawnServer) .should_receive(:new) .with_any_args .pass_thru do |arg| @@ -202,7 +190,9 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) def call_create_server cli = LogRuntimeArchiveMain.new - cli.create_server(@tgt_log_dir, *@server_params.values) + modified_params = @server_params.dup + modified_params.delete(:max_upload_rate) + cli.create_server(@tgt_log_dir, *modified_params.values) end end From f9281aa75f34f221501bab2d340791204c80cd4f Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Tue, 7 Jan 2025 16:16:22 -0300 Subject: [PATCH 018/158] fix: convert data_dir to string Ftpd base methods expect data_dir to be a string --- lib/syskit/runtime/server/write_only_disk_file_system.rb | 3 ++- 1 file changed, 2 insertions(+), 1 
deletion(-) diff --git a/lib/syskit/runtime/server/write_only_disk_file_system.rb b/lib/syskit/runtime/server/write_only_disk_file_system.rb index 8775214f2..2e8cfeb23 100644 --- a/lib/syskit/runtime/server/write_only_disk_file_system.rb +++ b/lib/syskit/runtime/server/write_only_disk_file_system.rb @@ -10,7 +10,8 @@ class WriteOnlyDiskFileSystem include Ftpd::TranslateExceptions def initialize(data_dir) - set_data_dir data_dir + # Ftpd base methods expect data_dir to be a string + set_data_dir data_dir.to_s end # Write a file to disk if it does not already exist. From 62c3d8cdb169e38e02ff720ff841eb70d54d02ec Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Tue, 7 Jan 2025 16:20:55 -0300 Subject: [PATCH 019/158] chore: minor fixes and changes --- lib/syskit/cli/log_runtime_archive.rb | 1 - lib/syskit/cli/log_runtime_archive_main.rb | 4 ++-- lib/syskit/roby_app/log_transfer_server/ftp_upload.rb | 7 ++----- test/cli/test_log_runtime_archive.rb | 4 ++-- 4 files changed, 6 insertions(+), 10 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 5d180dd77..447fc6a1c 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -158,7 +158,6 @@ def self.transfer_file(file, server, logger: null_logger) max_upload_rate: server.max_upload_rate || Float::INFINITY, implicit_ftps: server.implicit_ftps ) - logger.info "Transfering #{file}" ftp.open_and_transfer end diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index 5d0f35078..9edc44a21 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -102,7 +102,6 @@ def transfer( # rubocop:disable Metrics/ParameterLists max_upload_rate: options[:max_upload_rate], implicit_ftps: implicit_ftps } - pp "*** *** ***", server_params archiver.process_root_folder_transfer(server_params) end @@ -144,7 +143,8 @@ def create_server( # rubocop:disable 
Metrics/ParameterLists certfile_path, interface: host, port: port, - implicit_ftps: implicit_ftps + implicit_ftps: implicit_ftps, + debug: true ) end end diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb index dfefa5e14..7bc25187a 100644 --- a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -49,7 +49,6 @@ def open ca_file: cert_path } ) do |ftp| ftp.login(@user, @password) - pp "login" yield(ftp) end end @@ -59,7 +58,6 @@ def open # # @return [LogUploadState::Result] def open_and_transfer - pp "open and transfer" open { |ftp| transfer(ftp) } LogUploadState::Result.new(@file, true, nil) rescue StandardError => e @@ -71,9 +69,8 @@ def open_and_transfer # @param [Net::FTP] ftp def transfer(ftp) last = Time.now - pp "transfer file:", @file - File.open(@file) do |file_io| - pp "File io:", file_io + File.open(@file, "w+") do |file_io| + ensure_parent_path_exists(ftp) ftp.storbinary("STOR #{File.basename(@file)}", file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| now = Time.now diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index cb3cc5cd4..d99be1cd4 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -576,8 +576,8 @@ def create_server make_random_file "test.1.log", root: dataset @process.process_dataset_transfer(dataset, @params, full: true) - assert(File.exist?(@target_dir / "PATH" / "test.0.log")) - assert(File.exist?(@target_dir / "PATH" / "test.1.log")) + assert(File.exist?(@target_dir / "test.0.log")) + assert(File.exist?(@target_dir / "test.1.log")) end end From f3810bcc3c1b8bccc2207b6e2e8a022ef197c95e Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Wed, 8 Jan 2025 13:49:54 -0300 Subject: [PATCH 020/158] fix: creates dataset folder(s) if it does not exist --- lib/syskit/cli/log_runtime_archive.rb | 16 ++++++------- 
.../log_transfer_server/ftp_upload.rb | 22 ++++++++++++++---- .../server/write_only_disk_file_system.rb | 1 + test/cli/test_log_runtime_archive.rb | 23 ++++++++++++++----- 4 files changed, 44 insertions(+), 18 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 447fc6a1c..914655cec 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -39,10 +39,10 @@ def initialize( # # @param [Params] server_params the FTP server parameters def process_root_folder_transfer(server_params) - candidates = self.class.find_all_dataset_folders(@root_dir) + candidates = @root_dir.children running = candidates.last candidates.each do |child| - process_dataset_transfer(child, server_params, full: child != running) + process_dataset_transfer(child, server_params, @root_dir, full: child != running) end end @@ -122,13 +122,13 @@ def process_dataset(child, full:) end end - def process_dataset_transfer(child, server, full:) - self.class.transfer_dataset(child, server, full: full, logger: @logger) + def process_dataset_transfer(child, server, root, full:) + self.class.transfer_dataset(child, server, root, full: full, logger: @logger) end # Transfer the given dataset def self.transfer_dataset( - dataset_path, server, + dataset_path, server, root, full:, logger: null_logger ) logger.info( @@ -145,20 +145,20 @@ def self.transfer_dataset( end candidates.each do |child_path| - transfer_file(child_path, server, logger: logger) + transfer_file(child_path, server, root, logger: logger) end complete end - def self.transfer_file(file, server, logger: null_logger) + def self.transfer_file(file, server, root, logger: null_logger) ftp = RobyApp::LogTransferServer::FTPUpload.new( server.host, server.port, server.certificate, server.user, server.password, file, max_upload_rate: server.max_upload_rate || Float::INFINITY, implicit_ftps: server.implicit_ftps ) - ftp.open_and_transfer + 
ftp.open_and_transfer(root) end # Create or open an archive diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb index 7bc25187a..063eadc4f 100644 --- a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -57,20 +57,34 @@ def open # Open the connection and transfer the file # # @return [LogUploadState::Result] - def open_and_transfer - open { |ftp| transfer(ftp) } + def open_and_transfer(root) + open { |ftp| transfer(ftp, root) } LogUploadState::Result.new(@file, true, nil) rescue StandardError => e LogUploadState::Result.new(@file, false, e.message) end + def ensure_dataset_path_exists(ftp, root) + dataset_path = File.dirname(@file.relative_path_from(root)) + + dataset_path.split("/") do |folder| + begin + ftp.chdir(folder) + rescue + ftp.mkdir(folder) + ftp.chdir(folder) + end + end + end + # Do transfer the file through the given connection # # @param [Net::FTP] ftp - def transfer(ftp) + # @param [Pathname] root the archive root folder + def transfer(ftp, root) last = Time.now File.open(@file, "w+") do |file_io| - ensure_parent_path_exists(ftp) + ensure_dataset_path_exists(ftp, root) ftp.storbinary("STOR #{File.basename(@file)}", file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| now = Time.now diff --git a/lib/syskit/runtime/server/write_only_disk_file_system.rb b/lib/syskit/runtime/server/write_only_disk_file_system.rb index 2e8cfeb23..57cdb9cbb 100644 --- a/lib/syskit/runtime/server/write_only_disk_file_system.rb +++ b/lib/syskit/runtime/server/write_only_disk_file_system.rb @@ -6,6 +6,7 @@ module Server # Custom write-only file system that detects collision between files class WriteOnlyDiskFileSystem include Ftpd::DiskFileSystem::Base + include Ftpd::DiskFileSystem::Mkdir include Ftpd::DiskFileSystem::FileWriting include Ftpd::TranslateExceptions diff --git a/test/cli/test_log_runtime_archive.rb 
b/test/cli/test_log_runtime_archive.rb index d99be1cd4..dc772c416 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -564,7 +564,6 @@ def create_server @process.process_root_folder_transfer(@params) assert(File.exist?(@target_dir / "PATH_A" / "test.0.log")) - assert(File.exist?(@target_dir / "PATH_A" / "test.1.log")) assert(File.exist?(@target_dir / "PATH_B" / "test.log")) end end @@ -574,10 +573,22 @@ def create_server dataset = make_valid_folder("PATH") make_random_file "test.0.log", root: dataset make_random_file "test.1.log", root: dataset - @process.process_dataset_transfer(dataset, @params, full: true) + @process.process_dataset_transfer(dataset, @params, @root, full: true) - assert(File.exist?(@target_dir / "test.0.log")) - assert(File.exist?(@target_dir / "test.1.log")) + assert(File.exist?(@target_dir / "PATH" / "test.0.log")) + assert(File.exist?(@target_dir / "PATH" / "test.1.log")) + end + + it "makes sure hierarchy of dataset folders is created" do + dataset = make_valid_folder("PATH/TO/DATASET") + make_random_file "test.0.log", root: dataset + make_random_file "test.1.log", root: dataset + + @process.process_dataset_transfer(dataset, @params, @root, full: true) + + assert( + File.exist?(@target_dir / "PATH/TO/DATASET" / "test.0.log") + ) end end @@ -585,7 +596,7 @@ def create_server it "transfers a dataset through FTP" do dataset = make_valid_folder("PATH") make_random_file "test.0.log", root: dataset - LogRuntimeArchive.transfer_dataset(dataset, @params, full: true) + LogRuntimeArchive.transfer_dataset(dataset, @params, @root, full: true) assert(File.exist?(@target_dir / "PATH" / "test.0.log")) end @@ -595,7 +606,7 @@ def create_server it "transfers a file through FTP" do dataset = make_valid_folder("PATH") make_random_file "test.log", root: dataset - LogRuntimeArchive.transfer_file(dataset / "test.log", @params) + LogRuntimeArchive.transfer_file(dataset / "test.log", @params, @root) 
assert(File.exist?(@target_dir / "PATH" / "test.log")) end From 5ee86c31630b29f5bf78b892649fc9f68716be64 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Wed, 8 Jan 2025 13:57:27 -0300 Subject: [PATCH 021/158] chore: fix rubocop offenses --- lib/syskit/cli/log_runtime_archive.rb | 12 ++++++++---- .../roby_app/log_transfer_server/ftp_upload.rb | 10 ++++------ test/cli/test_log_runtime_archive.rb | 16 ++++++++++++---- 3 files changed, 24 insertions(+), 14 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 914655cec..76ec6676b 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -42,7 +42,9 @@ def process_root_folder_transfer(server_params) candidates = @root_dir.children running = candidates.last candidates.each do |child| - process_dataset_transfer(child, server_params, @root_dir, full: child != running) + process_dataset_transfer( + child, server_params, @root_dir, full: child != running + ) end end @@ -123,7 +125,9 @@ def process_dataset(child, full:) end def process_dataset_transfer(child, server, root, full:) - self.class.transfer_dataset(child, server, root, full: full, logger: @logger) + self.class.transfer_dataset( + child, server, root, full: full, logger: @logger + ) end # Transfer the given dataset @@ -145,13 +149,13 @@ def self.transfer_dataset( end candidates.each do |child_path| - transfer_file(child_path, server, root, logger: logger) + transfer_file(child_path, server, root) end complete end - def self.transfer_file(file, server, root, logger: null_logger) + def self.transfer_file(file, server, root) ftp = RobyApp::LogTransferServer::FTPUpload.new( server.host, server.port, server.certificate, server.user, server.password, file, diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb index 063eadc4f..0cd7f201f 100644 --- a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb +++ 
b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -68,12 +68,10 @@ def ensure_dataset_path_exists(ftp, root) dataset_path = File.dirname(@file.relative_path_from(root)) dataset_path.split("/") do |folder| - begin - ftp.chdir(folder) - rescue - ftp.mkdir(folder) - ftp.chdir(folder) - end + ftp.chdir(folder) + rescue e + ftp.mkdir(folder) + ftp.chdir(folder) end end diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index dc772c416..8dcaaf937 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -573,7 +573,9 @@ def create_server dataset = make_valid_folder("PATH") make_random_file "test.0.log", root: dataset make_random_file "test.1.log", root: dataset - @process.process_dataset_transfer(dataset, @params, @root, full: true) + @process.process_dataset_transfer( + dataset, @params, @root, full: true + ) assert(File.exist?(@target_dir / "PATH" / "test.0.log")) assert(File.exist?(@target_dir / "PATH" / "test.1.log")) @@ -584,7 +586,9 @@ def create_server make_random_file "test.0.log", root: dataset make_random_file "test.1.log", root: dataset - @process.process_dataset_transfer(dataset, @params, @root, full: true) + @process.process_dataset_transfer( + dataset, @params, @root, full: true + ) assert( File.exist?(@target_dir / "PATH/TO/DATASET" / "test.0.log") @@ -596,7 +600,9 @@ def create_server it "transfers a dataset through FTP" do dataset = make_valid_folder("PATH") make_random_file "test.0.log", root: dataset - LogRuntimeArchive.transfer_dataset(dataset, @params, @root, full: true) + LogRuntimeArchive.transfer_dataset( + dataset, @params, @root, full: true + ) assert(File.exist?(@target_dir / "PATH" / "test.0.log")) end @@ -606,7 +612,9 @@ def create_server it "transfers a file through FTP" do dataset = make_valid_folder("PATH") make_random_file "test.log", root: dataset - LogRuntimeArchive.transfer_file(dataset / "test.log", @params, @root) + LogRuntimeArchive.transfer_file( + 
dataset / "test.log", @params, @root + ) assert(File.exist?(@target_dir / "PATH" / "test.log")) end From d34f6442c0899c12115284c9726e624f0386b905 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Wed, 8 Jan 2025 15:27:54 -0300 Subject: [PATCH 022/158] fix: rewrite and fix tests, minor fixes and cleanup --- lib/syskit/cli/log_runtime_archive_main.rb | 20 +++--- test/cli/test_log_runtime_archive_main.rb | 75 +++++++++++++++------- 2 files changed, 62 insertions(+), 33 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index 9edc44a21..f8b872188 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -70,11 +70,11 @@ def archive(root_dir, target_dir) option :max_upload_rate, type: :numeric, default: 10, desc: "max upload rate in Mbps" def watch_transfer( # rubocop:disable Metrics/ParameterLists - source_dir, host, port, certfile, user, password, implicit_ftps + source_dir, host, port, certificate, user, password, implicit_ftps ) loop do begin - transfer(source_dir, host, port, certfile, user, password, + transfer(source_dir, host, port, certificate, user, password, implicit_ftps) rescue Errno::ENOSPC next @@ -91,16 +91,16 @@ def watch_transfer( # rubocop:disable Metrics/ParameterLists option :max_upload_rate, type: :numeric, default: 10, desc: "max upload rate in Mbps" def transfer( # rubocop:disable Metrics/ParameterLists - source_dir, host, port, certfile, user, password, implicit_ftps + source_dir, host, port, certificate, user, password, implicit_ftps ) source_dir = validate_directory_exists(source_dir) archiver = make_archiver(source_dir) server_params = { - host: host, port: port, certfile: certfile, + host: host, port: port, certificate: certificate, user: user, password: password, - max_upload_rate: options[:max_upload_rate], - implicit_ftps: implicit_ftps + implicit_ftps: implicit_ftps, + max_upload_rate: options[:max_upload_rate] } 
archiver.process_root_folder_transfer(server_params) end @@ -108,9 +108,9 @@ def transfer( # rubocop:disable Metrics/ParameterLists desc "transfer_server", "creates the log transfer FTP server \ that runs on the main computer" def transfer_server( # rubocop:disable Metrics/ParameterLists - target_dir, host, port, certfile, user, password, implicit_ftps + target_dir, host, port, certificate, user, password, implicit_ftps ) - create_server(target_dir, host, port, certfile, user, password, + create_server(target_dir, host, port, certificate, user, password, implicit_ftps) end @@ -136,11 +136,11 @@ def make_archiver(root_dir, target_dir: nil) end def create_server( # rubocop:disable Metrics/ParameterLists - target_dir, host, port, certfile_path, user, password, implicit_ftps + target_dir, host, port, certificate, user, password, implicit_ftps ) Runtime::Server::SpawnServer.new( target_dir, user, password, - certfile_path, + certificate, interface: host, port: port, implicit_ftps: implicit_ftps, diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index cb793efdb..6fd84450c 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -128,22 +128,29 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) ) end end + + describe "#transfer_server" do + before do + @target_dir = make_tmppath + @server_params = server_params + end + + it "creates an FTP server" do + flexmock(Runtime::Server::SpawnServer) + .new_instances + .should_receive(:initialize).with( + @target_dir, *@server_params.values + ).once + end + end describe "#watch_transfer" do before do - @base_log_dir = make_tmppath - @tgt_log_dir = make_tmppath - interface = "127.0.0.1" - ca = RobyApp::TmpRootCA.new(interface) - - @server_params = { - host: interface, port: 0, - certficate: ca.private_certificate_path, - user: "nilvo", password: "nilvo123", - max_upload_rate: 10, - implicit_ftps: true - } - @threads = 
[] + @source_dir = make_tmppath + @target_dir = make_tmppath + @server_params = server_params + @max_upload_rate = 10 + server = nil flexmock(Runtime::Server::SpawnServer) .should_receive(:new) @@ -158,16 +165,22 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) after do @server.stop @server.join - @threads.each(&:kill) end it "calls transfer with the specified period" do + mock_files_size([]) + mock_available_space(200) # 70 MB + quit = Class.new(RuntimeError) called = 0 flexmock(LogRuntimeArchive) .new_instances .should_receive(:process_root_folder_transfer) - .with(@server_params) + .with( + @server_params.merge( + { max_upload_rate: @max_upload_rate } + ) + ) .pass_thru do called += 1 raise quit if called == 3 @@ -177,7 +190,7 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) assert_raises(quit) do args = [ "watch_transfer", - @base_log_dir, + @source_dir, *@server_params.values, "--period", 0.5 ] @@ -190,28 +203,44 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) def call_create_server cli = LogRuntimeArchiveMain.new - modified_params = @server_params.dup - modified_params.delete(:max_upload_rate) - cli.create_server(@tgt_log_dir, *modified_params.values) + cli.create_server(@target_dir, *@server_params.values) end end describe "#transfer" do before do - @base_log_dir = make_tmppath + @server_params = server_params + end + + it "raises ArgumentError if source_dir does not exist" do + e = assert_raises ArgumentError do + call_transfer("/does/not/exist") + end + assert_equal "/does/not/exist does not exist, or is not a directory", + e.message end # Call 'transfer' function instead of 'watch' to call transfer once - def call_transfer(src_dir, params) + def call_transfer(source_dir) args = [ "transfer", - src_dir, - *params.values + source_dir, + *@server_params.values ] LogRuntimeArchiveMain.start(args) end end + def server_params + interface = "127.0.0.1" + ca = RobyApp::TmpRootCA.new(interface) + + { 
host: interface, port: 0, + certificate: ca.private_certificate_path, + user: "nilvo", password: "nilvo123", + implicit_ftps: true } + end + # Mock files sizes in bytes # @param [Array] size of files in MB def mock_files_size(sizes) From e5ac3da28f0692c5d31b8425145953617bcc73f6 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Wed, 8 Jan 2025 17:14:46 -0300 Subject: [PATCH 023/158] fix: re-add function and fix tests re-add find_all_dataset_folders function and rewrite dataset folders in the root folder transfer test to match the pattern --- lib/syskit/cli/log_runtime_archive.rb | 2 +- test/cli/test_log_runtime_archive.rb | 11 +++++------ test/cli/test_log_runtime_archive_main.rb | 2 +- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 76ec6676b..d35102186 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -39,7 +39,7 @@ def initialize( # # @param [Params] server_params the FTP server parameters def process_root_folder_transfer(server_params) - candidates = @root_dir.children + candidates = self.class.find_all_dataset_folders(@root_dir) running = candidates.last candidates.each do |child| process_dataset_transfer( diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 8dcaaf937..c8ef2085b 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -546,8 +546,7 @@ def create_server @target_dir, @params.user, @params.password, @ca.private_certificate_path, interface: @params.host, - implicit_ftps: @params.implicit_ftps, - debug: true, verbose: true + implicit_ftps: @params.implicit_ftps ) @params.port = server.port server @@ -555,16 +554,16 @@ def create_server describe ".process_root_folder_transfer" do it "transfers all files from root folder through FTP" do - dataset_a = make_valid_folder("PATH_A") - dataset_b = make_valid_folder("PATH_B") + 
dataset_a = make_valid_folder("20220434-2023") + dataset_b = make_valid_folder("20220434-2024") make_random_file "test.0.log", root: dataset_a make_random_file "test.1.log", root: dataset_a make_random_file "test.log", root: dataset_b @process.process_root_folder_transfer(@params) - assert(File.exist?(@target_dir / "PATH_A" / "test.0.log")) - assert(File.exist?(@target_dir / "PATH_B" / "test.log")) + assert(File.exist?(@target_dir / "20220434-2023" / "test.0.log")) + assert(File.exist?(@target_dir / "20220434-2024" / "test.log")) end end diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index 6fd84450c..9ed329bdd 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -128,7 +128,7 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) ) end end - + describe "#transfer_server" do before do @target_dir = make_tmppath From 0ae91df66aef3f42c1818592a015e7d0f0a50f47 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 9 Jan 2025 09:08:31 -0300 Subject: [PATCH 024/158] fix: add error to rescue --- lib/syskit/roby_app/log_transfer_server/ftp_upload.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb index 0cd7f201f..e068026a5 100644 --- a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -69,7 +69,7 @@ def ensure_dataset_path_exists(ftp, root) dataset_path.split("/") do |folder| ftp.chdir(folder) - rescue e + rescue StandardError => e ftp.mkdir(folder) ftp.chdir(folder) end From 01db22cb99537a53eaea7c2d1f243b59ab291adf Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 9 Jan 2025 09:09:19 -0300 Subject: [PATCH 025/158] chore: rename test to make it clearer --- test/cli/test_log_runtime_archive.rb | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff 
--git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index c8ef2085b..a7a30a4fc 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -553,17 +553,21 @@ def create_server end describe ".process_root_folder_transfer" do - it "transfers all files from root folder through FTP" do + it "transfers all finished dataset files from root folder "\ + "through FTP" do dataset_a = make_valid_folder("20220434-2023") dataset_b = make_valid_folder("20220434-2024") make_random_file "test.0.log", root: dataset_a make_random_file "test.1.log", root: dataset_a - make_random_file "test.log", root: dataset_b + make_random_file "test.0.log", root: dataset_b + make_random_file "test.1.log", root: dataset_b @process.process_root_folder_transfer(@params) assert(File.exist?(@target_dir / "20220434-2023" / "test.0.log")) - assert(File.exist?(@target_dir / "20220434-2024" / "test.log")) + assert(File.exist?(@target_dir / "20220434-2023" / "test.1.log")) + assert(File.exist?(@target_dir / "20220434-2024" / "test.0.log")) + refute(File.exist?(@target_dir / "20220434-2024" / "test.1.log")) end end From 05b29c084da2b0f145e5b0db86f3efaf95c8e7f4 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 9 Jan 2025 11:15:47 -0300 Subject: [PATCH 026/158] chore: add comment explaining why last log is not transferred --- test/cli/test_log_runtime_archive.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index a7a30a4fc..494148a2b 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -567,6 +567,8 @@ def create_server assert(File.exist?(@target_dir / "20220434-2023" / "test.0.log")) assert(File.exist?(@target_dir / "20220434-2023" / "test.1.log")) assert(File.exist?(@target_dir / "20220434-2024" / "test.0.log")) + # log manager considers dataset_b logs as currently running + # Because it isn't finished yet it does
not transfer the last log refute(File.exist?(@target_dir / "20220434-2024" / "test.1.log")) end end From d6e0a5ee6de64ab9ecebcd320db342fbfce09a9a Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 9 Jan 2025 11:16:15 -0300 Subject: [PATCH 027/158] chore: remove unnecessary code --- test/cli/test_log_runtime_archive_main.rb | 3 --- 1 file changed, 3 deletions(-) diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index 9ed329bdd..db9aeeb01 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -168,9 +168,6 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) end it "calls transfer with the specified period" do - mock_files_size([]) - mock_available_space(200) # 70 MB - quit = Class.new(RuntimeError) called = 0 flexmock(LogRuntimeArchive) From 252f19ad4c5551f40ac4e6947f8e7953ab21fe7f Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 9 Jan 2025 11:57:27 -0300 Subject: [PATCH 028/158] chore: raise ArgumentError if data_dir is not convertible --- lib/syskit/runtime/server/write_only_disk_file_system.rb | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/syskit/runtime/server/write_only_disk_file_system.rb b/lib/syskit/runtime/server/write_only_disk_file_system.rb index 57cdb9cbb..571168e64 100644 --- a/lib/syskit/runtime/server/write_only_disk_file_system.rb +++ b/lib/syskit/runtime/server/write_only_disk_file_system.rb @@ -12,6 +12,11 @@ class WriteOnlyDiskFileSystem def initialize(data_dir) # Ftpd base methods expect data_dir to be a string + unless data_dir.respond_to?(to_s) + raise ArgumentError, + "data_dir should be convertible into string" + end + set_data_dir data_dir.to_s end From c4177574e284ea7bf374a077567c5bd6b941d166 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 9 Jan 2025 12:46:35 -0300 Subject: [PATCH 029/158] chore: fix rubocop offenses and respond_to arg --- lib/syskit/roby_app/log_transfer_server/ftp_upload.rb | 2 
+- lib/syskit/runtime/server/write_only_disk_file_system.rb | 2 +- test/cli/test_log_runtime_archive.rb | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb index e068026a5..a840a4e49 100644 --- a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -69,7 +69,7 @@ def ensure_dataset_path_exists(ftp, root) dataset_path.split("/") do |folder| ftp.chdir(folder) - rescue StandardError => e + rescue StandardError => _e ftp.mkdir(folder) ftp.chdir(folder) end diff --git a/lib/syskit/runtime/server/write_only_disk_file_system.rb b/lib/syskit/runtime/server/write_only_disk_file_system.rb index 571168e64..0c753163c 100644 --- a/lib/syskit/runtime/server/write_only_disk_file_system.rb +++ b/lib/syskit/runtime/server/write_only_disk_file_system.rb @@ -12,7 +12,7 @@ class WriteOnlyDiskFileSystem def initialize(data_dir) # Ftpd base methods expect data_dir to be a string - unless data_dir.respond_to?(to_s) + unless data_dir.respond_to?(:to_s) raise ArgumentError, "data_dir should be convertible into string" end diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 494148a2b..922135054 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -553,7 +553,7 @@ def create_server end describe ".process_root_folder_transfer" do - it "transfers all finished dataset files from root folder "\ + it "transfers all finished dataset files from root folder " \ "through FTP" do dataset_a = make_valid_folder("20220434-2023") dataset_b = make_valid_folder("20220434-2024") From 55f31bf4ac8696283613863b110f5c7791eff56f Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 9 Jan 2025 15:37:50 -0300 Subject: [PATCH 030/158] fix: fix transfer_server test --- lib/syskit/cli/log_runtime_archive_main.rb | 3 +- 
test/cli/test_log_runtime_archive_main.rb | 42 ++++++++++++++-------- 2 files changed, 29 insertions(+), 16 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index f8b872188..f0fe70cc3 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -143,8 +143,7 @@ def create_server( # rubocop:disable Metrics/ParameterLists certificate, interface: host, port: port, - implicit_ftps: implicit_ftps, - debug: true + implicit_ftps: implicit_ftps ) end end diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index db9aeeb01..95a727ff1 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -131,23 +131,37 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) describe "#transfer_server" do before do - @target_dir = make_tmppath @server_params = server_params - end - it "creates an FTP server" do + server = nil flexmock(Runtime::Server::SpawnServer) - .new_instances - .should_receive(:initialize).with( - @target_dir, *@server_params.values - ).once + .should_receive(:new) + .with_any_args + .pass_thru do |arg| + server = arg + end + call_create_server(make_tmppath, @server_params) + @server = server + end + + after do + @server.stop + end + + it "successfully creates an FTP server" do + Net::FTP.open( + @server_params[:host], port: @server.port, + implicit_ftps: @server_params[:implicit_ftps], + ssl: { verify_mode: OpenSSL::SSL::VERIFY_NONE } + ) do |ftp| + ftp.login(@server_params[:user], @server_params[:password]) + end end end describe "#watch_transfer" do before do @source_dir = make_tmppath - @target_dir = make_tmppath @server_params = server_params @max_upload_rate = 10 @@ -158,7 +172,7 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) .pass_thru do |arg| server = arg end - call_create_server + call_create_server(make_tmppath, 
@server_params) @server = server end @@ -197,11 +211,6 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) assert called == 3 assert_operator(Time.now - tic, :>, 0.9) end - - def call_create_server - cli = LogRuntimeArchiveMain.new - cli.create_server(@target_dir, *@server_params.values) - end end describe "#transfer" do @@ -228,6 +237,11 @@ def call_transfer(source_dir) end end + def call_create_server(tgt_dir, server_params) + cli = LogRuntimeArchiveMain.new + cli.create_server(tgt_dir, *server_params.values) + end + def server_params interface = "127.0.0.1" ca = RobyApp::TmpRootCA.new(interface) From 04145bace83859a56704566d32dc2873aed3ffc4 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Fri, 10 Jan 2025 12:43:43 -0300 Subject: [PATCH 031/158] fix: add root as optional argument --- lib/syskit/cli/log_runtime_archive.rb | 2 +- lib/syskit/roby_app/log_transfer_server/ftp_upload.rb | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index d35102186..cd13348a2 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -162,7 +162,7 @@ def self.transfer_file(file, server, root) max_upload_rate: server.max_upload_rate || Float::INFINITY, implicit_ftps: server.implicit_ftps ) - ftp.open_and_transfer(root) + ftp.open_and_transfer(root: root) end # Create or open an archive diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb index a840a4e49..f12539379 100644 --- a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -57,7 +57,7 @@ def open # Open the connection and transfer the file # # @return [LogUploadState::Result] - def open_and_transfer(root) + def open_and_transfer(root: nil) open { |ftp| transfer(ftp, root) } LogUploadState::Result.new(@file, true, nil) rescue StandardError 
=> e @@ -81,8 +81,9 @@ def ensure_dataset_path_exists(ftp, root) # @param [Pathname] root the archive root folder def transfer(ftp, root) last = Time.now - File.open(@file, "w+") do |file_io| - ensure_dataset_path_exists(ftp, root) + opening_mode = root ? "w+" : "r" + File.open(@file, opening_mode) do |file_io| + ensure_dataset_path_exists(ftp, root) if root ftp.storbinary("STOR #{File.basename(@file)}", file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| now = Time.now From 0bf635c047654f807edc3de5b1f8b67e9e353dfd Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Fri, 10 Jan 2025 19:00:08 -0300 Subject: [PATCH 032/158] chore: fix rubocop offenses --- test/cli/test_log_runtime_archive_main.rb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index 95a727ff1..89e85bcf1 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -150,7 +150,8 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) it "successfully creates an FTP server" do Net::FTP.open( - @server_params[:host], port: @server.port, + @server_params[:host], + port: @server.port, implicit_ftps: @server_params[:implicit_ftps], ssl: { verify_mode: OpenSSL::SSL::VERIFY_NONE } ) do |ftp| From 3ced9dc9f62994fe99a291fb1f3a159459942766 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Mon, 13 Jan 2025 12:46:51 -0300 Subject: [PATCH 033/158] chore: rescue specific error --- lib/syskit/roby_app/log_transfer_server/ftp_upload.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb index f12539379..fdf925a2c 100644 --- a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -69,7 +69,7 @@ def ensure_dataset_path_exists(ftp, root) dataset_path.split("/") do |folder| 
ftp.chdir(folder) - rescue StandardError => _e + rescue Net::FTPPermError => _e ftp.mkdir(folder) ftp.chdir(folder) end From 31ba782c242203c0806b2f00a624c714894eeb44 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Mon, 13 Jan 2025 13:49:41 -0300 Subject: [PATCH 034/158] fix: specify rate in mbps and convert to bps when needed --- lib/syskit/cli/log_runtime_archive_main.rb | 11 ++++++++--- test/cli/test_log_runtime_archive.rb | 7 ++++++- test/cli/test_log_runtime_archive_main.rb | 7 ++++++- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index f0fe70cc3..a21949e5e 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -67,7 +67,7 @@ def archive(root_dir, target_dir) type: :numeric, default: 600, desc: "polling period in seconds" option :max_size, type: :numeric, default: 10_000, desc: "max log size in MB" - option :max_upload_rate, + option :max_upload_rate_mbps, type: :numeric, default: 10, desc: "max upload rate in Mbps" def watch_transfer( # rubocop:disable Metrics/ParameterLists source_dir, host, port, certificate, user, password, implicit_ftps @@ -88,7 +88,7 @@ def watch_transfer( # rubocop:disable Metrics/ParameterLists desc "transfer", "transfers the datasets" option :max_size, type: :numeric, default: 10_000, desc: "max log size in MB" - option :max_upload_rate, + option :max_upload_rate_mbps, type: :numeric, default: 10, desc: "max upload rate in Mbps" def transfer( # rubocop:disable Metrics/ParameterLists source_dir, host, port, certificate, user, password, implicit_ftps @@ -100,7 +100,7 @@ def transfer( # rubocop:disable Metrics/ParameterLists host: host, port: port, certificate: certificate, user: user, password: password, implicit_ftps: implicit_ftps, - max_upload_rate: options[:max_upload_rate] + max_upload_rate: rate_mbps_to_bps(options[:max_upload_rate_mbps]) } 
archiver.process_root_folder_transfer(server_params) end @@ -115,6 +115,11 @@ def transfer_server( # rubocop:disable Metrics/ParameterLists end no_commands do # rubocop:disable Metrics/BlockLength + # Converts rate in Mbps to bps + def rate_mbps_to_bps(rate_mbps) + rate_mbps / 10 ** 6 + end + def validate_directory_exists(dir) dir = Pathname.new(dir) unless dir.directory? diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 922135054..42b448482 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -525,7 +525,7 @@ def should_archive_dataset(dataset, archive_basename, full:) host: host, port: 21, certificate: @ca.certificate, user: "user", password: "password", - implicit_ftps: true, max_upload_rate: 10 + implicit_ftps: true, max_upload_rate: rate_mbps_to_bps(10) ) @target_dir = make_tmppath @@ -624,6 +624,11 @@ def create_server assert(File.exist?(@target_dir / "PATH" / "test.log")) end end + + # Converts rate in Mbps to bps + def rate_mbps_to_bps(rate_mbps) + rate_mbps / 10 ** 6 + end end describe "#ensure_free_space" do diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index 89e85bcf1..6164bfe39 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -164,7 +164,7 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) before do @source_dir = make_tmppath @server_params = server_params - @max_upload_rate = 10 + @max_upload_rate = rate_mbps_to_bps(10) server = nil flexmock(Runtime::Server::SpawnServer) @@ -212,6 +212,11 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) assert called == 3 assert_operator(Time.now - tic, :>, 0.9) end + + # Converts rate in Mbps to bps + def rate_mbps_to_bps(rate_mbps) + rate_mbps / 10 ** 6 + end end describe "#transfer" do From 60952383d15a80bcbca1e4004354ceee1c77d330 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Mon, 
13 Jan 2025 14:01:10 -0300 Subject: [PATCH 035/158] chore: ensure path exists before opening file --- lib/syskit/roby_app/log_transfer_server/ftp_upload.rb | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb index fdf925a2c..d55ded370 100644 --- a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -81,9 +81,8 @@ def ensure_dataset_path_exists(ftp, root) # @param [Pathname] root the archive root folder def transfer(ftp, root) last = Time.now - opening_mode = root ? "w+" : "r" - File.open(@file, opening_mode) do |file_io| - ensure_dataset_path_exists(ftp, root) if root + ensure_dataset_path_exists(ftp, root) if root + File.open(@file) do |file_io| ftp.storbinary("STOR #{File.basename(@file)}", file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| now = Time.now From da4102a4d8236cd83a8108b6dc6d1099a2498345 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Mon, 13 Jan 2025 14:06:53 -0300 Subject: [PATCH 036/158] chore: remove unnecessary variable --- lib/syskit/cli/log_runtime_archive.rb | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index cd13348a2..4974f88e0 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -102,10 +102,9 @@ def ensure_free_space(free_space_low_limit, free_space_delete_until) def process_dataset(child, full:) use_existing = true - basename = child.basename.to_s loop do open_archive_for( - basename, use_existing: use_existing + child.basename.to_s, use_existing: use_existing ) do |io| if io.tell > @max_archive_size use_existing = false From ef78abda73c5e134fb8a2e0598aaa0893cb05d8d Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Mon, 13 Jan 2025 16:35:56 -0300 Subject: [PATCH 037/158] chore: change function name, 
document target_dir and fix rubocop offenses --- lib/syskit/cli/log_runtime_archive.rb | 8 ++++++++ lib/syskit/cli/log_runtime_archive_main.rb | 2 +- lib/syskit/roby_app/log_transfer_server/ftp_upload.rb | 4 ++-- test/cli/test_log_runtime_archive.rb | 2 +- test/cli/test_log_runtime_archive_main.rb | 2 +- 5 files changed, 13 insertions(+), 5 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 4974f88e0..b390fb461 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -22,6 +22,14 @@ class LogRuntimeArchive :implicit_ftps, :max_upload_rate, keyword_init: true) + # Initializes the LogRuntimeArchive + # + # @param [Pathname] root_dir the logs directory + # @param [Pathname] target_dir the path to store the file in the archive, + # should be nil in transfer mode, as the logs will be transferred directly + # to the ftp server @see process_root_folder_transfer + # @param [Logger] logger the log structure + # @param [Integer] max_archive_size the max size of the archive def initialize( root_dir, target_dir: nil, logger: LogRuntimeArchive.null_logger, diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index a21949e5e..fbaabd107 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -117,7 +117,7 @@ def transfer_server( # rubocop:disable Metrics/ParameterLists no_commands do # rubocop:disable Metrics/BlockLength # Converts rate in Mbps to bps def rate_mbps_to_bps(rate_mbps) - rate_mbps / 10 ** 6 + rate_mbps / (10**6) end def validate_directory_exists(dir) diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb index d55ded370..8d2d799ca 100644 --- a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -64,7 +64,7 @@ def open_and_transfer(root: 
nil) LogUploadState::Result.new(@file, false, e.message) end - def ensure_dataset_path_exists(ftp, root) + def chdir_to_file_directory(ftp, root) dataset_path = File.dirname(@file.relative_path_from(root)) dataset_path.split("/") do |folder| @@ -81,7 +81,7 @@ def ensure_dataset_path_exists(ftp, root) # @param [Pathname] root the archive root folder def transfer(ftp, root) last = Time.now - ensure_dataset_path_exists(ftp, root) if root + chdir_to_file_directory(ftp, root) if root File.open(@file) do |file_io| ftp.storbinary("STOR #{File.basename(@file)}", file_io, Net::FTP::DEFAULT_BLOCKSIZE) do |buf| diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 42b448482..b15ca3a90 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -627,7 +627,7 @@ def create_server # Converts rate in Mbps to bps def rate_mbps_to_bps(rate_mbps) - rate_mbps / 10 ** 6 + rate_mbps / (10**6) end end diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index 6164bfe39..15795d330 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -215,7 +215,7 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) # Converts rate in Mbps to bps def rate_mbps_to_bps(rate_mbps) - rate_mbps / 10 ** 6 + rate_mbps / (10**6) end end From 4608166706af7c5b58480063305db78b27ac84d2 Mon Sep 17 00:00:00 2001 From: Sylvain Date: Mon, 27 Jan 2025 09:18:56 -0300 Subject: [PATCH 038/158] fix: pick the server returned by call_create_server instead of horrible flexmock gymnastics --- test/cli/test_log_runtime_archive_main.rb | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index 15795d330..5c7350e19 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ 
-132,16 +132,7 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) describe "#transfer_server" do before do @server_params = server_params - - server = nil - flexmock(Runtime::Server::SpawnServer) - .should_receive(:new) - .with_any_args - .pass_thru do |arg| - server = arg - end - call_create_server(make_tmppath, @server_params) - @server = server + @server = call_create_server(make_tmppath, @server_params) end after do @@ -166,15 +157,7 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) @server_params = server_params @max_upload_rate = rate_mbps_to_bps(10) - server = nil - flexmock(Runtime::Server::SpawnServer) - .should_receive(:new) - .with_any_args - .pass_thru do |arg| - server = arg - end - call_create_server(make_tmppath, @server_params) - @server = server + @server = call_create_server(make_tmppath, @server_params) end after do From ed689b11fbfcad71b39a8c6ca8c8b16822692479 Mon Sep 17 00:00:00 2001 From: Sylvain Date: Mon, 27 Jan 2025 09:19:14 -0300 Subject: [PATCH 039/158] fix: add missing @server.join on test teardown --- test/cli/test_log_runtime_archive_main.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index 5c7350e19..a914eda3c 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -137,6 +137,7 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) after do @server.stop + @server.join end it "successfully creates an FTP server" do From 1c96bbbbb7dbeb0d3adba5407542c53b85f1c53d Mon Sep 17 00:00:00 2001 From: Sylvain Date: Mon, 27 Jan 2025 09:26:16 -0300 Subject: [PATCH 040/158] chore: make explicit the update of the FTP server port in tests We use 0 as server port to have it picked by Linux, but then we do have to save the port for the clients to connect. This was hidden in the server creation method. 
--- test/cli/test_log_runtime_archive.rb | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index b15ca3a90..79d5f5947 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -521,15 +521,18 @@ def should_archive_dataset(dataset, archive_basename, full:) before do host = "127.0.0.1" @ca = RobyApp::TmpRootCA.new(host) - @params = LogRuntimeArchive::FTPParameters.new( - host: host, port: 21, + params = LogRuntimeArchive::FTPParameters.new( + host: host, port: 0, certificate: @ca.certificate, user: "user", password: "password", - implicit_ftps: true, max_upload_rate: rate_mbps_to_bps(10) + implicit_ftps: true, + max_upload_rate: rate_mbps_to_bps(10) ) @target_dir = make_tmppath - @server = create_server + @server = create_server(params) + params.port = @server.port + @params = params @process = LogRuntimeArchive.new(@root) end @@ -541,15 +544,13 @@ def should_archive_dataset(dataset, archive_basename, full:) @server = nil end - def create_server - server = Runtime::Server::SpawnServer.new( - @target_dir, @params.user, @params.password, + def create_server(params) + Runtime::Server::SpawnServer.new( + @target_dir, params.user, params.password, @ca.private_certificate_path, - interface: @params.host, - implicit_ftps: @params.implicit_ftps + interface: params.host, + implicit_ftps: params.implicit_ftps ) - @params.port = server.port - server end describe ".process_root_folder_transfer" do From c987d461a99be4d9d4be01dbe5e8163637a3c5ab Mon Sep 17 00:00:00 2001 From: Sylvain Date: Mon, 27 Jan 2025 09:28:13 -0300 Subject: [PATCH 041/158] fix: report FTP transfer results in the various transfer-related methods --- lib/syskit/cli/log_runtime_archive.rb | 24 +++++++++++++++++++++--- test/cli/test_log_runtime_archive.rb | 6 ++++-- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git 
a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index b390fb461..8c45e5a9c 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -46,10 +46,11 @@ def initialize( # through FTP server # # @param [Params] server_params the FTP server parameters + # @return [Array] def process_root_folder_transfer(server_params) candidates = self.class.find_all_dataset_folders(@root_dir) running = candidates.last - candidates.each do |child| + candidates.map do |child| process_dataset_transfer( child, server_params, @root_dir, full: child != running ) @@ -137,6 +138,18 @@ def process_dataset_transfer(child, server, root, full:) ) end + TransferDatasetResult = Struct.new( + :complete, :transfer_results, keyword_init: true + ) do + def success? + transfer_results.all?(&:success?) + end + + def failures + transfer_results.find_all { !_1.success? } + end + end + # Transfer the given dataset def self.transfer_dataset( dataset_path, server, root, @@ -155,13 +168,18 @@ def self.transfer_dataset( archive_filter_candidates_partial(candidates) end - candidates.each do |child_path| + transfer_results = candidates.map do |child_path| transfer_file(child_path, server, root) end - complete + TransferDatasetResult.new( + complete: complete, transfer_results: transfer_results + ) end + # Transfer a file to the central log server via FTP + # + # @return [LogUploadState:Result] def self.transfer_file(file, server, root) ftp = RobyApp::LogTransferServer::FTPUpload.new( server.host, server.port, server.certificate, server.user, diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 79d5f5947..8a0e737c1 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -606,10 +606,11 @@ def create_server(params) it "transfers a dataset through FTP" do dataset = make_valid_folder("PATH") make_random_file "test.0.log", root: dataset - 
LogRuntimeArchive.transfer_dataset( + results = LogRuntimeArchive.transfer_dataset( dataset, @params, @root, full: true ) + assert results.success? assert(File.exist?(@target_dir / "PATH" / "test.0.log")) end end @@ -618,11 +619,12 @@ def create_server(params) it "transfers a file through FTP" do dataset = make_valid_folder("PATH") make_random_file "test.log", root: dataset - LogRuntimeArchive.transfer_file( + result = LogRuntimeArchive.transfer_file( dataset / "test.log", @params, @root ) assert(File.exist?(@target_dir / "PATH" / "test.log")) + assert result.success?, "transfer failed: #{result.message}" end end From 2dc81d7c7813a775c4afe18eb04dc7c13c13f2bb Mon Sep 17 00:00:00 2001 From: Sylvain Date: Mon, 27 Jan 2025 09:29:00 -0300 Subject: [PATCH 042/158] fix: set nonzero max_upload_rate in FTP transfer tests --- lib/syskit/roby_app/log_transfer_server/ftp_upload.rb | 5 +++++ test/cli/test_log_runtime_archive.rb | 9 ++------- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb index 8d2d799ca..e9c58ea3d 100644 --- a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -21,6 +21,11 @@ def initialize( # rubocop:disable Metrics/ParameterLists @file = file @max_upload_rate = Float(max_upload_rate) + if @max_upload_rate <= 0 + raise ArgumentError, + "invalid value for max_upload_rate: given " \ + "#{@max_upload_rate}, but should be strictly positive" + end @implicit_ftps = implicit_ftps end diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 8a0e737c1..caba623d2 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -526,7 +526,7 @@ def should_archive_dataset(dataset, archive_basename, full:) certificate: @ca.certificate, user: "user", password: "password", implicit_ftps: true, - max_upload_rate: 
rate_mbps_to_bps(10) + max_upload_rate: 10_000_000 ) @target_dir = make_tmppath @@ -627,11 +627,6 @@ def create_server(params) assert result.success?, "transfer failed: #{result.message}" end end - - # Converts rate in Mbps to bps - def rate_mbps_to_bps(rate_mbps) - rate_mbps / (10**6) - end end describe "#ensure_free_space" do @@ -738,7 +733,7 @@ def decompress_data(data) end def assert_entry_matches(entry, data, name:, content:) - assert entry.file? + assert entry.file?, "expected #{entry} to be a file" assert_equal name, entry.full_name assert_equal content, decompress_data(data) end From adf0af3df8a2de1cc748097915e7797ece4b1461 Mon Sep 17 00:00:00 2001 From: Sylvain Date: Mon, 27 Jan 2025 09:30:05 -0300 Subject: [PATCH 043/158] fix: delete source files after a successful transfer --- lib/syskit/cli/log_runtime_archive.rb | 5 ++++- test/cli/test_log_runtime_archive.rb | 32 ++++++++++++++++++++++++--- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 8c45e5a9c..b99afad7d 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -169,7 +169,10 @@ def self.transfer_dataset( end transfer_results = candidates.map do |child_path| - transfer_file(child_path, server, root) + result = transfer_file(child_path, server, root) + child_path.unlink if result.success? 
+ + result end TransferDatasetResult.new( diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index caba623d2..8c13c7557 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -603,16 +603,42 @@ def create_server(params) end describe ".transfer_dataset" do + before do + @dataset = make_valid_folder("PATH") + make_random_file "test.0.log", root: @dataset + end + it "transfers a dataset through FTP" do - dataset = make_valid_folder("PATH") - make_random_file "test.0.log", root: dataset results = LogRuntimeArchive.transfer_dataset( - dataset, @params, @root, full: true + @dataset, @params, @root, full: true ) assert results.success? + # Datasets that have pocolog files are not complete + refute results.complete assert(File.exist?(@target_dir / "PATH" / "test.0.log")) end + + it "removes the source file if the transfer was successful" do + results = LogRuntimeArchive.transfer_dataset( + @dataset, @params, @root, full: true + ) + + assert results.success? + refute((@dataset / "test.0.log").exist?) + end + + it "does not remove the source file if the transfer failed" do + flexmock(LogRuntimeArchive) + .should_receive(:transfer_file) + .and_return(flexmock(success?: false)) + results = LogRuntimeArchive.transfer_dataset( + @dataset, @params, @root, full: true + ) + + refute results.success? + assert((@dataset / "test.0.log").exist?) 
+ end end describe ".transfer_file" do From 64bb0f62a8818b2d0e9186e7f8412d8f49a88f41 Mon Sep 17 00:00:00 2001 From: kapeps Date: Mon, 27 Jan 2025 13:44:53 -0300 Subject: [PATCH 044/158] feat: log the transfering results when transfering datasets --- lib/syskit/cli/log_runtime_archive.rb | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index b99afad7d..54be37788 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -175,9 +175,33 @@ def self.transfer_dataset( result end - TransferDatasetResult.new( + result = TransferDatasetResult.new( complete: complete, transfer_results: transfer_results ) + log_transfer_results(dataset_path, result, logger: logger) + end + + def self.log_transfer_results(dataset_path, result, logger: null_logger) + failed_results = result[:transfer_results].reject do |result| + result.success + end + + if failed_results.empty? + logger.info( + "Transfering of #{result[:complete] ? 
"complete" : "incomplete"}"\ + " #{dataset_path} finished" + ) + else + failed_results.each do |failed_result| + failed_message = "with message : " + + failed_result.message if failed_result.message + logger.info( + "Failed on file #{failed_result.file} #{failed_message}" + ) + end + end + + result end # Transfer a file to the central log server via FTP From eea0130b49557e44058de56849d1e830be976571 Mon Sep 17 00:00:00 2001 From: kapeps Date: Mon, 27 Jan 2025 13:50:10 -0300 Subject: [PATCH 045/158] chore: comment log_transfer_result method --- lib/syskit/cli/log_runtime_archive.rb | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 54be37788..3227f4732 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -181,6 +181,13 @@ def self.transfer_dataset( log_transfer_results(dataset_path, result, logger: logger) end + # Logs the transfer dataset results + # + # @param [String] the dataset path + # @param [TransferDatasetResult] the transfer dataset result + # @param [Logger] optional logger, if unfilled will use null logger + # + # @result [TransferDatasetResult] the received transfer dataset result def self.log_transfer_results(dataset_path, result, logger: null_logger) failed_results = result[:transfer_results].reject do |result| result.success From 2b305dc27c9f6efa9ee9ba5891e436fbf116a49f Mon Sep 17 00:00:00 2001 From: kapeps Date: Mon, 27 Jan 2025 14:25:56 -0300 Subject: [PATCH 046/158] fix: rubocop grievances --- lib/syskit/cli/log_runtime_archive.rb | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 3227f4732..3ed32c2d9 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -189,19 +189,20 @@ def self.transfer_dataset( # # @result [TransferDatasetResult] the received transfer 
dataset result def self.log_transfer_results(dataset_path, result, logger: null_logger) - failed_results = result[:transfer_results].reject do |result| - result.success - end + failed_results = result[:transfer_results].reject(&:success) if failed_results.empty? logger.info( - "Transfering of #{result[:complete] ? "complete" : "incomplete"}"\ - " #{dataset_path} finished" + "Transfering of " \ + "#{result[:complete] ? 'complete' : 'incomplete'} " \ + "#{dataset_path} finished" ) else failed_results.each do |failed_result| - failed_message = "with message : " + - failed_result.message if failed_result.message + failed_message = + if failed_result.message + "with message : #{failed_result.message}" + end logger.info( "Failed on file #{failed_result.file} #{failed_message}" ) From b212df5d5e54e92d77407a7ac2883d18878d0b24 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Wed, 4 Dec 2024 21:14:54 -0300 Subject: [PATCH 047/158] fix: implement v2 protocol support for master device instances --- lib/syskit/interface/v2/protocol.rb | 17 +++++++++++++++++ test/interface/v2/test_protocol.rb | 28 ++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/lib/syskit/interface/v2/protocol.rb b/lib/syskit/interface/v2/protocol.rb index 36a32bb8e..4d2d335db 100644 --- a/lib/syskit/interface/v2/protocol.rb +++ b/lib/syskit/interface/v2/protocol.rb @@ -9,6 +9,8 @@ module V2 module Protocol ROBY_TASK_MEMBERS = Roby::Interface::V2::Protocol::Task.new.members + DeviceModel = Struct.new(:name, keyword_init: true) + MasterDeviceInstance = Struct.new(:name, :model, keyword_init: true) Deployment = Struct.new( *ROBY_TASK_MEMBERS, :pid, :ready_since, :deployed_tasks, keyword_init: true @@ -36,12 +38,27 @@ def self.register_marshallers(protocol) protocol.add_marshaller( Syskit::Deployment, &method(:marshal_deployment_task) ) + protocol.add_marshaller( + Syskit::Robot::MasterDeviceInstance, + &method(:marshal_master_device_instance) + ) protocol.allow_objects( 
Orocos::RubyTasks::TaskContext, Orocos::RubyTasks::StubTaskContext ) end + def self.marshal_device_model(model) + DeviceModel.new(name: model.name) + end + + def self.marshal_master_device_instance(_channel, device) + MasterDeviceInstance.new( + name: device.name, + model: marshal_device_model(device.device_model) + ) + end + def self.marshal_remote_task_handle(name, remote_task_handle) ior = remote_task_handle.handle.ior model_name = remote_task_handle.handle.model.name diff --git a/test/interface/v2/test_protocol.rb b/test/interface/v2/test_protocol.rb index 26f8d67dc..15116376b 100644 --- a/test/interface/v2/test_protocol.rb +++ b/test/interface/v2/test_protocol.rb @@ -51,6 +51,34 @@ module Protocol marshalled.deployed_tasks.map(&:to_h) end end + + describe "Device support" do + before do + @channel = Roby::Interface::V2::Channel.new( + IO.pipe.last, flexmock + ) + Protocol.register_marshallers(@channel) + + @device_m = Syskit::Device.new_submodel(name: "Dev") + @driver_m = Syskit::TaskContext.new_submodel + @driver_m.driver_for @device_m, as: "driver" + + profile = Actions::Profile.new("Test") + @robot = profile.robot + end + + it "marshals a master device" do + @robot.device @device_m, as: "master_device" + marshalled = @channel.marshal_filter_object( + @robot.master_device_dev + ) + + assert_kind_of MasterDeviceInstance, marshalled + assert_equal "master_device", marshalled.name + assert_kind_of DeviceModel, marshalled.model + assert_equal "Dev", marshalled.model.name + end + end end end end From d12db6189daafee603ad8daeac15d1b1a9570cb1 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Sat, 14 Dec 2024 16:59:30 -0300 Subject: [PATCH 048/158] fix: make sure Qt lets Ruby threads being executed --- lib/syskit/telemetry/cli.rb | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/syskit/telemetry/cli.rb b/lib/syskit/telemetry/cli.rb index 54bb8f810..f6a4e0d22 100644 --- a/lib/syskit/telemetry/cli.rb +++ b/lib/syskit/telemetry/cli.rb @@ -27,6 +27,11 @@ def 
ui require "syskit/telemetry/ui/runtime_state" $qApp.disable_threading # rubocop:disable Style/GlobalVars + @thread_pass_timer = Qt::Timer.new + @thread_pass_timer.connect(SIGNAL("timeout()")) do + Thread.pass + end + @thread_pass_timer.start(0.01) require "syskit/scripts/common" Syskit::Scripts.run do From 7700a3ed3b08a959738167c39ae4a4a4ef1e371b Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Sat, 14 Dec 2024 17:07:02 -0300 Subject: [PATCH 049/158] fix: improve responsiveness by resolving tasks asynchronously The Orocos::Async implementation needs to be re-done. I'm de-facto starting this here, focusing on the Syskit telemetry agent as the remote agent, instead of a generic RTT-focussed implementation This does not need the agent, but already helps a lot with the performance --- lib/syskit/telemetry/ui/name_service.rb | 3 +- lib/syskit/telemetry/ui/runtime_state.rb | 133 +++++++++++++++++++---- 2 files changed, 113 insertions(+), 23 deletions(-) diff --git a/lib/syskit/telemetry/ui/name_service.rb b/lib/syskit/telemetry/ui/name_service.rb index 794965eed..4cf13fe73 100644 --- a/lib/syskit/telemetry/ui/name_service.rb +++ b/lib/syskit/telemetry/ui/name_service.rb @@ -57,8 +57,9 @@ def register(task, name: task.name) # # @param [String,TaskContext] name The name or task def deregister(name) - @registered_tasks.delete(name) + task = @registered_tasks.delete(name) trigger_task_removed(name) + task end # (see Base#cleanup) diff --git a/lib/syskit/telemetry/ui/runtime_state.rb b/lib/syskit/telemetry/ui/runtime_state.rb index 145ef750a..85228c890 100644 --- a/lib/syskit/telemetry/ui/runtime_state.rb +++ b/lib/syskit/telemetry/ui/runtime_state.rb @@ -127,6 +127,16 @@ def initialize(parent: nil, super(parent) + @task_discovery_queue = Concurrent::Hash.new + @task_discovery_result = Queue.new + @task_discovery_mtx = Mutex.new + @task_discovery_signal = ConditionVariable.new + @task_discovery_thread = Thread.new do + loop do + task_discovery_thread + end + end + @syskit 
= syskit @syskit_run_arguments = SyskitRunArguments.new(robot: "default", set: []) @@ -543,15 +553,18 @@ def poll_syskit_interface if syskit.connected? begin display_current_cycle_index_and_time - update_current_deployments + query_deployment_update_v1 update_current_job_task_names if current_job rescue Roby::Interface::ComError # rubocop:disable Lint/SuppressedException end + + task_discovery_apply_result else reset_current_deployments reset_current_job reset_name_service reset_task_inspector + reset_task_discovery end syskit.poll @@ -575,15 +588,26 @@ def reset_current_job update_task_inspector(@name_service.names) end - def update_current_deployments - polling_call ["syskit"], "deployments" do |deployments| + def process_current_deployments + update_name_service(@current_deployments) + + names = @name_service.names + names &= @current_job_task_names if @current_job + update_task_inspector(names) + end + + def query_deployment_update_v1 + polling_call(["syskit"], "deployments") do |deployments| @current_deployments = deployments - update_name_service(deployments) + process_current_deployments + end + end - names = @name_service.names - names &= @current_job_task_names if @current_job - update_task_inspector(names) + def update_current_deployments(updated, removed) + @current_deployments.delete_if do |d| + removed.include?(d.id) end + @current_deployments.concat(updated) end def reset_current_deployments @@ -654,28 +678,93 @@ def report_app_error(error) end def update_name_service(deployments) - # Now remove all tasks that are not in deployments - existing = @name_service.names + removed_tasks = @task_discovery_mtx.synchronize do + to_remove = discover_new_tasks(deployments) + + removed_tasks = to_remove.map do + @task_discovery_queue.delete(_1) + @name_service.deregister(_1) + end.compact + + @task_discovery_signal.broadcast + removed_tasks + end + + removed_tasks.each(&:dispose) + end + + def task_discovery_thread + task_name, deployed_task = 
@task_discovery_mtx.synchronize do + until (t = @task_discovery_queue.first) + @task_discovery_signal.wait(@task_discovery_mtx) + end + t + end + + ior = deployed_task.ior + task = Orocos::TaskContext.new( + deployed_task.ior, + name: task_name, + model: orogen_model_from_name(deployed_task.orogen_model_name) + ) + + @task_discovery_result << [task_name, task] + rescue Orocos::ComError => e + STDERR.puts "Failed discovery of task #{deployed_task.name}: #{e.message}" + ensure + @task_discovery_mtx.synchronize do + if ior == @task_discovery_queue[task_name]&.ior + @task_discovery_queue.delete(task_name) + end + end + end + + def task_discovery_apply_result + loop do + name, task = @task_discovery_result.pop(true) + async_task = Orocos::Async::CORBA::TaskContext.new(use: task) + @name_service.register(async_task, name: name) + end + rescue ThreadError + end + + def reset_task_discovery + @task_discovery_queue.clear + @task_discovery_result.clear + end + + OROGEN_LOGGER_NAMES = %w[logger::Logger OroGen.logger.Logger] + + # @api private + # + # Process the deployment information received from the syskit master, + # updating task discovery + # + # This method MUST be called with the mutex that protects the discovery + # queue taken + # + # @param [Array] deployments + # @return [Array,Array] names of the tasks that should + # be added to the discovery queue, and of the tasks that should be + # de-registered from the name service + def discover_new_tasks(deployments) + # Get all the names and remove them when we find them in the + # deployments. 
What's left is what needs to be removed + names_discovered = @name_service.names + names_in_discovery = @task_discovery_queue.keys deployments.each do |d| d.deployed_tasks.each do |deployed_task| + model_name = deployed_task.orogen_model_name task_name = deployed_task.name - if existing.include?(task_name) - existing.delete(task_name) + + task_name = deployed_task.name + if names_discovered.delete(task_name) next if deployed_task.ior == @name_service.ior(task_name) end - existing.delete(task_name) - task = Orocos::TaskContext.new( - deployed_task.ior, - name: task_name, - model: orogen_model_from_name( - deployed_task.orogen_model_name - ) - ) - - async_task = Orocos::Async::CORBA::TaskContext.new(use: task) - @name_service.register(async_task, name: task_name) + names_in_discovery.delete(task_name) + @task_discovery_queue[task_name] = deployed_task end end From a0ad12a4a8597194e81c1b0792adad1f7eb19c07 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Sat, 14 Dec 2024 17:11:32 -0300 Subject: [PATCH 050/158] fix: implement an incremental method to update active deployments IORs are ... actually big. The call at 10Hz is currently taking 1 MB/s. Implement the incremental version right away to keep the bandwidth small All work on the telemetry MUST have a corresponding bandwidth measurement to avoid this kind of surprises. 
--- lib/syskit/interface/commands.rb | 16 ++++++++ lib/syskit/telemetry/ui/runtime_state.rb | 33 +++++++++++++--- test/interface/test_commands.rb | 50 ++++++++++++++++++++++++ 3 files changed, 94 insertions(+), 5 deletions(-) diff --git a/lib/syskit/interface/commands.rb b/lib/syskit/interface/commands.rb index b39f6c983..a2bf5336c 100644 --- a/lib/syskit/interface/commands.rb +++ b/lib/syskit/interface/commands.rb @@ -16,6 +16,22 @@ def deployments command :deployments, "returns information about running deployments" + # Return incremental update about deployments + # + # @return [Protocol::Deployment] + def poll_ready_deployments(known: []) + deployments = + plan.find_tasks(Syskit::Deployment).running.find_all(&:ready?) + deployment_ids = deployments.map { _1.droby_id.id } + new_deployments = + deployments.find_all { !known.include?(_1.droby_id.id) } + removed_deployments = + known.find_all { |id| !deployment_ids.include?(id) } + [new_deployments, removed_deployments] + end + command :poll_ready_deployments, + "incremental information about deployments" + # Save the configuration of all running tasks of the given model to disk # # @param [String,nil] name the section name for the new configuration. diff --git a/lib/syskit/telemetry/ui/runtime_state.rb b/lib/syskit/telemetry/ui/runtime_state.rb index 85228c890..67ea2b8b2 100644 --- a/lib/syskit/telemetry/ui/runtime_state.rb +++ b/lib/syskit/telemetry/ui/runtime_state.rb @@ -127,6 +127,8 @@ def initialize(parent: nil, super(parent) + @has_poll_ready_deployments = true + @task_discovery_queue = Concurrent::Hash.new @task_discovery_result = Queue.new @task_discovery_mtx = Mutex.new @@ -553,7 +555,11 @@ def poll_syskit_interface if syskit.connected? 
begin display_current_cycle_index_and_time + if @has_poll_ready_deployments + query_deployment_update_v2 + else query_deployment_update_v1 + end update_current_job_task_names if current_job rescue Roby::Interface::ComError # rubocop:disable Lint/SuppressedException end @@ -603,6 +609,16 @@ def query_deployment_update_v1 end end + def query_deployment_update_v2 + polling_call( + ["syskit"], "poll_ready_deployments", + known: @current_deployments.map(&:id) + ) do |updated, removed| + update_current_deployments(updated, removed) + process_current_deployments + end + end + def update_current_deployments(updated, removed) @current_deployments.delete_if do |d| removed.include?(d.id) @@ -645,12 +661,12 @@ def reset_task_inspector update_task_inspector([]) end - def polling_call(path, method_name, *args) - key = [path, method_name, args] + def polling_call(path, method_name, *args, **kw) + key = [path, method_name, args, kw] return if @call_guards.key?(key) && @call_guards[key] @call_guards[key] = true - syskit.async_call(path, method_name, *args) do |error, ret| + syskit.async_call(path, method_name, *args, **kw) do |error, ret| @call_guards[key] = false if error report_app_error(error) @@ -671,6 +687,14 @@ def async_call(path, method_name, *args) end def report_app_error(error) + if error.class_name == "NoMethodError" && + error.message.match?(/poll_ready_deployments/) + warn "remote interface does not support poll_ready_deployments" + warn "switching to old inefficient polling method" + @has_poll_ready_deployments = false + return + end + warn error.message error.backtrace.each do |line| warn " #{line}" @@ -768,8 +792,7 @@ def discover_new_tasks(deployments) end end - existing.each { @name_service.deregister(_1) } - @name_service.names + names_discovered | names_in_discovery end def reset_name_service diff --git a/test/interface/test_commands.rb b/test/interface/test_commands.rb index 10de835b8..dbb40e321 100644 --- a/test/interface/test_commands.rb +++ 
b/test/interface/test_commands.rb @@ -47,6 +47,56 @@ module Interface end end + describe "#poll_ready_deployments" do + attr_reader :task_m, :task + + before do + @task_m = TaskContext.new_submodel + @task = syskit_stub_deploy_configure_and_start( + syskit_stub_requirements(task_m).with_conf("default") + ) + plan.add_mission_task(task) + end + + it "returns a deployment that is ready" do + new_deployments, old_deployments = subject.poll_ready_deployments + assert_equal [], old_deployments + assert_equal 1, new_deployments.size + deployment = new_deployments.first + assert_equal @task.execution_agent, deployment + end + + it "ignores a deployment that is not ready yet" do + flexmock(@task.execution_agent).should_receive(ready?: false) + new_deployments, old_deployments = subject.poll_ready_deployments + assert_equal [], new_deployments + assert_equal [], old_deployments + end + + it "does not return a deployment that is already known" do + new_deployments, old_deployments = + subject.poll_ready_deployments( + known: [@task.execution_agent.droby_id.id] + ) + + assert_equal [], new_deployments + assert_equal [], old_deployments + end + + it "lists deployments that have been removed" do + droby_id = @task.execution_agent.droby_id.id + expect_execution do + plan.unmark_mission_task(task) + plan.unmark_permanent_task(task.execution_agent) + end.garbage_collect(true).to { emit task.execution_agent.stop_event } + + new_deployments, old_deployments = + subject.poll_ready_deployments(known: [droby_id]) + assert_equal [], new_deployments + assert_equal [droby_id], old_deployments + end + end + describe "#restart_deployments" do attr_reader :task_m, :task From 444170d3fd10fe1647548647da702b0532b3fcca Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Sat, 14 Dec 2024 17:14:21 -0300 Subject: [PATCH 051/158] fix: filtering of loggers if hiding them is enabled --- lib/syskit/telemetry/ui/runtime_state.rb | 4 ++++ 1 file changed, 4 insertions(+) diff --git 
a/lib/syskit/telemetry/ui/runtime_state.rb b/lib/syskit/telemetry/ui/runtime_state.rb index 67ea2b8b2..cdd1b93f8 100644 --- a/lib/syskit/telemetry/ui/runtime_state.rb +++ b/lib/syskit/telemetry/ui/runtime_state.rb @@ -782,6 +782,10 @@ def discover_new_tasks(deployments) model_name = deployed_task.orogen_model_name task_name = deployed_task.name + if hide_loggers? && OROGEN_LOGGER_NAMES.include?(model_name) + next + end + task_name = deployed_task.name if names_discovered.delete(task_name) next if deployed_task.ior == @name_service.ior(task_name) From 5c1614fb2f441035385fd692276983eda1c16878 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Sat, 14 Dec 2024 17:15:26 -0300 Subject: [PATCH 052/158] chore: display fatal error messages explaining why a component ends up in quarantine --- lib/syskit/task_context.rb | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/syskit/task_context.rb b/lib/syskit/task_context.rb index ff709859e..d130a617e 100644 --- a/lib/syskit/task_context.rb +++ b/lib/syskit/task_context.rb @@ -822,7 +822,7 @@ def dynamic_input_port_connections(existing_port_names) dynamic_ports.each do |name| if existing_port_names.include?(name) - Syskit.fatal( + fatal( "task #{orocos_task} did not clear #{name}, a dynamic input " \ "port, during cleanup, as it should have. Go fix it." ) @@ -853,7 +853,7 @@ def dynamic_output_port_connections(existing_port_names) dynamic_ports.each do |name| if existing_port_names.include?(name) - Syskit.fatal( + fatal( "task #{orocos_task} did not clear #{name}, a dynamic " \ "output port, during cleanup, as it should have. Go fix it." ) @@ -986,6 +986,9 @@ def setting_up!(promise) # (see Component#setup_failed!)_ def setup_failed!(exception) unless exception.kind_of?(Orocos::StateTransitionFailed) + fatal "#{exception} received while configuring #{orocos_name}, " \ + "expected a StateTransitionFailed error. 
The component is " \ + "put in quarantine and cannot be reused" execution_agent.register_task_context_in_fatal(orocos_name) end @@ -1036,6 +1039,10 @@ def setup_failed!(exception) start_event.achieve_asynchronously(promise, emit_on_success: false) promise.on_error do |exception| unless exception.kind_of?(Orocos::StateTransitionFailed) + fatal "#{exception} received while configuring " \ + "#{orocos_name}, expected a StateTransitionFailed " \ + "error. The component is put in quarantine and " \ + "cannot be reused" execution_agent.register_task_context_in_fatal(orocos_name) end end From b4b64d342c56d6fbd8c50a9126669fb613f518b8 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Sat, 14 Dec 2024 17:15:57 -0300 Subject: [PATCH 053/158] fix: do not load orogen models when creating remote task context handles This load local task models, which is both heavy and useless (since the code is not using the model at all for now). Do not load anything for the time being. The right thing long-term is to use the remote model --- lib/syskit/telemetry/ui/runtime_state.rb | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/syskit/telemetry/ui/runtime_state.rb b/lib/syskit/telemetry/ui/runtime_state.rb index cdd1b93f8..372b388be 100644 --- a/lib/syskit/telemetry/ui/runtime_state.rb +++ b/lib/syskit/telemetry/ui/runtime_state.rb @@ -805,9 +805,6 @@ def reset_name_service end def orogen_model_from_name(name) - @orogen_models[name] ||= Orocos.default_loader.task_model_from_name(name) - rescue OroGen::NotFound - Orocos.warn "#{name} is a task context of class #{name}, but I cannot find the description for it, falling back" @orogen_models[name] ||= Orocos.create_orogen_task_context_model(name) end From 6d0a7c7d84679cf29ed3db3da8c4cdb074dec62b Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Sat, 14 Dec 2024 17:17:08 -0300 Subject: [PATCH 054/158] chore: implement Telemetry::Async::NameService A well-unit-tested combination of UI::NameService and the async-update code from 
UI::RuntimeState --- .rubocop_todo.yml | 1 - lib/syskit/telemetry/async/name_service.rb | 298 +++++++++++++++++++++ test/telemetry/async/test_name_service.rb | 289 ++++++++++++++++++++ 3 files changed, 587 insertions(+), 1 deletion(-) create mode 100644 lib/syskit/telemetry/async/name_service.rb create mode 100644 test/telemetry/async/test_name_service.rb diff --git a/.rubocop_todo.yml b/.rubocop_todo.yml index 2a5e34f0e..ee9cf2f1a 100644 --- a/.rubocop_todo.yml +++ b/.rubocop_todo.yml @@ -356,7 +356,6 @@ Lint/MissingSuper: - 'lib/syskit/robot/master_device_instance.rb' - 'lib/syskit/robot/slave_device_instance.rb' - 'lib/syskit/telemetry/agent/server.rb' - - 'lib/syskit/telemetry/ui/name_service.rb' - 'lib/syskit/test/stub_network.rb' # Offense count: 2 diff --git a/lib/syskit/telemetry/async/name_service.rb b/lib/syskit/telemetry/async/name_service.rb new file mode 100644 index 000000000..993bcd4df --- /dev/null +++ b/lib/syskit/telemetry/async/name_service.rb @@ -0,0 +1,298 @@ +# frozen_string_literal: true + +require "orocos/async" + +module Syskit + module Telemetry + module Async + # In-process name service + # + # It is exclusively filled using information that comes from the async + # {Client} + class NameService < Orocos::NameServiceBase + # A new NameService instance + # + # @param [Hash] tasks The tasks which are + # known by the name service. 
+ # @note The namespace is always "Local" + def initialize(thread_count: 1) + super() + + @iors = Concurrent::AtomicReference.new({}) + @registered_tasks = Concurrent::Hash.new + @task_added_callbacks = Concurrent::Array.new + @task_removed_callbacks = Concurrent::Array.new + @orogen_models = Concurrent::Hash.new + @discovery = {} + @discovery_executor = + Concurrent::FixedThreadPool.new(thread_count) + end + + def dispose + cleanup + @discovery_executor.shutdown + end + + def names + @registered_tasks.keys + end + + def include?(name) + @registered_tasks.key?(name) + end + + # Asynchronously update the name server given the known set of tasks + # + # After this call, any task not in the tasks parameter will have been + # removed from the name server + # + # @param [#ior,#name] list of IOR and name of remote tasks to resolve + # @return [Array] list of task names that are either known, or + # that are being discovered + def async_update_tasks(tasks) + iors = tasks.each_with_object({}) { |t, h| h[t.name] = t.ior } + @iors.set(iors) + + remove_changed_tasks(iors) + + # Resolve finished futures + resolve_discovered_tasks + + # Then check what tasks need to be discovered, and discover them + # + # We never spawn two futures to resolve the same name. 
Instead, + # when we get the feature result, we check whether the + # IOR has changed, and act accordingly + tasks.each do |t| + next if @discovery[t.name] + next if t.ior == @registered_tasks[t.name]&.ior + + async_discover_task(t) + end + end + + # Deregister and dispose of tasks who disappeared or have a + # different IOR + def remove_changed_tasks(iors) + @registered_tasks.dup.each do |name, task| + new_ior = iors[name] + deregister(name).dispose if !new_ior || task.ior != new_ior + end + end + + class AsyncDiscoveryError < RuntimeError; end + + AsyncDiscovery = Struct.new( + :task, :future, :ior, :async_task, keyword_init: true + ) do + def update_from_result + fulfilled, (ior, async_task), reason = future.result + unless fulfilled + raise AsyncDiscoveryError, + "unexpected error during asynchronous " \ + "task discovery: #{reason}" + end + + self.ior = ior + self.async_task = async_task + end + + def wait + future.result + end + + def resolved? + future.resolved? + end + end + + # @api private + # + # Create a future that discovers a remote task + def async_discover_task(task) + future = Concurrent::Promises.future_on(@discovery_executor) do + ior = @iors.get[task.name] + discover_task(task.name, ior, task.orogen_model_name) + end + @discovery[task.name] = AsyncDiscovery.new(task: task, future: future) + end + + # @api private + # + # Process the tasks that have been (asynchronously) discovered + def resolve_discovered_tasks + while (async_discovery = pop_discovered_task) + register( + async_discovery.async_task, + name: async_discovery.task.name + ) + end + end + + # Whether some discoveries have been queued but not yet resolved + def has_pending_discoveries? + !@discovery.empty? 
+ end + + # Wait for all pending discoveries to finish + def wait_for_task_discovery + @discovery.each_value(&:wait) + end + + # @api private + # + # Remove a resolved task from the pending discoveries + # + # @return [(String,Orocos::Async::TaskContext),nil] a resolved task or nil + # if there are none so far + def pop_discovered_task + loop do + async_discovery, current_ior = pop_resolved_discovery + return unless async_discovery + next unless current_ior + + if async_discovery.ior != current_ior + # The IOR associated with that name changed since the future + # started processing. Throw away the resolved task and start + # again + async_discovery.async_task&.dispose + async_discover_task(async_discovery.task) + next + end + + next unless async_discovery.async_task + + return async_discovery + end + end + + def pop_resolved_discovery + async_discovery = @discovery.each_value.find(&:resolved?) + return unless async_discovery + + @discovery.delete(async_discovery.task.name) + async_discovery.update_from_result + current_ior = @iors.get[async_discovery.task.name] + [async_discovery, current_ior] + end + + # @api private + # + # Discover a single task + # + # @param [String] name + # @param [String] ior + # @param [String] orogen_model_name + # @return [(String,(Orocos::Async::TaskContext,nil))] the IOR used to + # resolve the task, and the async taskcontext that represents it. 
The + # task is nil if the resolution failed + def discover_task(name, ior, orogen_model_name) + task = Orocos::TaskContext.new( + ior, + name: name, + model: orogen_model_from_name(orogen_model_name) + ) + async_task = Orocos::Async::CORBA::TaskContext.new( + ior, use: task + ) + + [ior, async_task] + rescue StandardError => e + warn "Failed discovery of task #{name}: #{e.message}" + [ior, nil] + end + + # Re-create the orogen model from its name + # + # @param [String] name + # @return [OroGen::Spec::TaskContext] + def orogen_model_from_name(name) + @orogen_models[name] ||= Orocos.create_orogen_task_context_model(name) + end + + # (see NameServiceBase#get) + def ior(name) + task = @registered_tasks[name] + return task.ior if task.respond_to?(:ior) + + raise Orocos::NotFound, "task context #{name} cannot be found." + end + + # (see NameServiceBase#get) + def get(name, **) + task = @registered_tasks[name] + return task if task + + raise Orocos::NotFound, "task context #{name} cannot be found." + end + + # Registers the given {Orocos::TaskContext} on the name service. + # If a name is provided, it will be used as an alias. If no name is + # provided, the name of the task is used. This is true even if the + # task name is renamed later. + # + # @param [Orocos::TaskContext] task The task. + # @param [String] name Optional name which is used to register the task. + def register(task, name: task.name) + @registered_tasks[name] = task + trigger_task_added(name) + end + + # Deregisters the given name or task from the name service. 
+ # + # @param [String,TaskContext] name The name or task + def deregister(name) + task = @registered_tasks.delete(name) + trigger_task_removed(name) + task + end + + # (see Base#cleanup) + def cleanup + names = @registered_tasks.keys + @registered_tasks.clear + @iors.set({}) + @discovery.clear + @orogen_models.clear + names.each { trigger_task_removed(_1) } + end + + def to_async + self + end + + def on_task_added(&block) + @task_added_callbacks << block + Roby.disposable { @task_added_callbacks.delete(block) } + end + + def trigger_task_added(name) + error = nil + @task_added_callbacks.each do |block| + block.call(name) + rescue RuntimeError => e + error = e + end + + raise error if error + end + + def on_task_removed(&block) + @task_removed_callbacks << block + Roby.disposable { @task_removed_callbacks.delete(block) } + end + + def trigger_task_removed(name) + error = nil + @task_removed_callbacks.each do |block| + block.call(name) + rescue RuntimeError => e + error = e + end + + raise error if error + end + end + end + end +end diff --git a/test/telemetry/async/test_name_service.rb b/test/telemetry/async/test_name_service.rb new file mode 100644 index 000000000..3afce24c2 --- /dev/null +++ b/test/telemetry/async/test_name_service.rb @@ -0,0 +1,289 @@ +# frozen_string_literal: true + +require "syskit/test/self" +require "syskit/telemetry/async/name_service" + +module Syskit + module Telemetry + module Async + describe NameService do + before do + @ns = NameService.new + @ruby_tasks = [] + end + + after do + @ruby_tasks.each(&:dispose) + end + + describe "asynchronous update" do + it "asynchronously resolves a task from name and IOR" do + deployed_task, task = make_deployed_task("test", "something") + + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + @ns.resolve_discovered_tasks + assert_equal task.ior, @ns.get("test").ior + end + + it "does not re-resolve a registered task if the IOR matches" do + deployed_task, = 
make_deployed_task("test", "something") + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + @ns.async_update_tasks([deployed_task]) + refute @ns.has_pending_discoveries? + end + + it "re-resolves a registered task if the IOR differs" do + deployed_task, = make_deployed_task("test", "something") + deployed_task2, task2 = make_deployed_task("test", "something") + + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + @ns.resolve_discovered_tasks + + @ns.async_update_tasks([deployed_task2]) + assert @ns.has_pending_discoveries? + @ns.wait_for_task_discovery + @ns.resolve_discovered_tasks + assert_equal task2.ior, @ns.get("test").ior + end + + it "requeues the discovery if a task's IOR changed" do + deployed_task, = make_deployed_task("test", "something") + deployed_task2, task2 = make_deployed_task("test", "something") + + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + @ns.async_update_tasks([deployed_task2]) + @ns.wait_for_task_discovery + @ns.resolve_discovered_tasks + assert_equal task2.ior, @ns.get("test").ior + end + + it "does not register a task if it has been removed while it was " \ + "being discovered" do + deployed_task, = make_deployed_task("test", "something") + + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + # async_update_tasks resolves the discovered tasks + @ns.async_update_tasks([]) + refute @ns.include?("test") + refute @ns.has_pending_discoveries? + end + + it "deregisters tasks that are not in the set of known tasks" do + deployed_task, = make_deployed_task("test", "something") + + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + @ns.resolve_discovered_tasks + + @ns.async_update_tasks([]) + refute @ns.has_pending_discoveries? 
+ refute @ns.include?("test") + end + + it "stops the discovery of an IOR if its resolution failed" do + deployed_task, = make_deployed_task("test", "something") + + flexmock(Orocos::TaskContext) + .should_receive(:new) + .once.and_raise(RuntimeError.new("some reason")) + + @ns.async_update_tasks([deployed_task]) + assert @ns.has_pending_discoveries? + @ns.wait_for_task_discovery + @ns.resolve_discovered_tasks + refute @ns.has_pending_discoveries? + refute @ns.include?("test") + end + + it "raises in resolve_discovered_tasks if an unexpected exceptions " \ + "was raised by discover_task" do + error_m = Class.new(RuntimeError) + flexmock(@ns).should_receive(:discover_task).and_raise(error_m) + deployed_task, = make_deployed_task("test", "something") + @ns.async_update_tasks([deployed_task]) + @ns.wait_for_task_discovery + assert_raises(NameService::AsyncDiscoveryError) do + @ns.resolve_discovered_tasks + end + end + end + + describe "on_task_added" do + it "calls the block when a new task is registered" do + mock = flexmock + mock.should_receive(:registered).with("test").once + @ns.on_task_added { |name| mock.registered(name) } + @ns.register(flexmock, name: "test") + end + + it "already has registered the task when the callback is called" do + test_task = flexmock + mock = flexmock + mock.should_receive(:registered).with(test_task).once + @ns.on_task_added do |name| + mock.registered(@ns.get(name)) + end + + @ns.register(test_task, name: "test") + end + + it "accepts more than one callback" do + mock = flexmock + mock.should_receive(:registered).with("test", 1).once + mock.should_receive(:registered).with("test", 2).once + @ns.on_task_added { |name| mock.registered(name, 1) } + @ns.on_task_added { |name| mock.registered(name, 2) } + + @ns.register(flexmock, name: "test") + end + + it "processes all callbacks even if one raises" do + mock = flexmock + mock.should_receive(:registered).with("test", 1).once + mock.should_receive(:registered).with("test", 2).once + 
error_m = Class.new(RuntimeError) + @ns.on_task_added do |name| + mock.registered(name, 1) + raise error_m + end + @ns.on_task_added { |name| mock.registered(name, 2) } + + assert_raises(error_m) do + @ns.register(flexmock, name: "test") + end + end + + it "stops calling after the callback is disposed" do + mock = flexmock + mock.should_receive(:registered).never + @ns.on_task_added { |name| mock.registered(name) } + .dispose + + @ns.register(flexmock, name: "test") + end + end + + describe "on_task_removed" do + before do + @ns.register(@test_task = flexmock, name: "test") + end + + it "calls the block when a task is removed" do + mock = flexmock + mock.should_receive(:removed).with("test").once + @ns.on_task_removed { |name| mock.removed(name) } + @ns.deregister("test") + end + + it "already has removed the task when the callback is called" do + @ns.on_task_removed do |name| + refute @ns.include?(name) + end + + @ns.deregister("test") + end + + it "accepts more than one callback" do + mock = flexmock + mock.should_receive(:removed).with("test", 1).once + mock.should_receive(:removed).with("test", 2).once + @ns.on_task_removed { |name| mock.removed(name, 1) } + @ns.on_task_removed { |name| mock.removed(name, 2) } + + @ns.deregister("test") + end + + it "processes all callbacks even if one raises" do + mock = flexmock + mock.should_receive(:removed).with("test", 1).once + mock.should_receive(:removed).with("test", 2).once + error_m = Class.new(RuntimeError) + @ns.on_task_removed do |name| + mock.removed(name, 1) + raise error_m + end + @ns.on_task_removed { |name| mock.removed(name, 2) } + + assert_raises(error_m) { @ns.deregister("test") } + end + + it "stops calling after the callback is disposed" do + mock = flexmock + mock.should_receive(:removed).never + @ns.on_task_removed { |name| mock.removed(name) } + .dispose + + @ns.deregister("test") + end + + it "is called for all tasks when the name service is cleared, " \ + "after the items have been removed" do + 
mock = flexmock + mock.should_receive(:removed).with("test", false).once + @ns.on_task_removed do |name| + mock.removed(name, @ns.include?("test")) + end + + @ns.cleanup + end + end + + describe "#get" do + it "raises if the task is not registered" do + assert_raises(Orocos::NotFound) do + @ns.get("does_not_exist") + end + end + end + + describe "#ior" do + it "returns the IOR of a registered task" do + _, task = make_deployed_task("test", "some") + @ns.register(task) + assert_equal task.ior, @ns.ior("test") + end + + it "does not return the IOR of a task being discovered" do + deployed_task, = make_deployed_task("test", "some") + @ns.async_update_tasks([deployed_task]) + assert_raises(Orocos::NotFound) do + @ns.ior("test") + end + end + + it "raises if the given name is not registered" do + assert_raises(Orocos::NotFound) do + @ns.ior("test") + end + end + end + + def deployed_task_s + @deployed_task_s ||= + Struct.new(:name, :ior, :orogen_model_name, keyword_init: true) + end + + def make_deployed_task(name, orogen_model_name) + task = make_ruby_task(name) + deployed_task = deployed_task_s.new( + name: name, ior: task.ior, orogen_model_name: orogen_model_name + ) + [deployed_task, task] + end + + def make_ruby_task(name) + t = Orocos.allow_blocking_calls do + Orocos::RubyTasks::TaskContext.new(name) + end + @ruby_tasks << t + t + end + end + end + end +end From 1dbeb4621dbfeb7411776f0e7d0d6633a579696d Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Wed, 18 Dec 2024 22:13:26 -0300 Subject: [PATCH 055/158] feat: replace the task resolution code in Telemetry::UI::RuntimeState by Async::NameService --- lib/syskit/telemetry/async.rb | 11 ++ lib/syskit/telemetry/ui/name_service.rb | 110 -------------------- lib/syskit/telemetry/ui/runtime_state.rb | 118 ++------------------- test/telemetry/ui/test_name_service.rb | 125 ----------------------- 4 files changed, 20 insertions(+), 344 deletions(-) create mode 100644 lib/syskit/telemetry/async.rb delete mode 100644 
lib/syskit/telemetry/ui/name_service.rb delete mode 100644 test/telemetry/ui/test_name_service.rb diff --git a/lib/syskit/telemetry/async.rb b/lib/syskit/telemetry/async.rb new file mode 100644 index 000000000..778585e17 --- /dev/null +++ b/lib/syskit/telemetry/async.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require "syskit/telemetry/async/name_service" + +module Syskit + module Telemetry + # Asynchronous access to remote state + module Async + end + end +end diff --git a/lib/syskit/telemetry/ui/name_service.rb b/lib/syskit/telemetry/ui/name_service.rb deleted file mode 100644 index 4cf13fe73..000000000 --- a/lib/syskit/telemetry/ui/name_service.rb +++ /dev/null @@ -1,110 +0,0 @@ -# frozen_string_literal: true - -module Syskit - module Telemetry - module UI - # Copy of Runkit's local name service to use with orocos.rb - class NameService < Orocos::NameServiceBase - # A new NameService instance - # - # @param [Hash] tasks The tasks which are - # known by the name service. - # @note The namespace is always "Local" - def initialize(tasks = []) - @registered_tasks = Concurrent::Hash.new - @task_added_callbacks = Concurrent::Array.new - @task_removed_callbacks = Concurrent::Array.new - tasks.each { |task| register(task) } - end - - def names - @registered_tasks.keys - end - - def include?(name) - @registered_tasks.key?(name) - end - - # (see NameServiceBase#get) - def ior(name) - task = @registered_tasks[name] - return task.ior if task.respond_to?(:ior) - - raise Orocos::NotFound, "task context #{name} cannot be found." - end - - # (see NameServiceBase#get) - def get(name, **) - task = @registered_tasks[name] - return task if task - - raise Orocos::NotFound, "task context #{name} cannot be found." - end - - # Registers the given {Orocos::TaskContext} on the name service. - # If a name is provided, it will be used as an alias. If no name is - # provided, the name of the task is used. This is true even if the - # task name is renamed later. 
- # - # @param [Orocos::TaskContext] task The task. - # @param [String] name Optional name which is used to register the task. - def register(task, name: task.name) - @registered_tasks[name] = task - trigger_task_added(name) - end - - # Deregisters the given name or task from the name service. - # - # @param [String,TaskContext] name The name or task - def deregister(name) - task = @registered_tasks.delete(name) - trigger_task_removed(name) - task - end - - # (see Base#cleanup) - def cleanup - names = @registered_tasks.keys - @registered_tasks.clear - names.each { trigger_task_removed(name) } - end - - def to_async - self - end - - def on_task_added(&block) - @task_added_callbacks << block - Roby.disposable { @task_added_callbacks.delete(block) } - end - - def trigger_task_added(name) - error = nil - @task_added_callbacks.each do |block| - block.call(name) - rescue RuntimeError => e - error = e - end - - raise error if error - end - - def on_task_removed(&block) - @task_removed_callbacks << block - Roby.disposable { @task_removed_callbacks.delete(block) } - end - - def trigger_task_removed(name) - error = nil - @task_removed_callbacks.each do |block| - block.call(name) - rescue RuntimeError => e - error = e - end - - raise error if error - end - end - end - end -end diff --git a/lib/syskit/telemetry/ui/runtime_state.rb b/lib/syskit/telemetry/ui/runtime_state.rb index 372b388be..f824ed239 100644 --- a/lib/syskit/telemetry/ui/runtime_state.rb +++ b/lib/syskit/telemetry/ui/runtime_state.rb @@ -15,7 +15,7 @@ require "syskit/telemetry/ui/global_state_label" require "syskit/telemetry/ui/app_start_dialog" require "syskit/telemetry/ui/batch_manager" -require "syskit/telemetry/ui/name_service" +require "syskit/telemetry/async" require "syskit/interface/v2" module Syskit @@ -129,16 +129,6 @@ def initialize(parent: nil, @has_poll_ready_deployments = true - @task_discovery_queue = Concurrent::Hash.new - @task_discovery_result = Queue.new - @task_discovery_mtx = Mutex.new - 
@task_discovery_signal = ConditionVariable.new - @task_discovery_thread = Thread.new do - loop do - task_discovery_thread - end - end - @syskit = syskit @syskit_run_arguments = SyskitRunArguments.new(robot: "default", set: []) @@ -249,7 +239,7 @@ def reset @call_guards = {} @orogen_models = {} - @name_service = NameService.new + @name_service = Async::NameService.new @async_name_service = Orocos::Async::NameService.new(@name_service) end @@ -563,14 +553,11 @@ def poll_syskit_interface update_current_job_task_names if current_job rescue Roby::Interface::ComError # rubocop:disable Lint/SuppressedException end - - task_discovery_apply_result else reset_current_deployments reset_current_job reset_name_service reset_task_inspector - reset_task_discovery end syskit.poll @@ -702,106 +689,19 @@ def report_app_error(error) end def update_name_service(deployments) - removed_tasks = @task_discovery_mtx.synchronize do - to_remove = discover_new_tasks(deployments) - - removed_tasks = to_remove.map do - @task_discovery_queue.delete(_1) - @name_service.deregister(_1) - end.compact - - @task_discovery_signal.broadcast - removed_tasks - end - - removed_tasks.each(&:dispose) - end - - def task_discovery_thread - task_name, deployed_task = @task_discovery_mtx.synchronize do - until (t = @task_discovery_queue.first) - @task_discovery_signal.wait(@task_discovery_mtx) - end - t - end - - ior = deployed_task.ior - task = Orocos::TaskContext.new( - deployed_task.ior, - name: task_name, - model: orogen_model_from_name(deployed_task.orogen_model_name) - ) - - @task_discovery_result << [task_name, task] - rescue Orocos::ComError => e - STDERR.puts "Failed discovery of task #{deployed_task.name}: #{e.message}" - ensure - @task_discovery_mtx.synchronize do - if ior == @task_discovery_queue[task_name]&.ior - @task_discovery_queue.delete(task_name) - end - end - end - - def task_discovery_apply_result - loop do - name, task = @task_discovery_result.pop(true) - async_task = 
Orocos::Async::CORBA::TaskContext.new(use: task) - @name_service.register(async_task, name: name) - end - rescue ThreadError - end - - def reset_task_discovery - @task_discovery_queue.clear - @task_discovery_result.clear - end - - OROGEN_LOGGER_NAMES = %w[logger::Logger OroGen.logger.Logger] - - # @api private - # - # Process the deployment information received from the syskit master, - # updating task discovery - # - # This method MUST be called with the mutex that protects the discovery - # queue taken - # - # @param [Array] deployments - # @return [Array,Array] names of the tasks that should - # be added to the discovery queue, and of the tasks that should be - # de-registered from the name service - def discover_new_tasks(deployments) - # Get all the names and remove them when we find them in the - # deployments. What's left is what needs to be removed - names_discovered = @name_service.names - names_in_discovery = @task_discovery_queue.keys - - deployments.each do |d| - d.deployed_tasks.each do |deployed_task| + all_deployed_tasks = deployments.flat_map do |d| + d.deployed_tasks.find_all do |deployed_task| model_name = deployed_task.orogen_model_name - task_name = deployed_task.name - - if hide_loggers? && OROGEN_LOGGER_NAMES.include?(model_name) - next - end - - task_name = deployed_task.name - if names_discovered.delete(task_name) - next if deployed_task.ior == @name_service.ior(task_name) - end - - names_in_discovery.delete(task_name) - @task_discovery_queue[task_name] = deployed_task + !hide_loggers? 
|| !OROGEN_LOGGER_NAMES.include?(model_name) end end - - names_discovered | names_in_discovery + @name_service.async_update_tasks(all_deployed_tasks) end + OROGEN_LOGGER_NAMES = %w[logger::Logger OroGen.logger.Logger].freeze + def reset_name_service - all = @name_service.names.dup - all.each { @name_service.deregister(_1) } + @name_service.cleanup end def orogen_model_from_name(name) diff --git a/test/telemetry/ui/test_name_service.rb b/test/telemetry/ui/test_name_service.rb deleted file mode 100644 index 8dee62362..000000000 --- a/test/telemetry/ui/test_name_service.rb +++ /dev/null @@ -1,125 +0,0 @@ -# frozen_string_literal: true - -require "syskit/test/self" -require "syskit/telemetry/ui/name_service" - -module Syskit - module Telemetry - module UI - describe NameService do - before do - @name_service = NameService.new - end - - describe "on_task_added" do - it "calls the block when a new task is registered" do - mock = flexmock - mock.should_receive(:registered).with("test").once - @name_service.on_task_added { |name| mock.registered(name) } - @name_service.register(flexmock, name: "test") - end - - it "already has registered the task when the callback is called" do - test_task = flexmock - mock = flexmock - mock.should_receive(:registered).with(test_task).once - @name_service.on_task_added do |name| - mock.registered(@name_service.get(name)) - end - - @name_service.register(test_task, name: "test") - end - - it "accepts more than one callback" do - mock = flexmock - mock.should_receive(:registered).with("test", 1).once - mock.should_receive(:registered).with("test", 2).once - @name_service.on_task_added { |name| mock.registered(name, 1) } - @name_service.on_task_added { |name| mock.registered(name, 2) } - - @name_service.register(flexmock, name: "test") - end - - it "processes all callbacks even if one raises" do - mock = flexmock - mock.should_receive(:registered).with("test", 1).once - mock.should_receive(:registered).with("test", 2).once - error_m = 
Class.new(RuntimeError) - @name_service.on_task_added do |name| - mock.registered(name, 1) - raise error_m - end - @name_service.on_task_added { |name| mock.registered(name, 2) } - - assert_raises(error_m) do - @name_service.register(flexmock, name: "test") - end - end - - it "stops calling after the callback is disposed" do - mock = flexmock - mock.should_receive(:registered).never - @name_service.on_task_added { |name| mock.registered(name) } - .dispose - - @name_service.register(flexmock, name: "test") - end - end - - describe "on_task_removed" do - before do - @name_service.register(@test_task = flexmock, name: "test") - end - - it "calls the block when a task is removed" do - mock = flexmock - mock.should_receive(:removed).with("test").once - @name_service.on_task_removed { |name| mock.removed(name) } - @name_service.deregister("test") - end - - it "already has removed the task when the callback is called" do - @name_service.on_task_removed do |name| - refute @name_service.include?(name) - end - - @name_service.deregister("test") - end - - it "accepts more than one callback" do - mock = flexmock - mock.should_receive(:removed).with("test", 1).once - mock.should_receive(:removed).with("test", 2).once - @name_service.on_task_removed { |name| mock.removed(name, 1) } - @name_service.on_task_removed { |name| mock.removed(name, 2) } - - @name_service.deregister("test") - end - - it "processes all callbacks even if one raises" do - mock = flexmock - mock.should_receive(:removed).with("test", 1).once - mock.should_receive(:removed).with("test", 2).once - error_m = Class.new(RuntimeError) - @name_service.on_task_removed do |name| - mock.removed(name, 1) - raise error_m - end - @name_service.on_task_removed { |name| mock.removed(name, 2) } - - assert_raises(error_m) { @name_service.deregister("test") } - end - - it "stops calling after the callback is disposed" do - mock = flexmock - mock.should_receive(:removed).never - @name_service.on_task_removed { |name| 
mock.removed(name) } - .dispose - - @name_service.deregister("test") - end - end - end - end - end -end From 41ab7983357d838215fa0ea2bb44aa3f9244d784 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Thu, 19 Dec 2024 14:49:08 -0300 Subject: [PATCH 056/158] feat: enable v2 protocol extensions in 'telemetry ui' --- lib/syskit/telemetry/cli.rb | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/syskit/telemetry/cli.rb b/lib/syskit/telemetry/cli.rb index f6a4e0d22..f772df56b 100644 --- a/lib/syskit/telemetry/cli.rb +++ b/lib/syskit/telemetry/cli.rb @@ -40,8 +40,12 @@ def ui end no_commands do # rubocop:disable Metrics/BlockLength - def roby_setup + def roby_setup # rubocop:disable Metrics/AbcSize Roby.app.using "syskit" + Roby.app.guess_app_dir + Roby.app.load_config_yaml + Roby.app.require_v2_protocol_extensions + Syskit.conf.only_load_models = true # We don't need the process server, win some startup time Syskit.conf.disables_local_process_server = true From 57454e91d7c46c6b7fb610a18a17fbc34b701faa Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Thu, 19 Dec 2024 14:49:33 -0300 Subject: [PATCH 057/158] fix: Qt's timer.start is in milliseconds, no seconds --- lib/syskit/telemetry/cli.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/syskit/telemetry/cli.rb b/lib/syskit/telemetry/cli.rb index f772df56b..bf7c1c5af 100644 --- a/lib/syskit/telemetry/cli.rb +++ b/lib/syskit/telemetry/cli.rb @@ -31,7 +31,7 @@ def ui @thread_pass_timer.connect(SIGNAL("timeout()")) do Thread.pass end - @thread_pass_timer.start(0.01) + @thread_pass_timer.start(10) require "syskit/scripts/common" Syskit::Scripts.run do From acb527d95f1975f7d1dabe05489c2255a7fe5bc9 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Thu, 19 Dec 2024 15:24:53 -0300 Subject: [PATCH 058/158] fix: disable Vizkit's global shortcut We seldom use it, and it actually is very heavy on CPU --- lib/syskit/telemetry/cli.rb | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/lib/syskit/telemetry/cli.rb b/lib/syskit/telemetry/cli.rb index bf7c1c5af..733e00a4c 100644 --- a/lib/syskit/telemetry/cli.rb +++ b/lib/syskit/telemetry/cli.rb @@ -80,7 +80,7 @@ def runtime_state(host, port) main.restore_from_settings main.show - Vizkit.exec + Vizkit.exec(global_shortcuts: false) main.save_to_settings main.settings.sync end From 291acff5413a0d408d92f7baa4bc147692571faa Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Thu, 19 Dec 2024 15:26:12 -0300 Subject: [PATCH 059/158] fix: do not keep backward compatibility on the v2 interface It's seldom usable as it is. --- lib/syskit/telemetry/ui/runtime_state.rb | 25 ++---------------------- 1 file changed, 2 insertions(+), 23 deletions(-) diff --git a/lib/syskit/telemetry/ui/runtime_state.rb b/lib/syskit/telemetry/ui/runtime_state.rb index f824ed239..ca1a8c988 100644 --- a/lib/syskit/telemetry/ui/runtime_state.rb +++ b/lib/syskit/telemetry/ui/runtime_state.rb @@ -127,8 +127,6 @@ def initialize(parent: nil, super(parent) - @has_poll_ready_deployments = true - @syskit = syskit @syskit_run_arguments = SyskitRunArguments.new(robot: "default", set: []) @@ -545,11 +543,7 @@ def poll_syskit_interface if syskit.connected? 
begin display_current_cycle_index_and_time - if @has_poll_ready_deployments - query_deployment_update_v2 - else - query_deployment_update_v1 - end + query_deployment_update update_current_job_task_names if current_job rescue Roby::Interface::ComError # rubocop:disable Lint/SuppressedException end @@ -589,14 +583,7 @@ def process_current_deployments update_task_inspector(names) end - def query_deployment_update_v1 - polling_call(["syskit"], "deployments") do |deployments| - @current_deployments = deployments - process_current_deployments - end - end - - def query_deployment_update_v2 + def query_deployment_update polling_call( ["syskit"], "poll_ready_deployments", known: @current_deployments.map(&:id) @@ -674,14 +661,6 @@ def async_call(path, method_name, *args) end def report_app_error(error) - if error.class_name == "NoMethodError" && - error.message.match?(/poll_ready_deployments/) - warn "remote interface does not support poll_ready_deployments" - warn "switching to old inefficient polling method" - @has_poll_ready_deployments = false - return - end - warn error.message error.backtrace.each do |line| warn " #{line}" From 6caa5af2865e587021cc50bed2a13df52a89f76c Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 30 Dec 2024 16:49:24 -0300 Subject: [PATCH 060/158] fix: reactivity of the ruby threads Thread.pass is definitely NOT enough. Sleep is the only way to go. This makes the Syskit task inspector resolve tasks at a proper speed. 
--- lib/syskit/telemetry/cli.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/syskit/telemetry/cli.rb b/lib/syskit/telemetry/cli.rb index 733e00a4c..9b2f9518f 100644 --- a/lib/syskit/telemetry/cli.rb +++ b/lib/syskit/telemetry/cli.rb @@ -29,7 +29,7 @@ def ui $qApp.disable_threading # rubocop:disable Style/GlobalVars @thread_pass_timer = Qt::Timer.new @thread_pass_timer.connect(SIGNAL("timeout()")) do - Thread.pass + sleep 0.01 end @thread_pass_timer.start(10) From 6cdce399677471a186d48188c9c06f5f8b30332c Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Thu, 19 Dec 2024 15:26:48 -0300 Subject: [PATCH 061/158] fix: the hide loggers checkbox does not need to trigger an update anymore It will be applied on next update --- lib/syskit/telemetry/ui/runtime_state.rb | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/syskit/telemetry/ui/runtime_state.rb b/lib/syskit/telemetry/ui/runtime_state.rb index ca1a8c988..228a44328 100644 --- a/lib/syskit/telemetry/ui/runtime_state.rb +++ b/lib/syskit/telemetry/ui/runtime_state.rb @@ -417,9 +417,6 @@ def create_ui @ui_task_inspector = Vizkit.default_loader.TaskInspector ) @ui_hide_loggers.checked = false - @ui_hide_loggers.connect SIGNAL("toggled(bool)") do |_checked| - update_tasks_info - end @ui_show_expanded_job.checked = true @ui_show_expanded_job.connect SIGNAL("toggled(bool)") do |checked| job_expanded_status.visible = checked From cb0c6591ab115c7a26332f529539b346330ac947 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Thu, 19 Dec 2024 16:14:13 -0300 Subject: [PATCH 062/158] feat: start implementing a replacement for Orocos::Async::TaskContextProxy --- lib/syskit/telemetry/async.rb | 5 + lib/syskit/telemetry/async/attribute.rb | 11 + .../telemetry/async/interface_object.rb | 58 ++++ lib/syskit/telemetry/async/name_service.rb | 26 +- lib/syskit/telemetry/async/port.rb | 11 + lib/syskit/telemetry/async/property.rb | 11 + lib/syskit/telemetry/async/task_context.rb | 241 +++++++++++++++++ 
lib/syskit/telemetry/ui/runtime_state.rb | 56 ++-- test/telemetry/async/test_name_service.rb | 14 +- test/telemetry/async/test_task_context.rb | 256 ++++++++++++++++++ 10 files changed, 647 insertions(+), 42 deletions(-) create mode 100644 lib/syskit/telemetry/async/attribute.rb create mode 100644 lib/syskit/telemetry/async/interface_object.rb create mode 100644 lib/syskit/telemetry/async/port.rb create mode 100644 lib/syskit/telemetry/async/property.rb create mode 100644 lib/syskit/telemetry/async/task_context.rb create mode 100644 test/telemetry/async/test_task_context.rb diff --git a/lib/syskit/telemetry/async.rb b/lib/syskit/telemetry/async.rb index 778585e17..2aacd51b0 100644 --- a/lib/syskit/telemetry/async.rb +++ b/lib/syskit/telemetry/async.rb @@ -1,6 +1,11 @@ # frozen_string_literal: true require "syskit/telemetry/async/name_service" +require "syskit/telemetry/async/task_context" +require "syskit/telemetry/async/interface_object" +require "syskit/telemetry/async/attribute" +require "syskit/telemetry/async/property" +require "syskit/telemetry/async/port" module Syskit module Telemetry diff --git a/lib/syskit/telemetry/async/attribute.rb b/lib/syskit/telemetry/async/attribute.rb new file mode 100644 index 000000000..f3043f767 --- /dev/null +++ b/lib/syskit/telemetry/async/attribute.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Callback-based API for remote task ports + class Attribute < InterfaceObject + end + end + end +end diff --git a/lib/syskit/telemetry/async/interface_object.rb b/lib/syskit/telemetry/async/interface_object.rb new file mode 100644 index 000000000..d1a883dc1 --- /dev/null +++ b/lib/syskit/telemetry/async/interface_object.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Class that defines hooks for {InterfaceObjects} + # + # This class is needed so that we can cleanly overload the hook definition + # methods. 
+ class InterfaceObjectHooks + include Roby::Hooks + include Roby::Hooks::InstanceHooks + + define_hooks :on_reachable + define_hooks :on_unreachable + define_hooks :on_data + define_hooks :on_raw_data + end + + # Callback-based API to the orocos.rb property API + class InterfaceObject < InterfaceObjectHooks + # @return [String] the property name + attr_reader :name + # @return [Class] the property type + attr_reader :type + + def initialize(name, type) + super() + + @name = name + @type = type + end + + def reachable? + @raw_object + end + + # Tie this async property with the underlying direct access object + def reachable!(raw_object) + @raw_object = raw_object + run_hook :on_reachable, raw_object + end + + # Tie this async property with the underlying object + def unreachable! + @raw_object = nil + run_hook :on_unreachable + end + + def on_reachable(&block) + super(&block) + + block.call if @raw_object + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/name_service.rb b/lib/syskit/telemetry/async/name_service.rb index 993bcd4df..c3c44800f 100644 --- a/lib/syskit/telemetry/async/name_service.rb +++ b/lib/syskit/telemetry/async/name_service.rb @@ -28,6 +28,10 @@ def initialize(thread_count: 1) Concurrent::FixedThreadPool.new(thread_count) end + def tasks + @registered_tasks.values + end + def dispose cleanup @discovery_executor.shutdown @@ -63,9 +67,18 @@ def async_update_tasks(tasks) # We never spawn two futures to resolve the same name. 
Instead, # when we get the feature result, we check whether the # IOR has changed, and act accordingly + queue_new_tasks_discovery(tasks) + end + + # @api private + # + # Filter a list of tasks, queueing futures to discover the new ones + # + # @param [#ior,#name] tasks list of tasks to be discovered + def queue_new_tasks_discovery(tasks) tasks.each do |t| next if @discovery[t.name] - next if t.ior == @registered_tasks[t.name]&.ior + next if t.ior == @registered_tasks[t.name]&.identity async_discover_task(t) end @@ -76,7 +89,7 @@ def async_update_tasks(tasks) def remove_changed_tasks(iors) @registered_tasks.dup.each do |name, task| new_ior = iors[name] - deregister(name).dispose if !new_ior || task.ior != new_ior + deregister(name).dispose if !new_ior || task.identity != new_ior end end @@ -143,7 +156,7 @@ def wait_for_task_discovery # # Remove a resolved task from the pending discoveries # - # @return [(String,Orocos::Async::TaskContext),nil] a resolved task or nil + # @return [(String,TaskContext),nil] a resolved task or nil # if there are none so far def pop_discovered_task loop do @@ -192,9 +205,8 @@ def discover_task(name, ior, orogen_model_name) name: name, model: orogen_model_from_name(orogen_model_name) ) - async_task = Orocos::Async::CORBA::TaskContext.new( - ior, use: task - ) + + async_task = TaskContext.discover(task) [ior, async_task] rescue StandardError => e @@ -213,7 +225,7 @@ def orogen_model_from_name(name) # (see NameServiceBase#get) def ior(name) task = @registered_tasks[name] - return task.ior if task.respond_to?(:ior) + return task.identity if task.respond_to?(:identity) raise Orocos::NotFound, "task context #{name} cannot be found." 
end diff --git a/lib/syskit/telemetry/async/port.rb b/lib/syskit/telemetry/async/port.rb new file mode 100644 index 000000000..07992d8b5 --- /dev/null +++ b/lib/syskit/telemetry/async/port.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Callback-based API for remote task ports + class Port < InterfaceObject + end + end + end +end \ No newline at end of file diff --git a/lib/syskit/telemetry/async/property.rb b/lib/syskit/telemetry/async/property.rb new file mode 100644 index 000000000..e7bdf9924 --- /dev/null +++ b/lib/syskit/telemetry/async/property.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Callback-based API to the orocos.rb property API + class Property < InterfaceObject + end + end + end +end diff --git a/lib/syskit/telemetry/async/task_context.rb b/lib/syskit/telemetry/async/task_context.rb new file mode 100644 index 000000000..eafe2f4e1 --- /dev/null +++ b/lib/syskit/telemetry/async/task_context.rb @@ -0,0 +1,241 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + class TaskContextHooks + include Roby::Hooks + include Roby::Hooks::InstanceHooks + + define_hooks :on_state_change + define_hooks :on_reachable + define_hooks :on_unreachable + define_hooks :on_attribute_reachable + define_hooks :on_attribute_unreachable + define_hooks :on_property_reachable + define_hooks :on_property_unreachable + define_hooks :on_port_reachable + define_hooks :on_port_unreachable + end + + # Callback-based API to the orocos.rb task contexts + class TaskContext < TaskContextHooks + # The task context name + # + # @return [String] + attr_reader :name + + # A unique string that allows to identify a remote task + # + # @return [String] + attr_reader :identity + + # Discover information about a Orocos::TaskContext and create the + # corresponding {TaskContext} + # + # This is meant to be called in a separate thread + def 
self.discover(task) + async_task = TaskContext.new(task.name) + + # Already do an initial discovery of all the task's interface objects + state_reader = task.state_reader( + pull: true, type: :circular_buffer, size: 10 + ) + puts "#{Time.now} #{task.name}: created state reader" + raw_attributes = task.attribute_names.map { task.attribute(_1) } + puts "#{Time.now} #{task.name}: created attributes" + raw_properties = task.property_names.map { task.property(_1) } + puts "#{Time.now} #{task.name}: created properties" + raw_ports = task.port_names.map { task.port(_1) } + puts "#{Time.now} #{task.name}: created ports" + + # We can do this here ONLY BECAUSE we're populating an initial + # state. Further updates need to call the `discover_` methods in + # the main thread + async_task.reachable!(task, state_reader: state_reader) + async_task.discover_attributes(raw_attributes) + async_task.discover_properties(raw_properties) + async_task.discover_ports(raw_ports) + puts "#{Time.now} #{task.name}: discovered" + async_task + end + + def initialize(name) + @name = name + + @attributes = {} + @properties = {} + @ports = {} + + @current_state = nil + end + + def to_proxy + self + end + + # Declare that the remote task is not reachable anymore + # + # Must be called from the main thread. It also dispose of the underlying + # resources + def unreachable! + run_hook :on_unreachable + + @attributes.each_value do + run_hook :on_attribute_unreachable, _1 + _1.unreachable! + end + + @properties.each_value do + run_hook :on_property_unreachable, _1 + _1.unreachable! + end + + @ports.each_value do + run_hook :on_port_unreachable, _1 + _1.unreachable! + end + + dispose + end + + def reachable? 
+ @raw_task_context + end + + # Set the underlying task context + # + # Must be called from the main thread + def reachable!(task_context, state_reader:) + @raw_task_context = task_context + @identity = task_context.ior + state_read_init(state_reader) + run_hook :on_reachable, task_context + end + + def on_reachable(&block) + super + + block.call if reachable? + end + + def state_read_init(state_reader) + @state_reader = state_reader + + @state_read_queue = queue = Queue.new + @state_read_stop = event = Concurrent::Event.new + @state_read_thread = Thread.new do + state_read_poll_thread(state_reader, queue, event) + end + end + + def on_state_change(&block) + super + + block.call(@current_state) if @current_state + end + + def each_attribute(&block) + @attributes.each_value(&block) + end + + def each_property(&block) + @properties.each_value(&block) + end + + def each_port(&block) + @ports.each_value(&block) + end + + def each_property(&block) + @properties.each_value(&block) + end + + def on_attribute_reachable(&block) + super + + @attributes.each_value { block.call(_1) } + end + + def on_property_reachable(&block) + super + + @properties.each_value { block.call(_1) } + end + + def on_port_reachable(&block) + super + + @ports.each_value { block.call(_1) } + end + + def discover_attributes(raw_attributes) + @attributes = + raw_attributes.each_with_object({}) do |p, h| + async = Attribute.new(p.name, p.type) + async.reachable!(p) + h[p.name] = async + end + + @attributes.each_value { run_hook :on_attribute_reachable, _1 } + end + + def discover_properties(raw_properties) + @properties = + raw_properties.each_with_object({}) do |p, h| + async = Property.new(p.name, p.type) + async.reachable!(p) + h[p.name] = async + end + + @properties.each_value { run_hook :on_property_reachable, _1 } + end + + def discover_ports(raw_ports) + @ports = + raw_ports.each_with_object({}) do |p, h| + async = Port.new(p.name, p.type) + async.reachable!(p) + h[p.name] = async + end + + 
@ports.each_value { run_hook :on_port_reachable, _1 } + end + + def dispose + @raw_task_context = nil + + Concurrent::Promises.future(@state_reader, &:disconnect) + @properties.clear + end + + def state_read_poll_thread(reader, queue, stop, period: 0.1) + until stop.set? + tic = Time.now + while (state = reader.read_new) + queue << state + end + remaining = period - (Time.now - tic) + sleep remaining if remaining > 0.01 + end + end + + def poll(period: 0.1) + begin + while (new_state = read_new_state) + @current_state = new_state + run_hook :on_state_change, new_state + end + rescue ThreadError + sleep(period) + end + end + + def read_new_state + @state_read_queue.pop(true) + rescue ThreadError + end + end + end + end +end diff --git a/lib/syskit/telemetry/ui/runtime_state.rb b/lib/syskit/telemetry/ui/runtime_state.rb index 228a44328..34da4724d 100644 --- a/lib/syskit/telemetry/ui/runtime_state.rb +++ b/lib/syskit/telemetry/ui/runtime_state.rb @@ -54,9 +54,6 @@ class RuntimeState < Qt::Widget attr_reader :ui_task_inspector # A logging configuration widget we use to manage logging attr_reader :ui_logging_configuration - # The list of task names of the task currently displayed by the task - # inspector - attr_reader :current_orocos_tasks # Returns a list of actions that can be performed on the Roby # instance @@ -143,8 +140,7 @@ def initialize(parent: nil, create_ui @current_job = nil - @current_orocos_tasks = Set.new - @proxies = {} + @current_tasks = Array.new syskit.on_ui_event do |event_name, *args| if (w = @ui_event_widgets[event_name]) @@ -538,12 +534,10 @@ def create_ui_new_job # Sets up polling on a given syskit interface def poll_syskit_interface if syskit.connected? 
- begin - display_current_cycle_index_and_time - query_deployment_update - update_current_job_task_names if current_job - rescue Roby::Interface::ComError # rubocop:disable Lint/SuppressedException - end + display_current_cycle_index_and_time + query_deployment_update + update_current_job_task_names if current_job + poll_task_contexts else reset_current_deployments reset_current_job @@ -555,6 +549,10 @@ def poll_syskit_interface end slots "poll_syskit_interface()" + def poll_task_contexts + @name_service.each_task(&:poll) + end + def display_current_cycle_index_and_time return unless syskit.cycle_start_time @@ -569,15 +567,17 @@ def reset_current_job @current_job = nil @current_job_task_names = [] - update_task_inspector(@name_service.names) + update_task_inspector(@name_service.tasks) end def process_current_deployments update_name_service(@current_deployments) - names = @name_service.names - names &= @current_job_task_names if @current_job - update_task_inspector(names) + if @current_job + update_task_inspector(@current_job_tasks) + else + update_task_inspector(@name_service.tasks) + end end def query_deployment_update @@ -604,28 +604,24 @@ def reset_current_deployments def update_current_job_task_names polling_call [], "tasks_of_job", @current_job.job_id do |tasks| - @current_job_task_names = + # TODO: handle asynchronicity + @current_job_tasks = tasks - .map { _1.arguments[:orocos_name] } + .map { @name_service.get(_1.arguments[:orocos_name]) } .compact end end - def update_task_inspector(task_names) - orocos_tasks = task_names.to_set - removed = current_orocos_tasks - orocos_tasks - new = orocos_tasks - current_orocos_tasks - removed.each do |task_name| - ui_task_inspector.remove_task(task_name) + def update_task_inspector(tasks) + removed = @current_tasks - tasks + new = tasks - @current_tasks + removed.each do |task| + ui_task_inspector.remove_task(task.name) end - new.each do |task_name| - @proxies[task_name] ||= Orocos::Async::TaskContextProxy.new( - 
task_name, name_service: @async_name_service - ) - - ui_task_inspector.add_task(@proxies[task_name]) + new.each do |task| + ui_task_inspector.add_task(task) end - @current_orocos_tasks = orocos_tasks.dup + @current_tasks = tasks.dup end def reset_task_inspector diff --git a/test/telemetry/async/test_name_service.rb b/test/telemetry/async/test_name_service.rb index 3afce24c2..c5d800eec 100644 --- a/test/telemetry/async/test_name_service.rb +++ b/test/telemetry/async/test_name_service.rb @@ -1,7 +1,7 @@ # frozen_string_literal: true require "syskit/test/self" -require "syskit/telemetry/async/name_service" +require "syskit/telemetry/async" module Syskit module Telemetry @@ -23,7 +23,7 @@ module Async @ns.async_update_tasks([deployed_task]) @ns.wait_for_task_discovery @ns.resolve_discovered_tasks - assert_equal task.ior, @ns.get("test").ior + assert_equal task.ior, @ns.get("test").identity end it "does not re-resolve a registered task if the IOR matches" do @@ -46,7 +46,7 @@ module Async assert @ns.has_pending_discoveries? 
@ns.wait_for_task_discovery @ns.resolve_discovered_tasks - assert_equal task2.ior, @ns.get("test").ior + assert_equal task2.ior, @ns.get("test").identity end it "requeues the discovery if a task's IOR changed" do @@ -58,7 +58,7 @@ module Async @ns.async_update_tasks([deployed_task2]) @ns.wait_for_task_discovery @ns.resolve_discovered_tasks - assert_equal task2.ior, @ns.get("test").ior + assert_equal task2.ior, @ns.get("test").identity end it "does not register a task if it has been removed while it was " \ @@ -244,7 +244,11 @@ module Async describe "#ior" do it "returns the IOR of a registered task" do _, task = make_deployed_task("test", "some") - @ns.register(task) + async_task = Orocos.allow_blocking_calls do + TaskContext.discover(task) + end + assert_equal "test", async_task.name + @ns.register(async_task) assert_equal task.ior, @ns.ior("test") end diff --git a/test/telemetry/async/test_task_context.rb b/test/telemetry/async/test_task_context.rb new file mode 100644 index 000000000..e76e9d0f6 --- /dev/null +++ b/test/telemetry/async/test_task_context.rb @@ -0,0 +1,256 @@ +# frozen_string_literal: true + +require "syskit/test/self" +require "syskit/telemetry/async" + +module Syskit + module Telemetry + module Async + describe TaskContext do + before do + @ns = NameService.new + @ruby_tasks = [] + end + + after do + @ruby_tasks.each(&:dispose) + end + + describe ".discover" do + it "creates an async task already initialized with the remote " \ + "task's interface" do + t, async = make_async_task "test" + assert async.reachable? 
+ assert_equal t.ior, async.identity + assert_equal Set["state", "in", "out"], async.each_port.to_set(&:name) + assert_equal ["prop"], async.each_property.map(&:name) + assert_includes async.each_attribute.map(&:name), "attr" + end + end + + describe "state change notifications" do + it "adds a callback that is called when a state change " \ + "is received by #poll" do + task, async = make_async_task "test" + + states = [] + async.on_state_change { states << _1 } + assert_polling_eventually(async) { states == [:PRE_OPERATIONAL] } + + Orocos.allow_blocking_calls do + task.configure + task.start + end + assert_polling_eventually(async) do + states == %I[PRE_OPERATIONAL STOPPED RUNNING] + end + end + it "calls the block with the currently known state" do + task, async = make_async_task "test" + Orocos.allow_blocking_calls do + task.configure + task.start + end + states = [] + async.on_state_change { states << _1 } + assert_polling_eventually(async) do + states[-1] == :RUNNING + end + + states = [] + async.on_state_change { states << _1 } + assert_equal [:RUNNING], states + end + + it "does not call the block is no state is known" do + _, async = make_async_task "test" + record = flexmock + record.should_receive(:called).never + async.on_state_change { record.called } + end + end + + describe "reachability" do + it "is reachable right after .discover" do + _, async = make_async_task "test" + assert async.reachable? + end + + it "calls the reachability callback on registration" do + _, async = make_async_task "test" + record = flexmock + record.should_receive(:called).once + async.on_reachable { record.called } + end + + it "calls on_unreachable when unreachable! is called" do + _, async = make_async_task "test" + record = flexmock + record.should_receive(:called).once + async.on_unreachable { record.called } + async.unreachable! 
+ end + + it "does not call new reachable callbacks " \ + "if the task is not reachable" do + _, async = make_async_task "test" + async.unreachable! + record = flexmock + record.should_receive(:called).never + async.on_reachable { record.called } + end + end + + describe "attributes" do + it "calls the on_attribute_reachable hooks on registration" do + _, async = make_async_task "test" + attributes = [] + async.on_attribute_reachable { attributes << _1 } + assert_includes attributes.to_set(&:name), "attr" + end + + it "calls the attribute's on_reachable hook on registration" do + _, async = make_async_task "test" + m = flexmock + m.should_receive(:called).once + async.attribute("attr").on_reachable { m.called } + end + + it "calls the on_attribute_unreachable hooks when " \ + "the task becomes unreachable" do + _, async = make_async_task "test" + attributes = [] + async.on_attribute_unreachable { attributes << _1 } + async.unreachable! + assert_includes attributes.map(&:name), "attr" + end + + it "calls the attribute's on_unreachable hooks when " \ + "the task becomes unreachable" do + _, async = make_async_task "test" + attributes = [] + async.on_attribute_reachable { attributes << _1 } + m = flexmock + m.should_receive(:called).once + attributes[0].on_unreachable { m.called } + async.unreachable! 
+ end + end + + describe "properties" do + it "calls the on_property_reachable hooks on registration" do + _, async = make_async_task "test" + properties = [] + async.on_property_reachable { properties << _1 } + assert_equal ["prop"], properties.map(&:name) + end + + it "calls the property's on_reachable hook on registration" do + _, async = make_async_task "test" + m = flexmock + m.should_receive(:called).once + async.property("prop").on_reachable { m.called } + end + + it "calls the on_property_unreachable hooks when " \ + "the task becomes unreachable" do + _, async = make_async_task "test" + properties = [] + async.on_property_unreachable { properties << _1 } + async.unreachable! + assert_equal ["prop"], properties.map(&:name) + end + + it "calls the propertie's on_unreachable hooks when " \ + "the task becomes unreachable" do + _, async = make_async_task "test" + properties = [] + async.on_property_reachable { properties << _1 } + m = flexmock + m.should_receive(:called).once + properties[0].on_unreachable { m.called } + async.unreachable! + end + end + + describe "ports" do + it "calls the on_port_reachable hooks on registration" do + _, async = make_async_task "test" + ports = [] + async.on_port_reachable { ports << _1 } + assert_equal Set["state", "in", "out"], ports.to_set(&:name) + end + + it "calls the attribute's on_reachable hook on registration" do + _, async = make_async_task "test" + m = flexmock + m.should_receive(:called).once + async.port("in").on_reachable { m.called } + end + + it "calls the on_port_unreachable hooks when " \ + "the task becomes unreachable" do + _, async = make_async_task "test" + ports = [] + async.on_port_unreachable { ports << _1 } + async.unreachable! 
+ assert_equal Set["in", "out", "state"], ports.to_set(&:name) + end + + it "calls the port's on_unreachable hooks when " \ + "the task becomes unreachable" do + _, async = make_async_task "test" + ports = [] + async.on_port_reachable { ports << _1 } + m = flexmock + m.should_receive(:called).once + ports[0].on_unreachable { m.called } + async.unreachable! + end + end + + describe "ports" do + it "calls the on_port_reachable hooks on registration" do + _, async = make_async_task "test" + ports = [] + async.on_port_reachable { ports << _1 } + assert_equal Set["state", "in", "out"], ports.to_set(&:name) + end + end + + def make_ruby_task(name) + ruby_task = Orocos.allow_blocking_calls do + t = Orocos::RubyTasks::TaskContext.new(name) + t.create_attribute "attr", "/int16_t" + t.create_property "prop", "/int32_t" + t.create_input_port "in", "/float" + t.create_output_port "out", "/double" + t + end + @ruby_tasks << ruby_task + ruby_task + end + + def make_async_task(name) + t = make_ruby_task name + async = Orocos.allow_blocking_calls do + TaskContext.discover(t) + end + [t, async] + end + + def assert_polling_eventually(async, period: 0.01, timeout: 2, &block) + deadline = Time.now + timeout + while Time.now < deadline + async.poll + return if block.call + + sleep(period) + end + + flunk("condition not reached in #{timeout} seconds") + end + end + end + end +end From d59b524335c2145cb165b3f654c07042197a7d17 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Sat, 21 Dec 2024 22:04:32 -0300 Subject: [PATCH 063/158] fix: Orocos's async yields object names in the on_*_reachable hooks --- lib/syskit/telemetry/async/task_context.rb | 24 ++++++++++++++++------ test/telemetry/async/test_task_context.rb | 22 ++++++++------------ 2 files changed, 27 insertions(+), 19 deletions(-) diff --git a/lib/syskit/telemetry/async/task_context.rb b/lib/syskit/telemetry/async/task_context.rb index eafe2f4e1..4a252b901 100644 --- a/lib/syskit/telemetry/async/task_context.rb +++ 
b/lib/syskit/telemetry/async/task_context.rb @@ -82,17 +82,17 @@ def unreachable! run_hook :on_unreachable @attributes.each_value do - run_hook :on_attribute_unreachable, _1 + run_hook :on_attribute_unreachable, _1.name _1.unreachable! end @properties.each_value do - run_hook :on_property_unreachable, _1 + run_hook :on_property_unreachable, _1.name _1.unreachable! end @ports.each_value do - run_hook :on_port_unreachable, _1 + run_hook :on_port_unreachable, _1.name _1.unreachable! end @@ -154,19 +154,31 @@ def each_property(&block) def on_attribute_reachable(&block) super - @attributes.each_value { block.call(_1) } + @attributes.each_key { block.call(_1) } + end + + def attribute(name) + @attributes.fetch(name) end def on_property_reachable(&block) super - @properties.each_value { block.call(_1) } + @properties.each_key { block.call(_1) } + end + + def property(name) + @properties.fetch(name) end def on_port_reachable(&block) super - @ports.each_value { block.call(_1) } + @ports.each_key { block.call(_1) } + end + + def port(name) + @ports.fetch(name) end def discover_attributes(raw_attributes) diff --git a/test/telemetry/async/test_task_context.rb b/test/telemetry/async/test_task_context.rb index e76e9d0f6..2b5d9e0c1 100644 --- a/test/telemetry/async/test_task_context.rb +++ b/test/telemetry/async/test_task_context.rb @@ -106,7 +106,7 @@ module Async _, async = make_async_task "test" attributes = [] async.on_attribute_reachable { attributes << _1 } - assert_includes attributes.to_set(&:name), "attr" + assert_includes attributes, "attr" end it "calls the attribute's on_reachable hook on registration" do @@ -122,17 +122,15 @@ module Async attributes = [] async.on_attribute_unreachable { attributes << _1 } async.unreachable! 
- assert_includes attributes.map(&:name), "attr" + assert_includes attributes, "attr" end it "calls the attribute's on_unreachable hooks when " \ "the task becomes unreachable" do _, async = make_async_task "test" - attributes = [] - async.on_attribute_reachable { attributes << _1 } m = flexmock m.should_receive(:called).once - attributes[0].on_unreachable { m.called } + async.attribute("attr").on_unreachable { m.called } async.unreachable! end end @@ -142,7 +140,7 @@ module Async _, async = make_async_task "test" properties = [] async.on_property_reachable { properties << _1 } - assert_equal ["prop"], properties.map(&:name) + assert_equal ["prop"], properties end it "calls the property's on_reachable hook on registration" do @@ -158,7 +156,7 @@ module Async properties = [] async.on_property_unreachable { properties << _1 } async.unreachable! - assert_equal ["prop"], properties.map(&:name) + assert_equal ["prop"], properties end it "calls the propertie's on_unreachable hooks when " \ @@ -168,7 +166,7 @@ module Async async.on_property_reachable { properties << _1 } m = flexmock m.should_receive(:called).once - properties[0].on_unreachable { m.called } + async.property("prop").on_unreachable { m.called } async.unreachable! end end @@ -178,7 +176,7 @@ module Async _, async = make_async_task "test" ports = [] async.on_port_reachable { ports << _1 } - assert_equal Set["state", "in", "out"], ports.to_set(&:name) + assert_equal Set["state", "in", "out"], ports.to_set end it "calls the attribute's on_reachable hook on registration" do @@ -194,17 +192,15 @@ module Async ports = [] async.on_port_unreachable { ports << _1 } async.unreachable! 
- assert_equal Set["in", "out", "state"], ports.to_set(&:name) + assert_equal Set["in", "out", "state"], ports.to_set end it "calls the port's on_unreachable hooks when " \ "the task becomes unreachable" do _, async = make_async_task "test" - ports = [] - async.on_port_reachable { ports << _1 } m = flexmock m.should_receive(:called).once - ports[0].on_unreachable { m.called } + async.port("in").on_unreachable { m.called } async.unreachable! end end From 265e3ab4eb6b3797044699e3c0746d93d49da084 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Sun, 22 Dec 2024 08:43:44 -0300 Subject: [PATCH 064/158] fix: job selection in the telemetry UI --- lib/syskit/telemetry/async/name_service.rb | 16 ++++++++++++++-- lib/syskit/telemetry/ui/runtime_state.rb | 9 ++++++--- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/lib/syskit/telemetry/async/name_service.rb b/lib/syskit/telemetry/async/name_service.rb index c3c44800f..e38911430 100644 --- a/lib/syskit/telemetry/async/name_service.rb +++ b/lib/syskit/telemetry/async/name_service.rb @@ -230,9 +230,21 @@ def ior(name) raise Orocos::NotFound, "task context #{name} cannot be found." end - # (see NameServiceBase#get) + # Return a task from its name, or nil if it does not exist + # + # @param [String] name + # @return [TaskContext,nil] + def find(name) + @registered_tasks[name] + end + + # Return a task from its name, or raise if it does not exist + # + # @param [String] name + # @return [TaskContext] + # @raise [Orocos::NotFound] def get(name, **) - task = @registered_tasks[name] + task = find(name) return task if task raise Orocos::NotFound, "task context #{name} cannot be found." 
diff --git a/lib/syskit/telemetry/ui/runtime_state.rb b/lib/syskit/telemetry/ui/runtime_state.rb index 34da4724d..194b6b7f0 100644 --- a/lib/syskit/telemetry/ui/runtime_state.rb +++ b/lib/syskit/telemetry/ui/runtime_state.rb @@ -140,7 +140,8 @@ def initialize(parent: nil, create_ui @current_job = nil - @current_tasks = Array.new + @current_job_tasks = [] + @current_tasks = [] syskit.on_ui_event do |event_name, *args| if (w = @ui_event_widgets[event_name]) @@ -604,11 +605,13 @@ def reset_current_deployments def update_current_job_task_names polling_call [], "tasks_of_job", @current_job.job_id do |tasks| - # TODO: handle asynchronicity + # TODO: handle asynchronicity, the tasks may not be already + # discovered and/or the @current_job_tasks = tasks - .map { @name_service.get(_1.arguments[:orocos_name]) } + .map { _1.arguments[:orocos_name] } .compact + .map { @name_service.find(_1) } end end From 8394ea25b9b458dc620d36557439c2ca8d47e3a6 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Sun, 22 Dec 2024 08:44:43 -0300 Subject: [PATCH 065/158] feat: implement just enough of the orocos'rb Async interface for the task inspector This is only the API surface. Data is currently still not transferred. 
--- lib/syskit/telemetry/async.rb | 4 +- lib/syskit/telemetry/async/attribute.rb | 9 +- lib/syskit/telemetry/async/input_port.rb | 18 +++ .../telemetry/async/interface_object.rb | 26 +++- lib/syskit/telemetry/async/output_port.rb | 18 +++ lib/syskit/telemetry/async/port.rb | 11 -- lib/syskit/telemetry/async/property.rb | 9 +- .../async/readable_interface_object.rb | 54 +++++++++ lib/syskit/telemetry/async/task_context.rb | 113 +++++++++++++----- test/telemetry/async/test_task_context.rb | 47 ++++++-- 10 files changed, 249 insertions(+), 60 deletions(-) create mode 100644 lib/syskit/telemetry/async/input_port.rb create mode 100644 lib/syskit/telemetry/async/output_port.rb delete mode 100644 lib/syskit/telemetry/async/port.rb create mode 100644 lib/syskit/telemetry/async/readable_interface_object.rb diff --git a/lib/syskit/telemetry/async.rb b/lib/syskit/telemetry/async.rb index 2aacd51b0..68dbce9ec 100644 --- a/lib/syskit/telemetry/async.rb +++ b/lib/syskit/telemetry/async.rb @@ -3,9 +3,11 @@ require "syskit/telemetry/async/name_service" require "syskit/telemetry/async/task_context" require "syskit/telemetry/async/interface_object" +require "syskit/telemetry/async/readable_interface_object" require "syskit/telemetry/async/attribute" require "syskit/telemetry/async/property" -require "syskit/telemetry/async/port" +require "syskit/telemetry/async/input_port" +require "syskit/telemetry/async/output_port" module Syskit module Telemetry diff --git a/lib/syskit/telemetry/async/attribute.rb b/lib/syskit/telemetry/async/attribute.rb index f3043f767..9f4ea2fb8 100644 --- a/lib/syskit/telemetry/async/attribute.rb +++ b/lib/syskit/telemetry/async/attribute.rb @@ -4,7 +4,14 @@ module Syskit module Telemetry module Async # Callback-based API for remote task ports - class Attribute < InterfaceObject + class Attribute < ReadableInterfaceObject + def on_raw_change(&block) + on_raw_data(&block) + end + + def on_change(&block) + on_data(&block) + end end end end diff --git 
a/lib/syskit/telemetry/async/input_port.rb b/lib/syskit/telemetry/async/input_port.rb new file mode 100644 index 000000000..7f7b4c492 --- /dev/null +++ b/lib/syskit/telemetry/async/input_port.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Async interface compatible with the orocos.rb's API + class InputPort < ReadableInterfaceObject + def output? + false + end + + def input? + true + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/interface_object.rb b/lib/syskit/telemetry/async/interface_object.rb index d1a883dc1..fb01c3f0b 100644 --- a/lib/syskit/telemetry/async/interface_object.rb +++ b/lib/syskit/telemetry/async/interface_object.rb @@ -13,8 +13,7 @@ class InterfaceObjectHooks define_hooks :on_reachable define_hooks :on_unreachable - define_hooks :on_data - define_hooks :on_raw_data + define_hooks :on_error end # Callback-based API to the orocos.rb property API @@ -48,10 +47,31 @@ def unreachable! end def on_reachable(&block) - super(&block) + super block.call if @raw_object end + + def once_on_reachable(&block) + # on_reachable might call the block right away, in which case + # `listener` will be nil. Use the called flag to allow disposing + # of the listener the second time without causing a double call + # to the block + called = false + listener = on_reachable do + block.call unless called + called = true + listener&.dispose + end + end + + def new_sample + @type.zero + end + + def type_name + @type.name + end end end end diff --git a/lib/syskit/telemetry/async/output_port.rb b/lib/syskit/telemetry/async/output_port.rb new file mode 100644 index 000000000..6483b2b43 --- /dev/null +++ b/lib/syskit/telemetry/async/output_port.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Async interface compatible with the orocos.rb's API + class OutputPort < ReadableInterfaceObject + def output? + true + end + + def input? 
+ false + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/port.rb b/lib/syskit/telemetry/async/port.rb deleted file mode 100644 index 07992d8b5..000000000 --- a/lib/syskit/telemetry/async/port.rb +++ /dev/null @@ -1,11 +0,0 @@ -# frozen_string_literal: true - -module Syskit - module Telemetry - module Async - # Callback-based API for remote task ports - class Port < InterfaceObject - end - end - end -end \ No newline at end of file diff --git a/lib/syskit/telemetry/async/property.rb b/lib/syskit/telemetry/async/property.rb index e7bdf9924..352173650 100644 --- a/lib/syskit/telemetry/async/property.rb +++ b/lib/syskit/telemetry/async/property.rb @@ -4,7 +4,14 @@ module Syskit module Telemetry module Async # Callback-based API to the orocos.rb property API - class Property < InterfaceObject + class Property < ReadableInterfaceObject + def on_raw_change(&block) + on_raw_data(&block) + end + + def on_change(&block) + on_data(&block) + end end end end diff --git a/lib/syskit/telemetry/async/readable_interface_object.rb b/lib/syskit/telemetry/async/readable_interface_object.rb new file mode 100644 index 000000000..da9c640a1 --- /dev/null +++ b/lib/syskit/telemetry/async/readable_interface_object.rb @@ -0,0 +1,54 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Definition of hooks related to reading data + class ReadableInterfaceObjectHooks < InterfaceObject + define_hooks :on_data + define_hooks :on_raw_data + end + + # Base class for interface objects that allow to read data + class ReadableInterfaceObject < ReadableInterfaceObjectHooks + # Callback management object with the same API than orocos.rb's + class Listener + def initialize(object, event, block) + @object = object + @event = event + @block = block + end + + def start + return if @disposable + + @disposable = @object.send(@event, &@block) + end + + def stop + @disposable&.dispose + @disposable = nil + end + + def dispose + stop + end + end + + alias 
__on_raw_data on_raw_data + def on_raw_data(&block) + listener = Listener.new(self, :__on_raw_data, block) + listener.start + listener + end + + alias __on_data on_data + def on_data(&block) + listener = Listener.new(self, :__on_data, block) + listener.start + listener + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/task_context.rb b/lib/syskit/telemetry/async/task_context.rb index 4a252b901..9a50ccd86 100644 --- a/lib/syskit/telemetry/async/task_context.rb +++ b/lib/syskit/telemetry/async/task_context.rb @@ -3,6 +3,10 @@ module Syskit module Telemetry module Async + # Definition of hooks for the {TaskContext} class + # + # This is made separately to allow overloading them in the main class in + # a natural way class TaskContextHooks include Roby::Hooks include Roby::Hooks::InstanceHooks @@ -30,6 +34,11 @@ class TaskContext < TaskContextHooks # @return [String] attr_reader :identity + # The task model + # + # @return [OroGen::Spec::TaskContext] + attr_reader :model + # Discover information about a Orocos::TaskContext and create the # corresponding {TaskContext} # @@ -41,27 +50,46 @@ def self.discover(task) state_reader = task.state_reader( pull: true, type: :circular_buffer, size: 10 ) - puts "#{Time.now} #{task.name}: created state reader" - raw_attributes = task.attribute_names.map { task.attribute(_1) } - puts "#{Time.now} #{task.name}: created attributes" - raw_properties = task.property_names.map { task.property(_1) } - puts "#{Time.now} #{task.name}: created properties" - raw_ports = task.port_names.map { task.port(_1) } - puts "#{Time.now} #{task.name}: created ports" + discover_attributes(async_task, task) + discover_properties(async_task, task) + discover_ports(async_task, task) # We can do this here ONLY BECAUSE we're populating an initial # state. 
Further updates need to call the `discover_` methods in # the main thread async_task.reachable!(task, state_reader: state_reader) + async_task + end + + # @api private + # + # Discover a remote task's attributes + def self.discover_attributes(async_task, task) + raw_attributes = task.attribute_names.map { task.attribute(_1) } async_task.discover_attributes(raw_attributes) + end + + # @api private + # + # Discover a remote task's properties + def self.discover_properties(async_task, task) + raw_properties = task.property_names.map { task.property(_1) } async_task.discover_properties(raw_properties) + end + + # @api private + # + # Discover a remote task's ports + def self.discover_ports(async_task, task) + raw_ports = task.port_names.map { task.port(_1) } async_task.discover_ports(raw_ports) - puts "#{Time.now} #{task.name}: discovered" - async_task end - def initialize(name) + def initialize(name, model: self.class.dummy_orogen_model(name)) + super() + @name = name + @model = model @attributes = {} @properties = {} @@ -70,6 +98,13 @@ def initialize(name) @current_state = nil end + @dummy_orogen_models = Concurrent::Hash.new + + def self.dummy_orogen_model(name) + @dummy_orogen_models[name] ||= + Orocos.create_orogen_task_context_model(name) + end + def to_proxy self end @@ -81,22 +116,24 @@ def to_proxy def unreachable! run_hook :on_unreachable - @attributes.each_value do - run_hook :on_attribute_unreachable, _1.name - _1.unreachable! - end + run_interface_unreachable_hooks( + @attributes.each_value, :on_attribute_unreachable + ) + run_interface_unreachable_hooks( + @properties.each_value, :on_property_unreachable + ) + run_interface_unreachable_hooks( + @ports.each_value, :on_port_unreachable + ) - @properties.each_value do - run_hook :on_property_unreachable, _1.name - _1.unreachable! 
- end + dispose + end - @ports.each_value do - run_hook :on_port_unreachable, _1.name + def run_interface_unreachable_hooks(objects, event) + objects.each do + run_hook event, _1.name _1.unreachable! end - - dispose end def reachable? @@ -147,8 +184,12 @@ def each_port(&block) @ports.each_value(&block) end - def each_property(&block) - @properties.each_value(&block) + def each_input_port(&block) + @ports.each_value.find_all(&:input?).each(&block) + end + + def each_output_port(&block) + @ports.each_value.find_all { !_1.input? }.each(&block) end def on_attribute_reachable(&block) @@ -206,7 +247,15 @@ def discover_properties(raw_properties) def discover_ports(raw_ports) @ports = raw_ports.each_with_object({}) do |p, h| - async = Port.new(p.name, p.type) + klass = + case p + when Orocos::InputPort + InputPort + else + OutputPort + end + + async = klass.new(p.name, p.type) async.reachable!(p) h[p.name] = async end @@ -233,19 +282,17 @@ def state_read_poll_thread(reader, queue, stop, period: 0.1) end def poll(period: 0.1) - begin - while (new_state = read_new_state) - @current_state = new_state - run_hook :on_state_change, new_state - end - rescue ThreadError - sleep(period) + while (new_state = read_new_state) + @current_state = new_state + run_hook :on_state_change, new_state end + rescue ThreadError + sleep(period) end def read_new_state @state_read_queue.pop(true) - rescue ThreadError + rescue ThreadError # rubocop:disable Lint/SuppressedException end end end diff --git a/test/telemetry/async/test_task_context.rb b/test/telemetry/async/test_task_context.rb index 2b5d9e0c1..376494cef 100644 --- a/test/telemetry/async/test_task_context.rb +++ b/test/telemetry/async/test_task_context.rb @@ -22,10 +22,46 @@ module Async t, async = make_async_task "test" assert async.reachable? 
assert_equal t.ior, async.identity - assert_equal Set["state", "in", "out"], async.each_port.to_set(&:name) + assert_equal Set["state", "in", "out"], + async.each_port.to_set(&:name) assert_equal ["prop"], async.each_property.map(&:name) assert_includes async.each_attribute.map(&:name), "attr" end + + it "registers attributes" do + _, async = make_async_task "test" + attr = async.attribute("attr") + assert_includes async.each_attribute.to_a, attr + assert_kind_of Attribute, attr + assert_equal "attr", attr.name + end + + it "registers properties" do + _, async = make_async_task "test" + prop = async.property("prop") + assert_equal [prop], async.each_property.to_a + assert_kind_of Property, prop + assert_equal "prop", prop.name + end + + it "registers input ports" do + _, async = make_async_task "test" + in_p = async.port("in") + assert_equal [in_p], async.each_input_port.to_a + assert_includes async.each_port.to_a, in_p + assert_kind_of InputPort, in_p + assert_equal "in", in_p.name + end + + it "registers output ports" do + _, async = make_async_task "test" + out_p = async.port("out") + assert_equal Set[async.port("state"), out_p], + async.each_output_port.to_set + assert_includes async.each_port.to_a, out_p + assert_kind_of OutputPort, out_p + assert_equal "out", out_p.name + end end describe "state change notifications" do @@ -205,15 +241,6 @@ module Async end end - describe "ports" do - it "calls the on_port_reachable hooks on registration" do - _, async = make_async_task "test" - ports = [] - async.on_port_reachable { ports << _1 } - assert_equal Set["state", "in", "out"], ports.to_set(&:name) - end - end - def make_ruby_task(name) ruby_task = Orocos.allow_blocking_calls do t = Orocos::RubyTasks::TaskContext.new(name) From 098cfcd5ce43da02e5be681461428bb43b2cefd7 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 23 Dec 2024 09:09:44 -0300 Subject: [PATCH 066/158] chore: refactor Async::NameService for clarity --- 
lib/syskit/telemetry/async/name_service.rb | 62 +++++++++++++++------- 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/lib/syskit/telemetry/async/name_service.rb b/lib/syskit/telemetry/async/name_service.rb index e38911430..66651d932 100644 --- a/lib/syskit/telemetry/async/name_service.rb +++ b/lib/syskit/telemetry/async/name_service.rb @@ -154,39 +154,61 @@ def wait_for_task_discovery # @api private # - # Remove a resolved task from the pending discoveries + # Find a valid resolved task from the pending discoveries # - # @return [(String,TaskContext),nil] a resolved task or nil - # if there are none so far + # @return [AsyncDiscovery,nil] a valid resolved task or nil if there are + # none so far def pop_discovered_task loop do - async_discovery, current_ior = pop_resolved_discovery - return unless async_discovery - next unless current_ior - - if async_discovery.ior != current_ior - # The IOR associated with that name changed since the future - # started processing. Throw away the resolved task and start - # again - async_discovery.async_task&.dispose - async_discover_task(async_discovery.task) - next - end - + return unless (async_discovery = pop_finished_discovery) + next unless finished_discovery_validate_ior(async_discovery) next unless async_discovery.async_task return async_discovery end end - def pop_resolved_discovery + # @api private + # + # Get one async discovery result from the terminated discovery futures + # + # Unlike {pop_discovered_task}, it will not try to find a valid discovered + # task. It only gets one finished result + # + # @return [AsyncDiscovery] + def pop_finished_discovery async_discovery = @discovery.each_value.find(&:resolved?) 
return unless async_discovery @discovery.delete(async_discovery.task.name) async_discovery.update_from_result + async_discovery + end + + # @api private + # + # Validate that an async discovery result matches the expected IOR + # for the task + # + # To guard against race conditions, the name service object maintains + # a hash of the task names to the expected IORs. When we fetch an async + # discovery result, we validate that the found task is actually pointing + # to the expected IOR. If it is not, the result is thrown away and a + # new discovery is initiated + # + # @param [AsyncDiscovery] async_discovery + def finished_discovery_validate_ior(async_discovery) current_ior = @iors.get[async_discovery.task.name] - [async_discovery, current_ior] + return unless current_ior + + return true if async_discovery.ior == current_ior + + # The IOR associated with that name changed since the future + # started processing. Throw away the resolved task and start + # again + async_discovery.async_task&.dispose + async_discover_task(async_discovery.task) + false end # @api private @@ -225,7 +247,9 @@ def orogen_model_from_name(name) # (see NameServiceBase#get) def ior(name) task = @registered_tasks[name] - return task.identity if task.respond_to?(:identity) + if (identity = task&.identity) + return identity + end raise Orocos::NotFound, "task context #{name} cannot be found." 
end From c7c474780b67954bfcef287044f04d32d369faca Mon Sep 17 00:00:00 2001 From: kapeps Date: Fri, 24 Jan 2025 13:38:10 -0300 Subject: [PATCH 067/158] fix: log transfer/transfer_server CLI commands --- lib/syskit/cli/log_runtime_archive_main.rb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index fbaabd107..ecc14ed44 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -91,27 +91,27 @@ def watch_transfer( # rubocop:disable Metrics/ParameterLists option :max_upload_rate_mbps, type: :numeric, default: 10, desc: "max upload rate in Mbps" def transfer( # rubocop:disable Metrics/ParameterLists - source_dir, host, port, certificate, user, password, implicit_ftps + source_dir, host, port, certificate_path, user, password, implicit_ftps ) source_dir = validate_directory_exists(source_dir) archiver = make_archiver(source_dir) - server_params = { - host: host, port: port, certificate: certificate, + server_params = LogRuntimeArchive::FTPParameters.new( + host: host, port: port, certificate: File.read(certificate_path), user: user, password: password, implicit_ftps: implicit_ftps, max_upload_rate: rate_mbps_to_bps(options[:max_upload_rate_mbps]) - } + ) archiver.process_root_folder_transfer(server_params) end desc "transfer_server", "creates the log transfer FTP server \ that runs on the main computer" def transfer_server( # rubocop:disable Metrics/ParameterLists - target_dir, host, port, certificate, user, password, implicit_ftps + target_dir, host, port, certfile_path, user, password, implicit_ftps ) - create_server(target_dir, host, port, certificate, user, password, - implicit_ftps) + server = create_server(target_dir, host, port, certfile_path, user, password, implicit_ftps) + server.run end no_commands do # rubocop:disable Metrics/BlockLength From b48bed7ee5d4627cbc133f82b61651123f23867a Mon Sep 
17 00:00:00 2001 From: kapeps Date: Fri, 24 Jan 2025 14:20:44 -0300 Subject: [PATCH 068/158] fix: linting and unit tests --- lib/syskit/cli/log_runtime_archive_main.rb | 11 ++++++----- test/cli/test_log_runtime_archive_main.rb | 11 ++++++++--- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index ecc14ed44..8734fc2cb 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -70,11 +70,11 @@ def archive(root_dir, target_dir) option :max_upload_rate_mbps, type: :numeric, default: 10, desc: "max upload rate in Mbps" def watch_transfer( # rubocop:disable Metrics/ParameterLists - source_dir, host, port, certificate, user, password, implicit_ftps + source_dir, host, port, certfile_path, user, password, implicit_ftps ) loop do begin - transfer(source_dir, host, port, certificate, user, password, + transfer(source_dir, host, port, certfile_path, user, password, implicit_ftps) rescue Errno::ENOSPC next @@ -91,13 +91,13 @@ def watch_transfer( # rubocop:disable Metrics/ParameterLists option :max_upload_rate_mbps, type: :numeric, default: 10, desc: "max upload rate in Mbps" def transfer( # rubocop:disable Metrics/ParameterLists - source_dir, host, port, certificate_path, user, password, implicit_ftps + source_dir, host, port, certfile_path, user, password, implicit_ftps ) source_dir = validate_directory_exists(source_dir) archiver = make_archiver(source_dir) server_params = LogRuntimeArchive::FTPParameters.new( - host: host, port: port, certificate: File.read(certificate_path), + host: host, port: port, certificate: File.read(certfile_path), user: user, password: password, implicit_ftps: implicit_ftps, max_upload_rate: rate_mbps_to_bps(options[:max_upload_rate_mbps]) @@ -110,7 +110,8 @@ def transfer( # rubocop:disable Metrics/ParameterLists def transfer_server( # rubocop:disable Metrics/ParameterLists target_dir, host, port, 
certfile_path, user, password, implicit_ftps ) - server = create_server(target_dir, host, port, certfile_path, user, password, implicit_ftps) + server = create_server(target_dir, host, port, certfile_path, user, + password, implicit_ftps) server.run end diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index a914eda3c..aa35993fb 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -157,6 +157,13 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) @source_dir = make_tmppath @server_params = server_params @max_upload_rate = rate_mbps_to_bps(10) + @ftp_params = LogRuntimeArchive::FTPParameters.new( + host: @server_params[:host], port: @server_params[:port], + certificate: File.read(@server_params[:certificate]), + user:@server_params[:user], password: @server_params[:password], + implicit_ftps: @server_params[:implicit_ftps], + max_upload_rate: @max_upload_rate + ) @server = call_create_server(make_tmppath, @server_params) end @@ -173,9 +180,7 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) .new_instances .should_receive(:process_root_folder_transfer) .with( - @server_params.merge( - { max_upload_rate: @max_upload_rate } - ) + @ftp_params ) .pass_thru do called += 1 From 98a82eb1117a84cf43694a494aef9461ea4d03c2 Mon Sep 17 00:00:00 2001 From: kapeps Date: Fri, 24 Jan 2025 15:09:25 -0300 Subject: [PATCH 069/158] chore: include unit test to verify if transfer actually transfers files --- test/cli/test_log_runtime_archive_main.rb | 38 +++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index aa35993fb..607162fc7 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -160,7 +160,7 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) @ftp_params = 
LogRuntimeArchive::FTPParameters.new( host: @server_params[:host], port: @server_params[:port], certificate: File.read(@server_params[:certificate]), - user:@server_params[:user], password: @server_params[:password], + user: @server_params[:user], password: @server_params[:password], implicit_ftps: @server_params[:implicit_ftps], max_upload_rate: @max_upload_rate ) @@ -221,6 +221,19 @@ def rate_mbps_to_bps(rate_mbps) e.message end + it "actually transfer files" do + dataset_tmp_path = make_tmppath + root_tmp_path = make_tmppath + + call_create_server(root_tmp_path, @server_params) + + make_dataset(dataset_tmp_path, "19981222-1301") + make_dataset(dataset_tmp_path, "19981222-1302") + + call_transfer(dataset_tmp_path) + assert(File.exist?(root_tmp_path / "19981222-1301" / "test.0.log")) + end + # Call 'transfer' function instead of 'watch' to call transfer once def call_transfer(source_dir) args = [ @@ -230,6 +243,27 @@ def call_transfer(source_dir) ] LogRuntimeArchiveMain.start(args) end + + def make_dataset(path, name) + dataset = (path / name) + dataset.mkpath + FileUtils.touch(dataset / "info.yml") + make_random_file("test.0.log", root: dataset) + dataset + end + + def make_random_file(name, root: @root, size: 1024) + content = Base64.encode64(Random.bytes(size)) + make_in_file name, content, root: root + content + end + + def make_in_file(name, content, root: @root) + path = (root / name) + path.write(content) + [] << path + path + end end def call_create_server(tgt_dir, server_params) @@ -241,7 +275,7 @@ def server_params interface = "127.0.0.1" ca = RobyApp::TmpRootCA.new(interface) - { host: interface, port: 0, + { host: interface, port: 42_429, certificate: ca.private_certificate_path, user: "nilvo", password: "nilvo123", implicit_ftps: true } From 62217ff86b64dd3dcd8f0d2d8b08d1511603d3b1 Mon Sep 17 00:00:00 2001 From: kapeps Date: Mon, 27 Jan 2025 09:27:57 -0300 Subject: [PATCH 070/158] fix: use port 0, updating the server params when the port is selected 
--- lib/syskit/cli/log_runtime_archive_main.rb | 14 +++++++------- test/cli/test_log_runtime_archive_main.rb | 12 +++++++----- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index 8734fc2cb..70b75e7cf 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -68,13 +68,13 @@ def archive(root_dir, target_dir) option :max_size, type: :numeric, default: 10_000, desc: "max log size in MB" option :max_upload_rate_mbps, - type: :numeric, default: 10, desc: "max upload rate in Mbps" + type: :numeric, default: 1_000, desc: "max upload rate in Mbps" def watch_transfer( # rubocop:disable Metrics/ParameterLists - source_dir, host, port, certfile_path, user, password, implicit_ftps + source_dir, host, port, certificate_path, user, password, implicit_ftps ) loop do begin - transfer(source_dir, host, port, certfile_path, user, password, + transfer(source_dir, host, port, certificate_path, user, password, implicit_ftps) rescue Errno::ENOSPC next @@ -89,15 +89,15 @@ def watch_transfer( # rubocop:disable Metrics/ParameterLists option :max_size, type: :numeric, default: 10_000, desc: "max log size in MB" option :max_upload_rate_mbps, - type: :numeric, default: 10, desc: "max upload rate in Mbps" + type: :numeric, default: 1_000, desc: "max upload rate in Mbps" def transfer( # rubocop:disable Metrics/ParameterLists - source_dir, host, port, certfile_path, user, password, implicit_ftps + source_dir, host, port, certificate_path, user, password, implicit_ftps ) source_dir = validate_directory_exists(source_dir) archiver = make_archiver(source_dir) server_params = LogRuntimeArchive::FTPParameters.new( - host: host, port: port, certificate: File.read(certfile_path), + host: host, port: port, certificate: File.read(certificate_path), user: user, password: password, implicit_ftps: implicit_ftps, max_upload_rate: 
rate_mbps_to_bps(options[:max_upload_rate_mbps]) @@ -118,7 +118,7 @@ def transfer_server( # rubocop:disable Metrics/ParameterLists no_commands do # rubocop:disable Metrics/BlockLength # Converts rate in Mbps to bps def rate_mbps_to_bps(rate_mbps) - rate_mbps / (10**6) + rate_mbps * (10**6) end def validate_directory_exists(dir) diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index 607162fc7..7afc08b26 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -225,21 +225,23 @@ def rate_mbps_to_bps(rate_mbps) dataset_tmp_path = make_tmppath root_tmp_path = make_tmppath - call_create_server(root_tmp_path, @server_params) + server = call_create_server(root_tmp_path, @server_params) make_dataset(dataset_tmp_path, "19981222-1301") make_dataset(dataset_tmp_path, "19981222-1302") - call_transfer(dataset_tmp_path) + call_transfer(dataset_tmp_path, server_port: server.port) assert(File.exist?(root_tmp_path / "19981222-1301" / "test.0.log")) end # Call 'transfer' function instead of 'watch' to call transfer once - def call_transfer(source_dir) + def call_transfer(source_dir, server_port: nil) + updated_server_params = @server_params + updated_server_params[:port] = server_port if server_port args = [ "transfer", source_dir, - *@server_params.values + *updated_server_params.values ] LogRuntimeArchiveMain.start(args) end @@ -275,7 +277,7 @@ def server_params interface = "127.0.0.1" ca = RobyApp::TmpRootCA.new(interface) - { host: interface, port: 42_429, + { host: interface, port: 0, certificate: ca.private_certificate_path, user: "nilvo", password: "nilvo123", implicit_ftps: true } From 8482e73c532599e29c0b95812add4a119e606742 Mon Sep 17 00:00:00 2001 From: kapeps Date: Tue, 28 Jan 2025 16:04:09 -0300 Subject: [PATCH 071/158] fix: include max upload rate mbps when testing watch_transfer --- test/cli/test_log_runtime_archive_main.rb | 5 +++-- 1 file changed, 3 insertions(+), 2 
deletions(-) diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index 7afc08b26..a309c5ae8 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -193,7 +193,8 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) "watch_transfer", @source_dir, *@server_params.values, - "--period", 0.5 + "--period", 0.5, + "--max_upload_rate_mbps", 10 ] LogRuntimeArchiveMain.start(args) end @@ -204,7 +205,7 @@ def call_archive(root_path, archive_path, low_limit, freed_limit) # Converts rate in Mbps to bps def rate_mbps_to_bps(rate_mbps) - rate_mbps / (10**6) + rate_mbps * (10**6) end end From 22be3a3a6585899cc5e25f5f03caf9b20da9bc9c Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 23 Dec 2024 09:43:03 -0300 Subject: [PATCH 072/158] feat: create Telemetry::Async::OutputReader --- lib/syskit/telemetry/async.rb | 1 + .../telemetry/async/interface_object.rb | 2 +- lib/syskit/telemetry/async/output_port.rb | 8 + lib/syskit/telemetry/async/output_reader.rb | 134 ++++++++++ lib/syskit/test/polling_executor.rb | 38 +++ test/telemetry/async/test_output_reader.rb | 243 ++++++++++++++++++ 6 files changed, 425 insertions(+), 1 deletion(-) create mode 100644 lib/syskit/telemetry/async/output_reader.rb create mode 100644 lib/syskit/test/polling_executor.rb create mode 100644 test/telemetry/async/test_output_reader.rb diff --git a/lib/syskit/telemetry/async.rb b/lib/syskit/telemetry/async.rb index 68dbce9ec..fdd1fe911 100644 --- a/lib/syskit/telemetry/async.rb +++ b/lib/syskit/telemetry/async.rb @@ -8,6 +8,7 @@ require "syskit/telemetry/async/property" require "syskit/telemetry/async/input_port" require "syskit/telemetry/async/output_port" +require "syskit/telemetry/async/output_reader" module Syskit module Telemetry diff --git a/lib/syskit/telemetry/async/interface_object.rb b/lib/syskit/telemetry/async/interface_object.rb index fb01c3f0b..0cc24e127 100644 --- 
a/lib/syskit/telemetry/async/interface_object.rb +++ b/lib/syskit/telemetry/async/interface_object.rb @@ -49,7 +49,7 @@ def unreachable! def on_reachable(&block) super - block.call if @raw_object + block.call(@raw_object) if @raw_object end def once_on_reachable(&block) diff --git a/lib/syskit/telemetry/async/output_port.rb b/lib/syskit/telemetry/async/output_port.rb index 6483b2b43..864dfe9dd 100644 --- a/lib/syskit/telemetry/async/output_port.rb +++ b/lib/syskit/telemetry/async/output_port.rb @@ -12,6 +12,14 @@ def output? def input? false end + + # Asynchronously create a data reader on this port + def reader(connect_on:, disconnect_on:, **policy) + OutputReader.new( + self, policy, + connect_on: connect_on, disconnect_on: disconnect_on + ) + end end end end diff --git a/lib/syskit/telemetry/async/output_reader.rb b/lib/syskit/telemetry/async/output_reader.rb new file mode 100644 index 000000000..f6f28fc14 --- /dev/null +++ b/lib/syskit/telemetry/async/output_reader.rb @@ -0,0 +1,134 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Holder for a resolved data reader + class OutputReader + # The async port this reader is connected to + attr_reader :port + + def initialize(port, policy, connect_on:, disconnect_on:) + @port = port + @policy = policy + + @cancel_event = Concurrent::Promises.resolvable_event + @connection_future = nil + @last_read_future = Concurrent::Promises.fulfilled_future(nil) + @reader = Concurrent::AtomicReference.new(nil) + + @connection_executor = connect_on + @disconnection_executor = disconnect_on + + @reachability_listener = port.on_reachable do |raw_port| + connect(raw_port, policy) + end + + @unreachability_listener = port.on_unreachable do + disconnect_on_unreachability + end + end + + def raw_reader + @reader.get + end + + def connected? + @reader.get + end + + def poll + resolve_connection unless @reader.get + end + + def resolve_connection + return unless @connection_future&.resolved? 
+ + fulfilled, result, reason = @connection_future.result + if fulfilled + @reader.set(result) + else + warn "failed to create reader on #{@port}: #{reason}" + end + + @connection_future = nil + end + + def raw_read_with_result(executor, sample = nil, copy_old_data = true) # rubocop:disable Style/OptionalBooleanParameter + @last_read_future = @last_read_future.chain_on(executor) do + @reader.get&.raw_read_with_result(sample, copy_old_data) + end + end + + def raw_read(executor, sample = nil, copy_old_data: true) + raw_read_with_result(executor, sample, copy_old_data) + .then do |_, read_sample| + read_sample + end + end + + def raw_read_new(executor, sample = nil) + raw_read_with_result(executor, sample, false) + .then do |result, read_sample| + read_sample if result == Orocos::NEW_DATA + end + end + + # @api private + # + # Connect to the actual port + def connect(raw_port, policy) + if @reader.get + raise StateError, + "#connect called on an already connected reader" + end + + cancel_event = @cancel_event + future = Concurrent::Promises.future_on(@connection_executor) do + raw_port.reader(**policy) unless cancel_event.resolved? + end + @connection_future = future + end + + # @api private + # + # Internal disconnection method, leaving the reader reconnect when the + # port is reachable again + def disconnect_on_unreachability + @cancel_event.resolve + @cancel_event = Concurrent::Promises.resolvable_event + + disconnect_future = + (@connection_future || @last_read_future) + .then_on(@disconnection_executor, @reader.get) do |_, reader| + reader&.disconnect + end + + @reader.set(nil) + @connection_future = nil + @last_read_future = Concurrent::Promises.fulfilled_future(nil) + disconnect_future + end + + # Disconnect from the remote port + # + # Note that this discards any data that is still being read. 
The + # port will automatically reconnect + def disconnect + dispose + end + + # Disconnect and disable this reader + def dispose + @reachability_listener.dispose + @unreachability_listener.dispose + disconnect_on_unreachability + end + + def buffer_size + @policy[:size] + end + end + end + end +end diff --git a/lib/syskit/test/polling_executor.rb b/lib/syskit/test/polling_executor.rb new file mode 100644 index 000000000..862b55fb9 --- /dev/null +++ b/lib/syskit/test/polling_executor.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +module Syskit + module Test + # Test executor for classes that use concurrent-ruby + # + # This is meant for testing and debugging. Each call to process_one will + # process a single queued task in the current thread + class PollingExecutor < Concurrent::ImmediateExecutor + def initialize + super + + @task_queue = Queue.new + end + + def post(*args, &task) + @task_queue << [args, task] + end + + def take_one_task + @task_queue.pop(true) + rescue ThreadError + # queue full + end + + def execute_one + args, task = take_one_task + task&.call(*args) + task + end + + def execute_all + while execute_one + end + end + end + end +end diff --git a/test/telemetry/async/test_output_reader.rb b/test/telemetry/async/test_output_reader.rb new file mode 100644 index 000000000..6106e7e7c --- /dev/null +++ b/test/telemetry/async/test_output_reader.rb @@ -0,0 +1,243 @@ +# frozen_string_literal: true + +require "syskit/test/self" +require "syskit/telemetry/async" +require "syskit/test/polling_executor" + +module Syskit + module Telemetry + module Async + describe OutputReader do + before do + @connection_executor = Test::PollingExecutor.new + @disconnection_executor = Test::PollingExecutor.new + @read_executor = Test::PollingExecutor.new + @ruby_tasks = [] + end + + after do + @ruby_tasks.each(&:dispose) + end + + it "asynchronously connects to the port" do + _, async = make_async_task("test") + reader = make_reader(async.port("out")) + + 
execute_all(@connection_executor) + reader.poll + assert reader.connected? + end + + describe "#disconnect" do + before do + @task, @async = make_async_task("test") + end + + it "asynchronously disconnects" do + reader = make_connected_reader(@async.port("out")) + flexmock(reader.raw_reader).should_receive(:disconnect).once + future = reader.disconnect + execute_all(@disconnection_executor) + future.value! + end + + it "synchronizes the disconnection on the last read" do + reader = make_connected_reader(@async.port("out")) + executed = [] + 3.times do |i| + reader.raw_read_new(@read_executor).then { executed << i } + end + future = reader.disconnect.then { executed << 3 } + + execute_all(@disconnection_executor) + execute_all(@read_executor) + execute_all(@disconnection_executor) + future.value! + + assert_equal (0...4).to_a, executed + end + + it "synchronizes the disconnection on the connection" do + reader = make_reader(@async.port("out")) + executed = [] + flexmock(Orocos::OutputReader) + .new_instances.should_receive(:disconnect).once + future = reader.disconnect.then { executed << 1 } + + execute_all(@disconnection_executor) + execute_all(@connection_executor) + execute_all(@disconnection_executor) + future.value! + end + end + + describe "#raw_read_with_result" do + before do + @task, @async = make_async_task("test") + end + + it "makes all reads sequential" do + reader = make_reader(@async.port("out")) + @task.out.write(42) + executor0 = Test::PollingExecutor.new + executor1 = Test::PollingExecutor.new + future0 = reader.raw_read_with_result(executor0) + future1 = reader.raw_read_with_result(executor1) + + execute_all(executor1) + refute future1.resolved? + execute_all(executor0) + assert future0.resolved? + execute_all(executor1) + assert future1.resolved? 
+ end + + it "returns nil if the reader is disconnected, and it does not " \ + "attempt to read the old reader object" do + reader = make_connected_reader(@async.port("out")) + flexmock(reader.raw_reader) + .should_receive(:raw_read_with_result) + .never + disconnect_reader(reader) + + future = reader.raw_read_with_result(@read_executor) + execute_all(@read_executor) + assert_nil future.value! + end + end + + describe "#raw_read" do + before do + @task, @async = make_async_task("test") + end + + it "returns nil if the reader is not connected" do + reader = make_reader(@async.port("out")) + @task.out.write(42) + future = reader.raw_read(@read_executor) + execute_all(@read_executor) + assert_nil future.value! + end + + it "returns nil if the reader is connected but " \ + "there has never been any samples" do + reader = make_connected_reader(@async.port("out")) + future = reader.raw_read(@read_executor) + execute_all(@read_executor) + assert_nil future.value! + end + + it "reads a new sample once the reader is connected" do + reader = make_connected_reader(@async.port("out")) + @task.out.write(42) + future = reader.raw_read(@read_executor) + execute_all(@read_executor) + assert_equal 42, Typelib.to_ruby(future.value!) + end + + it "returns the old sample if there are no new samples" do + reader = make_connected_reader(@async.port("out")) + @task.out.write(42) + future = reader.raw_read(@read_executor) + execute_all(@read_executor) + assert_equal 42, Typelib.to_ruby(future.value!) + + future = reader.raw_read(@read_executor) + execute_all(@read_executor) + assert_equal 42, Typelib.to_ruby(future.value!) + end + end + + describe "#raw_read_new" do + before do + @task, @async = make_async_task("test") + end + + it "returns nil if the reader is not connected" do + reader = make_reader(@async.port("out")) + @task.out.write(42) + future = reader.raw_read_new(@read_executor) + execute_all(@read_executor) + assert_nil future.value! 
+ end + + it "returns nil if the reader is connected but " \ + "there has never been any samples" do + reader = make_connected_reader(@async.port("out")) + future = reader.raw_read_new(@read_executor) + execute_all(@read_executor) + assert_nil future.value! + end + + it "reads a new sample once the reader is connected" do + reader = make_connected_reader(@async.port("out")) + @task.out.write(42) + future = reader.raw_read_new(@read_executor) + execute_all(@read_executor) + assert_equal 42, Typelib.to_ruby(future.value!) + end + + it "returns nil if there are no new samples" do + reader = make_connected_reader(@async.port("out")) + @task.out.write(42) + future = reader.raw_read_new(@read_executor) + execute_all(@read_executor) + assert_equal 42, Typelib.to_ruby(future.value!) + + future = reader.raw_read_new(@read_executor) + execute_all(@read_executor) + assert_nil future.value! + end + end + + def make_ruby_task(name) + ruby_task = Orocos.allow_blocking_calls do + t = Orocos::RubyTasks::TaskContext.new(name) + t.create_output_port "out", "/double" + t + end + @ruby_tasks << ruby_task + ruby_task + end + + def make_async_task(name) + t = make_ruby_task name + async = Orocos.allow_blocking_calls do + TaskContext.discover(t) + end + [t, async] + end + + def make_reader(port) + OutputReader.new( + port, {}, + connect_on: @connection_executor, + disconnect_on: @disconnection_executor + ) + end + + def make_connected_reader(port) + reader = make_reader(port) + execute_all(@connection_executor) + reader.poll + assert reader.connected? + reader + end + + def disconnect_reader(reader) + future = reader.disconnect + execute_all(@disconnection_executor) + future.value! 
+ end + + def execute_all(executor) + Orocos.allow_blocking_calls { executor.execute_all } + end + + def execute_one(executor) + Orocos.allow_blocking_calls { executor.execute_one } + end + end + end + end +end From 20d1e659b33bc1643fca5595e472b0a8ab008cd3 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Thu, 26 Dec 2024 16:51:15 -0300 Subject: [PATCH 073/158] feat: make TaskContext and its interface types usable as keys in hashes --- .../telemetry/async/interface_object.rb | 19 +++++- lib/syskit/telemetry/async/task_context.rb | 21 +++++- test/telemetry/async/test_task_context.rb | 65 +++++++++++++++++++ 3 files changed, 101 insertions(+), 4 deletions(-) diff --git a/lib/syskit/telemetry/async/interface_object.rb b/lib/syskit/telemetry/async/interface_object.rb index 0cc24e127..a8ba31fb4 100644 --- a/lib/syskit/telemetry/async/interface_object.rb +++ b/lib/syskit/telemetry/async/interface_object.rb @@ -18,18 +18,35 @@ class InterfaceObjectHooks # Callback-based API to the orocos.rb property API class InterfaceObject < InterfaceObjectHooks + # @return [TaskContext] the underlying task context + attr_reader :task_context # @return [String] the property name attr_reader :name # @return [Class] the property type attr_reader :type - def initialize(name, type) + # Hash code + # + # Two interface objects are considered the same from a hash key + # perspective if they are of the same name, type and point to the + # same remote task, even if they are two different objects + attr_reader :hash + + def initialize(task_context, name, type) super() + @task_context = task_context + @hash = [task_context, self.class, name].hash @name = name @type = type end + def eql?(other) + other.task_context.eql?(task_context) && + other.name == name && + other.class == self.class + end + def reachable? 
@raw_object end diff --git a/lib/syskit/telemetry/async/task_context.rb b/lib/syskit/telemetry/async/task_context.rb index 9a50ccd86..2a3b71a8f 100644 --- a/lib/syskit/telemetry/async/task_context.rb +++ b/lib/syskit/telemetry/async/task_context.rb @@ -39,6 +39,12 @@ class TaskContext < TaskContextHooks # @return [OroGen::Spec::TaskContext] attr_reader :model + # Hash code for this task context + # + # Two different TaskContext objects that point to the same remote object + # will be considered the same from the perspective of a hash key + attr_reader :hash + # Discover information about a Orocos::TaskContext and create the # corresponding {TaskContext} # @@ -90,6 +96,11 @@ def initialize(name, model: self.class.dummy_orogen_model(name)) @name = name @model = model + # !!!! DO NOT add the identity to the hash code, or it will change + # the hash whenever the remote task changes. From the Async + # perspective, a task's identity is determined by its name + # (we can't have two different tasks with the same name) + @hash = name.hash @attributes = {} @properties = {} @@ -109,6 +120,10 @@ def to_proxy self end + def eql?(other) + name == other.name + end + # Declare that the remote task is not reachable anymore # # Must be called from the main thread. 
It also dispose of the underlying @@ -225,7 +240,7 @@ def port(name) def discover_attributes(raw_attributes) @attributes = raw_attributes.each_with_object({}) do |p, h| - async = Attribute.new(p.name, p.type) + async = Attribute.new(self, p.name, p.type) async.reachable!(p) h[p.name] = async end @@ -236,7 +251,7 @@ def discover_attributes(raw_attributes) def discover_properties(raw_properties) @properties = raw_properties.each_with_object({}) do |p, h| - async = Property.new(p.name, p.type) + async = Property.new(self, p.name, p.type) async.reachable!(p) h[p.name] = async end @@ -255,7 +270,7 @@ def discover_ports(raw_ports) OutputPort end - async = klass.new(p.name, p.type) + async = klass.new(self, p.name, p.type) async.reachable!(p) h[p.name] = async end diff --git a/test/telemetry/async/test_task_context.rb b/test/telemetry/async/test_task_context.rb index 376494cef..38e893886 100644 --- a/test/telemetry/async/test_task_context.rb +++ b/test/telemetry/async/test_task_context.rb @@ -16,6 +16,20 @@ module Async @ruby_tasks.each(&:dispose) end + it "is the same as another async task with the same remote task when " \ + "used as hash key" do + t, async = make_async_task "test" + async2 = Orocos.allow_blocking_calls { TaskContext.discover(t) } + + _, async3 = make_async_task "test2" + + hash = { async => 42 } + assert_equal 42, hash[async2] + assert_nil hash[async3] + assert_nil hash[42] + assert_nil hash["test"] + end + describe ".discover" do it "creates an async task already initialized with the remote " \ "task's interface" do @@ -169,6 +183,23 @@ module Async async.attribute("attr").on_unreachable { m.called } async.unreachable! 
end + + it "is usable as a hash key" do + t, async = make_async_task "test" + attr = async.attribute("attr") + + async2 = Orocos.allow_blocking_calls { TaskContext.discover(t) } + attr2 = async2.attribute("attr") + + _, async3 = make_async_task "test2" + attr3 = async3.attribute("attr") + + hash = { attr => 42 } + assert_equal 42, hash[attr2] + assert_nil hash[attr3] + assert_nil hash[42] + assert_nil hash["test"] + end end describe "properties" do @@ -205,6 +236,23 @@ module Async async.property("prop").on_unreachable { m.called } async.unreachable! end + + it "is usable as a hash key" do + t, async = make_async_task "test" + prop = async.property("prop") + + async2 = Orocos.allow_blocking_calls { TaskContext.discover(t) } + prop2 = async2.property("prop") + + _, async3 = make_async_task "test2" + prop3 = async3.property("prop") + + hash = { prop => 42 } + assert_equal 42, hash[prop2] + assert_nil hash[prop3] + assert_nil hash[42] + assert_nil hash["test"] + end end describe "ports" do @@ -239,6 +287,23 @@ module Async async.port("in").on_unreachable { m.called } async.unreachable! 
end + + it "is usable as a hash key" do + t, async = make_async_task "test" + port = async.port("out") + + async2 = Orocos.allow_blocking_calls { TaskContext.discover(t) } + port2 = async2.port("out") + + _, async3 = make_async_task "test2" + port3 = async3.port("out") + + hash = { port => 42 } + assert_equal 42, hash[port2] + assert_nil hash[port3] + assert_nil hash[42] + assert_nil hash["test"] + end end def make_ruby_task(name) From 60769c9e4782d7677d3497013e5539bc9b49ced6 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Thu, 26 Dec 2024 18:07:06 -0300 Subject: [PATCH 074/158] fix: make on_reachable return a disposable that deregisters the callback --- .../telemetry/async/interface_object.rb | 3 +- test/telemetry/async/test_interface_object.rb | 49 +++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 test/telemetry/async/test_interface_object.rb diff --git a/lib/syskit/telemetry/async/interface_object.rb b/lib/syskit/telemetry/async/interface_object.rb index a8ba31fb4..687a184d8 100644 --- a/lib/syskit/telemetry/async/interface_object.rb +++ b/lib/syskit/telemetry/async/interface_object.rb @@ -64,9 +64,10 @@ def unreachable! 
end def on_reachable(&block) - super + disposable = super block.call(@raw_object) if @raw_object + disposable end def once_on_reachable(&block) diff --git a/test/telemetry/async/test_interface_object.rb b/test/telemetry/async/test_interface_object.rb new file mode 100644 index 000000000..b6c8df9be --- /dev/null +++ b/test/telemetry/async/test_interface_object.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +require "syskit/test/self" +require "syskit/telemetry/async" + +module Syskit + module Telemetry + module Async + describe InterfaceObject do + before do + @object = InterfaceObject.new(flexmock, "something", flexmock) + end + + describe "on_reachable" do + it "registers a callback called when the object " \ + "becomes reachable" do + recorder = flexmock + recorder.should_receive(:called).with(raw = flexmock).once + @object.on_reachable do + recorder.called(_1) + end + @object.reachable!(raw) + end + + it "calls the callback right away if the object is already " \ + "reachable" do + recorder = flexmock + recorder.should_receive(:called).with(raw = flexmock).once + @object.reachable!(raw) + @object.on_reachable do + recorder.called(_1) + end + end + + it "stops calling if the value returned on registration was " \ + "disposed" do + recorder = flexmock + recorder.should_receive(:called).never + disposable = @object.on_reachable do + recorder.called(_1) + end + disposable.dispose + @object.reachable!(flexmock) + end + end + end + end + end +end From c52d2cd3d466668218144be754131c835ac7bd6f Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Fri, 27 Dec 2024 15:45:16 -0300 Subject: [PATCH 075/158] feat: implement Async::PortReadManager --- lib/syskit/telemetry/async.rb | 1 + lib/syskit/telemetry/async/output_reader.rb | 4 + .../telemetry/async/port_read_manager.rb | 213 ++++++++++++++++++ .../telemetry/async/test_port_read_manager.rb | 202 +++++++++++++++++ 4 files changed, 420 insertions(+) create mode 100644 lib/syskit/telemetry/async/port_read_manager.rb 
create mode 100644 test/telemetry/async/test_port_read_manager.rb diff --git a/lib/syskit/telemetry/async.rb b/lib/syskit/telemetry/async.rb index fdd1fe911..8c50e9480 100644 --- a/lib/syskit/telemetry/async.rb +++ b/lib/syskit/telemetry/async.rb @@ -9,6 +9,7 @@ require "syskit/telemetry/async/input_port" require "syskit/telemetry/async/output_port" require "syskit/telemetry/async/output_reader" +require "syskit/telemetry/async/port_read_manager" module Syskit module Telemetry diff --git a/lib/syskit/telemetry/async/output_reader.rb b/lib/syskit/telemetry/async/output_reader.rb index f6f28fc14..03a5759af 100644 --- a/lib/syskit/telemetry/async/output_reader.rb +++ b/lib/syskit/telemetry/async/output_reader.rb @@ -125,6 +125,10 @@ def dispose disconnect_on_unreachability end + def disposed? + @reachability_listener.disposed? + end + def buffer_size @policy[:size] end diff --git a/lib/syskit/telemetry/async/port_read_manager.rb b/lib/syskit/telemetry/async/port_read_manager.rb new file mode 100644 index 000000000..dbc1645b9 --- /dev/null +++ b/lib/syskit/telemetry/async/port_read_manager.rb @@ -0,0 +1,213 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # @api private + # + # Central class that manages data readers for ports + class PortReadManager + def initialize( + connection_executor: self.class.default_connection_executor, + disconnection_executor: self.class.default_disconnection_executor, + read_executor: self.class.default_read_executor + ) + @callbacks = {} + @pollers = {} + + @connection_executor = connection_executor + @disconnection_executor = disconnection_executor + @read_executor = read_executor + end + + CONNECTION_DEFAULT_THREADS = 20 + DISCONNECTION_DEFAULT_THREADS = 20 + READ_DEFAULT_THREADS = 5 + + def self.default_connection_executor + @default_connection_executor ||= + Concurrent::CachedThreadPool.new( + max_threads: CONNECTION_DEFAULT_THREADS + ) + end + + def self.default_disconnection_executor + 
@default_disconnection_executor ||= + Concurrent::CachedThreadPool.new( + max_threads: DISCONNECTION_DEFAULT_THREADS + ) + end + + def self.default_read_executor + @default_read_executor ||= + Concurrent::CachedThreadPool.new( + max_threads: READ_DEFAULT_THREADS + ) + end + + Callback = Struct.new( + :port, :callback, :period, :buffer_size, keyword_init: true + ) do + def dispatch(value) + callback.call(value) + end + end + + Poller = Struct.new( + :port, :reader, :next_time, :period, :read_future, + keyword_init: true + ) do + def connected? + reader.connected? + end + + def poll + reader.poll + end + + def scheduled_read? + read_future + end + + def result + read_future&.result + end + + def schedule_read_if_needed(now, executor) + self.next_time ||= now + return unless self.next_time <= now + + self.read_future = reader.raw_read_new(executor) + end + + def prepare_next_read(now) + delta_in_periods = ((now - next_time) / period).ceil + # delta_in_periods == 0 should be impossible. + # But, you know, little cost + self.next_time += [delta_in_periods, 1].max * period + self.read_future = nil + end + + def resolved_read? + read_future&.resolved? + end + + def reset_read_tracking + self.read_future = nil + self.next_time = nil + end + + def buffer_size + reader&.buffer_size + end + end + + # Register a callback for data from a port + # + # @param [Async::OutputPort] port the port whose data is needed + # @param [#call] callback the object that will receive data when + # available + # @param [Numeric] period reading period in seconds + # @param [Integer] buffer_size the size of the sample buffer requested + # by the callback. The actual buffer will be of *at least* that many + # samples. 
+ def register_callback(port, callback, period:, buffer_size:) + callback = Callback.new( + port: port, callback: callback, + period: period, buffer_size: buffer_size + ) + + (@callbacks[port] ||= []) << callback + ensure_reader_uptodate(port) + end + + # Reconnect the reader for this port if needed + # + # The main reason is the modification of the buffer policy + def ensure_reader_uptodate(port) + poller = find_poller_for_port(port) || + Poller.new(port: port) + + buffer_size = required_buffer_size_for(port) + if buffer_size != poller.buffer_size + poller.reader&.dispose + poller.reader = port.reader( + connect_on: @connection_executor, + disconnect_on: @disconnection_executor, + type: :circular_buffer, pull: true, size: buffer_size + ) + end + + update_poller_period(poller) + @pollers[port] = poller + end + + def dispose + @pollers.each_value do |p| + p.reader.dispose + end + @pollers = {} + end + + # Whether we are currently polling the given port + def polling?(port) + @pollers.key?(port) + end + + # Update a poller's period to match the callbacks currently listening + # to it + def update_poller_period(poller) + poller.period = @callbacks[poller.port].map(&:period).min + end + + # Return the Reader for the given port + # + # @return [Reader,nil] + def find_poller_for_port(port) + @pollers[port] + end + + # Method called regularly to update the asynchronous class state + def poll + now = monotonic_time + @pollers.each_value do |p| + p.poll + + if !p.connected? + p.reset_read_tracking + elsif !p.scheduled_read? + p.schedule_read_if_needed(now, @read_executor) + elsif p.resolved_read? 
+ dispatch_read_result(p) + p.prepare_next_read(now) + end + end + end + + # Time in seconds returned by CLOCK_MONOTONIC + def monotonic_time + Process.clock_gettime(Process::CLOCK_MONOTONIC) + end + + # Send read data to registered callbacks + def dispatch_read_result(poller) + fulfilled, value, reason = poller.result + if fulfilled + @callbacks[poller.port].each { |c| c.dispatch(value) } + else + warn "failed to read #{poller.port}: #{reason}" + end + end + + # Return the buffer size needed by all callbacks of a port, in aggregate + # + # @return [Integer] + def required_buffer_size_for(port) + return unless (callbacks = @callbacks[port]) + + callbacks.map { _1.buffer_size }.max + end + end + end + end +end diff --git a/test/telemetry/async/test_port_read_manager.rb b/test/telemetry/async/test_port_read_manager.rb new file mode 100644 index 000000000..2d7e471d7 --- /dev/null +++ b/test/telemetry/async/test_port_read_manager.rb @@ -0,0 +1,202 @@ +# frozen_string_literal: true + +require "syskit/test/self" +require "syskit/telemetry/async" +require "syskit/test/polling_executor" + +module Syskit + module Telemetry + module Async + describe PortReadManager do + before do + @connection_executor = Test::PollingExecutor.new + @disconnection_executor = Test::PollingExecutor.new + @read_executor = Test::PollingExecutor.new + @manager = PortReadManager.new( + connection_executor: @connection_executor, + disconnection_executor: @disconnection_executor, + read_executor: @read_executor + ) + @ruby_tasks = [] + end + + after do + @manager.dispose + @ruby_tasks.each(&:dispose) + end + + describe "#register_callback" do + it "creates a poller when a callback is first registered" do + _, async = make_async_task("test") + @manager.register_callback( + async.port("out"), proc {}, period: 0.1, buffer_size: 1 + ) + assert @manager.polling?(async.port("out")) + end + + it "keeps the current reader if the buffer size is compatible" do + _, async = make_async_task("test") + out_p = 
async.port("out") + @manager.register_callback( + out_p, proc {}, period: 0.1, buffer_size: 1 + ) + reader = @manager.find_poller_for_port(out_p).reader + + @manager.register_callback( + out_p, proc {}, period: 0.1, buffer_size: 1 + ) + assert_same reader, @manager.find_poller_for_port(out_p).reader + end + + it "creates a new reader if the buffer size is greater " \ + "than the actual" do + _, async = make_async_task("test") + out_p = async.port("out") + @manager.register_callback( + out_p, proc {}, period: 0.1, buffer_size: 1 + ) + orig_reader = @manager.find_poller_for_port(out_p).reader + + @manager.register_callback( + out_p, proc {}, period: 0.1, buffer_size: 5 + ) + reader = @manager.find_poller_for_port(out_p).reader + refute_same orig_reader, reader + assert_equal 5, reader.buffer_size + assert orig_reader.disposed? + end + + it "updates the poller period at each new callback " \ + "(reusing poller)" do + _, async = make_async_task("test") + out_p = async.port("out") + @manager.register_callback( + out_p, proc {}, period: 0.1, buffer_size: 1 + ) + assert_equal 0.1, @manager.find_poller_for_port(out_p).period + + @manager.register_callback( + out_p, proc {}, period: 0.05, buffer_size: 1 + ) + assert_equal 0.05, @manager.find_poller_for_port(out_p).period + end + + it "updates the poller period at each new callback " \ + "(new poller)" do + _, async = make_async_task("test") + out_p = async.port("out") + @manager.register_callback( + out_p, proc {}, period: 0.1, buffer_size: 1 + ) + assert_equal 0.1, @manager.find_poller_for_port(out_p).period + + @manager.register_callback( + out_p, proc {}, period: 0.05, buffer_size: 5 + ) + assert_equal 0.05, @manager.find_poller_for_port(out_p).period + end + end + + describe "#poll" do + before do + @task, @async = make_async_task("test") + @received_samples = [] + @out_p = @async.port("out") + @manager.register_callback( + @out_p, + proc { @received_samples << _1 }, + period: 0.1, buffer_size: 1 + ) + @poller = 
@manager.find_poller_for_port(@out_p) + end + + it "does nothing if the reader is not connected" do + @manager.poll + end + + it "immediately schedules the next read once connected" do + execute_all(@connection_executor) + @manager.poll + + assert @poller.reader.connected? + assert @poller.read_future + + Orocos.allow_blocking_calls { @task.out.write 42 } + execute_all(@read_executor) + assert_equal 42, @poller.read_future.value + @manager.poll + + assert_equal [42], @received_samples + end + + it "reschedules the next read based on the read period" do + execute_all(@connection_executor) + @manager.poll + + execute_all(@read_executor) + @poller.read_future.wait + current_t = @poller.next_time + time = freeze_monotonic_time + + @manager.poll + next_t = @poller.next_time + delta_in_periods = (next_t - current_t) / 0.1 + assert_in_delta delta_in_periods, delta_in_periods.round, 1e-6 + assert_operator next_t, :>, time + assert_operator next_t - time, :<, 0.1 + end + + it "does not do anything for pollers whose next time " \ + "has not been reached" do + execute_all(@connection_executor) + @manager.poll + + execute_all(@read_executor) + @poller.read_future.wait + time = freeze_monotonic_time + @manager.poll + @manager.poll + refute @poller.read_future + + freeze_monotonic_time(time + 0.1) + @manager.poll + assert @poller.read_future + end + + def freeze_monotonic_time(time = @manager.monotonic_time) + @frozen_time = time + flexmock(@manager) + .should_receive(:monotonic_time) + .and_return { @frozen_time } + time + end + end + + def make_ruby_task(name) + ruby_task = Orocos.allow_blocking_calls do + t = Orocos::RubyTasks::TaskContext.new(name) + t.create_attribute "attr", "/int16_t" + t.create_property "prop", "/int32_t" + t.create_input_port "in", "/float" + t.create_output_port "out", "/double" + t + end + @ruby_tasks << ruby_task + ruby_task + end + + def make_async_task(name) + t = make_ruby_task name + async = Orocos.allow_blocking_calls do + 
TaskContext.discover(t) + end + [t, async] + end + + def execute_all(executor) + Orocos.allow_blocking_calls { executor.execute_all } + end + end + end + end +end From c81e904c7c6c628e96d6e7b446e0d36e5962194e Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Fri, 27 Dec 2024 16:22:37 -0300 Subject: [PATCH 076/158] fix: disable type export in 'telemetry ui' It is an costly, obsolete behaviour that is unneeded in this case. --- lib/syskit/telemetry/cli.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/syskit/telemetry/cli.rb b/lib/syskit/telemetry/cli.rb index 9b2f9518f..30873ea5f 100644 --- a/lib/syskit/telemetry/cli.rb +++ b/lib/syskit/telemetry/cli.rb @@ -49,6 +49,7 @@ def roby_setup # rubocop:disable Metrics/AbcSize Syskit.conf.only_load_models = true # We don't need the process server, win some startup time Syskit.conf.disables_local_process_server = true + Syskit.conf.export_types = false Roby.app.ignore_all_load_errors = true Roby.app.development_mode = false From 284e1333de7fb645190e53b66b677f4a55dddba3 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 30 Dec 2024 11:25:27 -0300 Subject: [PATCH 077/158] feat: add API to remove callbacks from PortReadManager --- .../telemetry/async/port_read_manager.rb | 37 +++++++++++++++++-- .../telemetry/async/test_port_read_manager.rb | 29 +++++++++++++++ 2 files changed, 63 insertions(+), 3 deletions(-) diff --git a/lib/syskit/telemetry/async/port_read_manager.rb b/lib/syskit/telemetry/async/port_read_manager.rb index dbc1645b9..edcb8d4e7 100644 --- a/lib/syskit/telemetry/async/port_read_manager.rb +++ b/lib/syskit/telemetry/async/port_read_manager.rb @@ -61,6 +61,10 @@ def connected? reader.connected? 
end + def dispose + reader.dispose + end + def poll reader.poll end @@ -119,6 +123,26 @@ def register_callback(port, callback, period:, buffer_size:) (@callbacks[port] ||= []) << callback ensure_reader_uptodate(port) + Roby.disposable do + deregister_callback(port, callback) + end + end + + # @api private + # + # De-registers a callback + # + # This is not meant to be called directly. Use the disposable + # returned by {#register_callback} instead. + def deregister_callback(port, callback) + return unless (callbacks = @callbacks[port]) + + callbacks.delete(callback) + if callbacks.empty? + remove_poller(port) + else + ensure_reader_uptodate(port) + end end # Reconnect the reader for this port if needed @@ -142,10 +166,17 @@ def ensure_reader_uptodate(port) @pollers[port] = poller end + # @api private + # + # Remove the poller for a given port + def remove_poller(port) + return unless (poller = @pollers.delete(port)) + + poller.dispose + end + def dispose - @pollers.each_value do |p| - p.reader.dispose - end + @pollers.each_value(&:dispose) @pollers = {} end diff --git a/test/telemetry/async/test_port_read_manager.rb b/test/telemetry/async/test_port_read_manager.rb index 2d7e471d7..59c6b2542 100644 --- a/test/telemetry/async/test_port_read_manager.rb +++ b/test/telemetry/async/test_port_read_manager.rb @@ -95,6 +95,35 @@ module Async ) assert_equal 0.05, @manager.find_poller_for_port(out_p).period end + + it "returns a disposable that will clear the callback" do + _, async = make_async_task("test") + port = async.port("out") + disposable = @manager.register_callback( + port, proc {}, period: 0.1, buffer_size: 1 + ) + poller = @manager.find_poller_for_port(port) + flexmock(poller).should_receive(:dispose).once.pass_thru + disposable.dispose + refute @manager.polling?(async.port("out")) + end + + it "keeps the poller if there are callbacks remaining" do + _, async = make_async_task("test") + port = async.port("out") + disposable = @manager.register_callback( + port, 
proc {}, period: 0.1, buffer_size: 1 + ) + disposable2 = @manager.register_callback( + port, proc {}, period: 0.1, buffer_size: 1 + ) + poller = @manager.find_poller_for_port(port) + flexmock(poller).should_receive(:dispose).once.pass_thru + disposable.dispose + assert @manager.polling?(async.port("out")) + disposable2.dispose + refute @manager.polling?(async.port("out")) + end end describe "#poll" do From d740e210dae2f9cb2056b7c8e1dfdb4ae8ad61d6 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 30 Dec 2024 11:26:24 -0300 Subject: [PATCH 078/158] feat: use PortReadManager to read data for ports --- lib/syskit/telemetry/async.rb | 1 + lib/syskit/telemetry/async/listener.rb | 39 ++++++++ lib/syskit/telemetry/async/name_service.rb | 13 ++- lib/syskit/telemetry/async/output_port.rb | 33 ++++++- lib/syskit/telemetry/async/output_reader.rb | 9 +- .../telemetry/async/port_read_manager.rb | 22 +++-- lib/syskit/telemetry/async/task_context.rb | 91 +++++++++---------- test/telemetry/async/test_name_service.rb | 4 +- test/telemetry/async/test_output_reader.rb | 5 +- .../telemetry/async/test_port_read_manager.rb | 4 +- test/telemetry/async/test_task_context.rb | 29 +++--- 11 files changed, 165 insertions(+), 85 deletions(-) create mode 100644 lib/syskit/telemetry/async/listener.rb diff --git a/lib/syskit/telemetry/async.rb b/lib/syskit/telemetry/async.rb index 8c50e9480..8d946e397 100644 --- a/lib/syskit/telemetry/async.rb +++ b/lib/syskit/telemetry/async.rb @@ -4,6 +4,7 @@ require "syskit/telemetry/async/task_context" require "syskit/telemetry/async/interface_object" require "syskit/telemetry/async/readable_interface_object" +require "syskit/telemetry/async/listener" require "syskit/telemetry/async/attribute" require "syskit/telemetry/async/property" require "syskit/telemetry/async/input_port" diff --git a/lib/syskit/telemetry/async/listener.rb b/lib/syskit/telemetry/async/listener.rb new file mode 100644 index 000000000..c4f4f966e --- /dev/null +++ 
b/lib/syskit/telemetry/async/listener.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # Adapter object to provide Orocos::Async listener API + # + # Unlike the disposable returned by the hooks, the listener API + # from Orocos::Async allows to stop and start listening + class Listener + # @param [#call] register_with a callable that will register the callback + def initialize(register_with) + @register_with = register_with + end + + # Register the callback on the configured object and event + # + # Does nothing if the listener is already started + def start + return if @disposable + + @disposable = @register_with.call + end + + # De-registers the callback + # + # Does nothing if the listener is already started + def stop + @disposable&.dispose + @disposable = nil + end + + def dispose + stop + end + end + end + end +end diff --git a/lib/syskit/telemetry/async/name_service.rb b/lib/syskit/telemetry/async/name_service.rb index 66651d932..20eaf76ec 100644 --- a/lib/syskit/telemetry/async/name_service.rb +++ b/lib/syskit/telemetry/async/name_service.rb @@ -15,7 +15,10 @@ class NameService < Orocos::NameServiceBase # @param [Hash] tasks The tasks which are # known by the name service. 
# @note The namespace is always "Local" - def initialize(thread_count: 1) + def initialize( + discovery_executor: Concurrent::CachedThreadPool.new(max_length: 2), + port_read_manager: PortReadManager.new + ) super() @iors = Concurrent::AtomicReference.new({}) @@ -24,8 +27,8 @@ def initialize(thread_count: 1) @task_removed_callbacks = Concurrent::Array.new @orogen_models = Concurrent::Hash.new @discovery = {} - @discovery_executor = - Concurrent::FixedThreadPool.new(thread_count) + @discovery_executor = discovery_executor + @port_read_manager = port_read_manager end def tasks @@ -228,7 +231,9 @@ def discover_task(name, ior, orogen_model_name) model: orogen_model_from_name(orogen_model_name) ) - async_task = TaskContext.discover(task) + async_task = TaskContext.discover( + task, port_read_manager: @port_read_manager + ) [ior, async_task] rescue StandardError => e diff --git a/lib/syskit/telemetry/async/output_port.rb b/lib/syskit/telemetry/async/output_port.rb index 864dfe9dd..0e2707834 100644 --- a/lib/syskit/telemetry/async/output_port.rb +++ b/lib/syskit/telemetry/async/output_port.rb @@ -4,7 +4,13 @@ module Syskit module Telemetry module Async # Async interface compatible with the orocos.rb's API - class OutputPort < ReadableInterfaceObject + class OutputPort < InterfaceObject + def initialize(task_context, name, type, port_read_manager) + super(task_context, name, type) + + @port_read_manager = port_read_manager + end + def output? true end @@ -13,6 +19,31 @@ def input? 
false end + def on_raw_data(period: 0.1, init: false, buffer_size: 1) + callback = proc do |value| + yield(value) if value + end + + register_with = proc do + @port_read_manager.register_callback( + self, callback, + period: period, init: init, buffer_size: buffer_size + ) + end + + listener = Listener.new(register_with) + listener.start + listener + end + + def on_data(period: 0.1, init: false, buffer_size: 1) + on_raw_data( + period: period, init: init, buffer_size: buffer_size + ) do |data| + yield Typelib.to_ruby(data) + end + end + # Asynchronously create a data reader on this port def reader(connect_on:, disconnect_on:, **policy) OutputReader.new( diff --git a/lib/syskit/telemetry/async/output_reader.rb b/lib/syskit/telemetry/async/output_reader.rb index 03a5759af..9114ea3c4 100644 --- a/lib/syskit/telemetry/async/output_reader.rb +++ b/lib/syskit/telemetry/async/output_reader.rb @@ -8,9 +8,12 @@ class OutputReader # The async port this reader is connected to attr_reader :port + # The policy hash used to create this reader + attr_reader :policy + def initialize(port, policy, connect_on:, disconnect_on:) @port = port - @policy = policy + @policy = policy.dup.freeze @cancel_event = Concurrent::Promises.resolvable_event @connection_future = nil @@ -128,10 +131,6 @@ def dispose def disposed? @reachability_listener.disposed? 
end - - def buffer_size - @policy[:size] - end end end end diff --git a/lib/syskit/telemetry/async/port_read_manager.rb b/lib/syskit/telemetry/async/port_read_manager.rb index edcb8d4e7..28c28710b 100644 --- a/lib/syskit/telemetry/async/port_read_manager.rb +++ b/lib/syskit/telemetry/async/port_read_manager.rb @@ -46,7 +46,7 @@ def self.default_read_executor end Callback = Struct.new( - :port, :callback, :period, :buffer_size, keyword_init: true + :port, :callback, :period, :buffer_size, :init, keyword_init: true ) do def dispatch(value) callback.call(value) @@ -101,8 +101,8 @@ def reset_read_tracking self.next_time = nil end - def buffer_size - reader&.buffer_size + def policy + reader&.policy end end @@ -115,10 +115,10 @@ def buffer_size # @param [Integer] buffer_size the size of the sample buffer requested # by the callback. The actual buffer will be of *at least* that many # samples. - def register_callback(port, callback, period:, buffer_size:) + def register_callback(port, callback, period:, buffer_size:, init: false) callback = Callback.new( port: port, callback: callback, - period: period, buffer_size: buffer_size + period: period, buffer_size: buffer_size, init: init ) (@callbacks[port] ||= []) << callback @@ -152,13 +152,13 @@ def ensure_reader_uptodate(port) poller = find_poller_for_port(port) || Poller.new(port: port) - buffer_size = required_buffer_size_for(port) - if buffer_size != poller.buffer_size + policy = required_policy_for(port) + if policy != poller.policy poller.reader&.dispose poller.reader = port.reader( connect_on: @connection_executor, disconnect_on: @disconnection_executor, - type: :circular_buffer, pull: true, size: buffer_size + **policy ) end @@ -233,10 +233,12 @@ def dispatch_read_result(poller) # Return the buffer size needed by all callbacks of a port, in aggregate # # @return [Integer] - def required_buffer_size_for(port) + def required_policy_for(port) return unless (callbacks = @callbacks[port]) - callbacks.map { _1.buffer_size 
}.max + buffer_size = callbacks.map { _1.buffer_size }.max + init = callbacks.map { _1.init }.inject(&:|) + { type: :circular_buffer, size: buffer_size, init: init, pull: true } end end end diff --git a/lib/syskit/telemetry/async/task_context.rb b/lib/syskit/telemetry/async/task_context.rb index 2a3b71a8f..f1cb77b59 100644 --- a/lib/syskit/telemetry/async/task_context.rb +++ b/lib/syskit/telemetry/async/task_context.rb @@ -45,17 +45,35 @@ class TaskContext < TaskContextHooks # will be considered the same from the perspective of a hash key attr_reader :hash + def states_index_to_symbols + return @states_index_to_symbols if @states_index_to_symbols + + @states_index_to_symbols = [] + @states_index_to_symbols[Orocos::TaskContext::STATE_PRE_OPERATIONAL] = + :PRE_OPERATIONAL + @states_index_to_symbols[Orocos::TaskContext::STATE_STOPPED] = + :STOPPED + @states_index_to_symbols[Orocos::TaskContext::STATE_RUNNING] = + :RUNNING + @states_index_to_symbols[Orocos::TaskContext::STATE_RUNTIME_ERROR] = + :RUNTIME_ERROR + @states_index_to_symbols[Orocos::TaskContext::STATE_EXCEPTION] = + :EXCEPTION + @states_index_to_symbols[Orocos::TaskContext::STATE_FATAL_ERROR] = + :FATAL_ERROR + @states_index_to_symbols + end + # Discover information about a Orocos::TaskContext and create the # corresponding {TaskContext} # # This is meant to be called in a separate thread - def self.discover(task) - async_task = TaskContext.new(task.name) + def self.discover(task, port_read_manager:) + async_task = TaskContext.new( + task.name, port_read_manager: port_read_manager + ) # Already do an initial discovery of all the task's interface objects - state_reader = task.state_reader( - pull: true, type: :circular_buffer, size: 10 - ) discover_attributes(async_task, task) discover_properties(async_task, task) discover_ports(async_task, task) @@ -63,7 +81,7 @@ def self.discover(task) # We can do this here ONLY BECAUSE we're populating an initial # state. 
Further updates need to call the `discover_` methods in # the main thread - async_task.reachable!(task, state_reader: state_reader) + async_task.reachable!(task) async_task end @@ -91,7 +109,9 @@ def self.discover_ports(async_task, task) async_task.discover_ports(raw_ports) end - def initialize(name, model: self.class.dummy_orogen_model(name)) + def initialize( + name, port_read_manager:, model: self.class.dummy_orogen_model(name) + ) super() @name = name @@ -102,6 +122,7 @@ def initialize(name, model: self.class.dummy_orogen_model(name)) # (we can't have two different tasks with the same name) @hash = name.hash + @port_read_manager = port_read_manager @attributes = {} @properties = {} @ports = {} @@ -158,10 +179,16 @@ def reachable? # Set the underlying task context # # Must be called from the main thread - def reachable!(task_context, state_reader:) + def reachable!(task_context) @raw_task_context = task_context @identity = task_context.ior - state_read_init(state_reader) + + @state_reader_callback = + port("state").on_data(init: true, buffer_size: 20) do |new_state| + new_state = states_index_to_symbols[new_state] || new_state + @current_state = new_state + run_hook :on_state_change, new_state + end run_hook :on_reachable, task_context end @@ -171,16 +198,6 @@ def on_reachable(&block) block.call if reachable? 
end - def state_read_init(state_reader) - @state_reader = state_reader - - @state_read_queue = queue = Queue.new - @state_read_stop = event = Concurrent::Event.new - @state_read_thread = Thread.new do - state_read_poll_thread(state_reader, queue, event) - end - end - def on_state_change(&block) super @@ -262,15 +279,16 @@ def discover_properties(raw_properties) def discover_ports(raw_ports) @ports = raw_ports.each_with_object({}) do |p, h| - klass = + async = case p when Orocos::InputPort - InputPort + InputPort.new(self, p.name, p.type) else - OutputPort + OutputPort.new( + self, p.name, p.type, @port_read_manager + ) end - async = klass.new(self, p.name, p.type) async.reachable!(p) h[p.name] = async end @@ -281,33 +299,8 @@ def discover_ports(raw_ports) def dispose @raw_task_context = nil - Concurrent::Promises.future(@state_reader, &:disconnect) @properties.clear - end - - def state_read_poll_thread(reader, queue, stop, period: 0.1) - until stop.set? - tic = Time.now - while (state = reader.read_new) - queue << state - end - remaining = period - (Time.now - tic) - sleep remaining if remaining > 0.01 - end - end - - def poll(period: 0.1) - while (new_state = read_new_state) - @current_state = new_state - run_hook :on_state_change, new_state - end - rescue ThreadError - sleep(period) - end - - def read_new_state - @state_read_queue.pop(true) - rescue ThreadError # rubocop:disable Lint/SuppressedException + @state_reader_callback.dispose end end end diff --git a/test/telemetry/async/test_name_service.rb b/test/telemetry/async/test_name_service.rb index c5d800eec..1f94d9ae0 100644 --- a/test/telemetry/async/test_name_service.rb +++ b/test/telemetry/async/test_name_service.rb @@ -245,7 +245,9 @@ module Async it "returns the IOR of a registered task" do _, task = make_deployed_task("test", "some") async_task = Orocos.allow_blocking_calls do - TaskContext.discover(task) + TaskContext.discover( + task, port_read_manager: PortReadManager.new + ) end assert_equal "test", 
async_task.name @ns.register(async_task) diff --git a/test/telemetry/async/test_output_reader.rb b/test/telemetry/async/test_output_reader.rb index 6106e7e7c..6050bb6a3 100644 --- a/test/telemetry/async/test_output_reader.rb +++ b/test/telemetry/async/test_output_reader.rb @@ -13,10 +13,13 @@ module Async @disconnection_executor = Test::PollingExecutor.new @read_executor = Test::PollingExecutor.new @ruby_tasks = [] + + @port_read_manager = PortReadManager.new end after do @ruby_tasks.each(&:dispose) + @port_read_manager.dispose end it "asynchronously connects to the port" do @@ -203,7 +206,7 @@ def make_ruby_task(name) def make_async_task(name) t = make_ruby_task name async = Orocos.allow_blocking_calls do - TaskContext.discover(t) + TaskContext.discover(t, port_read_manager: @port_read_manager) end [t, async] end diff --git a/test/telemetry/async/test_port_read_manager.rb b/test/telemetry/async/test_port_read_manager.rb index 59c6b2542..19cbad71a 100644 --- a/test/telemetry/async/test_port_read_manager.rb +++ b/test/telemetry/async/test_port_read_manager.rb @@ -62,7 +62,7 @@ module Async ) reader = @manager.find_poller_for_port(out_p).reader refute_same orig_reader, reader - assert_equal 5, reader.buffer_size + assert_equal 5, reader.policy[:size] assert orig_reader.disposed? 
end @@ -217,7 +217,7 @@ def make_ruby_task(name) def make_async_task(name) t = make_ruby_task name async = Orocos.allow_blocking_calls do - TaskContext.discover(t) + TaskContext.discover(t, port_read_manager: @manager) end [t, async] end diff --git a/test/telemetry/async/test_task_context.rb b/test/telemetry/async/test_task_context.rb index 38e893886..f52e97701 100644 --- a/test/telemetry/async/test_task_context.rb +++ b/test/telemetry/async/test_task_context.rb @@ -9,17 +9,19 @@ module Async describe TaskContext do before do @ns = NameService.new + @port_read_manager = PortReadManager.new @ruby_tasks = [] end after do + @port_read_manager.dispose @ruby_tasks.each(&:dispose) end it "is the same as another async task with the same remote task when " \ "used as hash key" do t, async = make_async_task "test" - async2 = Orocos.allow_blocking_calls { TaskContext.discover(t) } + async2 = discover_task(t) _, async3 = make_async_task "test2" @@ -85,13 +87,13 @@ module Async states = [] async.on_state_change { states << _1 } - assert_polling_eventually(async) { states == [:PRE_OPERATIONAL] } + assert_polling_eventually { states == [:PRE_OPERATIONAL] } Orocos.allow_blocking_calls do task.configure task.start end - assert_polling_eventually(async) do + assert_polling_eventually do states == %I[PRE_OPERATIONAL STOPPED RUNNING] end end @@ -103,7 +105,7 @@ module Async end states = [] async.on_state_change { states << _1 } - assert_polling_eventually(async) do + assert_polling_eventually do states[-1] == :RUNNING end @@ -188,7 +190,7 @@ module Async t, async = make_async_task "test" attr = async.attribute("attr") - async2 = Orocos.allow_blocking_calls { TaskContext.discover(t) } + async2 = discover_task(t) attr2 = async2.attribute("attr") _, async3 = make_async_task "test2" @@ -241,7 +243,7 @@ module Async t, async = make_async_task "test" prop = async.property("prop") - async2 = Orocos.allow_blocking_calls { TaskContext.discover(t) } + async2 = discover_task(t) prop2 = 
async2.property("prop") _, async3 = make_async_task "test2" @@ -292,7 +294,7 @@ module Async t, async = make_async_task "test" port = async.port("out") - async2 = Orocos.allow_blocking_calls { TaskContext.discover(t) } + async2 = discover_task(t) port2 = async2.port("out") _, async3 = make_async_task "test2" @@ -321,16 +323,19 @@ def make_ruby_task(name) def make_async_task(name) t = make_ruby_task name - async = Orocos.allow_blocking_calls do - TaskContext.discover(t) + [t, discover_task(t)] + end + + def discover_task(task) + Orocos.allow_blocking_calls do + TaskContext.discover(task, port_read_manager: @port_read_manager) end - [t, async] end - def assert_polling_eventually(async, period: 0.01, timeout: 2, &block) + def assert_polling_eventually(period: 0.01, timeout: 2, &block) deadline = Time.now + timeout while Time.now < deadline - async.poll + @port_read_manager.poll return if block.call sleep(period) From f9cbf3694af8564073b9ab31ed476e07f376488e Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 30 Dec 2024 11:33:54 -0300 Subject: [PATCH 079/158] chore: setup the separate test:telemetry target --- Rakefile | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/Rakefile b/Rakefile index ad3d5f956..751591b37 100644 --- a/Rakefile +++ b/Rakefile @@ -28,6 +28,14 @@ def minitest_set_options(test_task, name) test_task.options = "#{TESTOPTS} #{minitest_args} -- --simplecov-name=#{name}" end +Rake::TestTask.new("test:telemetry") do |t| + t.libs << "." + t.libs << "lib" + minitest_set_options(t, "telemetry") + t.test_files = FileList["test/telemetry/**/test_*.rb"] + t.warning = false +end + Rake::TestTask.new("test:core") do |t| t.libs << "." 
t.libs << "lib" @@ -37,6 +45,7 @@ Rake::TestTask.new("test:core") do |t| .exclude("test/ros/**/*.rb") .exclude("test/gui/**/*.rb") .exclude("test/live/**/*.rb") + .exclude("test/telemetry/**/*.rb") t.test_files = test_files t.warning = false end @@ -57,7 +66,7 @@ Rake::TestTask.new("test:gui") do |t| t.warning = false end -task "test" => ["test:gui", "test:core", "test:live"] +task "test" => ["test:gui", "test:core", "test:live", "test:telemetry"] task "rubocop" do raise "rubocop failed" unless system(ENV["RUBOCOP_CMD"] || "rubocop") From 845f321da0c58e8588d9b97cd689c0a317218b0e Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 30 Dec 2024 13:58:26 -0300 Subject: [PATCH 080/158] fix: working Async along with the task inspector This commits adapts/adds new calls to please Vizkit. The most important change is that the initialization of the state values is now done by the port manager instead of synchronously. This is made necessary as the task inspector registers a callback on the state change before it does on reachability, leading sometimes to having the state remaining INITIALIZING --- lib/syskit/telemetry/async/interface_object.rb | 8 ++++++++ lib/syskit/telemetry/async/output_port.rb | 4 ++++ .../telemetry/async/port_read_manager.rb | 11 +++++++++++ lib/syskit/telemetry/async/task_context.rb | 5 +++-- lib/syskit/telemetry/ui/runtime_state.rb | 18 +++++++++++------- test/telemetry/async/test_task_context.rb | 9 ++++++--- 6 files changed, 43 insertions(+), 12 deletions(-) diff --git a/lib/syskit/telemetry/async/interface_object.rb b/lib/syskit/telemetry/async/interface_object.rb index 687a184d8..4424c0b26 100644 --- a/lib/syskit/telemetry/async/interface_object.rb +++ b/lib/syskit/telemetry/async/interface_object.rb @@ -90,6 +90,14 @@ def new_sample def type_name @type.name end + + def to_proxy + self + end + + def full_name + "#{@task_context.name}.#{@name}" + end end end end diff --git a/lib/syskit/telemetry/async/output_port.rb 
b/lib/syskit/telemetry/async/output_port.rb index 0e2707834..35a19cd77 100644 --- a/lib/syskit/telemetry/async/output_port.rb +++ b/lib/syskit/telemetry/async/output_port.rb @@ -51,6 +51,10 @@ def reader(connect_on:, disconnect_on:, **policy) connect_on: connect_on, disconnect_on: disconnect_on ) end + + def type? + true + end end end end diff --git a/lib/syskit/telemetry/async/port_read_manager.rb b/lib/syskit/telemetry/async/port_read_manager.rb index 28c28710b..12fa127db 100644 --- a/lib/syskit/telemetry/async/port_read_manager.rb +++ b/lib/syskit/telemetry/async/port_read_manager.rb @@ -55,6 +55,7 @@ def dispatch(value) Poller = Struct.new( :port, :reader, :next_time, :period, :read_future, + :propagate_last_received_value, :last_value, keyword_init: true ) do def connected? @@ -128,6 +129,11 @@ def register_callback(port, callback, period:, buffer_size:, init: false) end end + # Request that the last received value is sent to the callbacks + def propagate_last_received_value(port) + find_poller_for_port(port)&.propagate_last_received_value = true + end + # @api private # # De-registers a callback @@ -224,7 +230,12 @@ def monotonic_time def dispatch_read_result(poller) fulfilled, value, reason = poller.result if fulfilled + if poller.propagate_last_received_value + value ||= poller.last_value + poller.propagate_last_received_value = false + end @callbacks[poller.port].each { |c| c.dispatch(value) } + poller.last_value = value else warn "failed to read #{poller.port}: #{reason}" end diff --git a/lib/syskit/telemetry/async/task_context.rb b/lib/syskit/telemetry/async/task_context.rb index f1cb77b59..dcb2aeb9e 100644 --- a/lib/syskit/telemetry/async/task_context.rb +++ b/lib/syskit/telemetry/async/task_context.rb @@ -183,13 +183,13 @@ def reachable!(task_context) @raw_task_context = task_context @identity = task_context.ior + run_hook :on_reachable, task_context @state_reader_callback = port("state").on_data(init: true, buffer_size: 20) do |new_state| new_state = 
states_index_to_symbols[new_state] || new_state @current_state = new_state run_hook :on_state_change, new_state end - run_hook :on_reachable, task_context end def on_reachable(&block) @@ -201,7 +201,8 @@ def on_reachable(&block) def on_state_change(&block) super - block.call(@current_state) if @current_state + # Explicitly ask to send the last received value + @port_read_manager.propagate_last_received_value(port("state")) end def each_attribute(&block) diff --git a/lib/syskit/telemetry/ui/runtime_state.rb b/lib/syskit/telemetry/ui/runtime_state.rb index 194b6b7f0..7bfac7552 100644 --- a/lib/syskit/telemetry/ui/runtime_state.rb +++ b/lib/syskit/telemetry/ui/runtime_state.rb @@ -18,6 +18,9 @@ require "syskit/telemetry/async" require "syskit/interface/v2" +# Monkey patching from Vizkit +Syskit::Telemetry::Async::OutputPort.include Orocos::QtOrocos + module Syskit module Telemetry module UI @@ -234,8 +237,13 @@ def reset @call_guards = {} @orogen_models = {} - @name_service = Async::NameService.new - @async_name_service = Orocos::Async::NameService.new(@name_service) + @port_read_manager&.dispose + @name_service&.dispose + + @port_read_manager = Async::PortReadManager.new + @name_service = Async::NameService.new( + port_read_manager: @port_read_manager + ) end def hide_loggers? 
@@ -538,7 +546,7 @@ def poll_syskit_interface display_current_cycle_index_and_time query_deployment_update update_current_job_task_names if current_job - poll_task_contexts + @port_read_manager.poll else reset_current_deployments reset_current_job @@ -550,10 +558,6 @@ def poll_syskit_interface end slots "poll_syskit_interface()" - def poll_task_contexts - @name_service.each_task(&:poll) - end - def display_current_cycle_index_and_time return unless syskit.cycle_start_time diff --git a/test/telemetry/async/test_task_context.rb b/test/telemetry/async/test_task_context.rb index f52e97701..1585b81bb 100644 --- a/test/telemetry/async/test_task_context.rb +++ b/test/telemetry/async/test_task_context.rb @@ -109,9 +109,12 @@ module Async states[-1] == :RUNNING end - states = [] - async.on_state_change { states << _1 } - assert_equal [:RUNNING], states + states2 = [] + async.on_state_change { states2 << _1 } + assert_polling_eventually do + states2[-1] == :RUNNING + end + assert_equal [:RUNNING], states2 end it "does not call the block is no state is known" do From cf58297d6150fcfb53ee0421d14f53471b72c616 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 30 Dec 2024 14:51:17 -0300 Subject: [PATCH 081/158] feat: implement sub ports --- lib/syskit/telemetry/async.rb | 1 + lib/syskit/telemetry/async/output_port.rb | 5 + .../telemetry/async/output_port_subfield.rb | 115 +++++++++++++++ .../async/test_output_port_subfield.rb | 137 ++++++++++++++++++ 4 files changed, 258 insertions(+) create mode 100644 lib/syskit/telemetry/async/output_port_subfield.rb create mode 100644 test/telemetry/async/test_output_port_subfield.rb diff --git a/lib/syskit/telemetry/async.rb b/lib/syskit/telemetry/async.rb index 8d946e397..b7dacc731 100644 --- a/lib/syskit/telemetry/async.rb +++ b/lib/syskit/telemetry/async.rb @@ -9,6 +9,7 @@ require "syskit/telemetry/async/property" require "syskit/telemetry/async/input_port" require "syskit/telemetry/async/output_port" +require 
"syskit/telemetry/async/output_port_subfield" require "syskit/telemetry/async/output_reader" require "syskit/telemetry/async/port_read_manager" diff --git a/lib/syskit/telemetry/async/output_port.rb b/lib/syskit/telemetry/async/output_port.rb index 35a19cd77..55f88ab6d 100644 --- a/lib/syskit/telemetry/async/output_port.rb +++ b/lib/syskit/telemetry/async/output_port.rb @@ -52,6 +52,11 @@ def reader(connect_on:, disconnect_on:, **policy) ) end + # Create a port-like accessor for a field of the port + def sub_port(subfields) + OutputPortSubfield.new(self, subfields, @port_read_manager) + end + def type? true end diff --git a/lib/syskit/telemetry/async/output_port_subfield.rb b/lib/syskit/telemetry/async/output_port_subfield.rb new file mode 100644 index 000000000..b4d495cbb --- /dev/null +++ b/lib/syskit/telemetry/async/output_port_subfield.rb @@ -0,0 +1,115 @@ +# frozen_string_literal: true + +module Syskit + module Telemetry + module Async + # An API compatible with {OutputPort} but that will give access to a sub-part + # of a data sample + # + # For instance, a field in a struct + class OutputPortSubfield < InterfaceObject + def initialize(port, subfield, port_read_manager) + @path = normalize_subfield_path(subfield) + subname = compute_subname(@path) + subtype = compute_subtype(port.type, @path) + + super(port.task_context, "#{port.name}#{subname.join}", subtype) + + @port_read_manager = port_read_manager + + @orig_port = port + @on_port_reachable = port.on_reachable do |raw| + reachable!(raw) + end + @on_port_unreachable = port.on_unreachable do + unreachable! + end + end + + def output? + true + end + + def input? 
+ false + end + + def dispose + @on_port_reachable.dispose + @on_port_unreachable.dispose + end + + def on_raw_data(period: 0.1, init: false, buffer_size: 1) + callback = proc do |value| + yield(self.class.resolve_subfield(value, @path)) if value + end + + register_with = proc do + @port_read_manager.register_callback( + @orig_port, callback, + period: period, init: init, buffer_size: buffer_size + ) + end + + listener = Listener.new(register_with) + listener.start + listener + end + + def on_data(**policy) + on_raw_data(**policy) do |sample| + sample = Typelib.to_ruby(sample) + yield(sample) + end + end + + def sub_port(subfield) + OutputPortSubfield.new( + @orig_port, + @subfield + Array(subfield) + ) + end + + def self.resolve_subfield(root_sample, path) + path.inject(root_sample) do |sample, f| + break(nil) if f.kind_of?(Integer) && sample.size <= f + + sample.raw_get(f) + end + end + + def compute_subtype(type, path) + path.inject(type) do |t, f| + case f + when Integer + t.deference + else + t[f] + end + end + end + + def normalize_subfield_path(subfield) + subfield.map do |field| + if /^\d+$/.match?(field) + Integer(field) + else + field.to_s + end + end + end + + def compute_subname(path) + path.map do |field| + case field + when Integer + "[#{field}]" + else + ".#{field}" + end + end + end + end + end + end +end diff --git a/test/telemetry/async/test_output_port_subfield.rb b/test/telemetry/async/test_output_port_subfield.rb new file mode 100644 index 000000000..a321f5419 --- /dev/null +++ b/test/telemetry/async/test_output_port_subfield.rb @@ -0,0 +1,137 @@ +# frozen_string_literal: true + +require "syskit/test/self" +require "syskit/telemetry/async" + +module Syskit + module Telemetry + module Async + describe OutputPortSubfield do + before do + @ruby_tasks = [] + @port_read_manager = PortReadManager.new + end + + after do + @ruby_tasks.each(&:dispose) + @port_read_manager.dispose + end + + it "computes the subfield name and type for compound types" do + 
_, async = make_async_task("test") + port = async.port("rbs").sub_port(%w[time microseconds]) + assert_equal "/int64_t", port.type.name + assert_equal "rbs.time.microseconds", port.name + end + + it "computes the subfield name and type for container types" do + _, async = make_async_task("test") + port = async.port("joints").sub_port(%w[elements 10 effort]) + assert_equal "/float", port.type.name + assert_equal "joints.elements[10].effort", port.name + end + + it "is reachable" do + _, async = make_async_task("test") + port = async.port("joints").sub_port(%w[elements 10 effort]) + assert port.reachable? + end + + it "becomes unreachable when the underlying port is" do + _, async = make_async_task("test") + port = async.port("joints").sub_port(%w[elements 10 effort]) + + mock = flexmock + mock.should_receive(:unreachable).once + port.on_unreachable { mock.unreachable } + async.port("joints").unreachable! + refute port.reachable? + end + + describe "#subfield" do + before do + @task = make_ruby_task("test") + end + + it "resolves a subfield in a compound type" do + rbs = @task.rbs.new_sample + rbs.raw_get(:time).microseconds = 42 + assert_equal 42, OutputPortSubfield.resolve_subfield( + rbs, %w[time microseconds] + ) + end + + it "resolves a subfield in a container type" do + joints = @task.joints.new_sample + joints.elements = 11.times.map { { effort: _1 } } + assert_equal 10, OutputPortSubfield.resolve_subfield( + joints, ["elements", 10, "effort"] + ) + end + + it "returns nil if the path refers to a container element that " \ + "does not exist" do + joints = @task.joints.new_sample + joints.elements = 10.times.map { { effort: _1 } } + assert_nil OutputPortSubfield.resolve_subfield( + joints, ["elements", 10, "effort"] + ) + end + end + + it "yields the subfield's data when available" do + task, async = make_async_task("test") + full_port = async.port("joints") + port = full_port.sub_port(%w[elements 10 effort]) + + received = [] + port.on_raw_data do |value| + 
received << value + end + + assert_polling_eventually do + @port_read_manager.find_poller_for_port(full_port).connected? + end + + Orocos.allow_blocking_calls do + joint_states = 11.times.map { |i| { effort: i } } + task.joints.write({ elements: joint_states }) + end + + assert_polling_eventually { received == [10] } + end + + def make_ruby_task(name) + ruby_task = Orocos.allow_blocking_calls do + t = Orocos::RubyTasks::TaskContext.new(name) + t.create_output_port "rbs", "/base/samples/RigidBodyState" + t.create_output_port "joints", "/base/samples/Joints" + t + end + @ruby_tasks << ruby_task + ruby_task + end + + def make_async_task(name) + t = make_ruby_task name + async = Orocos.allow_blocking_calls do + TaskContext.discover(t, port_read_manager: @port_read_manager) + end + [t, async] + end + + def assert_polling_eventually(period: 0.01, timeout: 2, &block) + deadline = Time.now + timeout + while Time.now < deadline + @port_read_manager.poll + return if block.call + + sleep(period) + end + + flunk("condition not reached in #{timeout} seconds") + end + end + end + end +end From dca435a3972ca2001c7e03a8972c56aa21984c69 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 30 Dec 2024 14:51:51 -0300 Subject: [PATCH 082/158] chore: use the time after the first read to initialize next_time This allows to "spread" the pollers instead of potentially having all of them synchronized --- .../telemetry/async/port_read_manager.rb | 4 ++-- .../telemetry/async/test_port_read_manager.rb | 20 ++++++++++++++++++- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/lib/syskit/telemetry/async/port_read_manager.rb b/lib/syskit/telemetry/async/port_read_manager.rb index 12fa127db..80394c658 100644 --- a/lib/syskit/telemetry/async/port_read_manager.rb +++ b/lib/syskit/telemetry/async/port_read_manager.rb @@ -79,13 +79,13 @@ def result end def schedule_read_if_needed(now, executor) - self.next_time ||= now - return unless self.next_time <= now + return if self.next_time 
&& self.next_time > now self.read_future = reader.raw_read_new(executor) end def prepare_next_read(now) + self.next_time ||= now delta_in_periods = ((now - next_time) / period).ceil # delta_in_periods == 0 should be impossible. # But, you know, little cost diff --git a/test/telemetry/async/test_port_read_manager.rb b/test/telemetry/async/test_port_read_manager.rb index 19cbad71a..6645d38a6 100644 --- a/test/telemetry/async/test_port_read_manager.rb +++ b/test/telemetry/async/test_port_read_manager.rb @@ -158,12 +158,30 @@ module Async assert_equal [42], @received_samples end - it "reschedules the next read based on the read period" do + it "reschedules the second read based on the end of the first" do execute_all(@connection_executor) @manager.poll execute_all(@read_executor) @poller.read_future.wait + time = freeze_monotonic_time + + @manager.poll + next_t = @poller.next_time + assert_in_delta time + 0.1, next_t, 1e-6 + end + + it "reschedules reads > 2 based on the period" do + execute_all(@connection_executor) + @manager.poll + execute_all(@read_executor) + @poller.read_future.wait + @manager.poll + sleep 0.2 + @manager.poll + execute_all(@read_executor) + @poller.read_future.wait + current_t = @poller.next_time time = freeze_monotonic_time From e21080138fcaf5b638ed97d8b90d1822850d2165 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 30 Dec 2024 14:52:37 -0300 Subject: [PATCH 083/158] fix: reset @current_state to nil in dispose --- lib/syskit/telemetry/async/task_context.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/syskit/telemetry/async/task_context.rb b/lib/syskit/telemetry/async/task_context.rb index dcb2aeb9e..4e2fe148c 100644 --- a/lib/syskit/telemetry/async/task_context.rb +++ b/lib/syskit/telemetry/async/task_context.rb @@ -299,6 +299,7 @@ def discover_ports(raw_ports) def dispose @raw_task_context = nil + @current_state = nil @properties.clear @state_reader_callback.dispose From b07323d8fc1e54c947274d47f1b87d6436060c73 Mon Sep 17 
00:00:00 2001 From: Sylvain Joyeux Date: Mon, 30 Dec 2024 14:52:48 -0300 Subject: [PATCH 084/158] chore define PortReadManager::Poller#to_s for better debugging --- .../telemetry/async/port_read_manager.rb | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/lib/syskit/telemetry/async/port_read_manager.rb b/lib/syskit/telemetry/async/port_read_manager.rb index 80394c658..09c0be082 100644 --- a/lib/syskit/telemetry/async/port_read_manager.rb +++ b/lib/syskit/telemetry/async/port_read_manager.rb @@ -78,6 +78,21 @@ def result read_future&.result end + def to_s(relative_to: PortReadManager.monotonic_time) + next_time_delta_ms = (next_time - relative_to) * 1000 if next_time + + format( + "poller %s: connected=%s " \ + "scheduled=%s " \ + "next_time=%.3f (in %i ms)", + name: port.full_name, + next_time: next_time || 0, + next_time_delta_ms: next_time_delta_ms || 0, + connected: connected? ? "yes" : "no", + scheduled: read_future ? "yes" : "no" + ) + end + def schedule_read_if_needed(now, executor) return if self.next_time && self.next_time > now @@ -223,6 +238,11 @@ def poll # Time in seconds returned by CLOCK_MONOTONIC def monotonic_time + self.class.monotonic_time + end + + # Time in seconds returned by CLOCK_MONOTONIC + def self.monotonic_time Process.clock_gettime(Process::CLOCK_MONOTONIC) end From 2ff67e958ec6b61336c8416d73c74c96d198825f Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 30 Dec 2024 15:27:32 -0300 Subject: [PATCH 085/158] fix: use ThreadPoolExecutor instead of CachedThreadPool The latter is essentially the former with smoe parameters fixed, such as max_threads, in a way that is not overridable --- lib/syskit/telemetry/async/name_service.rb | 3 ++- lib/syskit/telemetry/async/port_read_manager.rb | 8 ++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/syskit/telemetry/async/name_service.rb b/lib/syskit/telemetry/async/name_service.rb index 20eaf76ec..dd4fd79e6 100644 --- 
a/lib/syskit/telemetry/async/name_service.rb +++ b/lib/syskit/telemetry/async/name_service.rb @@ -16,7 +16,8 @@ class NameService < Orocos::NameServiceBase # known by the name service. # @note The namespace is always "Local" def initialize( - discovery_executor: Concurrent::CachedThreadPool.new(max_length: 2), + discovery_executor: + Concurrent::ThreadPoolExecutor.new(max_threads: 10), port_read_manager: PortReadManager.new ) super() diff --git a/lib/syskit/telemetry/async/port_read_manager.rb b/lib/syskit/telemetry/async/port_read_manager.rb index 09c0be082..2b8f25d02 100644 --- a/lib/syskit/telemetry/async/port_read_manager.rb +++ b/lib/syskit/telemetry/async/port_read_manager.rb @@ -26,21 +26,21 @@ def initialize( def self.default_connection_executor @default_connection_executor ||= - Concurrent::CachedThreadPool.new( + Concurrent::ThreadPoolExecutor.new( max_threads: CONNECTION_DEFAULT_THREADS ) end def self.default_disconnection_executor @default_disconnection_executor ||= - Concurrent::CachedThreadPool.new( + Concurrent::ThreadPoolExecutor.new( max_threads: DISCONNECTION_DEFAULT_THREADS ) end def self.default_read_executor @default_read_executor ||= - Concurrent::CachedThreadPool.new( + Concurrent::ThreadPoolExecutor.new( max_threads: READ_DEFAULT_THREADS ) end @@ -94,7 +94,7 @@ def to_s(relative_to: PortReadManager.monotonic_time) end def schedule_read_if_needed(now, executor) - return if self.next_time && self.next_time > now + return if next_time && next_time > now self.read_future = reader.raw_read_new(executor) end From 1d07885d2781492b5d20981b6893cffa2e982200 Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 30 Dec 2024 16:48:27 -0300 Subject: [PATCH 086/158] chore: make OutputPortSubfield appear as a vizkit-compatible output port --- lib/syskit/telemetry/async/output_port_subfield.rb | 4 ++++ lib/syskit/telemetry/ui/runtime_state.rb | 1 + 2 files changed, 5 insertions(+) diff --git a/lib/syskit/telemetry/async/output_port_subfield.rb 
b/lib/syskit/telemetry/async/output_port_subfield.rb index b4d495cbb..336a24512 100644 --- a/lib/syskit/telemetry/async/output_port_subfield.rb +++ b/lib/syskit/telemetry/async/output_port_subfield.rb @@ -109,6 +109,10 @@ def compute_subname(path) end end end + + def type? + true + end end end end diff --git a/lib/syskit/telemetry/ui/runtime_state.rb b/lib/syskit/telemetry/ui/runtime_state.rb index 7bfac7552..09300882a 100644 --- a/lib/syskit/telemetry/ui/runtime_state.rb +++ b/lib/syskit/telemetry/ui/runtime_state.rb @@ -20,6 +20,7 @@ # Monkey patching from Vizkit Syskit::Telemetry::Async::OutputPort.include Orocos::QtOrocos +Syskit::Telemetry::Async::OutputPortSubfield.include Orocos::QtOrocos module Syskit module Telemetry From de8ddee71371b8a5608da91a7492d51691854c1b Mon Sep 17 00:00:00 2001 From: Sylvain Joyeux Date: Mon, 30 Dec 2024 16:50:08 -0300 Subject: [PATCH 087/158] chore: initialize new callbacks with the last read value This is expected by the current Vizkit implementation --- .../telemetry/async/port_read_manager.rb | 52 ++++++++++++++----- 1 file changed, 38 insertions(+), 14 deletions(-) diff --git a/lib/syskit/telemetry/async/port_read_manager.rb b/lib/syskit/telemetry/async/port_read_manager.rb index 2b8f25d02..f07a46eee 100644 --- a/lib/syskit/telemetry/async/port_read_manager.rb +++ b/lib/syskit/telemetry/async/port_read_manager.rb @@ -46,9 +46,11 @@ def self.default_read_executor end Callback = Struct.new( - :port, :callback, :period, :buffer_size, :init, keyword_init: true + :port, :callback, :period, :buffer_size, + :init, :needs_last_received_value, keyword_init: true ) do def dispatch(value) + self.needs_last_received_value = false callback.call(value) end end @@ -134,11 +136,13 @@ def policy def register_callback(port, callback, period:, buffer_size:, init: false) callback = Callback.new( port: port, callback: callback, - period: period, buffer_size: buffer_size, init: init + period: period, buffer_size: buffer_size, init: init, + 
needs_last_received_value: true ) (@callbacks[port] ||= []) << callback ensure_reader_uptodate(port) + propagate_last_received_value(port) Roby.disposable do deregister_callback(port, callback) end @@ -225,14 +229,29 @@ def poll @pollers.each_value do |p| p.poll - if !p.connected? - p.reset_read_tracking - elsif !p.scheduled_read? - p.schedule_read_if_needed(now, @read_executor) - elsif p.resolved_read? - dispatch_read_result(p) - p.prepare_next_read(now) - end + process_poller_state(p, now) + end + end + + # @api private + # + # Helper for {#poll} to process a single poller + def process_poller_state(poller, now) + unless poller.connected? + poller.reset_read_tracking + return + end + + if poller.propagate_last_received_value && poller.last_value && + !poller.resolved_read? + dispatch_last_received_value(poller) + end + + if !poller.scheduled_read? + poller.schedule_read_if_needed(now, @read_executor) + elsif poller.resolved_read? + dispatch_read_result(poller) + poller.prepare_next_read(now) end end @@ -250,17 +269,22 @@ def self.monotonic_time def dispatch_read_result(poller) fulfilled, value, reason = poller.result if fulfilled - if poller.propagate_last_received_value - value ||= poller.last_value - poller.propagate_last_received_value = false - end @callbacks[poller.port].each { |c| c.dispatch(value) } poller.last_value = value + poller.propagate_last_received_value = false else warn "failed to read #{poller.port}: #{reason}" end end + # Send last received value to the callbacks that require it + def dispatch_last_received_value(poller) + @callbacks[poller.port].each do |c| + c.dispatch(poller.last_value) + end + poller.propagate_last_received_value = false + end + # Return the buffer size needed by all callbacks of a port, in aggregate # # @return [Integer] From 19ba3bd9bed8c5d89e5c0c13abc035efdab6d17f Mon Sep 17 00:00:00 2001 From: kapeps Date: Wed, 29 Jan 2025 09:57:58 -0300 Subject: [PATCH 088/158] fix: unit test flexmock not mocking full result --- 
test/cli/test_log_runtime_archive.rb | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 8c13c7557..e70d33ec2 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -629,9 +629,12 @@ def create_server(params) end it "does not remove the source file if the transfer failed" do + result = RobyApp::LogTransferServer::LogUploadState::Result.new( + "/PATH", false, "message" + ) flexmock(LogRuntimeArchive) .should_receive(:transfer_file) - .and_return(flexmock(success?: false)) + .and_return(result) results = LogRuntimeArchive.transfer_dataset( @dataset, @params, @root, full: true ) From 8426192e4f7b85a5369bd8a43751a5f917a13569 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Thu, 5 Dec 2024 13:56:20 -0300 Subject: [PATCH 089/158] chore: copy plan marks when applying merge group This became a necessity as we pretend to do early deployments at network generation. In this scenario, when a "free" task context is merged into its deployed counter part its plan marks is not carried out to the deployment task instance. Then, this task instance would be garbage collected because it did not carried the permanent mark from the replace "free" task context --- lib/syskit/network_generation/merge_solver.rb | 1 + test/network_generation/test_merge_solver.rb | 25 +++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/lib/syskit/network_generation/merge_solver.rb b/lib/syskit/network_generation/merge_solver.rb index 5040f4034..bc14a26e3 100644 --- a/lib/syskit/network_generation/merge_solver.rb +++ b/lib/syskit/network_generation/merge_solver.rb @@ -138,6 +138,7 @@ def apply_merge_group(merged_task_to_task) merged_task_to_task.each do |merged_task, task| unless merged_task.transaction_proxy? 
+ plan.copy_task_marks(from: merged_task, to: task) plan.remove_task(merged_task) end register_replacement(merged_task, task) diff --git a/test/network_generation/test_merge_solver.rb b/test/network_generation/test_merge_solver.rb index f0414eb29..e97a8f8b0 100644 --- a/test/network_generation/test_merge_solver.rb +++ b/test/network_generation/test_merge_solver.rb @@ -358,6 +358,31 @@ def mock_merged_task_with_concrete_input_connections(*connections) end end + describe "#apply_merge_group" do + attr_reader :local_plan, :solver + + before do + @local_plan = Roby::Plan.new + @solver = Syskit::NetworkGeneration::MergeSolver.new(@local_plan) + end + + it "applyes merged task plan marks to the destination task" do + task1 = Roby::Task.new + task2 = Roby::Task.new + + local_plan.add_permanent_task task1 + local_plan.add_mission_task task1 + + refute local_plan.permanent_task? task2 + refute local_plan.mission_task? task2 + + solver.apply_merge_group({ task1 => task2 }) + + assert local_plan.permanent_task? task2 + assert local_plan.mission_task? 
task2 + end + end + describe "functional tests" do describe "merging compositions" do attr_reader :plan, :srv_m, :task_m, :cmp_m From 53dbabad6894bbe6fd8dc1d483b437bac3990f08 Mon Sep 17 00:00:00 2001 From: kapeps Date: Mon, 27 Jan 2025 14:30:11 -0300 Subject: [PATCH 090/158] feat: generalize ensure free space to be able to take any directory as input --- lib/syskit/cli/log_runtime_archive.rb | 8 +++++--- test/cli/test_log_runtime_archive.rb | 28 +++++++++++++++++++-------- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index b99afad7d..44b664968 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -82,20 +82,22 @@ def process_root_folder # bytes, at which the archiver starts deleting the oldest log files # @param [integer] free_space_delete_until: post-deletion free space in bytes, # at which the archiver stops deleting the oldest log files - def ensure_free_space(free_space_low_limit, free_space_delete_until) + def ensure_free_space( + free_space_low_limit, free_space_delete_until, directory: @target_dir + ) if free_space_low_limit > free_space_delete_until raise ArgumentError, "cannot erase files: freed limit is smaller than " \ "low limit space." end - stat = Sys::Filesystem.stat(@target_dir) + stat = Sys::Filesystem.stat(directory) available_space = stat.bytes_available return if available_space > free_space_low_limit until available_space >= free_space_delete_until - files = @target_dir.each_child.select(&:file?) + files = directory.each_child.select(&:file?) if files.empty? Roby.warn "Cannot erase files: the folder is empty but the " \ "available space is smaller than the threshold." 
diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 8c13c7557..9f4aaaea4 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -680,6 +680,16 @@ def create_server(params) assert_deleted_files([0, 1, 2, 3]) end + it "removes enough files to reach the freed limit in chosen directory" do + different_dir = make_tmppath + size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] + mock_files_size(size_files, directory: different_dir) + mock_available_space(0.5, directory: different_dir) + + @archiver.ensure_free_space(1, 10, directory: different_dir) + assert_deleted_files([0, 1, 2, 3], directory: different_dir) + end + it "stops removing files when there is no file in folder even if freed limit is not achieved" do size_files = Array.new(10, 1) @@ -690,16 +700,18 @@ def create_server(params) assert_deleted_files([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) end - def mock_files_size(sizes) + def mock_files_size(sizes, directory: @archive_dir) @mocked_files_sizes = sizes @mocked_files_sizes.each_with_index do |size, i| - (@archive_dir / i.to_s).write(" " * size) + (directory / i.to_s).write(" " * size) end end - def mock_available_space(total_available_disk_space) + def mock_available_space( + total_available_disk_space, directory: @archive_dir + ) flexmock(Sys::Filesystem) - .should_receive(:stat).with(@archive_dir) + .should_receive(:stat).with(directory) .and_return do flexmock( bytes_available: total_available_disk_space @@ -707,17 +719,17 @@ def mock_available_space(total_available_disk_space) end end - def assert_deleted_files(deleted_files) + def assert_deleted_files(deleted_files, directory: @archive_dir) if deleted_files.empty? - files = @archive_dir.each_child.select(&:file?) + files = directory.each_child.select(&:file?) 
assert_equal 10, files.size else (0..9).each do |i| if deleted_files.include?(i) - refute (@archive_dir / i.to_s).exist?, + refute (directory / i.to_s).exist?, "#{i} was expected to be deleted, but has not been" else - assert (@archive_dir / i.to_s).exist?, + assert (directory / i.to_s).exist?, "#{i} was expected to be present, but got deleted" end end From 60ac4d1adda8dc97bf362089bfe085b982307eb5 Mon Sep 17 00:00:00 2001 From: kapeps Date: Mon, 27 Jan 2025 15:32:03 -0300 Subject: [PATCH 091/158] feat: ensure_free_space and watch cli commands --- lib/syskit/cli/log_runtime_archive_main.rb | 43 ++++++++++++ test/cli/test_log_runtime_archive_main.rb | 81 +++++++++++++++++++--- 2 files changed, 116 insertions(+), 8 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index fbaabd107..5bd2459ad 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -114,6 +114,49 @@ def transfer_server( # rubocop:disable Metrics/ParameterLists implicit_ftps) end + desc "watch_ensure_free_space", "watches the ensure free space process" + option :period, + type: :numeric, default: 10, desc: "polling period in seconds" + option :max_size, + type: :numeric, default: 10_000, desc: "max log size in MB" + option :free_space_low_limit, + type: :numeric, default: 5_000, desc: "start deleting files if \ + available space is below this threshold (threshold in MB)" + option :free_space_freed_limit, + type: :numeric, default: 25_000, desc: "stop deleting files if \ + available space is above this threshold (threshold in MB)" + def watch_ensure_free_space(source_dir) + loop do + ensure_free_space(source_dir) + + puts "Ensured free space in #{source_dir}, " \ + "sleeping #{options[:period]}s" + sleep options[:period] + end + end + + desc "ensure_free_space", "ensures there is free space, if not, start \ + deleting files" + option :max_size, + type: :numeric, default: 10_000, desc: "max 
log size in MB" + option :free_space_low_limit, + type: :numeric, default: 5_000, desc: "start deleting files if \ + available space is below this threshold (threshold in MB)" + option :free_space_freed_limit, + type: :numeric, default: 25_000, desc: "stop deleting files if \ + available space is above this threshold (threshold in MB)" + def ensure_free_space(source_dir) + source_dir = validate_directory_exists(source_dir) + + archiver = make_archiver(source_dir) + + archiver.ensure_free_space( + options[:free_space_low_limit] * 1_000_000, + options[:free_space_freed_limit] * 1_000_000, + directory: source_dir + ) + end + no_commands do # rubocop:disable Metrics/BlockLength # Converts rate in Mbps to bps def rate_mbps_to_bps(rate_mbps) diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index a914eda3c..f5dde3852 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -227,6 +227,71 @@ def call_transfer(source_dir) end end + describe "#ensure_free_space" do + before do + @directory = make_tmppath + @mocked_files_sizes = [] + + 10.times { |i| (@directory / i.to_s).write(i.to_s) } + + @archiver = LogRuntimeArchive.new(@directory) + end + + it "removes enough files to reach the freed limit" do + size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] + mock_files_size(size_files, directory: @directory) + mock_available_space(0.5, directory: @directory) + + call_ensure_free_space(@directory, 1, 10) + assert_deleted_files([0, 1, 2, 3], directory: @directory) + end + + def call_ensure_free_space(source_dir, low_limit, freed_limit) + pp "source_dir #{source_dir}" + args = [ + "ensure_free_space", + source_dir, + "--free-space-low-limit", low_limit, + "--free-space-freed-limit", freed_limit + ] + LogRuntimeArchiveMain.start(args) + end + end + + describe "#watch_ensure_free_space" do + before do + @directory = make_tmppath + + @mocked_files_sizes = [] + 5.times { |i| (@directory / 
i.to_s).write(i.to_s) } + end + + it "calls ensure free space with the specified period" do + mock_files_size([], directory: @directory) + mock_available_space(200, directory: @directory) # 70 MB + + quit = Class.new(RuntimeError) + called = 0 + flexmock(LogRuntimeArchive) + .new_instances + .should_receive(:ensure_free_space) + .pass_thru do + called += 1 + raise quit if called == 3 + end + + tic = Time.now + assert_raises(quit) do + LogRuntimeArchiveMain.start( + ["watch_ensure_free_space", @directory, "--period", 0.5] + ) + end + + assert called == 3 + assert_operator(Time.now - tic, :>, 0.9) + end + end + def call_create_server(tgt_dir, server_params) cli = LogRuntimeArchiveMain.new cli.create_server(tgt_dir, *server_params.values) @@ -244,18 +309,18 @@ def server_params # Mock files sizes in bytes # @param [Array] size of files in MB - def mock_files_size(sizes) + def mock_files_size(sizes, directory: @archive_dir) @mocked_files_sizes = sizes @mocked_files_sizes.each_with_index do |size, i| - (@archive_dir / i.to_s).write(" " * size * 1e6) + (directory / i.to_s).write(" " * size * 1e6) end end # Mock total disk available space in bytes # @param [Float] total_available_disk_space total available space in MB - def mock_available_space(total_available_disk_space) + def mock_available_space(total_available_disk_space, directory: @archive_dir) flexmock(Sys::Filesystem) - .should_receive(:stat).with(@archive_dir) + .should_receive(:stat).with(directory) .and_return do flexmock( bytes_available: total_available_disk_space * 1e6 @@ -263,17 +328,17 @@ def mock_available_space(total_available_disk_space) end end - def assert_deleted_files(deleted_files) + def assert_deleted_files(deleted_files, directory: @archive_dir) if deleted_files.empty? - files = @archive_dir.each_child.select(&:file?) + files = directory.each_child.select(&:file?) 
assert_equal 5, files.size else (0..4).each do |i| if deleted_files.include?(i) - refute (@archive_dir / i.to_s).exist?, + refute (directory / i.to_s).exist?, "#{i} was expected to be deleted, but has not been" else - assert (@archive_dir / i.to_s).exist?, + assert (directory / i.to_s).exist?, "#{i} was expected to be present, but got deleted" end end From 79cf599809d106b7ac5b4023c508bc3904a3401a Mon Sep 17 00:00:00 2001 From: kapeps Date: Wed, 29 Jan 2025 12:08:25 -0300 Subject: [PATCH 092/158] feat: chose directories and files based on modification time when ensuring free space --- lib/syskit/cli/log_runtime_archive.rb | 2 +- lib/syskit/cli/log_runtime_archive_main.rb | 12 +++-- test/cli/test_log_runtime_archive.rb | 31 ++++++++++++ test/cli/test_log_runtime_archive_main.rb | 56 ++++++++++++++++++++-- 4 files changed, 90 insertions(+), 11 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 44b664968..acf44da74 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -104,7 +104,7 @@ def ensure_free_space( break end - removed_file = files.min + removed_file = files.min_by(&:mtime) size_removed_file = removed_file.size removed_file.unlink available_space += size_removed_file diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index 5bd2459ad..a230e687e 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -150,11 +150,13 @@ def ensure_free_space(source_dir) archiver = make_archiver(source_dir) - archiver.ensure_free_space( - options[:free_space_low_limit] * 1_000_000, - options[:free_space_freed_limit] * 1_000_000, - directory: source_dir - ) + source_dir.children.select(&:directory?).sort_by(&:mtime).each do |child| + archiver.ensure_free_space( + options[:free_space_low_limit] * 1_000_000, + options[:free_space_freed_limit] * 1_000_000, + directory: (source_dir / 
child) + ) + end end no_commands do # rubocop:disable Metrics/BlockLength diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 9f4aaaea4..6f02be786 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -667,6 +667,7 @@ def create_server(params) it "does nothing if there is enough free space" do mock_available_space(2) + mock_mtime @archiver.ensure_free_space(1, 10) assert_deleted_files([]) end @@ -675,6 +676,7 @@ def create_server(params) size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] mock_files_size(size_files) mock_available_space(0.5) + mock_mtime @archiver.ensure_free_space(1, 10) assert_deleted_files([0, 1, 2, 3]) @@ -685,16 +687,29 @@ def create_server(params) size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] mock_files_size(size_files, directory: different_dir) mock_available_space(0.5, directory: different_dir) + mock_mtime(directory: different_dir) @archiver.ensure_free_space(1, 10, directory: different_dir) assert_deleted_files([0, 1, 2, 3], directory: different_dir) end + it "removes files based on modified timestamp" do + different_dir = make_tmppath + size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] + mock_files_size(size_files, directory: different_dir) + mock_available_space(0.5, directory: different_dir) + mock_mtime(directory: different_dir, reverse_alphabetical: true) + + @archiver.ensure_free_space(1, 10, directory: different_dir) + assert_deleted_files([8, 9, 10], directory: different_dir) + end + it "stops removing files when there is no file in folder even if freed limit is not achieved" do size_files = Array.new(10, 1) mock_files_size(size_files) mock_available_space(0.5) + mock_mtime @archiver.ensure_free_space(1, 15) assert_deleted_files([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -707,6 +722,22 @@ def mock_files_size(sizes, directory: @archive_dir) end end + # Mock the modification time of the files to be alphabetical order + # @param [String] directory the directory to mock 
the items modification + # time + # @param [Bool] reverse_alphabetical true if use reverse alphabetical + # order + def mock_mtime(directory: @archive_dir, reverse_alphabetical: false) + items = directory.children + .select { |child| child.file? || child.directory? } + + items = items.sort_by(&:to_s) + items = items.reverse if reverse_alphabetical + items.each_with_index do |item, i| + File.utime(i, i, item.to_s) + end + end + def mock_available_space( total_available_disk_space, directory: @archive_dir ) diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index f5dde3852..df9090197 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -230,24 +230,56 @@ def call_transfer(source_dir) describe "#ensure_free_space" do before do @directory = make_tmppath + @sub_directory = Pathname.new(@directory / "subdir") + @sub_directory2 = Pathname.new(@directory / "subdir_2") + @sub_directory.mkdir unless @sub_directory.exist? + @sub_directory2.mkdir unless @sub_directory2.exist? 
@mocked_files_sizes = [] - 10.times { |i| (@directory / i.to_s).write(i.to_s) } + 10.times { |i| (@sub_directory / i.to_s).write(i.to_s) } + 10.times { |i| (@sub_directory2 / i.to_s).write(i.to_s) } @archiver = LogRuntimeArchive.new(@directory) end it "removes enough files to reach the freed limit" do size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] - mock_files_size(size_files, directory: @directory) - mock_available_space(0.5, directory: @directory) + mock_files_size(size_files, directory: @sub_directory) + mock_files_size(size_files, directory: @sub_directory2) + mock_available_space(0.5, directory: @sub_directory) + mock_available_space(0.5, directory: @sub_directory2) + mock_mtime(directory: @sub_directory) + mock_mtime(directory: @sub_directory2) call_ensure_free_space(@directory, 1, 10) - assert_deleted_files([0, 1, 2, 3], directory: @directory) + assert_deleted_files([0, 1, 2, 3], directory: @sub_directory) + assert_deleted_files([0, 1, 2, 3], directory: @sub_directory2) + end + + it "removes from directories based on modification time" do + size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] + mock_files_size(size_files, directory: @sub_directory) + mock_files_size(size_files, directory: @sub_directory2) + mock_available_space(0.5, directory: @sub_directory) + mock_available_space(0.5, directory: @sub_directory2) + mock_mtime(directory: @directory, reverse_alphabetical: true) + + flexmock_m = flexmock(LogRuntimeArchive) + + flexmock_m + .new_instances + .should_receive(:ensure_free_space) + .with(1_000_000, 10_000_000, directory: @sub_directory2) + .ordered + flexmock_m + .new_instances + .should_receive(:ensure_free_space) + .with(1_000_000, 10_000_000, directory: @sub_directory) + .ordered + call_ensure_free_space(@directory, 1, 10) end def call_ensure_free_space(source_dir, low_limit, freed_limit) - pp "source_dir #{source_dir}" args = [ "ensure_free_space", source_dir, @@ -328,6 +360,20 @@ def mock_available_space(total_available_disk_space, directory: 
@archive_dir) end end + # Mock the modification time of the files to be alphabetical order + # @param [String] directory the directory to mock the items modification time + # @param [Bool] reverse_alphabetical true if use reverse alphabetical order + def mock_mtime(directory: @archive_dir, reverse_alphabetical: false) + items = directory.children + .select { |child| child.file? || child.directory? } + + items = items.sort_by(&:to_s) + items = items.reverse if reverse_alphabetical + items.each_with_index do |item, i| + File.utime(i, i, item.to_s) + end + end + def assert_deleted_files(deleted_files, directory: @archive_dir) if deleted_files.empty? files = directory.each_child.select(&:file?) From bda965555300198821a01b072a71949e90a92fd0 Mon Sep 17 00:00:00 2001 From: kapeps Date: Wed, 29 Jan 2025 15:44:05 -0300 Subject: [PATCH 093/158] chore: break ensure free space if desired free space was reached --- lib/syskit/cli/log_runtime_archive.rb | 9 ++++-- lib/syskit/cli/log_runtime_archive_main.rb | 2 +- test/cli/test_log_runtime_archive.rb | 10 +++--- test/cli/test_log_runtime_archive_main.rb | 36 ++++++++++------------ 4 files changed, 30 insertions(+), 27 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index acf44da74..91624fb11 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -82,6 +82,10 @@ def process_root_folder # bytes, at which the archiver starts deleting the oldest log files # @param [integer] free_space_delete_until: post-deletion free space in bytes, # at which the archiver stops deleting the oldest log files + # + # @return [Boolean] true if successfully ensured free space, meaning there is + # the required free space, false if deleting the files in this directory was + # not enough to free up the required space def ensure_free_space( free_space_low_limit, free_space_delete_until, directory: @target_dir ) @@ -94,14 +98,14 @@ def ensure_free_space( stat 
= Sys::Filesystem.stat(directory) available_space = stat.bytes_available - return if available_space > free_space_low_limit + return true if available_space > free_space_low_limit until available_space >= free_space_delete_until files = directory.each_child.select(&:file?) if files.empty? Roby.warn "Cannot erase files: the folder is empty but the " \ "available space is smaller than the threshold." - break + return false end removed_file = files.min_by(&:mtime) @@ -109,6 +113,7 @@ def ensure_free_space( removed_file.unlink available_space += size_removed_file end + true end def process_dataset(child, full:) diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index a230e687e..c0e6e64f8 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -151,7 +151,7 @@ def ensure_free_space(source_dir) archiver = make_archiver(source_dir) source_dir.children.select(&:directory?).sort_by(&:mtime).each do |child| - archiver.ensure_free_space( + break if archiver.ensure_free_space( options[:free_space_low_limit] * 1_000_000, options[:free_space_freed_limit] * 1_000_000, directory: (source_dir / child) diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index 6f02be786..d9b8e766e 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -668,7 +668,7 @@ def create_server(params) it "does nothing if there is enough free space" do mock_available_space(2) mock_mtime - @archiver.ensure_free_space(1, 10) + assert @archiver.ensure_free_space(1, 10) assert_deleted_files([]) end @@ -678,7 +678,7 @@ def create_server(params) mock_available_space(0.5) mock_mtime - @archiver.ensure_free_space(1, 10) + assert @archiver.ensure_free_space(1, 10) assert_deleted_files([0, 1, 2, 3]) end @@ -689,7 +689,7 @@ def create_server(params) mock_available_space(0.5, directory: different_dir) mock_mtime(directory: different_dir) - 
@archiver.ensure_free_space(1, 10, directory: different_dir) + assert @archiver.ensure_free_space(1, 10, directory: different_dir) assert_deleted_files([0, 1, 2, 3], directory: different_dir) end @@ -700,7 +700,7 @@ def create_server(params) mock_available_space(0.5, directory: different_dir) mock_mtime(directory: different_dir, reverse_alphabetical: true) - @archiver.ensure_free_space(1, 10, directory: different_dir) + assert @archiver.ensure_free_space(1, 10, directory: different_dir) assert_deleted_files([8, 9, 10], directory: different_dir) end @@ -711,7 +711,7 @@ def create_server(params) mock_available_space(0.5) mock_mtime - @archiver.ensure_free_space(1, 15) + refute @archiver.ensure_free_space(1, 15) assert_deleted_files([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) end diff --git a/test/cli/test_log_runtime_archive_main.rb b/test/cli/test_log_runtime_archive_main.rb index df9090197..6d56950cf 100644 --- a/test/cli/test_log_runtime_archive_main.rb +++ b/test/cli/test_log_runtime_archive_main.rb @@ -246,13 +246,16 @@ def call_transfer(source_dir) size_files = [6, 2, 1, 6, 7, 10, 3, 5, 8, 9] mock_files_size(size_files, directory: @sub_directory) mock_files_size(size_files, directory: @sub_directory2) - mock_available_space(0.5, directory: @sub_directory) - mock_available_space(0.5, directory: @sub_directory2) + mock_available_space(0, directory: @sub_directory) + mock_available_space(100.5, directory: @sub_directory2) mock_mtime(directory: @sub_directory) mock_mtime(directory: @sub_directory2) + mock_mtime(directory: @directory) - call_ensure_free_space(@directory, 1, 10) - assert_deleted_files([0, 1, 2, 3], directory: @sub_directory) + call_ensure_free_space(@directory, 101, 110) + assert_deleted_files( + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], directory: @sub_directory + ) assert_deleted_files([0, 1, 2, 3], directory: @sub_directory2) end @@ -262,21 +265,14 @@ def call_transfer(source_dir) mock_files_size(size_files, directory: @sub_directory2) mock_available_space(0.5, 
directory: @sub_directory) mock_available_space(0.5, directory: @sub_directory2) + mock_mtime(directory: @sub_directory) + mock_mtime(directory: @sub_directory2) mock_mtime(directory: @directory, reverse_alphabetical: true) - flexmock_m = flexmock(LogRuntimeArchive) - - flexmock_m - .new_instances - .should_receive(:ensure_free_space) - .with(1_000_000, 10_000_000, directory: @sub_directory2) - .ordered - flexmock_m - .new_instances - .should_receive(:ensure_free_space) - .with(1_000_000, 10_000_000, directory: @sub_directory) - .ordered call_ensure_free_space(@directory, 1, 10) + assert_deleted_files([0, 1, 2, 3], directory: @sub_directory2) + # Does not delete any file from newest directory + assert_equal 10, @sub_directory.each_child.select(&:file?).size end def call_ensure_free_space(source_dir, low_limit, freed_limit) @@ -293,14 +289,16 @@ def call_ensure_free_space(source_dir, low_limit, freed_limit) describe "#watch_ensure_free_space" do before do @directory = make_tmppath + @sub_directory = Pathname.new(@directory / "subdir") + @sub_directory.mkdir unless @sub_directory.exist? 
@mocked_files_sizes = [] - 5.times { |i| (@directory / i.to_s).write(i.to_s) } + 5.times { |i| (@sub_directory / i.to_s).write(i.to_s) } end it "calls ensure free space with the specified period" do - mock_files_size([], directory: @directory) - mock_available_space(200, directory: @directory) # 70 MB + mock_files_size([], directory: @sub_directory) + mock_available_space(200, directory: @sub_directory) # 70 MB quit = Class.new(RuntimeError) called = 0 From ce08879493ac1ecbb863e301404dd14c19134550 Mon Sep 17 00:00:00 2001 From: kapeps Date: Thu, 30 Jan 2025 10:01:18 -0300 Subject: [PATCH 094/158] fix: pass max archive size as argument instead of class initialization param in logRuntimeArchive --- lib/syskit/cli/log_runtime_archive.rb | 18 +++++++++--------- lib/syskit/cli/log_runtime_archive_main.rb | 15 ++++----------- test/cli/test_log_runtime_archive.rb | 16 ++++++++-------- 3 files changed, 21 insertions(+), 28 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 91624fb11..76dbcf4ae 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -29,17 +29,14 @@ class LogRuntimeArchive # should be nil in transfer mode, as the logs will be transferred directly # to the ftp server @see process_root_folder_transfer # @param [Logger] logger the log structure - # @param [Integer] max_archive_size the max size of the archive def initialize( root_dir, target_dir: nil, - logger: LogRuntimeArchive.null_logger, - max_archive_size: DEFAULT_MAX_ARCHIVE_SIZE + logger: LogRuntimeArchive.null_logger ) @last_archive_index = {} @logger = logger @root_dir = root_dir @target_dir = target_dir - @max_archive_size = max_archive_size end # Iterate over all datasets in a Roby log root folder and transfer them @@ -65,11 +62,14 @@ def process_root_folder_transfer(server_params) # @param [Pathname] root_dir the log root folder # @param [Pathname] target_dir the folder in which to save the # 
archived datasets - def process_root_folder + # @param [Integer] max_archive_size the max size of the archive + def process_root_folder(max_archive_size: DEFAULT_MAX_ARCHIVE_SIZE) candidates = self.class.find_all_dataset_folders(@root_dir) running = candidates.last candidates.each do |child| - process_dataset(child, full: child != running) + process_dataset( + child, max_archive_size: max_archive_size, full: child != running + ) end end @@ -116,13 +116,13 @@ def ensure_free_space( true end - def process_dataset(child, full:) + def process_dataset(child, max_archive_size: DEFAULT_MAX_ARCHIVE_SIZE, full:) use_existing = true loop do open_archive_for( child.basename.to_s, use_existing: use_existing ) do |io| - if io.tell > @max_archive_size + if io.tell > max_archive_size use_existing = false break end @@ -130,7 +130,7 @@ def process_dataset(child, full:) dataset_complete = self.class.archive_dataset( io, child, logger: @logger, full: full, - max_size: @max_archive_size + max_size: max_archive_size ) return if dataset_complete end diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index c0e6e64f8..1f6aaddc4 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -58,15 +58,15 @@ def archive(root_dir, target_dir) options[:free_space_low_limit] * 1_000_000, options[:free_space_freed_limit] * 1_000_000 ) - archiver.process_root_folder + archiver.process_root_folder( + max_archive_size: options[:max_size] * (1024**2) + ) end desc "watch_transfer", "watches a dataset root folder \ and periodically performs transfer" option :period, type: :numeric, default: 600, desc: "polling period in seconds" - option :max_size, - type: :numeric, default: 10_000, desc: "max log size in MB" option :max_upload_rate_mbps, type: :numeric, default: 10, desc: "max upload rate in Mbps" def watch_transfer( # rubocop:disable Metrics/ParameterLists @@ -86,8 +86,6 @@ def watch_transfer( # 
rubocop:disable Metrics/ParameterLists end desc "transfer", "transfers the datasets" - option :max_size, - type: :numeric, default: 10_000, desc: "max log size in MB" option :max_upload_rate_mbps, type: :numeric, default: 10, desc: "max upload rate in Mbps" def transfer( # rubocop:disable Metrics/ParameterLists @@ -117,8 +115,6 @@ def transfer_server( # rubocop:disable Metrics/ParameterLists desc "watch_ensure_free_space", "watches the ensure free space process" option :period, type: :numeric, default: 10, desc: "polling period in seconds" - option :max_size, - type: :numeric, default: 10_000, desc: "max log size in MB" option :free_space_low_limit, type: :numeric, default: 5_000, desc: "start deleting files if \ available space is below this threshold (threshold in MB)" @@ -137,8 +133,6 @@ def watch_ensure_free_space(source_dir) desc "ensure_free_space", "ensures there is free space, if not, start \ deleting files" - option :max_size, - type: :numeric, default: 10_000, desc: "max log size in MB" option :free_space_low_limit, type: :numeric, default: 5_000, desc: "start deleting files if \ available space is below this threshold (threshold in MB)" @@ -180,8 +174,7 @@ def make_archiver(root_dir, target_dir: nil) Syskit::CLI::LogRuntimeArchive.new( root_dir, - target_dir: target_dir, logger: logger, - max_archive_size: options[:max_size] * (1024**2) + target_dir: target_dir, logger: logger ) end diff --git a/test/cli/test_log_runtime_archive.rb b/test/cli/test_log_runtime_archive.rb index d9b8e766e..581ddffad 100644 --- a/test/cli/test_log_runtime_archive.rb +++ b/test/cli/test_log_runtime_archive.rb @@ -392,9 +392,9 @@ module CLI .write(test1 = Base64.encode64(Random.bytes(1024))) (dataset / "test.2.log").write(Base64.encode64(Random.bytes(1024))) process = LogRuntimeArchive.new( - @root, target_dir: @archive_dir, max_archive_size: 1024 + @root, target_dir: @archive_dir ) - process.process_root_folder + process.process_root_folder(max_archive_size: 1024) entries = 
read_archive(path: @archive_dir / "20220434-2023.0.tar") assert_equal 1, entries.size @@ -420,12 +420,12 @@ module CLI (dataset / "test.2.log") .write(test2 = Base64.encode64(Random.bytes(128))) process = LogRuntimeArchive.new( - @root, target_dir: @archive_dir, max_archive_size: 1024 + @root, target_dir: @archive_dir ) - process.process_root_folder + process.process_root_folder(max_archive_size: 1024) (dataset / "test.3.log").write(Base64.encode64(Random.bytes(1024))) - process.process_root_folder + process.process_root_folder(max_archive_size: 1024) entries = read_archive(path: @archive_dir / "20220434-2023.1.tar") assert_equal 2, entries.size @@ -446,12 +446,12 @@ module CLI test1 = make_random_file "test.1.log", root: dataset test2 = make_random_file "test.2.log", root: dataset process = LogRuntimeArchive.new( - @root, target_dir: @archive_dir, max_archive_size: 1024 + @root, target_dir: @archive_dir ) - process.process_root_folder + process.process_root_folder(max_archive_size: 1024) make_random_file "test.3.log", root: dataset - process.process_root_folder + process.process_root_folder(max_archive_size: 1024) entries = read_archive(path: @archive_dir / "20220434-2023.1.tar") assert_equal 1, entries.size From 70220e076e99a792f868931571a2bef798222394 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Thu, 12 Dec 2024 13:34:23 -0300 Subject: [PATCH 095/158] feat: allow early deployment at network generation --- lib/syskit/exceptions.rb | 33 +++++++ lib/syskit/network_generation/merge_solver.rb | 3 +- .../system_network_deployer.rb | 16 ++-- .../system_network_generator.rb | 78 ++++++++++++++--- .../test_system_network_generator.rb | 85 +++++++++++++++++++ 5 files changed, 194 insertions(+), 21 deletions(-) diff --git a/lib/syskit/exceptions.rb b/lib/syskit/exceptions.rb index 25085e706..d1400ceee 100644 --- a/lib/syskit/exceptions.rb +++ b/lib/syskit/exceptions.rb @@ -483,6 +483,39 @@ def pretty_print(pp) end end + class ConflictingDeploymentAllocation < 
SpecError + attr_reader :deployment_to_tasks + + def initialize(deployment_to_tasks) + @deployment_to_tasks = deployment_to_tasks + end + + def pretty_print(pp) + pp.text "cannot deploy the following tasks" + deployment_to_tasks.each do |deployed_task, tasks| + tasks.each do |task| + pp.nest(2) do + pp.breakable + pp.text "#{task} (#{task.orogen_model.name})" + end + end + pp.breakable + pp.text "because the same " + process_server_name = deployed_task.configured_deployment + .process_server_name + orogen_model = deployed_task.configured_deployment + .orogen_model + pp.text( + "deployed task #{deployed_task.mapped_task_name} from deployment " \ + "#{orogen_model.name} defined in " \ + "#{orogen_model.project.name} on #{process_server_name}" + ) + + pp.text " is allocated for them" + end + end + end + # Exception raised at the end of #resolve if some tasks do not have a # deployed equivalent class MissingDeployments < SpecError diff --git a/lib/syskit/network_generation/merge_solver.rb b/lib/syskit/network_generation/merge_solver.rb index bc14a26e3..6060f3bad 100644 --- a/lib/syskit/network_generation/merge_solver.rb +++ b/lib/syskit/network_generation/merge_solver.rb @@ -222,7 +222,8 @@ def may_merge_task_contexts?(merged_task, task) # Merges involving a deployed task can only involve a # non-deployed task as well - if task.execution_agent && merged_task.execution_agent + if task.execution_agent && merged_task.execution_agent && + (task.execution_agent != merged_task.execution_agent) info "rejected: deployment attribute mismatches" return false end diff --git a/lib/syskit/network_generation/system_network_deployer.rb b/lib/syskit/network_generation/system_network_deployer.rb index f4c12f912..a5f86ab25 100644 --- a/lib/syskit/network_generation/system_network_deployer.rb +++ b/lib/syskit/network_generation/system_network_deployer.rb @@ -56,15 +56,15 @@ def initialize(plan, # will run on the generated network # @return [Set] the set of tasks for which the deployer 
could # not find a deployment - def deploy(validate: true) + def deploy(validate: true, reuse_deployments: false, deployment_tasks: {}) debug "Deploying the system network" all_tasks = plan.find_local_tasks(TaskContext).to_a selected_deployments, missing_deployments = - select_deployments(all_tasks) + select_deployments(all_tasks, reuse: reuse_deployments) log_timepoint "select_deployments" - apply_selected_deployments(selected_deployments) + apply_selected_deployments(selected_deployments, deployment_tasks) log_timepoint "apply_selected_deployments" if validate @@ -132,13 +132,10 @@ def find_suitable_deployment_for(task) # Find which deployments should be used for which tasks # # @param [[Component]] tasks the tasks to be deployed - # @param [Component=>Models::DeploymentGroup] the association - # between a component and the group that should be used to - # deploy it # @return [(Component=>Deployment,[Component])] the association # between components and the deployments that should be used # for them, and the list of components without deployments - def select_deployments(tasks) + def select_deployments(tasks, reuse: false) used_deployments = Set.new missing_deployments = Set.new selected_deployments = {} @@ -150,7 +147,7 @@ def select_deployments(tasks) if !selected missing_deployments << task - elsif used_deployments.include?(selected) + elsif !reuse && used_deployments.include?(selected) debug do machine, configured_deployment, task_name = *selected "#{task} resolves to #{configured_deployment}.#{task_name} " \ @@ -170,8 +167,7 @@ def select_deployments(tasks) # @param [Component=>Deployment] selected_deployments the # component-to-deployment association # @return [void] - def apply_selected_deployments(selected_deployments) - deployment_tasks = {} + def apply_selected_deployments(selected_deployments, deployment_tasks = {}) selected_deployments.each do |task, deployed_task| deployed_task, = deployed_task.instanciate( plan, diff --git 
a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index 77b15e946..73e833705 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ b/lib/syskit/network_generation/system_network_generator.rb @@ -11,11 +11,12 @@ class SystemNetworkGenerator include Logger::Hierarchy include Roby::DRoby::EventLogging - attr_reader :plan, :event_logger, :merge_solver + attr_reader :plan, :event_logger, :merge_solver, :default_deployment_group def initialize(plan, event_logger: plan.event_logger, - merge_solver: MergeSolver.new(plan)) + merge_solver: MergeSolver.new(plan), + default_deployment_group: nil) if merge_solver.plan != plan raise ArgumentError, "gave #{merge_solver} as merge solver, which applies on #{merge_solver.plan}. Was expecting #{plan}" end @@ -23,6 +24,7 @@ def initialize(plan, @plan = plan @event_logger = event_logger @merge_solver = merge_solver + @default_deployment_group = default_deployment_group end # Generate the network in the plan @@ -33,7 +35,8 @@ def initialize(plan, def generate(instance_requirements, garbage_collect: true, validate_abstract_network: true, - validate_generated_network: true) + validate_generated_network: true, + early_deploy: false) # We first generate a non-deployed network that fits all # requirements. 
@@ -41,7 +44,8 @@ def generate(instance_requirements, compute_system_network(instance_requirements, garbage_collect: garbage_collect, validate_abstract_network: validate_abstract_network, - validate_generated_network: validate_generated_network) + validate_generated_network: validate_generated_network, + early_deploy: early_deploy) end end @@ -188,17 +192,36 @@ def self.remove_abstract_composition_optional_children(plan) end end + def deploy(deployment_tasks) + network_deployer = SystemNetworkDeployer.new( + plan, + merge_solver: merge_solver, + default_deployment_group: default_deployment_group + ) + + network_deployer.deploy(validate: false, + reuse_deployments: true, + deployment_tasks: deployment_tasks) + network_deployer.verify_all_tasks_deployed + end + # Compute in #plan the network needed to fullfill the requirements # # This network is neither validated nor tied to actual deployments def compute_system_network(instance_requirements, garbage_collect: true, validate_abstract_network: true, - validate_generated_network: true) + validate_generated_network: true, + early_deploy: false) + @toplevel_tasks = log_timepoint_group "instanciate" do instanciate(instance_requirements) end + @toplevel_instance_requirements = instance_requirements + deployment_tasks = {} + deploy(deployment_tasks) if early_deploy + merge_solver.merge_identical_tasks log_timepoint "merge" Engine.instanciated_network_postprocessing.each do |block| @@ -207,7 +230,11 @@ def compute_system_network(instance_requirements, garbage_collect: true, end link_to_busses log_timepoint "link_to_busses" + + deploy(deployment_tasks) if early_deploy + merge_solver.merge_identical_tasks + log_timepoint "merge" self.class.remove_abstract_composition_optional_children(plan) @@ -232,9 +259,9 @@ def compute_system_network(instance_requirements, garbage_collect: true, # And get rid of the 'permanent' marking we use to be able to # run static_garbage_collect - plan.each_task do |task| - 
plan.unmark_permanent_task(task) - end + plan.permanent_tasks + .find_all { |task| !task.kind_of?(Syskit::Deployment) } + .each { |task| plan.unmark_permanent_task(task) } Engine.system_network_postprocessing.each do |block| block.call(self, plan) @@ -247,7 +274,7 @@ def compute_system_network(instance_requirements, garbage_collect: true, end if validate_generated_network - self.validate_generated_network + self.validate_generated_network(with_deployments: early_deploy) log_timepoint "validate_generated_network" end @@ -343,6 +370,36 @@ def self.verify_device_allocation(plan, toplevel_tasks_to_requirements = {}) end end + def verify_all_deployments_are_unique + deployment_to_task_map = {} + plan.find_local_tasks(Syskit::TaskContext).each do |t| + deployment_to_task_map[t.orocos_name] = + (deployment_to_task_map[t.orocos_name] || []) + [t] + end + + using_same_deployment = deployment_to_task_map.select do |_, tasks| + tasks.size > 1 + end + + return if using_same_deployment.empty? + + deployment_to_task = using_same_deployment + .each_with_object({}) do |(orocos_name, tasks), h| + deployed_tasks = default_deployment_group + .find_all_suitable_deployments_for(tasks.first) + + deployed_task = deployed_tasks.select do |d| + d.mapped_task_name == orocos_name + end + + h[deployed_task.first] = tasks + end + + raise ConflictingDeploymentAllocation.new( + deployment_to_task + ), "there are deployments used multiple times" + end + # Validates the network generated by {#compute_system_network} # # It performs the tests that are only needed on an abstract network, @@ -353,9 +410,10 @@ def validate_abstract_network end # Validates the network generated by {#compute_system_network} - def validate_generated_network + def validate_generated_network(with_deployments: false) self.class.verify_task_allocation(plan) self.class.verify_device_allocation(plan, toplevel_tasks_to_requirements) + verify_all_deployments_are_unique if with_deployments super if defined? 
super end end diff --git a/test/network_generation/test_system_network_generator.rb b/test/network_generation/test_system_network_generator.rb index d0f672254..132cbdcf3 100644 --- a/test/network_generation/test_system_network_generator.rb +++ b/test/network_generation/test_system_network_generator.rb @@ -111,6 +111,91 @@ def arg=(value) flexmock(generator).should_receive(:validate_generated_network).once generator.compute_system_network([], validate_generated_network: true) end + + describe "early deploy" do + attr_reader :net_gen, :device_m, :cmp_m, :task_m, :net_gen_plan + + before do + @device_m = Device.new_submodel(name: "D") do + output_port "out", "/double" + end + device_m = @device_m + driver_m = TaskContext.new_submodel(name: "Driver") do + output_port "out", "/double" + driver_for device_m, as: "test" + end + + @task_m = TaskContext.new_submodel(name: "Task") do + argument :arg + input_port "in", "/double" + end + task_m = @task_m + + @cmp_m = Syskit::Composition.new_submodel + cmp_m = @cmp_m + cmp_m.add device_m, as: "device" + cmp_m.add task_m, as: "task" + cmp_m.device_child.connect_to cmp_m.task_child + + syskit_stub_configured_deployment(driver_m) + syskit_stub_configured_deployment(task_m, "task1") + + @net_gen = SystemNetworkGenerator.new( + @net_gen_plan = Roby::Plan.new, + default_deployment_group: default_deployment_group + ) + end + + it "can merge tasks with same execution agent" do + d = robot.device(device_m, as: "d") + assert net_gen.compute_system_network( + [cmp_m.use("device" => d), cmp_m.use("device" => d)], + early_deploy: true, + validate_generated_network: true + ) + end + + it "raises when a deployment is used more than once" do + d = robot.device(device_m, as: "d") + assert_raises(ConflictingDeploymentAllocation) do + net_gen.compute_system_network( + [cmp_m.use("task" => task_m.with_arguments(arg: 1), + "device" => d), + cmp_m.use("task" => task_m.with_arguments(arg: 2), + "device" => d)], + early_deploy: true + ) + end + end + 
+ it "early resolves deployments with hints" do + syskit_stub_configured_deployment(task_m, "task2") + local_net_gen = SystemNetworkGenerator.new( + local_net_gen_plan = Roby::Plan.new, + default_deployment_group: default_deployment_group + ) + + d = robot.device(device_m, as: "d") + assert local_net_gen.compute_system_network( + [1, 2].map do |x| + cmp_m.use("task" => task_m.prefer_deployed_tasks(/task#{x}/), + "device" => d) + + end, + early_deploy: true + ) + + [1, 2].each do |x| + tasks = local_net_gen_plan.find_local_tasks(TaskContext) + .select do |t| + if t.respond_to? :orocos_name + t.orocos_name == "task#{x}" + end + end + assert tasks.size == 1 + end + end + end end describe "#generate" do From 6409d34e1517f93e0157d3dd0677e0647a7f9bc1 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Thu, 12 Dec 2024 13:45:02 -0300 Subject: [PATCH 096/158] chore: create conf to toggle early deployments at network gen --- Rakefile | 45 +++++++++++++++++-------- lib/syskit/network_generation/engine.rb | 25 ++++++++++---- lib/syskit/roby_app/configuration.rb | 19 +++++++++++ test/features/early_deploy.rb | 1 + 4 files changed, 69 insertions(+), 21 deletions(-) create mode 100644 test/features/early_deploy.rb diff --git a/Rakefile b/Rakefile index 751591b37..882dd5450 100644 --- a/Rakefile +++ b/Rakefile @@ -28,6 +28,28 @@ def minitest_set_options(test_task, name) test_task.options = "#{TESTOPTS} #{minitest_args} -- --simplecov-name=#{name}" end +def core(early_deploy: false) + s = ":no-early-deploy" + if early_deploy + s = ":early-deploy" + early_deploy_setup = ["test/features/early_deploy.rb"] + end + + Rake::TestTask.new("test:core#{s}") do |t| + t.libs << "." 
+ t.libs << "lib" + minitest_set_options(t, "core") + test_files = FileList["test/**/test_*.rb", *early_deploy_setup] + test_files = test_files + .exclude("test/ros/**/*.rb") + .exclude("test/gui/**/*.rb") + .exclude("test/live/**/*.rb") + .exclude("test/telemetry/**/*.rb") + t.test_files = test_files + t.warning = false + end +end + Rake::TestTask.new("test:telemetry") do |t| t.libs << "." t.libs << "lib" @@ -36,20 +58,7 @@ Rake::TestTask.new("test:telemetry") do |t| t.warning = false end -Rake::TestTask.new("test:core") do |t| - t.libs << "." - t.libs << "lib" - minitest_set_options(t, "core") - test_files = FileList["test/**/test_*.rb"] - test_files = test_files - .exclude("test/ros/**/*.rb") - .exclude("test/gui/**/*.rb") - .exclude("test/live/**/*.rb") - .exclude("test/telemetry/**/*.rb") - t.test_files = test_files - t.warning = false -end - +desc "Run separate tests that require a live syskit instance" task "test:live" do tests = Dir.enum_for(:glob, "test/live/test_*.rb").to_a unless system(File.join("test", "live", "run"), *tests) @@ -57,6 +66,8 @@ task "test:live" do exit 1 end end + +desc "run gui-only tests" Rake::TestTask.new("test:gui") do |t| t.libs << "." 
t.libs << "lib" @@ -66,6 +77,12 @@ Rake::TestTask.new("test:gui") do |t| t.warning = false end +core early_deploy: true +core +desc "Run core library tests, excluding GUI and live tests" +task "test:core" => ["test:core:no-early-deploy", "test:core:early-deploy"] + +desc "Run all tests" task "test" => ["test:gui", "test:core", "test:live", "test:telemetry"] task "rubocop" do diff --git a/lib/syskit/network_generation/engine.rb b/lib/syskit/network_generation/engine.rb index 4d294e535..7119301b1 100644 --- a/lib/syskit/network_generation/engine.rb +++ b/lib/syskit/network_generation/engine.rb @@ -703,18 +703,24 @@ def compute_system_network( Engine.discover_requirement_tasks_from_plan(real_plan), garbage_collect: true, validate_abstract_network: true, - validate_generated_network: true + validate_generated_network: true, + default_deployment_group: nil, + early_deploy: false ) requirement_tasks = requirement_tasks.to_a instance_requirements = requirement_tasks.map(&:requirements) system_network_generator = SystemNetworkGenerator.new( - work_plan, event_logger: event_logger, merge_solver: merge_solver + work_plan, + event_logger: event_logger, + merge_solver: merge_solver, + default_deployment_group: default_deployment_group ) toplevel_tasks = system_network_generator.generate( instance_requirements, garbage_collect: garbage_collect, validate_abstract_network: validate_abstract_network, - validate_generated_network: validate_generated_network + validate_generated_network: validate_generated_network, + early_deploy: early_deploy ) Hash[requirement_tasks.zip(toplevel_tasks)] @@ -749,14 +755,17 @@ def resolve_system_network( validate_deployed_network: true, compute_deployments: true, default_deployment_group: Syskit.conf.deployment_group, - compute_policies: true + compute_policies: true, + early_deploy: Syskit.conf.early_deploy? 
) required_instances = compute_system_network( requirement_tasks, garbage_collect: garbage_collect, validate_abstract_network: validate_abstract_network, - validate_generated_network: validate_generated_network + validate_generated_network: validate_generated_network, + default_deployment_group: (default_deployment_group if early_deploy), + early_deploy: early_deploy ) if compute_deployments @@ -804,7 +813,8 @@ def resolve( validate_abstract_network: true, validate_generated_network: true, validate_deployed_network: true, - validate_final_network: true + validate_final_network: true, + early_deploy: Syskit.conf.early_deploy? ) required_instances = resolve_system_network( requirement_tasks, @@ -814,7 +824,8 @@ def resolve( compute_deployments: compute_deployments, default_deployment_group: default_deployment_group, compute_policies: compute_policies, - validate_deployed_network: validate_deployed_network + validate_deployed_network: validate_deployed_network, + early_deploy: early_deploy ) apply_system_network_to_plan( diff --git a/lib/syskit/roby_app/configuration.rb b/lib/syskit/roby_app/configuration.rb index 045f66ef6..c4769c4f4 100644 --- a/lib/syskit/roby_app/configuration.rb +++ b/lib/syskit/roby_app/configuration.rb @@ -121,6 +121,24 @@ class Configuration # likely want this attr_predicate :kill_all_on_process_server_connection?, true + # Indicates where the deployment stage happens + # + # If false, it will happen at the end of the whole network generation + # (the historical behaviour). If true, it will happen just after + # instantiation + # + # The default is false + # + # @see early_deploy= + def early_deploy? + @early_deploy + end + + # Controls where the deployment stage happens + # + # @see early_deploy? 
+ attr_writer :early_deploy + # Controls whether the orogen types should be exported as Ruby # constants # @@ -154,6 +172,7 @@ def initialize(app) @kill_all_on_process_server_connection = false @register_self_on_name_server = (ENV["SYSKIT_REGISTER_SELF_ON_NAME_SERVER"] != "0") @strict_model_for = false + @early_deploy = false @log_rotation_period = nil @log_transfer = LogTransferManager::Configuration.new( diff --git a/test/features/early_deploy.rb b/test/features/early_deploy.rb new file mode 100644 index 000000000..7f5038bcd --- /dev/null +++ b/test/features/early_deploy.rb @@ -0,0 +1 @@ +Syskit.conf.early_deploy = true From e775e71c5a03911721bf4a26f2046dc6a34250ef Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Mon, 16 Dec 2024 15:52:50 -0300 Subject: [PATCH 097/158] refactor: early_deploy as member of NetworkGeneration necessary for sharing data with the public api that takes no arguments --- lib/syskit/network_generation/engine.rb | 6 ++--- .../system_network_generator.rb | 25 +++++++++++-------- .../test_system_network_generator.rb | 13 +++++----- 3 files changed, 23 insertions(+), 21 deletions(-) diff --git a/lib/syskit/network_generation/engine.rb b/lib/syskit/network_generation/engine.rb index 7119301b1..c68b4b0a4 100644 --- a/lib/syskit/network_generation/engine.rb +++ b/lib/syskit/network_generation/engine.rb @@ -713,14 +713,14 @@ def compute_system_network( work_plan, event_logger: event_logger, merge_solver: merge_solver, - default_deployment_group: default_deployment_group + default_deployment_group: default_deployment_group, + early_deploy: early_deploy ) toplevel_tasks = system_network_generator.generate( instance_requirements, garbage_collect: garbage_collect, validate_abstract_network: validate_abstract_network, - validate_generated_network: validate_generated_network, - early_deploy: early_deploy + validate_generated_network: validate_generated_network ) Hash[requirement_tasks.zip(toplevel_tasks)] diff --git 
a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index 73e833705..21745fb18 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ b/lib/syskit/network_generation/system_network_generator.rb @@ -11,12 +11,17 @@ class SystemNetworkGenerator include Logger::Hierarchy include Roby::DRoby::EventLogging - attr_reader :plan, :event_logger, :merge_solver, :default_deployment_group + attr_reader :plan, + :event_logger, + :merge_solver, + :default_deployment_group, + :early_deploy def initialize(plan, event_logger: plan.event_logger, merge_solver: MergeSolver.new(plan), - default_deployment_group: nil) + default_deployment_group: nil, + early_deploy: false) if merge_solver.plan != plan raise ArgumentError, "gave #{merge_solver} as merge solver, which applies on #{merge_solver.plan}. Was expecting #{plan}" end @@ -25,6 +30,7 @@ def initialize(plan, @event_logger = event_logger @merge_solver = merge_solver @default_deployment_group = default_deployment_group + @early_deploy = early_deploy end # Generate the network in the plan @@ -35,8 +41,7 @@ def initialize(plan, def generate(instance_requirements, garbage_collect: true, validate_abstract_network: true, - validate_generated_network: true, - early_deploy: false) + validate_generated_network: true) # We first generate a non-deployed network that fits all # requirements. 
@@ -44,8 +49,7 @@ def generate(instance_requirements, compute_system_network(instance_requirements, garbage_collect: garbage_collect, validate_abstract_network: validate_abstract_network, - validate_generated_network: validate_generated_network, - early_deploy: early_deploy) + validate_generated_network: validate_generated_network) end end @@ -210,8 +214,7 @@ def deploy(deployment_tasks) # This network is neither validated nor tied to actual deployments def compute_system_network(instance_requirements, garbage_collect: true, validate_abstract_network: true, - validate_generated_network: true, - early_deploy: false) + validate_generated_network: true) @toplevel_tasks = log_timepoint_group "instanciate" do instanciate(instance_requirements) @@ -274,7 +277,7 @@ def compute_system_network(instance_requirements, garbage_collect: true, end if validate_generated_network - self.validate_generated_network(with_deployments: early_deploy) + self.validate_generated_network log_timepoint "validate_generated_network" end @@ -410,10 +413,10 @@ def validate_abstract_network end # Validates the network generated by {#compute_system_network} - def validate_generated_network(with_deployments: false) + def validate_generated_network self.class.verify_task_allocation(plan) self.class.verify_device_allocation(plan, toplevel_tasks_to_requirements) - verify_all_deployments_are_unique if with_deployments + verify_all_deployments_are_unique if early_deploy super if defined? 
super end end diff --git a/test/network_generation/test_system_network_generator.rb b/test/network_generation/test_system_network_generator.rb index 132cbdcf3..626654ddf 100644 --- a/test/network_generation/test_system_network_generator.rb +++ b/test/network_generation/test_system_network_generator.rb @@ -142,7 +142,8 @@ def arg=(value) @net_gen = SystemNetworkGenerator.new( @net_gen_plan = Roby::Plan.new, - default_deployment_group: default_deployment_group + default_deployment_group: default_deployment_group, + early_deploy: true ) end @@ -150,7 +151,6 @@ def arg=(value) d = robot.device(device_m, as: "d") assert net_gen.compute_system_network( [cmp_m.use("device" => d), cmp_m.use("device" => d)], - early_deploy: true, validate_generated_network: true ) end @@ -162,8 +162,7 @@ def arg=(value) [cmp_m.use("task" => task_m.with_arguments(arg: 1), "device" => d), cmp_m.use("task" => task_m.with_arguments(arg: 2), - "device" => d)], - early_deploy: true + "device" => d)] ) end end @@ -172,7 +171,8 @@ def arg=(value) syskit_stub_configured_deployment(task_m, "task2") local_net_gen = SystemNetworkGenerator.new( local_net_gen_plan = Roby::Plan.new, - default_deployment_group: default_deployment_group + default_deployment_group: default_deployment_group, + early_deploy: true ) d = robot.device(device_m, as: "d") @@ -181,8 +181,7 @@ def arg=(value) cmp_m.use("task" => task_m.prefer_deployed_tasks(/task#{x}/), "device" => d) - end, - early_deploy: true + end ) [1, 2].each do |x| From be6aa8cea7e4d272bdc83be52828b3d96cfc4985 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Tue, 17 Dec 2024 10:58:46 -0300 Subject: [PATCH 098/158] chore: decide whether merge contexts with non nil agents --- lib/syskit/network_generation/merge_solver.rb | 20 +++++++++++++++++-- .../system_network_generator.rb | 15 ++++++++++++-- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/lib/syskit/network_generation/merge_solver.rb b/lib/syskit/network_generation/merge_solver.rb index 
6060f3bad..83178643c 100644 --- a/lib/syskit/network_generation/merge_solver.rb +++ b/lib/syskit/network_generation/merge_solver.rb @@ -34,6 +34,8 @@ class MergeSolver # information attr_reader :event_logger + attr_writer :merge_when_identical_agents + def initialize(plan, event_logger: plan.event_logger) @plan = plan @event_logger = event_logger @@ -43,6 +45,7 @@ def initialize(plan, event_logger: plan.event_logger) @task_replacement_graph = Roby::Relations::BidirectionalDirectedAdjacencyGraph.new @resolved_replacements = {} @invalid_merges = Set.new + @merge_when_identical_agents = false end def clear @@ -51,6 +54,10 @@ def clear @invalid_merges.clear end + def merge_when_identical_agents? + @merge_when_identical_agents + end + # Returns the task that is used in place of the given task # # @param [Roby::Task] the task for which we want to know the @@ -222,8 +229,7 @@ def may_merge_task_contexts?(merged_task, task) # Merges involving a deployed task can only involve a # non-deployed task as well - if task.execution_agent && merged_task.execution_agent && - (task.execution_agent != merged_task.execution_agent) + unless mergeable_agents?(merged_task, task) info "rejected: deployment attribute mismatches" return false end @@ -231,6 +237,16 @@ def may_merge_task_contexts?(merged_task, task) true end + def mergeable_agents?(merged_task, task) + return true unless (task.execution_agent && merged_task.execution_agent) + + return false unless merge_when_identical_agents? + + return true if task.execution_agent == merged_task.execution_agent + + false + end + def each_component_merge_candidate(task) # Get the set of candidates. 
We are checking if the tasks in # this set can be replaced by +task+ diff --git a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index 21745fb18..425fb8a6c 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ b/lib/syskit/network_generation/system_network_generator.rb @@ -225,7 +225,7 @@ def compute_system_network(instance_requirements, garbage_collect: true, deployment_tasks = {} deploy(deployment_tasks) if early_deploy - merge_solver.merge_identical_tasks + merge_identical_tasks log_timepoint "merge" Engine.instanciated_network_postprocessing.each do |block| block.call(self, plan) @@ -236,7 +236,7 @@ def compute_system_network(instance_requirements, garbage_collect: true, deploy(deployment_tasks) if early_deploy - merge_solver.merge_identical_tasks + merge_identical_tasks log_timepoint "merge" @@ -284,6 +284,17 @@ def compute_system_network(instance_requirements, garbage_collect: true, @toplevel_tasks end + def merge_identical_tasks + # When early deploying MergeSolver must merge tasks with identical + # execution agents. This breaks a strong assumption for the solver that is + # if two tasks have execution agents, then they must be different. 
But + # when early deploying tasks that have the same agent are actually the + # same + merge_solver.merge_when_identical_agents = early_deploy + merge_solver.merge_identical_tasks + merge_solver.merge_when_identical_agents = false + end + def toplevel_tasks_to_requirements (@toplevel_tasks || []) .map { |t| merge_solver.replacement_for(t) } From 1dc015fcfa11c73f2eadf4a251169c780a58fa98 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Mon, 3 Feb 2025 13:11:46 -0300 Subject: [PATCH 099/158] fix: propagate validate_deployed_network flag propagate it from engine to system network generator --- lib/syskit/network_generation/engine.rb | 7 +++++-- .../network_generation/system_network_generator.rb | 9 ++++++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/lib/syskit/network_generation/engine.rb b/lib/syskit/network_generation/engine.rb index c68b4b0a4..c1578be87 100644 --- a/lib/syskit/network_generation/engine.rb +++ b/lib/syskit/network_generation/engine.rb @@ -705,6 +705,7 @@ def compute_system_network( validate_abstract_network: true, validate_generated_network: true, default_deployment_group: nil, + validate_deployed_network: false, early_deploy: false ) requirement_tasks = requirement_tasks.to_a @@ -714,7 +715,8 @@ def compute_system_network( event_logger: event_logger, merge_solver: merge_solver, default_deployment_group: default_deployment_group, - early_deploy: early_deploy + early_deploy: early_deploy, + validate_deployed_network: validate_deployed_network ) toplevel_tasks = system_network_generator.generate( instance_requirements, @@ -765,7 +767,8 @@ def resolve_system_network( validate_abstract_network: validate_abstract_network, validate_generated_network: validate_generated_network, default_deployment_group: (default_deployment_group if early_deploy), - early_deploy: early_deploy + validate_deployed_network: validate_deployed_network, + early_deploy: early_deploy && compute_deployments ) if compute_deployments diff --git 
a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index 425fb8a6c..178372590 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ b/lib/syskit/network_generation/system_network_generator.rb @@ -15,13 +15,15 @@ class SystemNetworkGenerator :event_logger, :merge_solver, :default_deployment_group, - :early_deploy + :early_deploy, + :validate_deployed_network def initialize(plan, event_logger: plan.event_logger, merge_solver: MergeSolver.new(plan), default_deployment_group: nil, - early_deploy: false) + early_deploy: false, + validate_deployed_network: false) if merge_solver.plan != plan raise ArgumentError, "gave #{merge_solver} as merge solver, which applies on #{merge_solver.plan}. Was expecting #{plan}" end @@ -31,6 +33,7 @@ def initialize(plan, @merge_solver = merge_solver @default_deployment_group = default_deployment_group @early_deploy = early_deploy + @validate_deployed_network = validate_deployed_network end # Generate the network in the plan @@ -206,7 +209,7 @@ def deploy(deployment_tasks) network_deployer.deploy(validate: false, reuse_deployments: true, deployment_tasks: deployment_tasks) - network_deployer.verify_all_tasks_deployed + network_deployer.verify_all_tasks_deployed if validate_deployed_network end # Compute in #plan the network needed to fullfill the requirements From 6427629c742fccb7e866cafe69b81410f54386a2 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Mon, 3 Feb 2025 13:12:26 -0300 Subject: [PATCH 100/158] doc: early_deploy and validate_deployed_network flags --- .../system_network_generator.rb | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index 178372590..4359a68a8 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ 
b/lib/syskit/network_generation/system_network_generator.rb @@ -14,9 +14,19 @@ class SystemNetworkGenerator attr_reader :plan, :event_logger, :merge_solver, - :default_deployment_group, - :early_deploy, - :validate_deployed_network + :default_deployment_group + + # Indicates if deployment stage happens within network generation + def early_deploy? + @early_deploy + end + + # Condition for deployment validation when early deploying + # + # @see early_deploy? + def validate_deployed_network? + @validate_deployed_network + end def initialize(plan, event_logger: plan.event_logger, @@ -209,7 +219,7 @@ def deploy(deployment_tasks) network_deployer.deploy(validate: false, reuse_deployments: true, deployment_tasks: deployment_tasks) - network_deployer.verify_all_tasks_deployed if validate_deployed_network + network_deployer.verify_all_tasks_deployed if validate_deployed_network? end # Compute in #plan the network needed to fullfill the requirements @@ -226,7 +236,8 @@ def compute_system_network(instance_requirements, garbage_collect: true, @toplevel_instance_requirements = instance_requirements deployment_tasks = {} - deploy(deployment_tasks) if early_deploy + + deploy(deployment_tasks) if early_deploy? merge_identical_tasks log_timepoint "merge" @@ -237,7 +248,7 @@ def compute_system_network(instance_requirements, garbage_collect: true, link_to_busses log_timepoint "link_to_busses" - deploy(deployment_tasks) if early_deploy + deploy(deployment_tasks) if early_deploy? merge_identical_tasks @@ -293,7 +304,7 @@ def merge_identical_tasks # if two tasks have execution agents, then they must be different. But # when early deploying tasks that have the same agent are actually the # same - merge_solver.merge_when_identical_agents = early_deploy + merge_solver.merge_when_identical_agents = early_deploy? 
merge_solver.merge_identical_tasks merge_solver.merge_when_identical_agents = false end @@ -430,7 +441,7 @@ def validate_abstract_network def validate_generated_network self.class.verify_task_allocation(plan) self.class.verify_device_allocation(plan, toplevel_tasks_to_requirements) - verify_all_deployments_are_unique if early_deploy + verify_all_deployments_are_unique if early_deploy? super if defined? super end end From ff5c9adf1ee1f4a63fbdae7159941dd592ada5c2 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Tue, 7 Jan 2025 11:58:53 -0300 Subject: [PATCH 101/158] fix(test): misleading test name it actually tests for instantiation, not for deployment --- test/test/test_profile_assertions.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test/test_profile_assertions.rb b/test/test/test_profile_assertions.rb index bd80dca83..8dda33d60 100644 --- a/test/test/test_profile_assertions.rb +++ b/test/test/test_profile_assertions.rb @@ -440,7 +440,7 @@ module Test assert_can_instanciate(@cmp_m.use(@srv_m => @task_m)) end - it "allows deploying together with the actions or profile" do + it "allows instantiating together with the actions or profile" do @task_m.argument :bla @test_profile.define "test", @cmp_m.use(@srv_m => @task_m) assert_can_instanciate( From 68b614f7a3831629a9e63e83095623e7cc77c7cc Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Tue, 7 Jan 2025 12:52:16 -0300 Subject: [PATCH 102/158] chore(test): rewrite test for early_deploy compatibility --- test/coordination/test_task_script.rb | 5 +++-- test/test/test_profile_assertions.rb | 8 ++++++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/test/coordination/test_task_script.rb b/test/coordination/test_task_script.rb index af2d5ac8f..5091e2930 100644 --- a/test/coordination/test_task_script.rb +++ b/test/coordination/test_task_script.rb @@ -320,7 +320,8 @@ def start it "does port mapping if necessary" do composition_m = Syskit::Composition.new_submodel 
composition_m.add srv_m, as: "test" - composition = syskit_deploy_and_configure(composition_m.use("test" => component)) + syskit_stub_configured_deployment task_m + composition = syskit_deploy_and_configure(composition_m.use("test" => task_m)) reader = nil composition.script do @@ -328,7 +329,7 @@ def start end syskit_start(composition) - component.orocos_task.local_ruby_task.out.write(10) + composition.test_child.orocos_task.local_ruby_task.out.write(10) assert_equal 10, reader.read end diff --git a/test/test/test_profile_assertions.rb b/test/test/test_profile_assertions.rb index 8dda33d60..b83b58615 100644 --- a/test/test/test_profile_assertions.rb +++ b/test/test/test_profile_assertions.rb @@ -582,7 +582,9 @@ module Test end it "allows deploying together with the actions or profile" do - @test_profile.define("test", @cmp_m.use(@srv_m => @task_m)) + @test_profile.define("test", @cmp_m.use(@srv_m => \ + @task_m.to_instance_requirements + .use_deployment(@deployment_m))) assert_can_deploy( @test_profile.test_def, together_with: @task_m.to_instance_requirements @@ -721,7 +723,9 @@ module Test end it "allows deploying together with the actions or profile" do - @test_profile.define("test", @cmp_m.use(@srv_m => @task_m)) + @test_profile.define("test", @cmp_m.use(@srv_m => \ + @task_m.to_instance_requirements + .use_deployment(@deployment_m))) assert_can_deploy_all( @test_profile.test_def, together_with: @task_m.to_instance_requirements From 056fbe7871f68e520fd81dd01ddabc9ce8474794 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Tue, 7 Jan 2025 13:27:48 -0300 Subject: [PATCH 103/158] refactor: rubocop grievances --- lib/syskit/network_generation/merge_solver.rb | 2 +- lib/syskit/network_generation/system_network_generator.rb | 2 +- test/features/early_deploy.rb | 2 ++ test/network_generation/test_system_network_generator.rb | 7 ++----- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/lib/syskit/network_generation/merge_solver.rb 
b/lib/syskit/network_generation/merge_solver.rb index 83178643c..f8621b5bd 100644 --- a/lib/syskit/network_generation/merge_solver.rb +++ b/lib/syskit/network_generation/merge_solver.rb @@ -238,7 +238,7 @@ def may_merge_task_contexts?(merged_task, task) end def mergeable_agents?(merged_task, task) - return true unless (task.execution_agent && merged_task.execution_agent) + return true unless task.execution_agent && merged_task.execution_agent return false unless merge_when_identical_agents? diff --git a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index 4359a68a8..3e47a77c2 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ b/lib/syskit/network_generation/system_network_generator.rb @@ -28,7 +28,7 @@ def validate_deployed_network? @validate_deployed_network end - def initialize(plan, + def initialize(plan, # rubocop:disable Metrics/ParameterLists event_logger: plan.event_logger, merge_solver: MergeSolver.new(plan), default_deployment_group: nil, diff --git a/test/features/early_deploy.rb b/test/features/early_deploy.rb index 7f5038bcd..fd48a6f6b 100644 --- a/test/features/early_deploy.rb +++ b/test/features/early_deploy.rb @@ -1 +1,3 @@ +# frozen_string_literal: true + Syskit.conf.early_deploy = true diff --git a/test/network_generation/test_system_network_generator.rb b/test/network_generation/test_system_network_generator.rb index 626654ddf..8ddc98331 100644 --- a/test/network_generation/test_system_network_generator.rb +++ b/test/network_generation/test_system_network_generator.rb @@ -180,16 +180,13 @@ def arg=(value) [1, 2].map do |x| cmp_m.use("task" => task_m.prefer_deployed_tasks(/task#{x}/), "device" => d) - end ) [1, 2].each do |x| tasks = local_net_gen_plan.find_local_tasks(TaskContext) - .select do |t| - if t.respond_to? 
:orocos_name - t.orocos_name == "task#{x}" end + .select do |t| + t.respond_to?(:orocos_name) && t.orocos_name == "task#{x}" end assert tasks.size == 1 end From ecfc211cffbaab4751fc0c1e1b05ef346212efbc Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Fri, 31 Jan 2025 14:35:07 -0300 Subject: [PATCH 104/158] chore: split may_merge_task_context? It was being used for Composition and Placeholder (i.e. non TaskContext) merge evaluation. This wasn't a problem until now because it tests for execution_agent existence which is nil for Composition and Placeholder, but now we test for TaskContext#orocos_name and orocos_name is not a member of Composition nor Placeholder --- lib/syskit/network_generation/merge_solver.rb | 54 ++++++++++++------- 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/lib/syskit/network_generation/merge_solver.rb b/lib/syskit/network_generation/merge_solver.rb index f8621b5bd..5e0bd247d 100644 --- a/lib/syskit/network_generation/merge_solver.rb +++ b/lib/syskit/network_generation/merge_solver.rb @@ -7,7 +7,7 @@ module NetworkGeneration # # This is the core of the system deployment algorithm implemented in # Engine - class MergeSolver + class MergeSolver # rubocop:disable Metrics/ClassLength extend Logger::Hierarchy include Logger::Hierarchy include Roby::DRoby::EventLogging @@ -205,16 +205,7 @@ def self.merge_identical_tasks(plan) solver.merge_identical_tasks end - # Tests whether task.merge(target_task) is a valid operation - # - # @param [Syskit::TaskContext] task - # @param [Syskit::TaskContext] target_task - # - # @return [false,true] if false, the merge is not possible. If - true, it is possible.
If nil, the only thing that makes the - merge impossible are missing inputs, and these tasks might - therefore be merged if there was a dataflow cycle - def may_merge_task_contexts?(merged_task, task) + def may_merge_components?(merged_task, task) can_merge = log_nest(2) do task.can_merge?(merged_task) end @@ -227,6 +218,21 @@ def may_merge_task_contexts?(merged_task, task) return false end + true + end + + # Tests whether task.merge(target_task) is a valid operation + # + # @param [Syskit::TaskContext] task + # @param [Syskit::TaskContext] target_task + # + # @return [false,true] if false, the merge is not possible. If + true, it is possible. If nil, the only thing that makes the + merge impossible are missing inputs, and these tasks might + therefore be merged if there was a dataflow cycle + def may_merge_task_contexts?(merged_task, task) + return false unless may_merge_components?(merged_task, task) + # Merges involving a deployed task can only involve a # non-deployed task as well unless mergeable_agents?(merged_task, task) @@ -242,9 +248,7 @@ def mergeable_agents?(merged_task, task) return false unless merge_when_identical_agents?
- return true if task.execution_agent == merged_task.execution_agent - - false + task.orocos_name == merged_task.orocos_name end def each_component_merge_candidate(task) @@ -346,9 +350,7 @@ def composition_children_by_role(task) end def may_merge_compositions?(merged_task, task) - unless may_merge_task_contexts?(merged_task, task) - return false - end + return false unless may_merge_components?(merged_task, task) merged_task_children = composition_children_by_role(merged_task) task_children = composition_children_by_role(task) @@ -463,6 +465,22 @@ def pretty_print_failure(pp) :source_task, :source_port, :policy, :sink_port, :sink_task ) + def may_merge?(merged_task, task) + case merged_task + when TaskContext + may_merge_task_contexts?(merged_task, task) + when Composition + may_merge_compositions?(merged_task, task) + when Placeholder + may_merge_components?(merged_task, task) + else + raise ArgumentError, + "may_merge? called with #{merged_task} of type " \ + "#{merged_task.class}, expected either TaskContext, " \ + "Composition or Placeholder" + end + end + # Resolve merge between N tasks with the given tasks as seeds # # The method will cycle through the task's mismatching inputs (if @@ -479,7 +497,7 @@ def pretty_print_failure(pp) # # @return [MergeResolution] def resolve_merge(merged_task, task, mappings) - unless may_merge_task_contexts?(merged_task, task) + unless may_merge?(merged_task, task) return MergeResolution.new(mappings, merged_task, task, [], []) end From 9a677cdd6c3dc8a375bf9de7bad29f4ebea17db1 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Tue, 4 Feb 2025 10:41:18 -0300 Subject: [PATCH 105/158] refactor: follow estabilished pattern --- lib/syskit/network_generation/system_network_generator.rb | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index 3e47a77c2..ae14c9698 100644 --- 
a/lib/syskit/network_generation/system_network_generator.rb +++ b/lib/syskit/network_generation/system_network_generator.rb @@ -398,7 +398,7 @@ def self.verify_device_allocation(plan, toplevel_tasks_to_requirements = {}) end end - def verify_all_deployments_are_unique + def self.verify_all_deployments_are_unique(plan, default_deployment_group) deployment_to_task_map = {} plan.find_local_tasks(Syskit::TaskContext).each do |t| deployment_to_task_map[t.orocos_name] = @@ -441,7 +441,11 @@ def validate_abstract_network def validate_generated_network self.class.verify_task_allocation(plan) self.class.verify_device_allocation(plan, toplevel_tasks_to_requirements) - verify_all_deployments_are_unique if early_deploy? + if early_deploy? + self.class.verify_all_deployments_are_unique( + plan, default_deployment_group + ) + end super if defined? super end end From 3b5becbbd13d2e15a02ae40764e44e67803a1b9b Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Tue, 4 Feb 2025 11:40:37 -0300 Subject: [PATCH 106/158] Update lib/syskit/exceptions.rb Co-authored-by: jhonasiv <34279171+jhonasiv@users.noreply.github.com> --- lib/syskit/exceptions.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/syskit/exceptions.rb b/lib/syskit/exceptions.rb index d1400ceee..0ee851181 100644 --- a/lib/syskit/exceptions.rb +++ b/lib/syskit/exceptions.rb @@ -507,7 +507,7 @@ def pretty_print(pp) .orogen_model pp.text( "deployed task #{deployed_task.mapped_task_name} from deployment " \ - "#{orogen_model.name} defined in " \ + "#{orogen_model.name} is defined in " \ "#{orogen_model.project.name} on #{process_server_name}" ) From be24850d4f40bd7117c082712643afd91b6e4c0f Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Tue, 4 Feb 2025 11:42:50 -0300 Subject: [PATCH 107/158] doc: remove deprecated return description --- lib/syskit/network_generation/merge_solver.rb | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/syskit/network_generation/merge_solver.rb 
b/lib/syskit/network_generation/merge_solver.rb index 5e0bd247d..ea609711f 100644 --- a/lib/syskit/network_generation/merge_solver.rb +++ b/lib/syskit/network_generation/merge_solver.rb @@ -227,9 +227,7 @@ def may_merge_components?(merged_task, task) # @param [Syskit::TaskContext] target_task # # @return [false,true] if false, the merge is not possible. If - true, it is possible. If nil, the only thing that makes the - merge impossible are missing inputs, and these tasks might - therefore be merged if there was a dataflow cycle + true, it is possible. def may_merge_task_contexts?(merged_task, task) return false unless may_merge_components?(merged_task, task) From 12711c643c5a356c501b7547a1b876318f18a67b Mon Sep 17 00:00:00 2001 From: kapeps Date: Wed, 5 Feb 2025 11:42:43 -0300 Subject: [PATCH 108/158] fix: rubocop grievances --- lib/syskit/cli/log_runtime_archive.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 76dbcf4ae..236417fc4 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -116,7 +116,7 @@ def ensure_free_space( true end - def process_dataset(child, max_archive_size: DEFAULT_MAX_ARCHIVE_SIZE, full:) + def process_dataset(child, full:, max_archive_size: DEFAULT_MAX_ARCHIVE_SIZE) use_existing = true loop do open_archive_for( From 8e4ffd308c40dd01e1f4138f7cfd7d8c38924b1e Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Fri, 7 Feb 2025 16:14:48 -0300 Subject: [PATCH 109/158] fix(test): actually checking for execution agent existence the test was "succeeding" because can_merge?
was returning false, and not because both execution agents were non nil, as the test intended --- test/network_generation/test_merge_solver.rb | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/test/network_generation/test_merge_solver.rb b/test/network_generation/test_merge_solver.rb index e97a8f8b0..831d29a0a 100644 --- a/test/network_generation/test_merge_solver.rb +++ b/test/network_generation/test_merge_solver.rb @@ -48,12 +48,16 @@ target_task.should_receive(:can_merge?).with(task).and_return(false).once assert !solver.may_merge_task_contexts?(task, target_task) end - it "returns false for tasks that have execution agents" do - plan.add(t1 = simple_component_model.new) - plan.add(t2 = simple_composition_model.new) - flexmock(t1).should_receive(:execution_agent).and_return(true) - assert !solver.may_merge_task_contexts?(t1, t2) - assert !solver.may_merge_task_contexts?(t2, t1) + it "returns false if both tasks have execution agents and " \ + "merge_when_identical_agents is false" do + plan.add(task1 = simple_component_model.new) + plan.add(task2 = simple_composition_model.new) + [task1, task2].permutation.each do |t1, t2| + flexmock(t1).should_receive(:execution_agent).and_return(true) + t1.should_receive(:can_merge?).with(t2).and_return(true).once + end + assert !solver.may_merge_task_contexts?(task1, task2) + assert !solver.may_merge_task_contexts?(task2, task1) end end From 83371b85cef0298ba68042502783e66c8ad1ff00 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Fri, 7 Feb 2025 16:36:31 -0300 Subject: [PATCH 110/158] chore: toggle solver's merge_when_identical_agents at engine level In order for it to be possible, I had to change merge_solver.merge_identical_tasks call to merge_solver.merge_compositions at Engine#finalize_deployed_tasks.
Which merged previously because all had non nil execution agents, but now when this rule changes with early_deploy and the merge solver would attempt to merge task contexts because their agents are non nil. It particularly breaks when for some reason the current task context cannot be deployed by its transaction proxy counterpart as in "ensures that the old task gets garbage collected when child of another still useful task test" --- lib/syskit/network_generation/engine.rb | 5 ++++- lib/syskit/network_generation/merge_solver.rb | 8 ++++---- .../system_network_generator.rb | 15 ++------------- .../test_system_network_generator.rb | 5 ++++- 4 files changed, 14 insertions(+), 19 deletions(-) diff --git a/lib/syskit/network_generation/engine.rb b/lib/syskit/network_generation/engine.rb index c1578be87..34bf2bc3f 100644 --- a/lib/syskit/network_generation/engine.rb +++ b/lib/syskit/network_generation/engine.rb @@ -401,7 +401,7 @@ def finalize_deployed_tasks # This is required to merge the already existing compositions # with the ones in the plan - merge_solver.merge_identical_tasks + merge_solver.merge_compositions log_timepoint "merge" [selected_deployment_tasks, reused_deployed_tasks | newly_deployed_tasks] @@ -710,6 +710,7 @@ def compute_system_network( ) requirement_tasks = requirement_tasks.to_a instance_requirements = requirement_tasks.map(&:requirements) + merge_solver.merge_task_contexts_with_same_agent = early_deploy system_network_generator = SystemNetworkGenerator.new( work_plan, event_logger: event_logger, @@ -761,6 +762,7 @@ def resolve_system_network( early_deploy: Syskit.conf.early_deploy? ) + merge_solver.merge_task_contexts_with_same_agent = early_deploy required_instances = compute_system_network( requirement_tasks, garbage_collect: garbage_collect, @@ -819,6 +821,7 @@ def resolve( validate_final_network: true, early_deploy: Syskit.conf.early_deploy?
) + merge_solver.merge_task_contexts_with_same_agent = early_deploy required_instances = resolve_system_network( requirement_tasks, garbage_collect: garbage_collect, diff --git a/lib/syskit/network_generation/merge_solver.rb b/lib/syskit/network_generation/merge_solver.rb index ea609711f..1f5aab1fe 100644 --- a/lib/syskit/network_generation/merge_solver.rb +++ b/lib/syskit/network_generation/merge_solver.rb @@ -34,7 +34,7 @@ class MergeSolver # rubocop:disable Metrics/ClassLength # information attr_reader :event_logger - attr_writer :merge_when_identical_agents + attr_writer :merge_task_contexts_with_same_agent def initialize(plan, event_logger: plan.event_logger) @plan = plan @@ -45,7 +45,7 @@ def initialize(plan, event_logger: plan.event_logger) @task_replacement_graph = Roby::Relations::BidirectionalDirectedAdjacencyGraph.new @resolved_replacements = {} @invalid_merges = Set.new - @merge_when_identical_agents = false + @merge_task_contexts_with_same_agent = false end def clear @@ -54,8 +54,8 @@ def clear @invalid_merges.clear end - def merge_when_identical_agents? - @merge_when_identical_agents + def merge_task_contexts_with_same_agent? + @merge_task_contexts_with_same_agent end # Returns the task that is used in place of the given task diff --git a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index ae14c9698..a7e2a48bd 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ b/lib/syskit/network_generation/system_network_generator.rb @@ -239,7 +239,7 @@ def compute_system_network(instance_requirements, garbage_collect: true, deploy(deployment_tasks) if early_deploy? - merge_identical_tasks + merge_solver.merge_identical_tasks log_timepoint "merge" Engine.instanciated_network_postprocessing.each do |block| block.call(self, plan) @@ -250,7 +250,7 @@ def compute_system_network(instance_requirements, garbage_collect: true, deploy(deployment_tasks) if early_deploy? 
- merge_identical_tasks + merge_solver.merge_identical_tasks log_timepoint "merge" @@ -298,17 +298,6 @@ def compute_system_network(instance_requirements, garbage_collect: true, @toplevel_tasks end - def merge_identical_tasks - # When early deploying MergeSolver must merge tasks with identical - # execution agents. This breaks a strong assumption for the solver that is - # if two tasks have execution agents, then they must be different. But - # when early deploying tasks that have the same agent are actually the - # same - merge_solver.merge_when_identical_agents = early_deploy? - merge_solver.merge_identical_tasks - merge_solver.merge_when_identical_agents = false - end - def toplevel_tasks_to_requirements (@toplevel_tasks || []) .map { |t| merge_solver.replacement_for(t) } diff --git a/test/network_generation/test_system_network_generator.rb b/test/network_generation/test_system_network_generator.rb index 8ddc98331..64af16ced 100644 --- a/test/network_generation/test_system_network_generator.rb +++ b/test/network_generation/test_system_network_generator.rb @@ -115,7 +115,7 @@ def arg=(value) describe "early deploy" do attr_reader :net_gen, :device_m, :cmp_m, :task_m, :net_gen_plan - before do + before do # rubocop:disable Metrics/BlockLength @device_m = Device.new_submodel(name: "D") do output_port "out", "/double" end @@ -145,6 +145,7 @@ def arg=(value) default_deployment_group: default_deployment_group, early_deploy: true ) + @net_gen.merge_solver.merge_task_contexts_with_same_agent = true end it "can merge tasks with same execution agent" do @@ -174,6 +175,8 @@ def arg=(value) default_deployment_group: default_deployment_group, early_deploy: true ) + local_net_gen.merge_solver + .merge_task_contexts_with_same_agent = true d = robot.device(device_m, as: "d") assert local_net_gen.compute_system_network( From e346f957448076a10e3f143cf3b3bdccc5fa9c96 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Fri, 7 Feb 2025 16:35:45 -0300 Subject: [PATCH 111/158] fix: 
when early_deploying all tasks should have deployments when early deploying, a non deployed task can't be merged with a deployed one, because the basic assumption is that all tasks should have a deployment' --- lib/syskit/network_generation/merge_solver.rb | 8 ++++++-- test/network_generation/test_merge_solver.rb | 16 ++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/lib/syskit/network_generation/merge_solver.rb b/lib/syskit/network_generation/merge_solver.rb index 1f5aab1fe..66036ca07 100644 --- a/lib/syskit/network_generation/merge_solver.rb +++ b/lib/syskit/network_generation/merge_solver.rb @@ -242,9 +242,13 @@ def may_merge_task_contexts?(merged_task, task) end def mergeable_agents?(merged_task, task) - return true unless task.execution_agent && merged_task.execution_agent + unless merge_task_contexts_with_same_agent? + return true unless task.execution_agent && merged_task.execution_agent - return false unless merge_when_identical_agents? + return false + end + + return false unless task.execution_agent && merged_task.execution_agent task.orocos_name == merged_task.orocos_name end diff --git a/test/network_generation/test_merge_solver.rb b/test/network_generation/test_merge_solver.rb index 831d29a0a..1362b3d4f 100644 --- a/test/network_generation/test_merge_solver.rb +++ b/test/network_generation/test_merge_solver.rb @@ -59,6 +59,22 @@ assert !solver.may_merge_task_contexts?(task1, task2) assert !solver.may_merge_task_contexts?(task2, task1) end + it "returns false for tasks that do not have execution agents when " \ + "merge_when_identical_agents is true" do + plan.add(task1 = simple_component_model.new) + plan.add(task2 = simple_composition_model.new) + + [task1, task2].permutation.each do |t1, t2| + flexmock(t1).should_receive(:execution_agent).and_return(false) + t1.should_receive(:can_merge?).with(t2).and_return(true).once + end + + local_solver = Syskit::NetworkGeneration::MergeSolver.new(plan) + 
local_solver.merge_task_contexts_with_same_agent = true + + assert !local_solver.may_merge_task_contexts?(task1, task2) + assert !local_solver.may_merge_task_contexts?(task2, task1) + end end describe "may_merge_compositions?" do From 735afa014950aefc86e5e6263c1855b2ff2e2f2c Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Wed, 5 Feb 2025 14:29:32 -0300 Subject: [PATCH 112/158] refactor: verify_all_tasks_deployed as class method --- .../system_network_deployer.rb | 26 ++++++++++++++++--- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/lib/syskit/network_generation/system_network_deployer.rb b/lib/syskit/network_generation/system_network_deployer.rb index a5f86ab25..1794cc6c7 100644 --- a/lib/syskit/network_generation/system_network_deployer.rb +++ b/lib/syskit/network_generation/system_network_deployer.rb @@ -75,13 +75,22 @@ def deploy(validate: true, reuse_deployments: false, deployment_tasks: {}) missing_deployments end + def find_all_suitable_deployments_for(task, from: task) + self.class.find_all_suitable_deployments_for( + default_deployment_group, + task, + from: from + ) + end + # Find all candidates, resolved using deployment groups in the task hierarchy # # The method falls back to the default deployment group if no # deployments for the task could be found in the plan itself # # @return [Set] - def find_all_suitable_deployments_for(task, from: task) + def self.find_all_suitable_deployments_for(default_deployment_group, + task, from: task) candidates = from.requirements.deployment_group .find_all_suitable_deployments_for(task) return candidates unless candidates.empty? 
@@ -93,7 +102,9 @@ def find_all_suitable_deployments_for(task, from: task) end parents.each_with_object(Set.new) do |p, s| - s.merge(find_all_suitable_deployments_for(task, from: p)) + s.merge(find_all_suitable_deployments_for(default_deployment_group, + task, + from: p)) end end @@ -201,12 +212,16 @@ def validate_deployed_network verify_all_configurations_exist end + def verify_all_tasks_deployed + self.class.verify_all_tasks_deployed(plan, default_deployment_group) + end + # Verifies that all tasks in the plan are deployed # # @param [Component=>DeploymentGroup] deployment_groups which # deployment groups has been used for which task. This is used # to generate the error messages when needed. - def verify_all_tasks_deployed + def self.verify_all_tasks_deployed(plan, default_deployment_group) not_deployed = plan.find_local_tasks(TaskContext) .not_finished.not_abstract .find_all { |t| !t.execution_agent } @@ -215,7 +230,10 @@ def verify_all_tasks_deployed tasks_with_candidates = {} not_deployed.each do |task| - candidates = find_all_suitable_deployments_for(task) + candidates = find_all_suitable_deployments_for( + default_deployment_group, + task + ) candidates = candidates.map do |deployed_task| task_name = deployed_task.mapped_task_name existing_tasks = From bd5abba295e4d31e55d4cb2260743f163286aeed Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Fri, 7 Feb 2025 16:37:03 -0300 Subject: [PATCH 113/158] chore: verify tasks deployment only at the end of network generation verifying it earlier led throwing deploying errors where there were primary instantiation errors --- .../network_generation/system_network_generator.rb | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index a7e2a48bd..4ed0e0cab 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ 
b/lib/syskit/network_generation/system_network_generator.rb @@ -219,7 +219,6 @@ def deploy(deployment_tasks) network_deployer.deploy(validate: false, reuse_deployments: true, deployment_tasks: deployment_tasks) - network_deployer.verify_all_tasks_deployed if validate_deployed_network? end # Compute in #plan the network needed to fullfill the requirements @@ -426,11 +425,19 @@ def validate_abstract_network super if defined? super end + def self.verify_all_tasks_deployed(plan, default_deployment_group) + SystemNetworkDeployer.verify_all_tasks_deployed( + plan, + default_deployment_group + ) + end + # Validates the network generated by {#compute_system_network} def validate_generated_network self.class.verify_task_allocation(plan) self.class.verify_device_allocation(plan, toplevel_tasks_to_requirements) - if early_deploy? + if early_deploy? && validate_deployed_network? + self.class.verify_all_tasks_deployed(plan, default_deployment_group) self.class.verify_all_deployments_are_unique( plan, default_deployment_group ) From b860fe90336267c331669869d8c6d0fc3f3960d7 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Wed, 5 Feb 2025 16:03:25 -0300 Subject: [PATCH 114/158] test: do not merge when just one task has an execution agent when early deploying the assumption changes to "every task should have an execution agent" so merging one that does with one that doesn't would be wrong. 
Note that when not early_deploying the merge criteria is exactly the opposite, two task contexts should be merged when just one of them has an execution agent --- .../test_system_network_generator.rb | 30 ++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/test/network_generation/test_system_network_generator.rb b/test/network_generation/test_system_network_generator.rb index 64af16ced..f8650b140 100644 --- a/test/network_generation/test_system_network_generator.rb +++ b/test/network_generation/test_system_network_generator.rb @@ -143,7 +143,7 @@ def arg=(value) @net_gen = SystemNetworkGenerator.new( @net_gen_plan = Roby::Plan.new, default_deployment_group: default_deployment_group, - early_deploy: true + early_deploy: true, validate_deployed_network: true ) @net_gen.merge_solver.merge_task_contexts_with_same_agent = true end @@ -194,6 +194,34 @@ def arg=(value) assert tasks.size == 1 end end + + it "does not merge a task without deployments " \ + "with a compatible task that has one" do + local_net_gen = SystemNetworkGenerator.new( + local_net_gen_plan = Roby::Plan.new, + default_deployment_group: Models::DeploymentGroup.new, + early_deploy: true, + validate_deployed_network: true + ) + + task_m = self.task_m + deployment_m = Syskit::Deployment.new_submodel do + task "task", task_m + end + + local_net_gen.merge_solver + .merge_task_contexts_with_same_agent = true + e = assert_raises(MissingDeployments) do + local_net_gen.compute_system_network( + [task_m.to_instance_requirements, + task_m.to_instance_requirements + .use_deployment(deployment_m)], + validate_deployed_network: true + ) + end + + assert_equal 1, e.tasks.size + end end end From 0e4d2e532035cfce2965a8d68b79d7fdfe9f7d4d Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Fri, 7 Feb 2025 15:50:45 -0300 Subject: [PATCH 115/158] chore: pass validate_deployed_network as generate argument because all validate_* paramenters are passed to generate so far --- 
lib/syskit/network_generation/engine.rb | 8 +-- .../system_network_generator.rb | 54 ++++++++++--------- .../test_system_network_generator.rb | 5 +- 3 files changed, 34 insertions(+), 33 deletions(-) diff --git a/lib/syskit/network_generation/engine.rb b/lib/syskit/network_generation/engine.rb index 34bf2bc3f..edb4c6a49 100644 --- a/lib/syskit/network_generation/engine.rb +++ b/lib/syskit/network_generation/engine.rb @@ -704,9 +704,9 @@ def compute_system_network( garbage_collect: true, validate_abstract_network: true, validate_generated_network: true, - default_deployment_group: nil, - validate_deployed_network: false, early_deploy: false + default_deployment_group: Syskit.conf.deployment_group, + validate_deployed_network: (true if Syskit.conf.early_deploy?) ) requirement_tasks = requirement_tasks.to_a instance_requirements = requirement_tasks.map(&:requirements) @@ -717,13 +717,13 @@ def compute_system_network( merge_solver: merge_solver, default_deployment_group: default_deployment_group, early_deploy: early_deploy, - validate_deployed_network: validate_deployed_network ) toplevel_tasks = system_network_generator.generate( instance_requirements, garbage_collect: garbage_collect, validate_abstract_network: validate_abstract_network, - validate_generated_network: validate_generated_network + validate_generated_network: validate_generated_network, + validate_deployed_network: validate_deployed_network ) Hash[requirement_tasks.zip(toplevel_tasks)] diff --git a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index 4ed0e0cab..df0995808 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ b/lib/syskit/network_generation/system_network_generator.rb @@ -21,19 +21,11 @@ def early_deploy? @early_deploy end - # Condition for deployment validation when early deploying - # - # @see early_deploy? - def validate_deployed_network? 
- @validate_deployed_network - end - def initialize(plan, # rubocop:disable Metrics/ParameterLists event_logger: plan.event_logger, merge_solver: MergeSolver.new(plan), default_deployment_group: nil, - early_deploy: false, - validate_deployed_network: false) + early_deploy: false) if merge_solver.plan != plan raise ArgumentError, "gave #{merge_solver} as merge solver, which applies on #{merge_solver.plan}. Was expecting #{plan}" end @@ -43,10 +35,11 @@ def initialize(plan, # rubocop:disable Metrics/ParameterLists @merge_solver = merge_solver @default_deployment_group = default_deployment_group @early_deploy = early_deploy - @validate_deployed_network = validate_deployed_network end # Generate the network in the plan + # param [bool] validate_deployed_network controls whether or not the + # deployed network is validated, when #early_deploy? is true # # @return [HashArray>] the # list of toplevel tasks mapped to the instance requirements it @@ -54,7 +47,8 @@ def initialize(plan, # rubocop:disable Metrics/ParameterLists def generate(instance_requirements, garbage_collect: true, validate_abstract_network: true, - validate_generated_network: true) + validate_generated_network: true, + validate_deployed_network: true) # We first generate a non-deployed network that fits all # requirements. 
@@ -62,7 +56,8 @@ def generate(instance_requirements, compute_system_network(instance_requirements, garbage_collect: garbage_collect, validate_abstract_network: validate_abstract_network, - validate_generated_network: validate_generated_network) + validate_generated_network: validate_generated_network, + validate_deployed_network: validate_deployed_network) end end @@ -226,7 +221,8 @@ def deploy(deployment_tasks) # This network is neither validated nor tied to actual deployments def compute_system_network(instance_requirements, garbage_collect: true, validate_abstract_network: true, - validate_generated_network: true) + validate_generated_network: true, + validate_deployed_network: true) @toplevel_tasks = log_timepoint_group "instanciate" do instanciate(instance_requirements) @@ -294,6 +290,10 @@ def compute_system_network(instance_requirements, garbage_collect: true, log_timepoint "validate_generated_network" end + if early_deploy? && validate_deployed_network + self.validate_deployed_network + end + @toplevel_tasks end @@ -425,25 +425,27 @@ def validate_abstract_network super if defined? super end - def self.verify_all_tasks_deployed(plan, default_deployment_group) - SystemNetworkDeployer.verify_all_tasks_deployed( - plan, - default_deployment_group - ) - end - # Validates the network generated by {#compute_system_network} def validate_generated_network self.class.verify_task_allocation(plan) self.class.verify_device_allocation(plan, toplevel_tasks_to_requirements) - if early_deploy? && validate_deployed_network? - self.class.verify_all_tasks_deployed(plan, default_deployment_group) - self.class.verify_all_deployments_are_unique( - plan, default_deployment_group - ) - end super if defined? super end + + def validate_deployed_network + self.class.verify_all_tasks_deployed(plan, default_deployment_group) + self.class.verify_all_deployments_are_unique( + plan, default_deployment_group + ) + super if defined? 
super + end + + def self.verify_all_tasks_deployed(plan, default_deployment_group) + SystemNetworkDeployer.verify_all_tasks_deployed( + plan, + default_deployment_group + ) + end end end end diff --git a/test/network_generation/test_system_network_generator.rb b/test/network_generation/test_system_network_generator.rb index f8650b140..7a4037108 100644 --- a/test/network_generation/test_system_network_generator.rb +++ b/test/network_generation/test_system_network_generator.rb @@ -143,7 +143,7 @@ def arg=(value) @net_gen = SystemNetworkGenerator.new( @net_gen_plan = Roby::Plan.new, default_deployment_group: default_deployment_group, - early_deploy: true, validate_deployed_network: true + early_deploy: true ) @net_gen.merge_solver.merge_task_contexts_with_same_agent = true end @@ -200,8 +200,7 @@ def arg=(value) local_net_gen = SystemNetworkGenerator.new( local_net_gen_plan = Roby::Plan.new, default_deployment_group: Models::DeploymentGroup.new, - early_deploy: true, - validate_deployed_network: true + early_deploy: true ) task_m = self.task_m From 9619635e57aea00b90fc148e1701e04ddb665e35 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Fri, 7 Feb 2025 16:28:47 -0300 Subject: [PATCH 116/158] chore: use early deploy FF as default value --- lib/syskit/network_generation/engine.rb | 6 +++--- lib/syskit/test/network_manipulation.rb | 3 ++- test/network_generation/test_engine.rb | 6 +++++- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/lib/syskit/network_generation/engine.rb b/lib/syskit/network_generation/engine.rb index edb4c6a49..d6edc0952 100644 --- a/lib/syskit/network_generation/engine.rb +++ b/lib/syskit/network_generation/engine.rb @@ -704,9 +704,9 @@ def compute_system_network( garbage_collect: true, validate_abstract_network: true, validate_generated_network: true, - early_deploy: false default_deployment_group: Syskit.conf.deployment_group, - validate_deployed_network: (true if Syskit.conf.early_deploy?) 
+ validate_deployed_network: (true if Syskit.conf.early_deploy?), + early_deploy: Syskit.conf.early_deploy? ) requirement_tasks = requirement_tasks.to_a instance_requirements = requirement_tasks.map(&:requirements) @@ -716,7 +716,7 @@ def compute_system_network( event_logger: event_logger, merge_solver: merge_solver, default_deployment_group: default_deployment_group, - early_deploy: early_deploy, + early_deploy: early_deploy ) toplevel_tasks = system_network_generator.generate( instance_requirements, diff --git a/lib/syskit/test/network_manipulation.rb b/lib/syskit/test/network_manipulation.rb index caf9814c1..c45743a7d 100644 --- a/lib/syskit/test/network_manipulation.rb +++ b/lib/syskit/test/network_manipulation.rb @@ -162,7 +162,8 @@ def syskit_generate_network(*to_instanciate, add_missions: true) engine = NetworkGeneration::Engine.new(plan, work_plan: trsc) mapping = engine.compute_system_network( tasks_to_instanciate.map(&:planning_task), - validate_generated_network: false + validate_generated_network: false, + early_deploy: false ) trsc.commit_transaction mapping diff --git a/test/network_generation/test_engine.rb b/test/network_generation/test_engine.rb index ef9ff6ada..7e0ce1c08 100644 --- a/test/network_generation/test_engine.rb +++ b/test/network_generation/test_engine.rb @@ -104,7 +104,11 @@ def work_plan it "saves the mapping from requirement task in real_plan to instanciated task in work_plan" do flexmock(requirements).should_receive(:instanciate) .and_return(instanciated_task = simple_component_model.new) - mapping = syskit_engine.compute_system_network([planning_task]) + syskit_stub_configured_deployment(simple_component_model) + mapping = syskit_engine.compute_system_network( + [planning_task], + default_deployment_group: default_deployment_group + ) assert_equal instanciated_task, mapping[planning_task] end end From d726d8eca90702f48df30760d6b3d2f684e4e502 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Fri, 7 Feb 2025 16:45:51 -0300 
Subject: [PATCH 117/158] chore: simplify logic --- lib/syskit/network_generation/merge_solver.rb | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/syskit/network_generation/merge_solver.rb b/lib/syskit/network_generation/merge_solver.rb index 66036ca07..c435d361e 100644 --- a/lib/syskit/network_generation/merge_solver.rb +++ b/lib/syskit/network_generation/merge_solver.rb @@ -243,9 +243,7 @@ def may_merge_task_contexts?(merged_task, task) def mergeable_agents?(merged_task, task) unless merge_task_contexts_with_same_agent? - return true unless task.execution_agent && merged_task.execution_agent - - return false + return !(task.execution_agent && merged_task.execution_agent) end return false unless task.execution_agent && merged_task.execution_agent From 415fa600e7608ac556ac79fc295384d02dc4b8b2 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Tue, 11 Feb 2025 08:25:45 -0300 Subject: [PATCH 118/158] refactor: move common code to helper file move network generation exception to helpers --- lib/syskit.rb | 1 + lib/syskit/base.rb | 1 + lib/syskit/exceptions.rb | 29 +++------------ .../network_generation_exception_helpers.rb | 35 +++++++++++++++++++ test/test_exceptions.rb | 6 ++-- 5 files changed, 46 insertions(+), 26 deletions(-) create mode 100644 lib/syskit/network_generation_exception_helpers.rb diff --git a/lib/syskit.rb b/lib/syskit.rb index dc047072d..5bc5941c0 100644 --- a/lib/syskit.rb +++ b/lib/syskit.rb @@ -127,6 +127,7 @@ module ProcessManagers require "syskit/actual_data_flow_graph" require "syskit/data_flow" require "syskit/connection_graphs" +require "syskit/network_generation_exception_helpers" require "syskit/exceptions" require "syskit/network_generation" require "syskit/runtime" diff --git a/lib/syskit/base.rb b/lib/syskit/base.rb index 64b6f3c5c..d2d6f40f8 100644 --- a/lib/syskit/base.rb +++ b/lib/syskit/base.rb @@ -2,6 +2,7 @@ require "logger" require "utilrb/logger" +require "syskit/network_generation_exception_helpers" 
require "syskit/exceptions" require "facets/string/snakecase" diff --git a/lib/syskit/exceptions.rb b/lib/syskit/exceptions.rb index 0ee851181..1922ac75f 100644 --- a/lib/syskit/exceptions.rb +++ b/lib/syskit/exceptions.rb @@ -437,6 +437,8 @@ def pretty_print(pp) end class ConflictingDeviceAllocation < SpecError + include Syskit::NetworkGenerationsExceptionHelpers + attr_reader :device, :tasks, :inputs def can_merge? @@ -447,38 +449,17 @@ def initialize(device, task0, task1, toplevel_tasks_to_requirements = {}) @device = device @tasks = [task0, task1] - solver = NetworkGeneration::MergeSolver.new(task0.plan) - @merge_result = solver.resolve_merge(task0, task1, {}) @involved_definitions = @tasks.map do |t| find_all_related_syskit_actions(t, toplevel_tasks_to_requirements) end end - def find_all_related_syskit_actions(task, toplevel_tasks_to_requirements) - result = [] - while task - result.concat(toplevel_tasks_to_requirements[task] || []) - task = task.each_parent_task.first - end - result - end - def pretty_print(pp) pp.text "device '#{device.name}' of type #{device.model} is assigned " pp.text "to two tasks that cannot be merged" - pp.breakable - @merge_result.pretty_print_failure(pp) - @involved_definitions.each_with_index do |defs, i| - next if defs.empty? 
- - pp.breakable - pp.text "Chain #{i + 1} is needed by the following definitions:" - pp.nest(2) do - defs.each do |d| - pp.breakable - pp.text d.to_s - end - end + print_failed_merge_chain(pp, *@tasks) + @tasks.zip(@involved_definitions).each do |t, defs| + print_dependent_definitions(pp, t, defs) end end end diff --git a/lib/syskit/network_generation_exception_helpers.rb b/lib/syskit/network_generation_exception_helpers.rb new file mode 100644 index 000000000..969cece5a --- /dev/null +++ b/lib/syskit/network_generation_exception_helpers.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +module Syskit + # Common methods for network generation exception messages + module NetworkGenerationsExceptionHelpers + def find_all_related_syskit_actions(task, toplevel_tasks_to_requirements) + result = [] + while task + result.concat(toplevel_tasks_to_requirements[task] || []) + task = task.each_parent_task.first + end + result + end + + def print_dependent_definitions(pp, task, defs) + return if defs.empty? 
+ + pp.breakable + pp.text "#{task} is needed by the following definitions:" + pp.nest(2) do + defs.each do |d| + pp.breakable + pp.text d.to_s + end + end + end + + def print_failed_merge_chain(pp, task0, task1) + solver = NetworkGeneration::MergeSolver.new(task0.plan) + @merge_result = solver.resolve_merge(task0, task1, {}) + pp.breakable + @merge_result.pretty_print_failure(pp) + end + end +end diff --git a/test/test_exceptions.rb b/test/test_exceptions.rb index af3b71b08..105659957 100644 --- a/test/test_exceptions.rb +++ b/test/test_exceptions.rb @@ -95,9 +95,11 @@ module Syskit arg: 1, conf: ["default"], read_only: false - Chain 1 is needed by the following definitions: + T(arg: 2, conf: ["default"], read_only: false, \ + test_dev: device(D, as: test)) is needed by the following definitions: Test.test2_def - Chain 2 is needed by the following definitions: + T(arg: 1, conf: ["default"], read_only: false, \ + test_dev: device(D, as: test)) is needed by the following definitions: Test.test1_def PP assert_equal expected, formatted.gsub(//, "").chomp From c400fa24a51bae6cd7d9cd86694c6f18934f848b Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Tue, 11 Feb 2025 08:27:43 -0300 Subject: [PATCH 119/158] chore: enhance ConflictingDeploymentAllocation exception now it gives global context information --- lib/syskit/exceptions.rb | 36 ++++----- .../system_network_generator.rb | 10 ++- test/test_exceptions.rb | 78 +++++++++++++++++++ 3 files changed, 103 insertions(+), 21 deletions(-) diff --git a/lib/syskit/exceptions.rb b/lib/syskit/exceptions.rb index 1922ac75f..53a5aabe8 100644 --- a/lib/syskit/exceptions.rb +++ b/lib/syskit/exceptions.rb @@ -465,34 +465,34 @@ def pretty_print(pp) end class ConflictingDeploymentAllocation < SpecError - attr_reader :deployment_to_tasks + include Syskit::NetworkGenerationsExceptionHelpers + + attr_reader :deployment_to_tasks, :toplevel_tasks_to_requirements - def initialize(deployment_to_tasks) + def 
initialize(deployment_to_tasks, toplevel_tasks_to_requirements = {}) @deployment_to_tasks = deployment_to_tasks + @toplevel_tasks_to_requirements = toplevel_tasks_to_requirements end def pretty_print(pp) - pp.text "cannot deploy the following tasks" deployment_to_tasks.each do |deployed_task, tasks| - tasks.each do |task| - pp.nest(2) do - pp.breakable - pp.text "#{task} (#{task.orogen_model.name})" - end - end - pp.breakable - pp.text "because the same " process_server_name = deployed_task.configured_deployment .process_server_name - orogen_model = deployed_task.configured_deployment - .orogen_model + orogen_model = deployed_task.configured_deployment.orogen_model pp.text( - "deployed task #{deployed_task.mapped_task_name} from deployment " \ - "#{orogen_model.name} is defined in " \ - "#{orogen_model.project.name} on #{process_server_name}" + "deployed task '#{deployed_task.mapped_task_name}' from deployment " \ + "'#{orogen_model.name}' defined in " \ + "'#{orogen_model.project.name}' on '#{process_server_name}' is " \ + "assigned to multiple tasks. 
Here follows one merge failure " \ + "(it can have more):" ) - - pp.text " is allocated for them" + print_failed_merge_chain(pp, tasks[0], tasks[1]) + tasks.each do |t| + defs = find_all_related_syskit_actions( + t, toplevel_tasks_to_requirements + ) + print_dependent_definitions(pp, t, defs) + end end end end diff --git a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index df0995808..6b96ead12 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ b/lib/syskit/network_generation/system_network_generator.rb @@ -386,7 +386,11 @@ def self.verify_device_allocation(plan, toplevel_tasks_to_requirements = {}) end end - def self.verify_all_deployments_are_unique(plan, default_deployment_group) + def self.verify_all_deployments_are_unique( + plan, + default_deployment_group, + toplevel_tasks_to_requirements + ) deployment_to_task_map = {} plan.find_local_tasks(Syskit::TaskContext).each do |t| deployment_to_task_map[t.orocos_name] = @@ -412,7 +416,7 @@ def self.verify_all_deployments_are_unique(plan, default_deployment_group) end raise ConflictingDeploymentAllocation.new( - deployment_to_task + deployment_to_task, toplevel_tasks_to_requirements ), "there are deployments used multiple times" end @@ -435,7 +439,7 @@ def validate_generated_network def validate_deployed_network self.class.verify_all_tasks_deployed(plan, default_deployment_group) self.class.verify_all_deployments_are_unique( - plan, default_deployment_group + plan, default_deployment_group, toplevel_tasks_to_requirements.dup ) super if defined? 
super end diff --git a/test/test_exceptions.rb b/test/test_exceptions.rb index 105659957..01393d9d2 100644 --- a/test/test_exceptions.rb +++ b/test/test_exceptions.rb @@ -168,4 +168,82 @@ module Syskit assert_equal expected, formatted.gsub(//, "").chomp end end + + describe ConflictingDeploymentAllocation do + # This exception appears only in early_deploy context + + attr_reader :net_gen, :profile + + before do + Roby.app.using_task_library "orogen_syskit_tests" + + task_m = OroGen.orogen_syskit_tests.Empty + cmp_m = Syskit::Composition.new_submodel + cmp_m.add task_m, as: "task" + + @net_gen = NetworkGeneration::SystemNetworkGenerator.new( + @net_gen_plan = Roby::Plan.new, + default_deployment_group: default_deployment_group, + early_deploy: true + ) + @net_gen.default_deployment_group.use_deployment( + OroGen::Deployments.syskit_tests_empty => "test_" + ) + @net_gen.merge_solver.merge_task_contexts_with_same_agent = true + + @profile = Actions::Profile.new("Test") + @profile.define("test1", cmp_m) + .use("task" => task_m.with_arguments(arg: 1)) + @profile.define("test2", cmp_m) + .use("task" => task_m.with_arguments(arg: 2)) + + @old_early_deply = Syskit.conf.early_deploy? + Syskit.conf.early_deploy = true + end + + after do + Syskit.conf.early_deploy = @old_early_deply + end + + it "displays deployment allocation conflicts, depicts one failed merge chain " \ + "and list non deployed toplevel definitions" do + e = assert_raises(ConflictingDeploymentAllocation) do + net_gen.compute_system_network( + [profile.test1_def, profile.test2_def] + ) + end + formatted = PP.pp(e, +"") + + expected = <<~PP.chomp + deployed task 'test_syskit_tests_empty' from deployment \ + 'syskit_tests_empty' defined in 'orogen_syskit_tests' on 'localhost' is \ + assigned to multiple tasks. 
Here follows one merge failure \ + (it can have more): + Chain 1 cannot be merged in chain 2: + Chain 1: + OroGen.orogen_syskit_tests.Empty + no owners + arguments: + orocos_name: "test_syskit_tests_empty", + read_only: false, + conf: ["default"], + arg: 1 + Chain 2: + OroGen.orogen_syskit_tests.Empty + no owners + arguments: + orocos_name: "test_syskit_tests_empty", + read_only: false, + conf: ["default"], + arg: 2 + OroGen.orogen_syskit_tests.Empty(arg: 1, conf: ["default"], \ + orocos_name: test_syskit_tests_empty, read_only: false) is needed by the following definitions: + Test.test1_def + OroGen.orogen_syskit_tests.Empty(arg: 2, conf: ["default"], \ + orocos_name: test_syskit_tests_empty, read_only: false) is needed by the following definitions: + Test.test2_def + PP + assert_equal expected, formatted.gsub(//, "").chomp + end + end end From a78164778c12204bc13b4bfbcf923ec4602af386 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Tue, 11 Feb 2025 08:29:19 -0300 Subject: [PATCH 120/158] chore: remove useless cop disable --- lib/syskit/network_generation/system_network_generator.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index 6b96ead12..c02a8dabd 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ b/lib/syskit/network_generation/system_network_generator.rb @@ -21,7 +21,7 @@ def early_deploy? 
@early_deploy end - def initialize(plan, # rubocop:disable Metrics/ParameterLists + def initialize(plan, event_logger: plan.event_logger, merge_solver: MergeSolver.new(plan), default_deployment_group: nil, From 628a4c6ce0270e5635ab7575d2408c26e6784f0e Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Thu, 13 Feb 2025 10:13:48 -0300 Subject: [PATCH 121/158] chore: change ConflictingDeploymentAllocation error message --- lib/syskit/exceptions.rb | 20 ++++++++-------- .../system_network_generator.rb | 24 ++++--------------- test/test_exceptions.rb | 17 ++++++------- 3 files changed, 23 insertions(+), 38 deletions(-) diff --git a/lib/syskit/exceptions.rb b/lib/syskit/exceptions.rb index 53a5aabe8..8b8186db6 100644 --- a/lib/syskit/exceptions.rb +++ b/lib/syskit/exceptions.rb @@ -475,24 +475,24 @@ def initialize(deployment_to_tasks, toplevel_tasks_to_requirements = {}) end def pretty_print(pp) - deployment_to_tasks.each do |deployed_task, tasks| - process_server_name = deployed_task.configured_deployment - .process_server_name - orogen_model = deployed_task.configured_deployment.orogen_model + deployment_to_tasks.each do |orocos_name, tasks| + agent = tasks.first.execution_agent + deployment_m = agent.deployed_orogen_model_by_name(orocos_name) pp.text( - "deployed task '#{deployed_task.mapped_task_name}' from deployment " \ - "'#{orogen_model.name}' defined in " \ - "'#{orogen_model.project.name}' on '#{process_server_name}' is " \ - "assigned to multiple tasks. Here follows one merge failure " \ - "(it can have more):" + "deployed task '#{orocos_name}' from deployment " \ + "'#{deployment_m.name}' defined in " \ + "'#{deployment_m.project.name}' on '#{agent.process_server_name}' " \ + "is assigned to #{tasks.size} tasks. Bellow is the list of " \ + "the dependent non-deployed actions. 
Right after the list is " \ + "is a detailed explanation of why the first two tasks are not merged:" ) - print_failed_merge_chain(pp, tasks[0], tasks[1]) tasks.each do |t| defs = find_all_related_syskit_actions( t, toplevel_tasks_to_requirements ) print_dependent_definitions(pp, t, defs) end + print_failed_merge_chain(pp, tasks[0], tasks[1]) end end end diff --git a/lib/syskit/network_generation/system_network_generator.rb b/lib/syskit/network_generation/system_network_generator.rb index c02a8dabd..9d701c295 100644 --- a/lib/syskit/network_generation/system_network_generator.rb +++ b/lib/syskit/network_generation/system_network_generator.rb @@ -388,14 +388,10 @@ def self.verify_device_allocation(plan, toplevel_tasks_to_requirements = {}) def self.verify_all_deployments_are_unique( plan, - default_deployment_group, toplevel_tasks_to_requirements ) - deployment_to_task_map = {} - plan.find_local_tasks(Syskit::TaskContext).each do |t| - deployment_to_task_map[t.orocos_name] = - (deployment_to_task_map[t.orocos_name] || []) + [t] - end + deployment_to_task_map = plan.find_local_tasks(Syskit::TaskContext) + .group_by(&:orocos_name) using_same_deployment = deployment_to_task_map.select do |_, tasks| tasks.size > 1 @@ -403,20 +399,8 @@ def self.verify_all_deployments_are_unique( return if using_same_deployment.empty? 
- deployment_to_task = using_same_deployment - .each_with_object({}) do |(orocos_name, tasks), h| - deployed_tasks = default_deployment_group - .find_all_suitable_deployments_for(tasks.first) - - deployed_task = deployed_tasks.select do |d| - d.mapped_task_name == orocos_name - end - - h[deployed_task.first] = tasks - end - raise ConflictingDeploymentAllocation.new( - deployment_to_task, toplevel_tasks_to_requirements + using_same_deployment, toplevel_tasks_to_requirements ), "there are deployments used multiple times" end @@ -439,7 +423,7 @@ def validate_generated_network def validate_deployed_network self.class.verify_all_tasks_deployed(plan, default_deployment_group) self.class.verify_all_deployments_are_unique( - plan, default_deployment_group, toplevel_tasks_to_requirements.dup + plan, toplevel_tasks_to_requirements.dup ) super if defined? super end diff --git a/test/test_exceptions.rb b/test/test_exceptions.rb index 01393d9d2..ff5896207 100644 --- a/test/test_exceptions.rb +++ b/test/test_exceptions.rb @@ -217,8 +217,15 @@ module Syskit expected = <<~PP.chomp deployed task 'test_syskit_tests_empty' from deployment \ 'syskit_tests_empty' defined in 'orogen_syskit_tests' on 'localhost' is \ - assigned to multiple tasks. Here follows one merge failure \ - (it can have more): + assigned to 2 tasks. Bellow is the list of \ + the dependent non-deployed actions. 
Right after the list is \ + is a detailed explanation of why the first two tasks are not merged: + OroGen.orogen_syskit_tests.Empty(arg: 1, conf: ["default"], \ + orocos_name: test_syskit_tests_empty, read_only: false) is needed by the following definitions: + Test.test1_def + OroGen.orogen_syskit_tests.Empty(arg: 2, conf: ["default"], \ + orocos_name: test_syskit_tests_empty, read_only: false) is needed by the following definitions: + Test.test2_def Chain 1 cannot be merged in chain 2: Chain 1: OroGen.orogen_syskit_tests.Empty @@ -236,12 +243,6 @@ module Syskit read_only: false, conf: ["default"], arg: 2 - OroGen.orogen_syskit_tests.Empty(arg: 1, conf: ["default"], \ - orocos_name: test_syskit_tests_empty, read_only: false) is needed by the following definitions: - Test.test1_def - OroGen.orogen_syskit_tests.Empty(arg: 2, conf: ["default"], \ - orocos_name: test_syskit_tests_empty, read_only: false) is needed by the following definitions: - Test.test2_def PP assert_equal expected, formatted.gsub(//, "").chomp end From ff8569b36b948bff8d404e7e3f68e1698d222e7b Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Thu, 13 Feb 2025 10:20:25 -0300 Subject: [PATCH 122/158] style: use refute instead of assert ! 
--- test/network_generation/test_merge_solver.rb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/network_generation/test_merge_solver.rb b/test/network_generation/test_merge_solver.rb index 1362b3d4f..3a0b43742 100644 --- a/test/network_generation/test_merge_solver.rb +++ b/test/network_generation/test_merge_solver.rb @@ -56,8 +56,8 @@ flexmock(t1).should_receive(:execution_agent).and_return(true) t1.should_receive(:can_merge?).with(t2).and_return(true).once end - assert !solver.may_merge_task_contexts?(task1, task2) - assert !solver.may_merge_task_contexts?(task2, task1) + refute solver.may_merge_task_contexts?(task1, task2) + refute solver.may_merge_task_contexts?(task2, task1) end it "returns false for tasks that do not have execution agents when " \ "merge_when_identical_agents is true" do @@ -72,8 +72,8 @@ local_solver = Syskit::NetworkGeneration::MergeSolver.new(plan) local_solver.merge_task_contexts_with_same_agent = true - assert !local_solver.may_merge_task_contexts?(task1, task2) - assert !local_solver.may_merge_task_contexts?(task2, task1) + refute local_solver.may_merge_task_contexts?(task1, task2) + refute local_solver.may_merge_task_contexts?(task2, task1) end end From 33295725d19f1ce044e27f7aafa7472a2784792b Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 6 Feb 2025 08:49:27 -0300 Subject: [PATCH 123/158] feat: use recommend_init flag to apply init policy --- lib/syskit/models/port.rb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/syskit/models/port.rb b/lib/syskit/models/port.rb index 959907074..8cd836939 100644 --- a/lib/syskit/models/port.rb +++ b/lib/syskit/models/port.rb @@ -105,6 +105,9 @@ def to_component_port # @raise [SelfConnection] def connect_to(in_port, policy = {}) out_port = to_component_port + if out_port.respond_to?(:recommend_init) + policy[:init] = out_port.recommend_init + end if out_port == self if in_port.respond_to?(:to_component_port) in_port = in_port.to_component_port From 
9e043a9e29add525bba04934fadc1c304043fd05 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 6 Feb 2025 08:50:43 -0300 Subject: [PATCH 124/158] chore: fix rubocop offense by moving code fix Metrics/CyclomaticComplexity offense by moving the connection validation code to a separate function --- lib/syskit/models/port.rb | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/lib/syskit/models/port.rb b/lib/syskit/models/port.rb index 8cd836939..c624faec5 100644 --- a/lib/syskit/models/port.rb +++ b/lib/syskit/models/port.rb @@ -111,16 +111,7 @@ def connect_to(in_port, policy = {}) if out_port == self if in_port.respond_to?(:to_component_port) in_port = in_port.to_component_port - if !out_port.output? - raise WrongPortConnectionDirection.new(self, in_port), "cannot connect #{out_port} to #{in_port}: #{out_port} is not an output port" - elsif !in_port.input? - raise WrongPortConnectionDirection.new(self, in_port), "cannot connect #{out_port} to #{in_port}: #{in_port} is not an input port" - elsif out_port.component_model == in_port.component_model - raise SelfConnection.new(out_port, in_port), "cannot connect #{out_port} to #{in_port}: they are both ports of the same component" - elsif out_port.type != in_port.type - raise WrongPortConnectionTypes.new(self, in_port), "cannot connect #{out_port} to #{in_port}: types mismatch" - end - + validate_connection!(out_port, in_port) component_model.connect_ports(in_port.component_model, [out_port.name, in_port.name] => policy) else Syskit.connect self, in_port, policy @@ -131,6 +122,28 @@ def connect_to(in_port, policy = {}) end end + def validate_connection!(out_port, in_port) + unless out_port.output? + raise WrongPortConnectionDirection.new(self, in_port), + "cannot connect #{out_port} to #{in_port}: " \ + "#{out_port} is not an output port" + end + unless in_port.input? 
+ raise WrongPortConnectionDirection.new(self, in_port), + "cannot connect #{out_port} to #{in_port}: " \ + "#{in_port} is not an input port" + end + if out_port.component_model == in_port.component_model + raise SelfConnection.new(out_port, in_port), + "cannot connect #{out_port} to #{in_port}: " \ + "they are both ports of the same component" + end + unless out_port.type == in_port.type + raise WrongPortConnectionTypes.new(self, in_port), + "cannot connect #{out_port} to #{in_port}: types mismatch" + end + end + # Tests whether self is connected to the provided port def connected_to?(sink_port) source_port = try_to_component_port From af7d128bf0e44dc92c6c5c0b023f90389fa4f14d Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 6 Feb 2025 08:54:10 -0300 Subject: [PATCH 125/158] test: add tests for init policy --- test/models/test_port.rb | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/test/models/test_port.rb b/test/models/test_port.rb index 7aeed89df..ded906242 100644 --- a/test/models/test_port.rb +++ b/test/models/test_port.rb @@ -72,6 +72,23 @@ out_task_m.out_port.connect_to in_task_m.in_port end end + it "adds 'init: true' as a default policy" do + policy = {} + flexmock(out_task_m).should_receive(:connect_ports).explicitly.once + .with(in_task_m, %w[out in] => policy.merge(init: true)) + out_task_m.out_port.connect_to in_task_m.in_port, policy + assert policy[:init] + end + it "adds 'init: false' if recommend_init flag is set to false" do + out_port_m = Syskit::Models::Port.new(out_task_m, out_task_m.orogen_model.find_port("out")) + out_port_m.orogen_model.recommend_init = false + refute out_port_m.recommend_init + policy = {} + flexmock(out_task_m).should_receive(:connect_ports).explicitly + .with(in_task_m, %w[out in] => policy.merge(init: false)) + out_task_m.out_port.connect_to in_task_m.in_port, policy + refute policy[:init] + end end describe "#can_connect_to?" 
do From 329b77f8ca1498382d168a551059335090d6aeb5 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Fri, 7 Feb 2025 16:26:54 -0300 Subject: [PATCH 126/158] fix/chore: policy for data readers and exported ports and rewrite code in port model --- lib/syskit/dynamic_port_binding.rb | 4 ++++ lib/syskit/models/port.rb | 9 +++++---- lib/syskit/network_generation/dataflow_dynamics.rb | 5 +++++ 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/lib/syskit/dynamic_port_binding.rb b/lib/syskit/dynamic_port_binding.rb index b503a28d9..c63932a7b 100644 --- a/lib/syskit/dynamic_port_binding.rb +++ b/lib/syskit/dynamic_port_binding.rb @@ -199,6 +199,10 @@ def initialize( # Method called by {Accessor} to create the accessor object from a # port def create_accessor(port) + if port.respond_to?(:init_policy) && + [true, false].include?(port.init_policy) + policy.merge(init: port.init_policy) + end port.reader(**policy) end diff --git a/lib/syskit/models/port.rb b/lib/syskit/models/port.rb index c624faec5..8b360c12e 100644 --- a/lib/syskit/models/port.rb +++ b/lib/syskit/models/port.rb @@ -105,13 +105,14 @@ def to_component_port # @raise [SelfConnection] def connect_to(in_port, policy = {}) out_port = to_component_port - if out_port.respond_to?(:recommend_init) - policy[:init] = out_port.recommend_init + if out_port.respond_to?(:init_policy) && + [true, false].include?(out_port.init_policy) + policy = policy.merge(init: out_port.init_policy) end if out_port == self if in_port.respond_to?(:to_component_port) in_port = in_port.to_component_port - validate_connection!(out_port, in_port) + validate_connection(out_port, in_port) component_model.connect_ports(in_port.component_model, [out_port.name, in_port.name] => policy) else Syskit.connect self, in_port, policy @@ -122,7 +123,7 @@ def connect_to(in_port, policy = {}) end end - def validate_connection!(out_port, in_port) + def validate_connection(out_port, in_port) unless out_port.output? 
raise WrongPortConnectionDirection.new(self, in_port), "cannot connect #{out_port} to #{in_port}: " \ diff --git a/lib/syskit/network_generation/dataflow_dynamics.rb b/lib/syskit/network_generation/dataflow_dynamics.rb index 9ce1cfa88..ee24431c1 100644 --- a/lib/syskit/network_generation/dataflow_dynamics.rb +++ b/lib/syskit/network_generation/dataflow_dynamics.rb @@ -601,6 +601,11 @@ def policy_for( "#{sink_task}:#{sink_port.name}" end + source_port_m = source_port.model + if [true, false].include?(source_port_m.init_policy) + policy = policy.merge(init: source_port_m.init_policy) + end + sink_port_m = sink_port.model if sink_port_m.needs_reliable_connection? compute_reliable_connection_policy( From 7214e0bd0ea51f7d179cdc9cf95dbec4638cfab3 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Fri, 7 Feb 2025 16:27:24 -0300 Subject: [PATCH 127/158] test: add tests and update existing ones --- test/models/test_port.rb | 23 +++++++---- .../test_dataflow_dynamics.rb | 40 +++++++++++++++++++ test/test_dynamic_port_binding.rb | 39 ++++++++++++++++++ 3 files changed, 94 insertions(+), 8 deletions(-) diff --git a/test/models/test_port.rb b/test/models/test_port.rb index ded906242..9656c84b2 100644 --- a/test/models/test_port.rb +++ b/test/models/test_port.rb @@ -72,22 +72,29 @@ out_task_m.out_port.connect_to in_task_m.in_port end end - it "adds 'init: true' as a default policy" do + it "makes sure init policy is not set without calling recommend_init" do policy = {} flexmock(out_task_m).should_receive(:connect_ports).explicitly.once - .with(in_task_m, %w[out in] => policy.merge(init: true)) + .with(in_task_m, %w[out in] => policy) + out_task_m.out_port.connect_to in_task_m.in_port, policy + end + it "adds 'init: true' policy if recommend_init was called" do + out_port_m = Syskit::Models::Port.new(out_task_m, out_task_m.orogen_model.find_port("out")) + out_port_m.orogen_model.recommend_init + assert out_port_m.init_policy + policy = {} + 
flexmock(out_task_m).should_receive(:connect_ports).explicitly + .with(in_task_m, %w[out in] => { init: true }) out_task_m.out_port.connect_to in_task_m.in_port, policy - assert policy[:init] end - it "adds 'init: false' if recommend_init flag is set to false" do + it "adds 'init: false' policy if recommend_init(init: false) was called" do out_port_m = Syskit::Models::Port.new(out_task_m, out_task_m.orogen_model.find_port("out")) - out_port_m.orogen_model.recommend_init = false - refute out_port_m.recommend_init + out_port_m.orogen_model.recommend_init(init: false) + refute out_port_m.init_policy policy = {} flexmock(out_task_m).should_receive(:connect_ports).explicitly - .with(in_task_m, %w[out in] => policy.merge(init: false)) + .with(in_task_m, %w[out in] => { init: false }) out_task_m.out_port.connect_to in_task_m.in_port, policy - refute policy[:init] end end diff --git a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index 5dfd15e8e..ddf47f11f 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -240,6 +240,46 @@ module NetworkGeneration policy_graph[[task0, task1]][%w[out in]]) end + it "adds init: true policy if available and saves it " \ + "in the graph's policy_graph" do + plan.add(task0 = @task_m.new) + plan.add(task1 = @task_m.new) + + add_agents(tasks = [task0, task1]) + flexmock(@dynamics).should_receive(:propagate).with(tasks) + + task0.out_port.model.recommend_init + task0.out_port.connect_to(task1.in_port) + + @dynamics.should_receive(:policy_for) + .with(task0, "out", "in", task1, nil) + .and_return(type: :buffer, size: 42, init: true) + policy_graph = @dynamics.compute_connection_policies + + assert_equal({ type: :buffer, size: 42, init: true }, + policy_graph[[task0, task1]][%w[out in]]) + end + + it "adds init: false policy if available and saves it " \ + "in the graph's policy_graph" do + plan.add(task0 = @task_m.new) + 
plan.add(task1 = @task_m.new) + + add_agents(tasks = [task0, task1]) + flexmock(@dynamics).should_receive(:propagate).with(tasks) + + task0.out_port.model.recommend_init(init: false) + task0.out_port.connect_to(task1.in_port) + + @dynamics.should_receive(:policy_for) + .with(task0, "out", "in", task1, nil) + .and_return(type: :buffer, size: 42, init: false) + policy_graph = @dynamics.compute_connection_policies + + assert_equal({ type: :buffer, size: 42, init: false }, + policy_graph[[task0, task1]][%w[out in]]) + end + it "computes the policies on the concrete connections" do plan.add(task = @task_m.new) cmp = @cmp_m.instanciate(plan) diff --git a/test/test_dynamic_port_binding.rb b/test/test_dynamic_port_binding.rb index 759225df9..172526bb7 100644 --- a/test/test_dynamic_port_binding.rb +++ b/test/test_dynamic_port_binding.rb @@ -382,6 +382,45 @@ module Syskit @task = syskit_stub_deploy_configure_and_start(@task_m) end + describe "policy" do + attr_reader :task, :port_binding + + before do + @port_binding = flexmock + @accessor = DynamicPortBinding::Accessor.new(@port_binding) + flexmock(@accessor) + .should_receive(:create_accessor) + .explicitly + .with(@task.out_port).and_return { @task.out_port.reader } + end + + it "expects no policy if recommend_init is not called" do + flexmock(@task.out_port) + .should_receive(:reader) + .and_return({}) + + @accessor.create_accessor(@task.out_port) + end + + it "expects init: true policy if recommend_init is called" do + @task.out_port.model.recommend_init + flexmock(@task.out_port) + .should_receive(:reader) + .and_return({ init: true }) + + @accessor.create_accessor(@task.out_port) + end + + it "expects init: false policy if recommend_init(init: false) is called" do + @task.out_port.model.recommend_init(init: false) + flexmock(@task.out_port) + .should_receive(:reader) + .and_return({ init: false }) + + @accessor.create_accessor(@task.out_port) + end + end + describe "#update" do attr_reader :task, :port_binding From 
0dd9f6cb5999af7c8cba09b68aebf8f62e3ce5d5 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Mon, 10 Feb 2025 10:30:56 -0300 Subject: [PATCH 128/158] chore: rewrite tests to call init_policy --- test/network_generation/test_dataflow_dynamics.rb | 4 ++-- test/test_dynamic_port_binding.rb | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index ddf47f11f..cd350f350 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -248,7 +248,7 @@ module NetworkGeneration add_agents(tasks = [task0, task1]) flexmock(@dynamics).should_receive(:propagate).with(tasks) - task0.out_port.model.recommend_init + task0.out_port.model.init_policy(true) task0.out_port.connect_to(task1.in_port) @dynamics.should_receive(:policy_for) @@ -268,7 +268,7 @@ module NetworkGeneration add_agents(tasks = [task0, task1]) flexmock(@dynamics).should_receive(:propagate).with(tasks) - task0.out_port.model.recommend_init(init: false) + task0.out_port.model.init_policy(false) task0.out_port.connect_to(task1.in_port) @dynamics.should_receive(:policy_for) diff --git a/test/test_dynamic_port_binding.rb b/test/test_dynamic_port_binding.rb index 172526bb7..6f99ea117 100644 --- a/test/test_dynamic_port_binding.rb +++ b/test/test_dynamic_port_binding.rb @@ -394,7 +394,7 @@ module Syskit .with(@task.out_port).and_return { @task.out_port.reader } end - it "expects no policy if recommend_init is not called" do + it "expects no policy if init_policy is not called" do flexmock(@task.out_port) .should_receive(:reader) .and_return({}) @@ -402,8 +402,8 @@ module Syskit @accessor.create_accessor(@task.out_port) end - it "expects init: true policy if recommend_init is called" do - @task.out_port.model.recommend_init + it "expects init: true policy if init_policy(true) is called" do + @task.out_port.model.init_policy(true) 
flexmock(@task.out_port) .should_receive(:reader) .and_return({ init: true }) @@ -411,8 +411,8 @@ module Syskit @accessor.create_accessor(@task.out_port) end - it "expects init: false policy if recommend_init(init: false) is called" do - @task.out_port.model.recommend_init(init: false) + it "expects init: false policy if init_policy(false) is called" do + @task.out_port.model.init_policy(false) flexmock(@task.out_port) .should_receive(:reader) .and_return({ init: false }) From ae5f29f54dc08babe1b7ec428bf91a3590412f6d Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 13 Feb 2025 10:47:20 -0300 Subject: [PATCH 129/158] chore: remove unneccessary code in models/port and tests --- lib/syskit/models/port.rb | 4 ---- test/models/test_port.rb | 24 ------------------------ 2 files changed, 28 deletions(-) diff --git a/lib/syskit/models/port.rb b/lib/syskit/models/port.rb index 8b360c12e..ceaf1c158 100644 --- a/lib/syskit/models/port.rb +++ b/lib/syskit/models/port.rb @@ -105,10 +105,6 @@ def to_component_port # @raise [SelfConnection] def connect_to(in_port, policy = {}) out_port = to_component_port - if out_port.respond_to?(:init_policy) && - [true, false].include?(out_port.init_policy) - policy = policy.merge(init: out_port.init_policy) - end if out_port == self if in_port.respond_to?(:to_component_port) in_port = in_port.to_component_port diff --git a/test/models/test_port.rb b/test/models/test_port.rb index 9656c84b2..7aeed89df 100644 --- a/test/models/test_port.rb +++ b/test/models/test_port.rb @@ -72,30 +72,6 @@ out_task_m.out_port.connect_to in_task_m.in_port end end - it "makes sure init policy is not set without calling recommend_init" do - policy = {} - flexmock(out_task_m).should_receive(:connect_ports).explicitly.once - .with(in_task_m, %w[out in] => policy) - out_task_m.out_port.connect_to in_task_m.in_port, policy - end - it "adds 'init: true' policy if recommend_init was called" do - out_port_m = Syskit::Models::Port.new(out_task_m, 
out_task_m.orogen_model.find_port("out")) - out_port_m.orogen_model.recommend_init - assert out_port_m.init_policy - policy = {} - flexmock(out_task_m).should_receive(:connect_ports).explicitly - .with(in_task_m, %w[out in] => { init: true }) - out_task_m.out_port.connect_to in_task_m.in_port, policy - end - it "adds 'init: false' policy if recommend_init(init: false) was called" do - out_port_m = Syskit::Models::Port.new(out_task_m, out_task_m.orogen_model.find_port("out")) - out_port_m.orogen_model.recommend_init(init: false) - refute out_port_m.init_policy - policy = {} - flexmock(out_task_m).should_receive(:connect_ports).explicitly - .with(in_task_m, %w[out in] => { init: false }) - out_task_m.out_port.connect_to in_task_m.in_port, policy - end end describe "#can_connect_to?" do From 7ab7ee841ed309f8d7185894e2e4a1ef402e657b Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 13 Feb 2025 11:32:09 -0300 Subject: [PATCH 130/158] fix: do not override existing init flag Use init_policy from the port as a default for the policy Do not override the existing init flag in the policy `policy.fetch(:init, port.model.init_policy)` to set the init value if not already provided in the policy --- lib/syskit/dynamic_port_binding.rb | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/syskit/dynamic_port_binding.rb b/lib/syskit/dynamic_port_binding.rb index c63932a7b..f6430ce6a 100644 --- a/lib/syskit/dynamic_port_binding.rb +++ b/lib/syskit/dynamic_port_binding.rb @@ -199,10 +199,7 @@ def initialize( # Method called by {Accessor} to create the accessor object from a # port def create_accessor(port) - if port.respond_to?(:init_policy) && - [true, false].include?(port.init_policy) - policy.merge(init: port.init_policy) - end + policy.merge(init: policy.fetch(:init, port.model.init_policy)) port.reader(**policy) end From 945bf5894cca19725af290741e9043a2f7ea8e68 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 13 Feb 2025 12:24:51 -0300 Subject: 
[PATCH 131/158] fix/test: allow policy merge and add tests for this --- .../network_generation/dataflow_dynamics.rb | 12 +++---- .../test_dataflow_dynamics.rb | 36 ++++++++++++++++++- 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/lib/syskit/network_generation/dataflow_dynamics.rb b/lib/syskit/network_generation/dataflow_dynamics.rb index ee24431c1..52f8d08ca 100644 --- a/lib/syskit/network_generation/dataflow_dynamics.rb +++ b/lib/syskit/network_generation/dataflow_dynamics.rb @@ -601,30 +601,26 @@ def policy_for( "#{sink_task}:#{sink_port.name}" end - source_port_m = source_port.model - if [true, false].include?(source_port_m.init_policy) - policy = policy.merge(init: source_port_m.init_policy) - end - sink_port_m = sink_port.model if sink_port_m.needs_reliable_connection? - compute_reliable_connection_policy( + policy = compute_reliable_connection_policy( source_port, sink_port, fallback_policy ) elsif sink_port_m.required_connection_type == :data policy = Orocos::Port.prepare_policy(type: :data) DataFlowDynamics.debug { " result: #{policy}" } - policy elsif sink_port_m.required_connection_type == :buffer policy = Orocos::Port.prepare_policy(type: :buffer, size: 1) DataFlowDynamics.debug { " result: #{policy}" } - policy else raise UnsupportedConnectionType, "unknown required connection type " \ "#{sink_port_m.required_connection_type} " \ "on #{sink_port}" end + + source_port_m = source_port.model + policy.merge(init: source_port_m.init_policy) end def compute_reliable_connection_policy( diff --git a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index cd350f350..683f01cab 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -420,15 +420,49 @@ module NetworkGeneration "the sink port is marked as needs_reliable_connection" do @sink_task_m.in_port.needs_reliable_connection fallback_policy = flexmock + expected_policy = 
flexmock + + expected_policy + .should_receive(:merge) + .and_return(expected_policy) + flexmock(@dynamics) .should_receive(:compute_reliable_connection_policy) .with(@source_t.out_port, @sink_t.in_port, fallback_policy) - .once.and_return(expected_policy = flexmock) + .once.and_return(expected_policy) policy = @dynamics.policy_for( @source_t, "out", "in", @sink_t, fallback_policy ) assert_equal expected_policy, policy end + + it "merges init policy when sink requires reliable connection" do + @sink_task_m.in_port.needs_reliable_connection + @source_t.out_port.model.init_policy(true) + + fallback_policy = flexmock + flexmock(@dynamics) + .should_receive(:compute_reliable_connection_policy) + .with(@source_t.out_port, @sink_t.in_port, fallback_policy) + .once.and_return({ init: true }) + + policy = @dynamics.policy_for( + @source_t, "out", "in", @sink_t, fallback_policy + ) + + assert_equal true, policy[:init] + end + + it "merges init policy when sink requires 'buffer' connection type" do + @sink_task_m.in_port.needs_buffered_connection + @source_t.out_port.model.init_policy(true) + + policy = @dynamics.policy_for(@source_t, "out", "in", @sink_t, nil) + + assert_equal true, policy[:init] + assert_equal :buffer, policy[:type] + assert_equal 1, policy[:size] + end end describe "#compute_reliable_connection_policy" do From b8903e33850cddcb3e844a5a931e3fc557ea56b4 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Fri, 14 Feb 2025 15:33:25 -0300 Subject: [PATCH 132/158] fix: do not merge if no init policy --- lib/syskit/network_generation/dataflow_dynamics.rb | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/syskit/network_generation/dataflow_dynamics.rb b/lib/syskit/network_generation/dataflow_dynamics.rb index 52f8d08ca..318160296 100644 --- a/lib/syskit/network_generation/dataflow_dynamics.rb +++ b/lib/syskit/network_generation/dataflow_dynamics.rb @@ -620,7 +620,10 @@ def policy_for( end source_port_m = source_port.model - policy.merge(init: 
source_port_m.init_policy) + unless source_port_m.init_policy.nil? + policy.merge(init: source_port_m.init_policy) + end + policy end def compute_reliable_connection_policy( From 4769d856eff6a25dc683cbdf9c11ba725902e768 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Fri, 14 Feb 2025 16:33:54 -0300 Subject: [PATCH 133/158] Update lib/syskit/exceptions.rb Co-authored-by: jhonasiv <34279171+jhonasiv@users.noreply.github.com> --- lib/syskit/exceptions.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/syskit/exceptions.rb b/lib/syskit/exceptions.rb index 8b8186db6..dc615f5a3 100644 --- a/lib/syskit/exceptions.rb +++ b/lib/syskit/exceptions.rb @@ -482,7 +482,7 @@ def pretty_print(pp) "deployed task '#{orocos_name}' from deployment " \ "'#{deployment_m.name}' defined in " \ "'#{deployment_m.project.name}' on '#{agent.process_server_name}' " \ - "is assigned to #{tasks.size} tasks. Bellow is the list of " \ + "is assigned to #{tasks.size} tasks. Below is the list of " \ "the dependent non-deployed actions. Right after the list is " \ "is a detailed explanation of why the first two tasks are not merged:" ) From 6aa3c8d95a24cf1db7344554f6a3ca1c692d50a4 Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Fri, 14 Feb 2025 16:34:29 -0300 Subject: [PATCH 134/158] Update lib/syskit/exceptions.rb Co-authored-by: jhonasiv <34279171+jhonasiv@users.noreply.github.com> --- lib/syskit/exceptions.rb | 2 +- test/test_exceptions.rb | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/syskit/exceptions.rb b/lib/syskit/exceptions.rb index dc615f5a3..9c871a4f6 100644 --- a/lib/syskit/exceptions.rb +++ b/lib/syskit/exceptions.rb @@ -483,7 +483,7 @@ def pretty_print(pp) "'#{deployment_m.name}' defined in " \ "'#{deployment_m.project.name}' on '#{agent.process_server_name}' " \ "is assigned to #{tasks.size} tasks. Below is the list of " \ - "the dependent non-deployed actions. 
Right after the list is " \ + "the dependent non-deployed actions. Right after the list " \ "is a detailed explanation of why the first two tasks are not merged:" ) tasks.each do |t| diff --git a/test/test_exceptions.rb b/test/test_exceptions.rb index ff5896207..08f4f1d42 100644 --- a/test/test_exceptions.rb +++ b/test/test_exceptions.rb @@ -217,9 +217,9 @@ module Syskit expected = <<~PP.chomp deployed task 'test_syskit_tests_empty' from deployment \ 'syskit_tests_empty' defined in 'orogen_syskit_tests' on 'localhost' is \ - assigned to 2 tasks. Bellow is the list of \ + assigned to 2 tasks. Below is the list of \ the dependent non-deployed actions. Right after the list is \ - is a detailed explanation of why the first two tasks are not merged: + a detailed explanation of why the first two tasks are not merged: OroGen.orogen_syskit_tests.Empty(arg: 1, conf: ["default"], \ orocos_name: test_syskit_tests_empty, read_only: false) is needed by the following definitions: Test.test1_def From 9ede775b0491303e6008ab17941c8b1250f3e4fc Mon Sep 17 00:00:00 2001 From: Wellington Castro Date: Fri, 14 Feb 2025 16:30:54 -0300 Subject: [PATCH 135/158] fix: register task -> agent association at initialization The task->agent relation is registered in the plan graph and it changes between exception raise and the moment it is printed --- lib/syskit/exceptions.rb | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/syskit/exceptions.rb b/lib/syskit/exceptions.rb index 9c871a4f6..c92324fd6 100644 --- a/lib/syskit/exceptions.rb +++ b/lib/syskit/exceptions.rb @@ -467,16 +467,20 @@ def pretty_print(pp) class ConflictingDeploymentAllocation < SpecError include Syskit::NetworkGenerationsExceptionHelpers - attr_reader :deployment_to_tasks, :toplevel_tasks_to_requirements + attr_reader :deployment_to_tasks def initialize(deployment_to_tasks, toplevel_tasks_to_requirements = {}) @deployment_to_tasks = deployment_to_tasks @toplevel_tasks_to_requirements = 
toplevel_tasks_to_requirements + @deployment_to_execution_agent = \ + deployment_to_tasks.transform_values do |tasks| + tasks.first.execution_agent + end end def pretty_print(pp) deployment_to_tasks.each do |orocos_name, tasks| - agent = tasks.first.execution_agent + agent = @deployment_to_execution_agent[orocos_name] deployment_m = agent.deployed_orogen_model_by_name(orocos_name) pp.text( "deployed task '#{orocos_name}' from deployment " \ @@ -488,7 +492,7 @@ def pretty_print(pp) ) tasks.each do |t| defs = find_all_related_syskit_actions( - t, toplevel_tasks_to_requirements + t, @toplevel_tasks_to_requirements ) print_dependent_definitions(pp, t, defs) end From 5b5f65e50b6d0f5749024a2748603e35d4350f92 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Mon, 17 Feb 2025 10:47:22 -0300 Subject: [PATCH 136/158] test: fix merge test --- lib/syskit/network_generation/dataflow_dynamics.rb | 4 ++-- test/network_generation/test_dataflow_dynamics.rb | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/syskit/network_generation/dataflow_dynamics.rb b/lib/syskit/network_generation/dataflow_dynamics.rb index 318160296..b8e7746df 100644 --- a/lib/syskit/network_generation/dataflow_dynamics.rb +++ b/lib/syskit/network_generation/dataflow_dynamics.rb @@ -620,8 +620,8 @@ def policy_for( end source_port_m = source_port.model - unless source_port_m.init_policy.nil? - policy.merge(init: source_port_m.init_policy) + unless source_port_m.init_policy? 
+ policy = policy.merge(init: source_port_m.init_policy) end policy end diff --git a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index 683f01cab..8993fa085 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -455,6 +455,11 @@ module NetworkGeneration it "merges init policy when sink requires 'buffer' connection type" do @sink_task_m.in_port.needs_buffered_connection + + flexmock(@source_t.out_port.model) + .should_receive(:init_policy).explicitly + .and_return(true) + @source_t.out_port.model.init_policy(true) policy = @dynamics.policy_for(@source_t, "out", "in", @sink_t, nil) From 5a67edafc54703176e3985110d1d810491c197ce Mon Sep 17 00:00:00 2001 From: jhonasiv <34279171+jhonasiv@users.noreply.github.com> Date: Mon, 17 Feb 2025 13:09:41 -0300 Subject: [PATCH 137/158] Revert "feat: init policy defaults to true" --- lib/syskit/dynamic_port_binding.rb | 1 - lib/syskit/models/port.rb | 33 +++----- .../network_generation/dataflow_dynamics.rb | 10 +-- .../test_dataflow_dynamics.rb | 81 +------------------ test/test_dynamic_port_binding.rb | 39 --------- 5 files changed, 14 insertions(+), 150 deletions(-) diff --git a/lib/syskit/dynamic_port_binding.rb b/lib/syskit/dynamic_port_binding.rb index df0623d9d..4c443f6ae 100644 --- a/lib/syskit/dynamic_port_binding.rb +++ b/lib/syskit/dynamic_port_binding.rb @@ -203,7 +203,6 @@ def initialize( # Method called by {Accessor} to create the accessor object from a # port def create_accessor(port) - policy.merge(init: policy.fetch(:init, port.model.init_policy)) port.reader(**policy) end diff --git a/lib/syskit/models/port.rb b/lib/syskit/models/port.rb index ceaf1c158..959907074 100644 --- a/lib/syskit/models/port.rb +++ b/lib/syskit/models/port.rb @@ -108,7 +108,16 @@ def connect_to(in_port, policy = {}) if out_port == self if in_port.respond_to?(:to_component_port) in_port = in_port.to_component_port - 
validate_connection(out_port, in_port) + if !out_port.output? + raise WrongPortConnectionDirection.new(self, in_port), "cannot connect #{out_port} to #{in_port}: #{out_port} is not an output port" + elsif !in_port.input? + raise WrongPortConnectionDirection.new(self, in_port), "cannot connect #{out_port} to #{in_port}: #{in_port} is not an input port" + elsif out_port.component_model == in_port.component_model + raise SelfConnection.new(out_port, in_port), "cannot connect #{out_port} to #{in_port}: they are both ports of the same component" + elsif out_port.type != in_port.type + raise WrongPortConnectionTypes.new(self, in_port), "cannot connect #{out_port} to #{in_port}: types mismatch" + end + component_model.connect_ports(in_port.component_model, [out_port.name, in_port.name] => policy) else Syskit.connect self, in_port, policy @@ -119,28 +128,6 @@ def connect_to(in_port, policy = {}) end end - def validate_connection(out_port, in_port) - unless out_port.output? - raise WrongPortConnectionDirection.new(self, in_port), - "cannot connect #{out_port} to #{in_port}: " \ - "#{out_port} is not an output port" - end - unless in_port.input? 
- raise WrongPortConnectionDirection.new(self, in_port), - "cannot connect #{out_port} to #{in_port}: " \ - "#{in_port} is not an input port" - end - if out_port.component_model == in_port.component_model - raise SelfConnection.new(out_port, in_port), - "cannot connect #{out_port} to #{in_port}: " \ - "they are both ports of the same component" - end - unless out_port.type == in_port.type - raise WrongPortConnectionTypes.new(self, in_port), - "cannot connect #{out_port} to #{in_port}: types mismatch" - end - end - # Tests whether self is connected to the provided port def connected_to?(sink_port) source_port = try_to_component_port diff --git a/lib/syskit/network_generation/dataflow_dynamics.rb b/lib/syskit/network_generation/dataflow_dynamics.rb index b8e7746df..9ce1cfa88 100644 --- a/lib/syskit/network_generation/dataflow_dynamics.rb +++ b/lib/syskit/network_generation/dataflow_dynamics.rb @@ -603,27 +603,23 @@ def policy_for( sink_port_m = sink_port.model if sink_port_m.needs_reliable_connection? - policy = compute_reliable_connection_policy( + compute_reliable_connection_policy( source_port, sink_port, fallback_policy ) elsif sink_port_m.required_connection_type == :data policy = Orocos::Port.prepare_policy(type: :data) DataFlowDynamics.debug { " result: #{policy}" } + policy elsif sink_port_m.required_connection_type == :buffer policy = Orocos::Port.prepare_policy(type: :buffer, size: 1) DataFlowDynamics.debug { " result: #{policy}" } + policy else raise UnsupportedConnectionType, "unknown required connection type " \ "#{sink_port_m.required_connection_type} " \ "on #{sink_port}" end - - source_port_m = source_port.model - unless source_port_m.init_policy? 
- policy = policy.merge(init: source_port_m.init_policy) - end - policy end def compute_reliable_connection_policy( diff --git a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index 8993fa085..5dfd15e8e 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -240,46 +240,6 @@ module NetworkGeneration policy_graph[[task0, task1]][%w[out in]]) end - it "adds init: true policy if available and saves it " \ - "in the graph's policy_graph" do - plan.add(task0 = @task_m.new) - plan.add(task1 = @task_m.new) - - add_agents(tasks = [task0, task1]) - flexmock(@dynamics).should_receive(:propagate).with(tasks) - - task0.out_port.model.init_policy(true) - task0.out_port.connect_to(task1.in_port) - - @dynamics.should_receive(:policy_for) - .with(task0, "out", "in", task1, nil) - .and_return(type: :buffer, size: 42, init: true) - policy_graph = @dynamics.compute_connection_policies - - assert_equal({ type: :buffer, size: 42, init: true }, - policy_graph[[task0, task1]][%w[out in]]) - end - - it "adds init: false policy if available and saves it " \ - "in the graph's policy_graph" do - plan.add(task0 = @task_m.new) - plan.add(task1 = @task_m.new) - - add_agents(tasks = [task0, task1]) - flexmock(@dynamics).should_receive(:propagate).with(tasks) - - task0.out_port.model.init_policy(false) - task0.out_port.connect_to(task1.in_port) - - @dynamics.should_receive(:policy_for) - .with(task0, "out", "in", task1, nil) - .and_return(type: :buffer, size: 42, init: false) - policy_graph = @dynamics.compute_connection_policies - - assert_equal({ type: :buffer, size: 42, init: false }, - policy_graph[[task0, task1]][%w[out in]]) - end - it "computes the policies on the concrete connections" do plan.add(task = @task_m.new) cmp = @cmp_m.instanciate(plan) @@ -420,54 +380,15 @@ module NetworkGeneration "the sink port is marked as needs_reliable_connection" do 
@sink_task_m.in_port.needs_reliable_connection fallback_policy = flexmock - expected_policy = flexmock - - expected_policy - .should_receive(:merge) - .and_return(expected_policy) - flexmock(@dynamics) .should_receive(:compute_reliable_connection_policy) .with(@source_t.out_port, @sink_t.in_port, fallback_policy) - .once.and_return(expected_policy) + .once.and_return(expected_policy = flexmock) policy = @dynamics.policy_for( @source_t, "out", "in", @sink_t, fallback_policy ) assert_equal expected_policy, policy end - - it "merges init policy when sink requires reliable connection" do - @sink_task_m.in_port.needs_reliable_connection - @source_t.out_port.model.init_policy(true) - - fallback_policy = flexmock - flexmock(@dynamics) - .should_receive(:compute_reliable_connection_policy) - .with(@source_t.out_port, @sink_t.in_port, fallback_policy) - .once.and_return({ init: true }) - - policy = @dynamics.policy_for( - @source_t, "out", "in", @sink_t, fallback_policy - ) - - assert_equal true, policy[:init] - end - - it "merges init policy when sink requires 'buffer' connection type" do - @sink_task_m.in_port.needs_buffered_connection - - flexmock(@source_t.out_port.model) - .should_receive(:init_policy).explicitly - .and_return(true) - - @source_t.out_port.model.init_policy(true) - - policy = @dynamics.policy_for(@source_t, "out", "in", @sink_t, nil) - - assert_equal true, policy[:init] - assert_equal :buffer, policy[:type] - assert_equal 1, policy[:size] - end end describe "#compute_reliable_connection_policy" do diff --git a/test/test_dynamic_port_binding.rb b/test/test_dynamic_port_binding.rb index 6f99ea117..759225df9 100644 --- a/test/test_dynamic_port_binding.rb +++ b/test/test_dynamic_port_binding.rb @@ -382,45 +382,6 @@ module Syskit @task = syskit_stub_deploy_configure_and_start(@task_m) end - describe "policy" do - attr_reader :task, :port_binding - - before do - @port_binding = flexmock - @accessor = DynamicPortBinding::Accessor.new(@port_binding) - 
flexmock(@accessor) - .should_receive(:create_accessor) - .explicitly - .with(@task.out_port).and_return { @task.out_port.reader } - end - - it "expects no policy if init_policy is not called" do - flexmock(@task.out_port) - .should_receive(:reader) - .and_return({}) - - @accessor.create_accessor(@task.out_port) - end - - it "expects init: true policy if init_policy(true) is called" do - @task.out_port.model.init_policy(true) - flexmock(@task.out_port) - .should_receive(:reader) - .and_return({ init: true }) - - @accessor.create_accessor(@task.out_port) - end - - it "expects init: false policy if init_policy(false) is called" do - @task.out_port.model.init_policy(false) - flexmock(@task.out_port) - .should_receive(:reader) - .and_return({ init: false }) - - @accessor.create_accessor(@task.out_port) - end - end - describe "#update" do attr_reader :task, :port_binding From bce3e9939cefedfb1b2408bab603519e104367c5 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 6 Feb 2025 08:49:27 -0300 Subject: [PATCH 138/158] feat: use recommend_init flag to apply init policy --- lib/syskit/models/port.rb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/syskit/models/port.rb b/lib/syskit/models/port.rb index 959907074..8cd836939 100644 --- a/lib/syskit/models/port.rb +++ b/lib/syskit/models/port.rb @@ -105,6 +105,9 @@ def to_component_port # @raise [SelfConnection] def connect_to(in_port, policy = {}) out_port = to_component_port + if out_port.respond_to?(:recommend_init) + policy[:init] = out_port.recommend_init + end if out_port == self if in_port.respond_to?(:to_component_port) in_port = in_port.to_component_port From febeac94bb3fc40ab627e41106bc18f94d362baa Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 6 Feb 2025 08:50:43 -0300 Subject: [PATCH 139/158] chore: fix rubocop offense by moving code fix Metrics/CyclomaticComplexity offense by moving the connection validation code to a separate function --- lib/syskit/models/port.rb | 33 
+++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/lib/syskit/models/port.rb b/lib/syskit/models/port.rb index 8cd836939..c624faec5 100644 --- a/lib/syskit/models/port.rb +++ b/lib/syskit/models/port.rb @@ -111,16 +111,7 @@ def connect_to(in_port, policy = {}) if out_port == self if in_port.respond_to?(:to_component_port) in_port = in_port.to_component_port - if !out_port.output? - raise WrongPortConnectionDirection.new(self, in_port), "cannot connect #{out_port} to #{in_port}: #{out_port} is not an output port" - elsif !in_port.input? - raise WrongPortConnectionDirection.new(self, in_port), "cannot connect #{out_port} to #{in_port}: #{in_port} is not an input port" - elsif out_port.component_model == in_port.component_model - raise SelfConnection.new(out_port, in_port), "cannot connect #{out_port} to #{in_port}: they are both ports of the same component" - elsif out_port.type != in_port.type - raise WrongPortConnectionTypes.new(self, in_port), "cannot connect #{out_port} to #{in_port}: types mismatch" - end - + validate_connection!(out_port, in_port) component_model.connect_ports(in_port.component_model, [out_port.name, in_port.name] => policy) else Syskit.connect self, in_port, policy @@ -131,6 +122,28 @@ def connect_to(in_port, policy = {}) end end + def validate_connection!(out_port, in_port) + unless out_port.output? + raise WrongPortConnectionDirection.new(self, in_port), + "cannot connect #{out_port} to #{in_port}: " \ + "#{out_port} is not an output port" + end + unless in_port.input? 
+ raise WrongPortConnectionDirection.new(self, in_port), + "cannot connect #{out_port} to #{in_port}: " \ + "#{in_port} is not an input port" + end + if out_port.component_model == in_port.component_model + raise SelfConnection.new(out_port, in_port), + "cannot connect #{out_port} to #{in_port}: " \ + "they are both ports of the same component" + end + unless out_port.type == in_port.type + raise WrongPortConnectionTypes.new(self, in_port), + "cannot connect #{out_port} to #{in_port}: types mismatch" + end + end + # Tests whether self is connected to the provided port def connected_to?(sink_port) source_port = try_to_component_port From de361f0ed7c362ea1016c06977e4e6d0734f7ab7 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 6 Feb 2025 08:54:10 -0300 Subject: [PATCH 140/158] test: add tests for init policy --- test/models/test_port.rb | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/test/models/test_port.rb b/test/models/test_port.rb index 7aeed89df..ded906242 100644 --- a/test/models/test_port.rb +++ b/test/models/test_port.rb @@ -72,6 +72,23 @@ out_task_m.out_port.connect_to in_task_m.in_port end end + it "adds 'init: true' as a default policy" do + policy = {} + flexmock(out_task_m).should_receive(:connect_ports).explicitly.once + .with(in_task_m, %w[out in] => policy.merge(init: true)) + out_task_m.out_port.connect_to in_task_m.in_port, policy + assert policy[:init] + end + it "adds 'init: false' if recommend_init flag is set to false" do + out_port_m = Syskit::Models::Port.new(out_task_m, out_task_m.orogen_model.find_port("out")) + out_port_m.orogen_model.recommend_init = false + refute out_port_m.recommend_init + policy = {} + flexmock(out_task_m).should_receive(:connect_ports).explicitly + .with(in_task_m, %w[out in] => policy.merge(init: false)) + out_task_m.out_port.connect_to in_task_m.in_port, policy + refute policy[:init] + end end describe "#can_connect_to?" 
do From 33d115449fa3c395a040ced4d101063abb24ee87 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Fri, 7 Feb 2025 16:26:54 -0300 Subject: [PATCH 141/158] fix/chore: policy for data readers and exported ports and rewrite code in port model --- lib/syskit/dynamic_port_binding.rb | 4 ++++ lib/syskit/models/port.rb | 9 +++++---- lib/syskit/network_generation/dataflow_dynamics.rb | 5 +++++ 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/lib/syskit/dynamic_port_binding.rb b/lib/syskit/dynamic_port_binding.rb index 4c443f6ae..7c7a74c56 100644 --- a/lib/syskit/dynamic_port_binding.rb +++ b/lib/syskit/dynamic_port_binding.rb @@ -203,6 +203,10 @@ def initialize( # Method called by {Accessor} to create the accessor object from a # port def create_accessor(port) + if port.respond_to?(:init_policy) && + [true, false].include?(port.init_policy) + policy.merge(init: port.init_policy) + end port.reader(**policy) end diff --git a/lib/syskit/models/port.rb b/lib/syskit/models/port.rb index c624faec5..8b360c12e 100644 --- a/lib/syskit/models/port.rb +++ b/lib/syskit/models/port.rb @@ -105,13 +105,14 @@ def to_component_port # @raise [SelfConnection] def connect_to(in_port, policy = {}) out_port = to_component_port - if out_port.respond_to?(:recommend_init) - policy[:init] = out_port.recommend_init + if out_port.respond_to?(:init_policy) && + [true, false].include?(out_port.init_policy) + policy = policy.merge(init: out_port.init_policy) end if out_port == self if in_port.respond_to?(:to_component_port) in_port = in_port.to_component_port - validate_connection!(out_port, in_port) + validate_connection(out_port, in_port) component_model.connect_ports(in_port.component_model, [out_port.name, in_port.name] => policy) else Syskit.connect self, in_port, policy @@ -122,7 +123,7 @@ def connect_to(in_port, policy = {}) end end - def validate_connection!(out_port, in_port) + def validate_connection(out_port, in_port) unless out_port.output? 
raise WrongPortConnectionDirection.new(self, in_port), "cannot connect #{out_port} to #{in_port}: " \ diff --git a/lib/syskit/network_generation/dataflow_dynamics.rb b/lib/syskit/network_generation/dataflow_dynamics.rb index 9ce1cfa88..ee24431c1 100644 --- a/lib/syskit/network_generation/dataflow_dynamics.rb +++ b/lib/syskit/network_generation/dataflow_dynamics.rb @@ -601,6 +601,11 @@ def policy_for( "#{sink_task}:#{sink_port.name}" end + source_port_m = source_port.model + if [true, false].include?(source_port_m.init_policy) + policy = policy.merge(init: source_port_m.init_policy) + end + sink_port_m = sink_port.model if sink_port_m.needs_reliable_connection? compute_reliable_connection_policy( From 51cae2d6c4501247a6f04014ba3c3b98086191b9 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Fri, 7 Feb 2025 16:27:24 -0300 Subject: [PATCH 142/158] test: add tests and update existing ones --- test/models/test_port.rb | 23 +++++++---- .../test_dataflow_dynamics.rb | 40 +++++++++++++++++++ test/test_dynamic_port_binding.rb | 39 ++++++++++++++++++ 3 files changed, 94 insertions(+), 8 deletions(-) diff --git a/test/models/test_port.rb b/test/models/test_port.rb index ded906242..9656c84b2 100644 --- a/test/models/test_port.rb +++ b/test/models/test_port.rb @@ -72,22 +72,29 @@ out_task_m.out_port.connect_to in_task_m.in_port end end - it "adds 'init: true' as a default policy" do + it "makes sure init policy is not set without calling recommend_init" do policy = {} flexmock(out_task_m).should_receive(:connect_ports).explicitly.once - .with(in_task_m, %w[out in] => policy.merge(init: true)) + .with(in_task_m, %w[out in] => policy) + out_task_m.out_port.connect_to in_task_m.in_port, policy + end + it "adds 'init: true' policy if recommend_init was called" do + out_port_m = Syskit::Models::Port.new(out_task_m, out_task_m.orogen_model.find_port("out")) + out_port_m.orogen_model.recommend_init + assert out_port_m.init_policy + policy = {} + 
flexmock(out_task_m).should_receive(:connect_ports).explicitly + .with(in_task_m, %w[out in] => { init: true }) out_task_m.out_port.connect_to in_task_m.in_port, policy - assert policy[:init] end - it "adds 'init: false' if recommend_init flag is set to false" do + it "adds 'init: false' policy if recommend_init(init: false) was called" do out_port_m = Syskit::Models::Port.new(out_task_m, out_task_m.orogen_model.find_port("out")) - out_port_m.orogen_model.recommend_init = false - refute out_port_m.recommend_init + out_port_m.orogen_model.recommend_init(init: false) + refute out_port_m.init_policy policy = {} flexmock(out_task_m).should_receive(:connect_ports).explicitly - .with(in_task_m, %w[out in] => policy.merge(init: false)) + .with(in_task_m, %w[out in] => { init: false }) out_task_m.out_port.connect_to in_task_m.in_port, policy - refute policy[:init] end end diff --git a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index 5dfd15e8e..ddf47f11f 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -240,6 +240,46 @@ module NetworkGeneration policy_graph[[task0, task1]][%w[out in]]) end + it "adds init: true policy if available and saves it " \ + "in the graph's policy_graph" do + plan.add(task0 = @task_m.new) + plan.add(task1 = @task_m.new) + + add_agents(tasks = [task0, task1]) + flexmock(@dynamics).should_receive(:propagate).with(tasks) + + task0.out_port.model.recommend_init + task0.out_port.connect_to(task1.in_port) + + @dynamics.should_receive(:policy_for) + .with(task0, "out", "in", task1, nil) + .and_return(type: :buffer, size: 42, init: true) + policy_graph = @dynamics.compute_connection_policies + + assert_equal({ type: :buffer, size: 42, init: true }, + policy_graph[[task0, task1]][%w[out in]]) + end + + it "adds init: false policy if available and saves it " \ + "in the graph's policy_graph" do + plan.add(task0 = @task_m.new) + 
plan.add(task1 = @task_m.new) + + add_agents(tasks = [task0, task1]) + flexmock(@dynamics).should_receive(:propagate).with(tasks) + + task0.out_port.model.recommend_init(init: false) + task0.out_port.connect_to(task1.in_port) + + @dynamics.should_receive(:policy_for) + .with(task0, "out", "in", task1, nil) + .and_return(type: :buffer, size: 42, init: false) + policy_graph = @dynamics.compute_connection_policies + + assert_equal({ type: :buffer, size: 42, init: false }, + policy_graph[[task0, task1]][%w[out in]]) + end + it "computes the policies on the concrete connections" do plan.add(task = @task_m.new) cmp = @cmp_m.instanciate(plan) diff --git a/test/test_dynamic_port_binding.rb b/test/test_dynamic_port_binding.rb index 759225df9..172526bb7 100644 --- a/test/test_dynamic_port_binding.rb +++ b/test/test_dynamic_port_binding.rb @@ -382,6 +382,45 @@ module Syskit @task = syskit_stub_deploy_configure_and_start(@task_m) end + describe "policy" do + attr_reader :task, :port_binding + + before do + @port_binding = flexmock + @accessor = DynamicPortBinding::Accessor.new(@port_binding) + flexmock(@accessor) + .should_receive(:create_accessor) + .explicitly + .with(@task.out_port).and_return { @task.out_port.reader } + end + + it "expects no policy if recommend_init is not called" do + flexmock(@task.out_port) + .should_receive(:reader) + .and_return({}) + + @accessor.create_accessor(@task.out_port) + end + + it "expects init: true policy if recommend_init is called" do + @task.out_port.model.recommend_init + flexmock(@task.out_port) + .should_receive(:reader) + .and_return({ init: true }) + + @accessor.create_accessor(@task.out_port) + end + + it "expects init: false policy if recommend_init(init: false) is called" do + @task.out_port.model.recommend_init(init: false) + flexmock(@task.out_port) + .should_receive(:reader) + .and_return({ init: false }) + + @accessor.create_accessor(@task.out_port) + end + end + describe "#update" do attr_reader :task, :port_binding From 
138c34b7396ca2878bf43d2c379ba083b1d8ba66 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Mon, 10 Feb 2025 10:30:56 -0300 Subject: [PATCH 143/158] chore: rewrite tests to call init_policy --- test/network_generation/test_dataflow_dynamics.rb | 4 ++-- test/test_dynamic_port_binding.rb | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index ddf47f11f..cd350f350 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -248,7 +248,7 @@ module NetworkGeneration add_agents(tasks = [task0, task1]) flexmock(@dynamics).should_receive(:propagate).with(tasks) - task0.out_port.model.recommend_init + task0.out_port.model.init_policy(true) task0.out_port.connect_to(task1.in_port) @dynamics.should_receive(:policy_for) @@ -268,7 +268,7 @@ module NetworkGeneration add_agents(tasks = [task0, task1]) flexmock(@dynamics).should_receive(:propagate).with(tasks) - task0.out_port.model.recommend_init(init: false) + task0.out_port.model.init_policy(false) task0.out_port.connect_to(task1.in_port) @dynamics.should_receive(:policy_for) diff --git a/test/test_dynamic_port_binding.rb b/test/test_dynamic_port_binding.rb index 172526bb7..6f99ea117 100644 --- a/test/test_dynamic_port_binding.rb +++ b/test/test_dynamic_port_binding.rb @@ -394,7 +394,7 @@ module Syskit .with(@task.out_port).and_return { @task.out_port.reader } end - it "expects no policy if recommend_init is not called" do + it "expects no policy if init_policy is not called" do flexmock(@task.out_port) .should_receive(:reader) .and_return({}) @@ -402,8 +402,8 @@ module Syskit @accessor.create_accessor(@task.out_port) end - it "expects init: true policy if recommend_init is called" do - @task.out_port.model.recommend_init + it "expects init: true policy if init_policy(true) is called" do + @task.out_port.model.init_policy(true) 
flexmock(@task.out_port) .should_receive(:reader) .and_return({ init: true }) @@ -411,8 +411,8 @@ module Syskit @accessor.create_accessor(@task.out_port) end - it "expects init: false policy if recommend_init(init: false) is called" do - @task.out_port.model.recommend_init(init: false) + it "expects init: false policy if init_policy(false) is called" do + @task.out_port.model.init_policy(false) flexmock(@task.out_port) .should_receive(:reader) .and_return({ init: false }) From 43ea219f3a81a7f737fba1164e68f799a0dcc5c5 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 13 Feb 2025 10:47:20 -0300 Subject: [PATCH 144/158] chore: remove unneccessary code in models/port and tests --- lib/syskit/models/port.rb | 4 ---- test/models/test_port.rb | 24 ------------------------ 2 files changed, 28 deletions(-) diff --git a/lib/syskit/models/port.rb b/lib/syskit/models/port.rb index 8b360c12e..ceaf1c158 100644 --- a/lib/syskit/models/port.rb +++ b/lib/syskit/models/port.rb @@ -105,10 +105,6 @@ def to_component_port # @raise [SelfConnection] def connect_to(in_port, policy = {}) out_port = to_component_port - if out_port.respond_to?(:init_policy) && - [true, false].include?(out_port.init_policy) - policy = policy.merge(init: out_port.init_policy) - end if out_port == self if in_port.respond_to?(:to_component_port) in_port = in_port.to_component_port diff --git a/test/models/test_port.rb b/test/models/test_port.rb index 9656c84b2..7aeed89df 100644 --- a/test/models/test_port.rb +++ b/test/models/test_port.rb @@ -72,30 +72,6 @@ out_task_m.out_port.connect_to in_task_m.in_port end end - it "makes sure init policy is not set without calling recommend_init" do - policy = {} - flexmock(out_task_m).should_receive(:connect_ports).explicitly.once - .with(in_task_m, %w[out in] => policy) - out_task_m.out_port.connect_to in_task_m.in_port, policy - end - it "adds 'init: true' policy if recommend_init was called" do - out_port_m = Syskit::Models::Port.new(out_task_m, 
out_task_m.orogen_model.find_port("out")) - out_port_m.orogen_model.recommend_init - assert out_port_m.init_policy - policy = {} - flexmock(out_task_m).should_receive(:connect_ports).explicitly - .with(in_task_m, %w[out in] => { init: true }) - out_task_m.out_port.connect_to in_task_m.in_port, policy - end - it "adds 'init: false' policy if recommend_init(init: false) was called" do - out_port_m = Syskit::Models::Port.new(out_task_m, out_task_m.orogen_model.find_port("out")) - out_port_m.orogen_model.recommend_init(init: false) - refute out_port_m.init_policy - policy = {} - flexmock(out_task_m).should_receive(:connect_ports).explicitly - .with(in_task_m, %w[out in] => { init: false }) - out_task_m.out_port.connect_to in_task_m.in_port, policy - end end describe "#can_connect_to?" do From d67f48317d26e93c73d4f55921a863ead93978d0 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 13 Feb 2025 11:32:09 -0300 Subject: [PATCH 145/158] fix: do not override existing init flag Use init_policy from the port as a default for the policy Do not override the existing init flag in the policy `policy.fetch(:init, port.model.init_policy)` to set the init value if not already provided in the policy --- lib/syskit/dynamic_port_binding.rb | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/syskit/dynamic_port_binding.rb b/lib/syskit/dynamic_port_binding.rb index 7c7a74c56..df0623d9d 100644 --- a/lib/syskit/dynamic_port_binding.rb +++ b/lib/syskit/dynamic_port_binding.rb @@ -203,10 +203,7 @@ def initialize( # Method called by {Accessor} to create the accessor object from a # port def create_accessor(port) - if port.respond_to?(:init_policy) && - [true, false].include?(port.init_policy) - policy.merge(init: port.init_policy) - end + policy.merge(init: policy.fetch(:init, port.model.init_policy)) port.reader(**policy) end From 2fd848d5495c5e2aba9f24952d51dd2288de80ad Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Thu, 13 Feb 2025 12:24:51 -0300 Subject: 
[PATCH 146/158] fix/test: allow policy merge and add tests for this --- .../network_generation/dataflow_dynamics.rb | 12 +++---- .../test_dataflow_dynamics.rb | 36 ++++++++++++++++++- 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/lib/syskit/network_generation/dataflow_dynamics.rb b/lib/syskit/network_generation/dataflow_dynamics.rb index ee24431c1..52f8d08ca 100644 --- a/lib/syskit/network_generation/dataflow_dynamics.rb +++ b/lib/syskit/network_generation/dataflow_dynamics.rb @@ -601,30 +601,26 @@ def policy_for( "#{sink_task}:#{sink_port.name}" end - source_port_m = source_port.model - if [true, false].include?(source_port_m.init_policy) - policy = policy.merge(init: source_port_m.init_policy) - end - sink_port_m = sink_port.model if sink_port_m.needs_reliable_connection? - compute_reliable_connection_policy( + policy = compute_reliable_connection_policy( source_port, sink_port, fallback_policy ) elsif sink_port_m.required_connection_type == :data policy = Orocos::Port.prepare_policy(type: :data) DataFlowDynamics.debug { " result: #{policy}" } - policy elsif sink_port_m.required_connection_type == :buffer policy = Orocos::Port.prepare_policy(type: :buffer, size: 1) DataFlowDynamics.debug { " result: #{policy}" } - policy else raise UnsupportedConnectionType, "unknown required connection type " \ "#{sink_port_m.required_connection_type} " \ "on #{sink_port}" end + + source_port_m = source_port.model + policy.merge(init: source_port_m.init_policy) end def compute_reliable_connection_policy( diff --git a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index cd350f350..683f01cab 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -420,15 +420,49 @@ module NetworkGeneration "the sink port is marked as needs_reliable_connection" do @sink_task_m.in_port.needs_reliable_connection fallback_policy = flexmock + expected_policy = 
flexmock + + expected_policy + .should_receive(:merge) + .and_return(expected_policy) + flexmock(@dynamics) .should_receive(:compute_reliable_connection_policy) .with(@source_t.out_port, @sink_t.in_port, fallback_policy) - .once.and_return(expected_policy = flexmock) + .once.and_return(expected_policy) policy = @dynamics.policy_for( @source_t, "out", "in", @sink_t, fallback_policy ) assert_equal expected_policy, policy end + + it "merges init policy when sink requires reliable connection" do + @sink_task_m.in_port.needs_reliable_connection + @source_t.out_port.model.init_policy(true) + + fallback_policy = flexmock + flexmock(@dynamics) + .should_receive(:compute_reliable_connection_policy) + .with(@source_t.out_port, @sink_t.in_port, fallback_policy) + .once.and_return({ init: true }) + + policy = @dynamics.policy_for( + @source_t, "out", "in", @sink_t, fallback_policy + ) + + assert_equal true, policy[:init] + end + + it "merges init policy when sink requires 'buffer' connection type" do + @sink_task_m.in_port.needs_buffered_connection + @source_t.out_port.model.init_policy(true) + + policy = @dynamics.policy_for(@source_t, "out", "in", @sink_t, nil) + + assert_equal true, policy[:init] + assert_equal :buffer, policy[:type] + assert_equal 1, policy[:size] + end end describe "#compute_reliable_connection_policy" do From b2cd8e348f5f858f2d9ee7ea7019aea5cae0172f Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Fri, 14 Feb 2025 15:33:25 -0300 Subject: [PATCH 147/158] fix: do not merge if no init policy --- lib/syskit/network_generation/dataflow_dynamics.rb | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/syskit/network_generation/dataflow_dynamics.rb b/lib/syskit/network_generation/dataflow_dynamics.rb index 52f8d08ca..318160296 100644 --- a/lib/syskit/network_generation/dataflow_dynamics.rb +++ b/lib/syskit/network_generation/dataflow_dynamics.rb @@ -620,7 +620,10 @@ def policy_for( end source_port_m = source_port.model - policy.merge(init: 
source_port_m.init_policy) + unless source_port_m.init_policy.nil? + policy.merge(init: source_port_m.init_policy) + end + policy end def compute_reliable_connection_policy( From 179f280ea1fca56d31aba5d2eeeb21d48a663040 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Mon, 17 Feb 2025 10:47:22 -0300 Subject: [PATCH 148/158] fix: fix merge test --- lib/syskit/dynamic_port_binding.rb | 2 +- lib/syskit/network_generation/dataflow_dynamics.rb | 5 +---- test/network_generation/test_dataflow_dynamics.rb | 6 +++++- test/network_generation/test_engine.rb | 2 +- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/syskit/dynamic_port_binding.rb b/lib/syskit/dynamic_port_binding.rb index df0623d9d..2ea0edbd1 100644 --- a/lib/syskit/dynamic_port_binding.rb +++ b/lib/syskit/dynamic_port_binding.rb @@ -203,7 +203,7 @@ def initialize( # Method called by {Accessor} to create the accessor object from a # port def create_accessor(port) - policy.merge(init: policy.fetch(:init, port.model.init_policy)) + @policy.merge(init: @policy.fetch(:init, port.model.init_policy?)) port.reader(**policy) end diff --git a/lib/syskit/network_generation/dataflow_dynamics.rb b/lib/syskit/network_generation/dataflow_dynamics.rb index 318160296..6b69b8bc3 100644 --- a/lib/syskit/network_generation/dataflow_dynamics.rb +++ b/lib/syskit/network_generation/dataflow_dynamics.rb @@ -620,10 +620,7 @@ def policy_for( end source_port_m = source_port.model - unless source_port_m.init_policy.nil? - policy.merge(init: source_port_m.init_policy) - end - policy + policy.merge(init: source_port_m.init_policy?) 
end def compute_reliable_connection_policy( diff --git a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index 683f01cab..55eda2312 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -455,8 +455,12 @@ module NetworkGeneration it "merges init policy when sink requires 'buffer' connection type" do @sink_task_m.in_port.needs_buffered_connection - @source_t.out_port.model.init_policy(true) + flexmock(@source_t.out_port.model) + .should_receive(:init_policy?).explicitly + .and_return(true) + + @source_t.out_port.model.init_policy(true) policy = @dynamics.policy_for(@source_t, "out", "in", @sink_t, nil) assert_equal true, policy[:init] diff --git a/test/network_generation/test_engine.rb b/test/network_generation/test_engine.rb index ef9ff6ada..bdd35f4ea 100644 --- a/test/network_generation/test_engine.rb +++ b/test/network_generation/test_engine.rb @@ -859,7 +859,7 @@ def deploy_dev_and_bus syskit_configure(cmp) assert_equal( - { %w[out in] => { type: :buffer, size: 4 } }, + { %w[out in] => { type: :buffer, size: 4, init: nil } }, RequiredDataFlow.edge_info(cmp.source_child, cmp.sink_child) ) end From 411e3ef41787fce9b3ca4369452da5859f08d043 Mon Sep 17 00:00:00 2001 From: kapeps Date: Mon, 17 Feb 2025 17:56:15 -0300 Subject: [PATCH 149/158] chore: warn in case it could not find unmanaged task --- lib/syskit/process_managers/unmanaged/process.rb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/syskit/process_managers/unmanaged/process.rb b/lib/syskit/process_managers/unmanaged/process.rb index 6b6acfb84..ef99ccd8d 100644 --- a/lib/syskit/process_managers/unmanaged/process.rb +++ b/lib/syskit/process_managers/unmanaged/process.rb @@ -119,9 +119,12 @@ def name_service_get_all_tasks result = {} until expected_names.empty? 
+ sleep 0.1 + expected_names.delete_if do |name| result[name] = name_service.get(name) rescue Orocos::NotFound + ::Robot.warn "could not find unmanaged task #{name}" false end end From 1b1a6a9ec89dc292152d3d62e8c57438891a7bb0 Mon Sep 17 00:00:00 2001 From: kapeps Date: Tue, 18 Feb 2025 15:59:40 -0300 Subject: [PATCH 150/158] refactor: name_service_get_all_tasks loop --- lib/syskit/process_managers/unmanaged/process.rb | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/lib/syskit/process_managers/unmanaged/process.rb b/lib/syskit/process_managers/unmanaged/process.rb index ef99ccd8d..0b071fc79 100644 --- a/lib/syskit/process_managers/unmanaged/process.rb +++ b/lib/syskit/process_managers/unmanaged/process.rb @@ -111,22 +111,29 @@ def spawn(_options = {}) # Calls the name service until all of the tasks are resolved. Ignores # whenever a Orocos::NotFound exception is raised. # + # @param [Float] warning_period period for warning message in seconds + # # @raises RuntimeError # @raises Orocos::CORBA::ComError # @return [Hash] - def name_service_get_all_tasks + def name_service_get_all_tasks(warning_period: 5.0) expected_names = mapped_task_names.dup result = {} - until expected_names.empty? - sleep 0.1 + warning_time_deadline = Time.at(0) + until expected_names.empty? expected_names.delete_if do |name| result[name] = name_service.get(name) rescue Orocos::NotFound - ::Robot.warn "could not find unmanaged task #{name}" + if Time.now > warning_time_deadline + ::Robot.warn "could not find unmanaged task #{name}" + warning_time_deadline = Time.now + warning_period + end false end + + sleep 0.1 if expected_names.any? end result end From 643622ac967e6752c5f8dfcb267ba3677e6f548d Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Fri, 21 Feb 2025 06:53:38 -0300 Subject: [PATCH 151/158] fix: assign merge to policy and fix priority - Correctly merges policy, making sure :init is only set if it wasn't already defined. 
This prevents :init from being overwritten when explicitly provided. - Adds a test to confirm that an existing :init value remains unchanged. --- lib/syskit/dynamic_port_binding.rb | 2 +- test/test_dynamic_port_binding.rb | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/lib/syskit/dynamic_port_binding.rb b/lib/syskit/dynamic_port_binding.rb index 2ea0edbd1..37670ac8f 100644 --- a/lib/syskit/dynamic_port_binding.rb +++ b/lib/syskit/dynamic_port_binding.rb @@ -203,7 +203,7 @@ def initialize( # Method called by {Accessor} to create the accessor object from a # port def create_accessor(port) - @policy.merge(init: @policy.fetch(:init, port.model.init_policy?)) + @policy = { init: port.model.init_policy? }.merge(@policy) port.reader(**policy) end diff --git a/test/test_dynamic_port_binding.rb b/test/test_dynamic_port_binding.rb index 6f99ea117..ac733139a 100644 --- a/test/test_dynamic_port_binding.rb +++ b/test/test_dynamic_port_binding.rb @@ -559,7 +559,21 @@ def wait_until_connected(accessor) reader.attach_to_task(task) reader.update - assert_equal({ type: :buffer, size: 20 }, reader.resolved_accessor.policy) + assert_equal({ type: :buffer, size: 20, init: nil }, + reader.resolved_accessor.policy) + end + + it "does not override existing :init value in policy" do + reader = Models::DynamicPortBinding + .create(@task_m.out_port) + .instanciate + .to_data_accessor(type: :buffer, size: 20, init: true) + task = syskit_stub_deploy_and_configure(@task_m) + reader.attach_to_task(task) + reader.update + + assert_equal({ type: :buffer, size: 20, init: true }, + reader.resolved_accessor.policy) end describe "#read_new" do From d3860c64dfd4d2bc6fbcf284492e4ed7f12c31ed Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Fri, 21 Feb 2025 07:03:58 -0300 Subject: [PATCH 152/158] chore: fix rubocop offense --- test/network_generation/test_dataflow_dynamics.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index 55eda2312..31f54d8ae 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -459,7 +459,7 @@ module NetworkGeneration flexmock(@source_t.out_port.model) .should_receive(:init_policy?).explicitly .and_return(true) - + @source_t.out_port.model.init_policy(true) policy = @dynamics.policy_for(@source_t, "out", "in", @sink_t, nil) From d84f11069a58d12f113ba45757246c4a5e2f653b Mon Sep 17 00:00:00 2001 From: kapeps Date: Fri, 21 Feb 2025 11:14:36 -0300 Subject: [PATCH 153/158] fix: transfer server and transfer client --- lib/syskit/cli/log_runtime_archive_main.rb | 2 +- lib/syskit/roby_app/log_transfer_server/ftp_upload.rb | 4 ++-- lib/syskit/runtime/server/spawn_server.rb | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/syskit/cli/log_runtime_archive_main.rb b/lib/syskit/cli/log_runtime_archive_main.rb index 597ffcfb7..4dd1845a2 100755 --- a/lib/syskit/cli/log_runtime_archive_main.rb +++ b/lib/syskit/cli/log_runtime_archive_main.rb @@ -109,7 +109,7 @@ def transfer_server( # rubocop:disable Metrics/ParameterLists target_dir, host, port, certfile_path, user, password, implicit_ftps ) server = create_server(target_dir, host, port, certfile_path, user, - password, implicit_ftps) + password, implicit_ftps == "true") server.run end diff --git a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb index e9c58ea3d..228a98d9a 100644 --- a/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb +++ b/lib/syskit/roby_app/log_transfer_server/ftp_upload.rb @@ -70,9 +70,9 @@ def open_and_transfer(root: nil) end def chdir_to_file_directory(ftp, root) - dataset_path = File.dirname(@file.relative_path_from(root)) + dataset_path = @file.relative_path_from(root).dirname - dataset_path.split("/") do |folder| + dataset_path.each_filename do 
|folder| ftp.chdir(folder) rescue Net::FTPPermError => _e ftp.mkdir(folder) diff --git a/lib/syskit/runtime/server/spawn_server.rb b/lib/syskit/runtime/server/spawn_server.rb index 9169b8d1b..f69a22933 100644 --- a/lib/syskit/runtime/server/spawn_server.rb +++ b/lib/syskit/runtime/server/spawn_server.rb @@ -86,7 +86,7 @@ def wait_until_stopped puts "FTP server started. Press ENTER or c-C to stop it" $stdout.flush begin - $stdin.readline + sleep rescue Interrupt puts "Interrupt" end From f31f4107df2746f41a013bc7ce40e55ad59abd3f Mon Sep 17 00:00:00 2001 From: kapeps Date: Fri, 21 Feb 2025 11:20:17 -0300 Subject: [PATCH 154/158] refactor: log_runtime_archive be compatible with ruby 2.5.5 --- lib/syskit/cli/log_runtime_archive.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index b11b28a36..06427dbef 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -286,7 +286,7 @@ def self.find_all_dataset_folders(root_dir) child if (child / "info.yml").file? end - candidates.compact.sort_by { _1.basename.to_s } + candidates.compact.sort_by { |a| a.basename.to_s } end # Safely add an entry into an archive, compressing it with zstd From 1104abedc9094708bcb9ea8ac5fa2a4331f17426 Mon Sep 17 00:00:00 2001 From: kapeps Date: Mon, 24 Feb 2025 11:39:22 -0300 Subject: [PATCH 155/158] fix: rubocop grievance --- lib/syskit/cli/log_runtime_archive.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/syskit/cli/log_runtime_archive.rb b/lib/syskit/cli/log_runtime_archive.rb index 06427dbef..8d47e92f3 100644 --- a/lib/syskit/cli/log_runtime_archive.rb +++ b/lib/syskit/cli/log_runtime_archive.rb @@ -286,7 +286,7 @@ def self.find_all_dataset_folders(root_dir) child if (child / "info.yml").file? 
end - candidates.compact.sort_by { |a| a.basename.to_s } + candidates.compact.sort_by { |a| a.basename.to_s } end # Safely add an entry into an archive, compressing it with zstd From f28f627924c71e5141cc990c1623b893f5a4d944 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Fri, 21 Feb 2025 07:31:13 -0300 Subject: [PATCH 156/158] fix/test: fix policy handling in tests Fixes incorrect .and_return(...) usage in tests, replacing it with .with(...) to properly validate method arguments. --- test/network_generation/test_dataflow_dynamics.rb | 10 ++++------ test/test_dynamic_port_binding.rb | 8 ++++---- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index 31f54d8ae..b638b9856 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -439,18 +439,18 @@ module NetworkGeneration it "merges init policy when sink requires reliable connection" do @sink_task_m.in_port.needs_reliable_connection @source_t.out_port.model.init_policy(true) - fallback_policy = flexmock + flexmock(@dynamics) .should_receive(:compute_reliable_connection_policy) .with(@source_t.out_port, @sink_t.in_port, fallback_policy) - .once.and_return({ init: true }) + .once.and_return({}) policy = @dynamics.policy_for( @source_t, "out", "in", @sink_t, fallback_policy ) - assert_equal true, policy[:init] + assert policy[:init] end it "merges init policy when sink requires 'buffer' connection type" do @@ -463,9 +463,7 @@ module NetworkGeneration @source_t.out_port.model.init_policy(true) policy = @dynamics.policy_for(@source_t, "out", "in", @sink_t, nil) - assert_equal true, policy[:init] - assert_equal :buffer, policy[:type] - assert_equal 1, policy[:size] + assert policy[:init] end end diff --git a/test/test_dynamic_port_binding.rb b/test/test_dynamic_port_binding.rb index ac733139a..3fafc3e20 100644 --- 
a/test/test_dynamic_port_binding.rb +++ b/test/test_dynamic_port_binding.rb @@ -382,7 +382,7 @@ module Syskit @task = syskit_stub_deploy_configure_and_start(@task_m) end - describe "policy" do + describe "init policy" do attr_reader :task, :port_binding before do @@ -397,7 +397,7 @@ module Syskit it "expects no policy if init_policy is not called" do flexmock(@task.out_port) .should_receive(:reader) - .and_return({}) + .with({}) @accessor.create_accessor(@task.out_port) end @@ -406,7 +406,7 @@ module Syskit @task.out_port.model.init_policy(true) flexmock(@task.out_port) .should_receive(:reader) - .and_return({ init: true }) + .with({ init: true }) @accessor.create_accessor(@task.out_port) end @@ -415,7 +415,7 @@ module Syskit @task.out_port.model.init_policy(false) flexmock(@task.out_port) .should_receive(:reader) - .and_return({ init: false }) + .with({ init: false }) @accessor.create_accessor(@task.out_port) end From 5df6b650c4929249ceffabff07c6b52789af0834 Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Tue, 18 Feb 2025 09:31:13 -0300 Subject: [PATCH 157/158] feat: add fine-grained policy merge Previously, if a parameter was added to the designer-provided policy, no automated policy determination would be performed. That meant that the init flag would not be computed in this case. To fix this, a merge_policy function was implemented to merge the explicit policy (the designer-provided one) and the computed policy (init, for instance) The fix follows these rules: - if a value is in policy, use it; - otherwise use the value from computed_policy Also, if the type policy is set to data, the size policy must be removed, as its only meaningful for the type buffer and it causes the connection to fail. The Ruby method `merge` merges two hashes. According to the documentation: "Returns a new hash containing the contents of other_hash and the contents of hsh. If no block is specified, the value for entries with duplicate keys will be that of other_hash." 
In this case, to follow the rules of prioritizing the value from explicit_policy in case of key duplication, 'other_hash' is explicit_policy and 'hsh' is computed_policy --- .../network_generation/dataflow_dynamics.rb | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/lib/syskit/network_generation/dataflow_dynamics.rb b/lib/syskit/network_generation/dataflow_dynamics.rb index 6b69b8bc3..03fd89f91 100644 --- a/lib/syskit/network_generation/dataflow_dynamics.rb +++ b/lib/syskit/network_generation/dataflow_dynamics.rb @@ -553,6 +553,16 @@ def compute_connection_policies policy_graph end + def merge_policy(explicit_policy, computed_policy) + merged_policy = computed_policy.merge(explicit_policy) + + if merged_policy[:type] == :data + merged_policy.delete(:size) + end + + merged_policy + end + # @api private # # Compute the policies for all connections starting from a given task @@ -563,13 +573,10 @@ def compute_policies_from(connection_graph, source_task, policy_graph = {}) mappings.each_with_object({}) do |(port_pair, policy), h| policy = policy.dup fallback_policy = policy.delete(:fallback_policy) - if policy.empty? 
- h[port_pair] = - policy_for(source_task, *port_pair, sink_task, - fallback_policy) - else - h[port_pair] = policy - end + computed_policy = policy_for( + source_task, *port_pair, sink_task, fallback_policy + ) + h[port_pair] = merge_policy(policy, computed_policy) end policy_graph[[source_task, sink_task]] = computed_policies end From 7489a9042d22aeabca55aabcad7ac0353021f32e Mon Sep 17 00:00:00 2001 From: eduardacoppo Date: Tue, 18 Feb 2025 13:29:17 -0300 Subject: [PATCH 158/158] test: add tests and update existing --- .../test_dataflow_dynamics.rb | 67 ++++++++++++++++++- 1 file changed, 64 insertions(+), 3 deletions(-) diff --git a/test/network_generation/test_dataflow_dynamics.rb b/test/network_generation/test_dataflow_dynamics.rb index b638b9856..db65349d6 100644 --- a/test/network_generation/test_dataflow_dynamics.rb +++ b/test/network_generation/test_dataflow_dynamics.rb @@ -298,7 +298,7 @@ module NetworkGeneration policy_graph[[cmp.c_child, task]][%w[out in]]) end - it "uses in-graph policies over the computed ones" do + it "merges in-graph policies with the computed ones" do plan.add(task0 = @task_m.new) plan.add(task1 = @task_m.new) @@ -307,10 +307,13 @@ module NetworkGeneration task0.out_port.connect_to(task1.in_port, type: :buffer, size: 42) - @dynamics.should_receive(:policy_for).never + @dynamics + .should_receive(:policy_for) + .with(task0, "out", "in", task1, nil) + .and_return(type: :buffer, size: 10, init: nil) policy_graph = @dynamics.compute_connection_policies - assert_equal({ type: :buffer, size: 42 }, + assert_equal({ type: :buffer, size: 42, init: nil }, policy_graph[[task0, task1]][%w[out in]]) end @@ -360,6 +363,64 @@ module NetworkGeneration add_agents(tasks[0, 2]) flexmock(@dynamics).should_receive(:propagate).with(tasks[0, 2]) end + + it "handles the case where the explicit policy sets the type to :data" do + plan.add(task0 = @task_m.new) + plan.add(task1 = @task_m.new) + + add_agents(tasks = [task0, task1]) + 
flexmock(@dynamics).should_receive(:propagate).with(tasks) + + task0.out_port.connect_to task1.in_port, type: :data + + flexmock(@dynamics) + .should_receive(:policy_for) + .with(task0, "out", "in", task1, nil) + .and_return(type: :buffer, size: 10, init: true) + + policy_graph = @dynamics.compute_connection_policies + expected_policy = { type: :data, init: true } + assert_equal(expected_policy, + policy_graph[[task0, task1]][%w[out in]]) + end + end + + describe "merge_policy" do + before do + @dynamics = NetworkGeneration::DataFlowDynamics.new(plan) + end + + it "merges policies by preferring explicit values over " \ + "computed values" do + explicit_policy = { type: :buffer, size: 20, init: true } + computed_policy = { type: :buffer, size: 10, init: true } + + merged_policy = + @dynamics.merge_policy(explicit_policy, computed_policy) + + assert_equal({ type: :buffer, size: 20, init: true }, merged_policy) + end + + it "removes the size value when the type is set to :data" do + explicit_policy = { type: :data, init: true } + computed_policy = { type: :buffer, size: 10, init: true } + + merged_policy = + @dynamics.merge_policy(explicit_policy, computed_policy) + + assert_equal({ type: :data, init: true }, merged_policy) + end + + it "falls back to computed values when explicit values " \ + "are not provided" do + explicit_policy = {} + computed_policy = { type: :buffer, size: 10, init: true } + + merged_policy = + @dynamics.merge_policy(explicit_policy, computed_policy) + + assert_equal({ type: :buffer, size: 10, init: true }, merged_policy) + end end describe "#policy_for" do