From 3cfb33b0073ae84db688d8f045ee2facc2b2bcc1 Mon Sep 17 00:00:00 2001 From: Koichi Date: Thu, 10 Jul 2025 14:00:16 +0900 Subject: [PATCH 01/27] added mutex in fatfs metadata Signed-off-by: Koichi --- .../tests/test_fatfs_consistency/Cargo.toml | 18 ++ .../tests/test_fatfs_consistency/src/lib.rs | 252 ++++++++++++++++++ .../test_fatfs_simple_consistency/Cargo.toml | 15 ++ .../test_fatfs_simple_consistency/src/lib.rs | 111 ++++++++ awkernel_lib/src/file/fatfs/dir_entry.rs | 24 +- awkernel_lib/src/file/fatfs/file.rs | 223 +++++++++++----- awkernel_lib/src/file/fatfs/fs.rs | 29 +- userland/Cargo.toml | 14 +- userland/src/lib.rs | 6 + 9 files changed, 621 insertions(+), 71 deletions(-) create mode 100644 applications/tests/test_fatfs_consistency/Cargo.toml create mode 100644 applications/tests/test_fatfs_consistency/src/lib.rs create mode 100644 applications/tests/test_fatfs_simple_consistency/Cargo.toml create mode 100644 applications/tests/test_fatfs_simple_consistency/src/lib.rs diff --git a/applications/tests/test_fatfs_consistency/Cargo.toml b/applications/tests/test_fatfs_consistency/Cargo.toml new file mode 100644 index 000000000..6d927befd --- /dev/null +++ b/applications/tests/test_fatfs_consistency/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "test_fatfs_consistency" +version = "0.1.0" +edition = "2024" + +[dependencies] +log = "0.4" + +[dependencies.awkernel_async_lib] +path = "../../../awkernel_async_lib" +default-features = false + +[dependencies.awkernel_lib] +path = "../../../awkernel_lib" +default-features = false + +[dependencies.awkernel_services] +path = "../../awkernel_services" \ No newline at end of file diff --git a/applications/tests/test_fatfs_consistency/src/lib.rs b/applications/tests/test_fatfs_consistency/src/lib.rs new file mode 100644 index 000000000..3e3e39961 --- /dev/null +++ b/applications/tests/test_fatfs_consistency/src/lib.rs @@ -0,0 +1,252 @@ +#![no_std] + +extern crate alloc; + +use alloc::format; +use alloc::vec::Vec; +use alloc::vec; +use alloc::borrow::Cow; +use awkernel_async_lib::scheduler::SchedulerType; +use awkernel_async_lib::file::path::AsyncVfsPath; +use awkernel_lib::file::fatfs::init_memory_fatfs; +use core::sync::atomic::{AtomicBool, AtomicU32, Ordering}; +use log::info; + +static TEST_PASSED: AtomicBool = AtomicBool::new(false); +static WRITE_COUNT: AtomicU32 = AtomicU32::new(0); +static EXPECTED_LINES: AtomicU32 = AtomicU32::new(0); + +const TEST_FILE_PATH: &str = "test_consistency.txt"; +const NUM_WRITERS: usize = 3; +const WRITES_PER_TASK: usize = 5; + +pub async fn run() { + // Initialize memory FatFS if not already done + match init_memory_fatfs() { + Ok(_) => info!("In-memory FAT filesystem initialized for consistency test."), + Err(e) => { + info!("Failed to initialize in-memory FAT filesystem: {:?}", e); + return; + } + } + + awkernel_async_lib::spawn( + "fatfs consistency test".into(), + consistency_test(), + SchedulerType::FIFO, + ) + .await + .join() + .await; +} + +async fn writer_task(id: usize) -> usize { + info!("Writer {} starting", id); + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join(TEST_FILE_PATH).unwrap(); + + // Keep file handle open for all writes + let mut file = match file_path.create_file().await { + Ok(f) => { + info!("Writer {} created/opened file", id); + f + }, + Err(e) => { + info!("Writer {} failed to create file: {:?}", id, e); + return 0; + } + }; + + let mut lines_written = 0; + + for i in 0..WRITES_PER_TASK { + // Seek to end to append + use 
awkernel_lib::file::io::SeekFrom; + match file.seek(SeekFrom::End(0)).await { + Ok(pos) => { + info!("Writer {} at position {} for iteration {}", id, pos, i); + }, + Err(e) => { + info!("Writer {} failed to seek: {:?}", id, e); + continue; + } + } + + // Write data unique to this writer + let data = format!("Writer {} iteration {}\n", id, i); + match file.write(data.as_bytes()).await { + Ok(_) => { + lines_written += 1; + WRITE_COUNT.fetch_add(1, Ordering::Relaxed); + info!("Writer {} wrote iteration {}", id, i); + }, + Err(e) => { + info!("Writer {} failed to write: {:?}", id, e); + continue; + } + } + + // Yield to allow other tasks to run + for _ in 0..3 { + awkernel_async_lib::r#yield().await; + } + } + + // Explicitly drop the file + drop(file); + info!("Writer {} finished, wrote {} lines", id, lines_written); + + lines_written +} + +async fn monitor_task() { + info!("Monitor starting"); + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join(TEST_FILE_PATH).unwrap(); + + // Wait a bit for writers to start + for _ in 0..20 { + awkernel_async_lib::r#yield().await; + } + + let mut checks = 0; + loop { + match file_path.open_file().await { + Ok(mut file) => { + let mut buffer = vec![0u8; 2048]; + match file.read(&mut buffer).await { + Ok(bytes_read) => { + if let Ok(content) = core::str::from_utf8(&buffer[..bytes_read]) { + let lines: Vec<&str> = content.lines().filter(|l| !l.is_empty()).collect(); + info!("Monitor: File has {} lines", lines.len()); + } + } + Err(e) => { + info!("Monitor: Failed to read: {:?}", e); + } + } + } + Err(_) => { + info!("Monitor: File not found yet"); + } + } + + checks += 1; + if checks > 20 { + break; + } + + // Check periodically + for _ in 0..10 { + awkernel_async_lib::r#yield().await; + } + } + + info!("Monitor finished"); +} + +async fn consistency_test() { + info!("Starting FatFS consistency test"); + + // First, ensure we can create a file + info!("Pre-test: Creating initial file"); + { + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join(TEST_FILE_PATH).unwrap(); + match file_path.create_file().await { + Ok(mut f) => { + f.write(b"Initial line\n").await.expect("Initial write failed"); + EXPECTED_LINES.store(1, Ordering::Relaxed); + info!("Pre-test: Initial file created successfully"); + } + Err(e) => { + info!("Pre-test: Failed to create initial file: {:?}", e); + return; + } + } + } + + // Spawn writer tasks + let mut writer_handles = Vec::new(); + for i in 0..NUM_WRITERS { + let handle = awkernel_async_lib::spawn( + Cow::Owned(format!("writer_{}", i)), + writer_task(i), + SchedulerType::FIFO, + ) + .await; + writer_handles.push(handle); + + // Small delay between spawning writers + for _ in 0..2 { + awkernel_async_lib::r#yield().await; + } + } + + // Spawn monitor task + let monitor_handle = awkernel_async_lib::spawn( + "monitor".into(), + monitor_task(), + SchedulerType::FIFO, + ) + .await; + + // Wait for all writers to complete + let mut total_lines_written = 0; + for handle in writer_handles { + match handle.join().await { + Ok(lines) => total_lines_written += lines, + Err(_) => info!("Writer task was cancelled") + } + } + + info!("All writers completed, total lines written: {}", total_lines_written); + EXPECTED_LINES.fetch_add(total_lines_written as u32, Ordering::Relaxed); + + // Wait for monitor to finish + let _ = monitor_handle.join().await; + + // Give some time for file system to settle + for _ in 0..10 { + awkernel_async_lib::r#yield().await; + } + + // Final check - 
read the file + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join(TEST_FILE_PATH).unwrap(); + + match file_path.open_file().await { + Ok(mut file) => { + let mut buffer = vec![0u8; 4096]; + let bytes_read = file.read(&mut buffer).await.unwrap_or(0); + + if let Ok(content) = core::str::from_utf8(&buffer[..bytes_read]) { + let lines: Vec<&str> = content.lines().filter(|l| !l.is_empty()).collect(); + let expected = EXPECTED_LINES.load(Ordering::Relaxed) as usize; + + info!("Final check: Found {} lines, expected {}", lines.len(), expected); + + // Print first few lines + for (i, line) in lines.iter().take(5).enumerate() { + info!(" Line {}: {}", i, line); + } + + if lines.len() == expected { + info!("SUCCESS: File consistency maintained!"); + TEST_PASSED.store(true, Ordering::Relaxed); + } else { + info!("FAILURE: Line count mismatch"); + } + } + } + Err(e) => { + info!("Final check: Failed to open file: {:?}", e); + } + } + + if TEST_PASSED.load(Ordering::Relaxed) { + info!("FatFS consistency test PASSED!"); + } else { + info!("FatFS consistency test FAILED!"); + } +} \ No newline at end of file diff --git a/applications/tests/test_fatfs_simple_consistency/Cargo.toml b/applications/tests/test_fatfs_simple_consistency/Cargo.toml new file mode 100644 index 000000000..69ce11776 --- /dev/null +++ b/applications/tests/test_fatfs_simple_consistency/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "test_fatfs_simple_consistency" +version = "0.1.0" +edition = "2024" + +[dependencies] +log = "0.4" + +[dependencies.awkernel_async_lib] +path = "../../../awkernel_async_lib" +default-features = false + +[dependencies.awkernel_lib] +path = "../../../awkernel_lib" +default-features = false \ No newline at end of file diff --git a/applications/tests/test_fatfs_simple_consistency/src/lib.rs b/applications/tests/test_fatfs_simple_consistency/src/lib.rs new file mode 100644 index 000000000..91f9273e9 --- /dev/null +++ b/applications/tests/test_fatfs_simple_consistency/src/lib.rs @@ -0,0 +1,111 @@ +#![no_std] + +extern crate alloc; + +use alloc::vec; +use awkernel_async_lib::scheduler::SchedulerType; +use awkernel_async_lib::file::path::AsyncVfsPath; +use awkernel_lib::file::fatfs::init_memory_fatfs; +use log::info; + +pub async fn run() { + // Initialize memory FatFS if not already done + match init_memory_fatfs() { + Ok(_) => info!("In-memory FAT filesystem initialized for simple consistency test."), + Err(e) => { + info!("Failed to initialize in-memory FAT filesystem: {e:?}"); + return; + } + } + + awkernel_async_lib::spawn( + "fatfs simple consistency test".into(), + simple_consistency_test(), + SchedulerType::FIFO, + ) + .await; +} + +async fn simple_consistency_test() { + info!("Starting simple FatFS consistency test"); + + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + + // Test 1: Simple create, write, close, read (known to work) + info!("Test 1: Simple sequential test"); + { + let file_path = root_path.join("test1.txt").unwrap(); + + // Create and write + let mut file = file_path.create_file().await.expect("Failed to create test1"); + file.write(b"Test 1 data").await.expect("Failed to write test1"); + drop(file); + + // Read back + let mut file = file_path.open_file().await.expect("Failed to open test1"); + let mut buf = vec![0u8; 100]; + let read = file.read(&mut buf).await.expect("Failed to read test1"); + info!("Test 1: Read {} bytes", read); + } + + // Test 2: Create file with one handle, then create another handle before closing first + info!("Test 2: Two 
handles, close in order"); + { + let file_path = root_path.join("test2.txt").unwrap(); + + // Create first handle and write + let mut file1 = file_path.create_file().await.expect("Failed to create test2 handle 1"); + file1.write(b"Handle 1 data").await.expect("Failed to write with handle 1"); + + // Create second handle while first is still open + let file2 = file_path.create_file().await.expect("Failed to create test2 handle 2"); + + // Close both + drop(file1); + drop(file2); + + // Try to read + match file_path.open_file().await { + Ok(mut file) => { + let mut buf = vec![0u8; 100]; + let read = file.read(&mut buf).await.expect("Failed to read test2"); + info!("Test 2: SUCCESS - Read {} bytes", read); + } + Err(e) => { + info!("Test 2: FAILED - Could not open file: {:?}", e); + } + } + } + + // Test 3: Create handle, close it, then create another handle + info!("Test 3: Sequential handles"); + { + let file_path = root_path.join("test3.txt").unwrap(); + + // Create first handle, write and close + let mut file1 = file_path.create_file().await.expect("Failed to create test3 handle 1"); + file1.write(b"First write").await.expect("Failed to write first"); + drop(file1); + + // Create second handle, write and close + let mut file2 = file_path.create_file().await.expect("Failed to create test3 handle 2"); + use awkernel_lib::file::io::SeekFrom; + file2.seek(SeekFrom::End(0)).await.expect("Failed to seek"); + file2.write(b" Second write").await.expect("Failed to write second"); + drop(file2); + + // Try to read + match file_path.open_file().await { + Ok(mut file) => { + let mut buf = vec![0u8; 100]; + let read = file.read(&mut buf).await.expect("Failed to read test3"); + info!("Test 3: Read {} bytes: {:?}", read, core::str::from_utf8(&buf[..read])); + } + Err(e) => { + info!("Test 3: Could not open file: {:?}", e); + } + } + } + + info!("Simple FatFS consistency test completed"); +} \ No newline at end of file diff --git a/awkernel_lib/src/file/fatfs/dir_entry.rs b/awkernel_lib/src/file/fatfs/dir_entry.rs index c3047d1f0..2be4cffe8 100644 --- a/awkernel_lib/src/file/fatfs/dir_entry.rs +++ b/awkernel_lib/src/file/fatfs/dir_entry.rs @@ -204,7 +204,7 @@ impl DirFileEntryData { } } - fn set_size(&mut self, size: u32) { + pub(crate) fn set_size(&mut self, size: u32) { self.size = size; } @@ -224,15 +224,15 @@ impl DirFileEntryData { self.reserved_0 & (1 << 4) != 0 } - fn created(&self) -> DateTime { + pub(crate) fn created(&self) -> DateTime { DateTime::decode(self.create_date, self.create_time_1, self.create_time_0) } - fn accessed(&self) -> Date { + pub(crate) fn accessed(&self) -> Date { Date::decode(self.access_date) } - fn modified(&self) -> DateTime { + pub(crate) fn modified(&self) -> DateTime { DateTime::decode(self.modify_date, self.modify_time, 0) } @@ -644,7 +644,12 @@ impl DirEntry File { assert!(!self.is_dir(), "Not a file entry"); - File::new(self.first_cluster(), Some(self.editor()), self.fs.clone()) + let metadata = self.fs.get_or_create_metadata( + self.entry_pos, + self.data.clone(), + self.first_cluster(), + ); + File::new(Some(metadata), self.fs.clone()) } /// Returns `Dir` struct for this entry. 
@@ -656,8 +661,13 @@ impl DirEntry Dir { assert!(self.is_dir(), "Not a directory entry"); match self.first_cluster() { - Some(n) => { - let file = File::new(Some(n), Some(self.editor()), self.fs.clone()); + Some(_) => { + let metadata = self.fs.get_or_create_metadata( + self.entry_pos, + self.data.clone(), + self.first_cluster(), + ); + let file = File::new(Some(metadata), self.fs.clone()); Dir::new(DirRawStream::File(file), self.fs.clone()) } None => FileSystem::root_dir(&self.fs), diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 22f9b8396..0400704a5 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -3,27 +3,94 @@ use core::convert::TryFrom; use core::fmt::Debug; use super::super::io::{IoBase, Read, Seek, SeekFrom, Write}; -use super::dir_entry::DirEntryEditor; +use super::dir_entry::DirFileEntryData; use super::error::Error; -use super::fs::{FileSystem, ReadWriteSeek}; +use super::fs::{FileSystem, ReadWriteSeek, FatType}; use super::time::{Date, DateTime, TimeProvider}; use awkernel_sync::mcs::MCSNode; +use awkernel_sync::mutex::Mutex; const MAX_FILE_SIZE: u32 = u32::MAX; +/// Shared file metadata that is consistent across all file handles +#[derive(Debug)] +pub(crate) struct FileMetadata { + // Core file information + pub(crate) first_cluster: Option, + pub(crate) entry_data: DirFileEntryData, + pub(crate) entry_pos: u64, + pub(crate) size: u32, + pub(crate) dirty: bool, +} + +impl FileMetadata { + pub(crate) fn new(first_cluster: Option, entry_data: DirFileEntryData, entry_pos: u64) -> Self { + let size = entry_data.size().unwrap_or(0); + Self { + first_cluster, + entry_data, + entry_pos, + size, + dirty: false, + } + } + + pub(crate) fn size(&self) -> Option { + if self.entry_data.is_dir() { + None + } else { + Some(self.size) + } + } + + pub(crate) fn set_size(&mut self, size: u32) { + if self.size != size { + self.size = size; + self.dirty = true; + } + } + + pub(crate) fn set_first_cluster(&mut self, cluster: Option, fat_type: FatType) { + if cluster != self.first_cluster { + self.first_cluster = cluster; + self.entry_data.set_first_cluster(cluster, fat_type); + self.dirty = true; + } + } + + pub(crate) fn set_modified(&mut self, date_time: DateTime) { + self.entry_data.set_modified(date_time); + self.dirty = true; + } + + pub(crate) fn set_accessed(&mut self, date: Date) { + self.entry_data.set_accessed(date); + self.dirty = true; + } + + pub(crate) fn flush(&mut self, disk: &mut IO) -> Result<(), IO::Error> { + if self.dirty { + // Update size before serializing + self.entry_data.set_size(self.size); + disk.seek(SeekFrom::Start(self.entry_pos))?; + self.entry_data.serialize(disk)?; + self.dirty = false; + } + Ok(()) + } +} + /// A FAT filesystem file object used for reading and writing data. /// /// This struct is created by the `open_file` or `create_file` methods on `Dir`. 
pub struct File { - // Note first_cluster is None if file is empty - first_cluster: Option, + // Shared metadata for this file (None for root dir) + metadata: Option>>, // Note: if offset points between clusters current_cluster is the previous cluster current_cluster: Option, // current position in this file offset: u32, - // file dir entry editor - None for root dir - entry: Option, // file-system reference fs: Arc>, } @@ -41,13 +108,11 @@ pub struct Extent { impl File { pub(crate) fn new( - first_cluster: Option, - entry: Option, + metadata: Option>>, fs: Arc>, ) -> Self { File { - first_cluster, - entry, + metadata, fs, current_cluster: None, // cluster before first one offset: 0, @@ -65,26 +130,30 @@ impl File { /// Will panic if this is the root directory. pub fn truncate(&mut self) -> Result<(), Error> { log::trace!("File::truncate"); - if let Some(ref mut e) = self.entry { - e.set_size(self.offset); + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + metadata.set_size(self.offset); if self.offset == 0 { - e.set_first_cluster(None, self.fs.fat_type()); + metadata.set_first_cluster(None, self.fs.fat_type()); + } + let first_cluster = metadata.first_cluster; + drop(metadata); + + if let Some(current_cluster) = self.current_cluster { + // current cluster is none only if offset is 0 + debug_assert!(self.offset > 0); + FileSystem::truncate_cluster_chain(&self.fs, current_cluster) + } else { + debug_assert!(self.offset == 0); + if let Some(n) = first_cluster { + FileSystem::free_cluster_chain(&self.fs, n)?; + } + Ok(()) } } else { // Note: we cannot handle this case because there is no size field - panic!("Trying to truncate a file without an entry"); - } - if let Some(current_cluster) = self.current_cluster { - // current cluster is none only if offset is 0 - debug_assert!(self.offset > 0); - FileSystem::truncate_cluster_chain(&self.fs, current_cluster) - } else { - debug_assert!(self.offset == 0); - if let Some(n) = self.first_cluster { - FileSystem::free_cluster_chain(&self.fs, n)?; - self.first_cluster = None; - } - Ok(()) + panic!("Trying to truncate a file without metadata"); } } @@ -98,7 +167,14 @@ impl File { let Some(mut bytes_left) = self.size() else { return None.into_iter().flatten(); }; - let Some(first) = self.first_cluster else { + let first_cluster = if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + metadata.first_cluster + } else { + None + }; + let Some(first) = first_cluster else { return None.into_iter().flatten(); }; @@ -143,8 +219,12 @@ impl File { } fn flush_dir_entry(&mut self) -> Result<(), Error> { - if let Some(ref mut e) = self.entry { - e.flush(&self.fs)?; + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + let mut disk_node = MCSNode::new(); + let mut disk = self.fs.disk.lock(&mut disk_node); + metadata.flush(&mut *disk)?; } Ok(()) } @@ -154,8 +234,11 @@ impl File { /// Note: it is set to a value from the `TimeProvider` when creating a file. 
#[deprecated] pub fn set_created(&mut self, date_time: DateTime) { - if let Some(ref mut e) = self.entry { - e.set_created(date_time); + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + metadata.entry_data.set_created(date_time); + metadata.dirty = true; } } @@ -164,8 +247,10 @@ impl File { /// Note: it is overwritten by a value from the `TimeProvider` on every file read operation. #[deprecated] pub fn set_accessed(&mut self, date: Date) { - if let Some(ref mut e) = self.entry { - e.set_accessed(date); + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + metadata.set_accessed(date); } } @@ -174,22 +259,30 @@ impl File { /// Note: it is overwritten by a value from the `TimeProvider` on every file write operation. #[deprecated] pub fn set_modified(&mut self, date_time: DateTime) { - if let Some(ref mut e) = self.entry { - e.set_modified(date_time); + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + metadata.set_modified(date_time); } } fn size(&self) -> Option { - match self.entry { - Some(ref e) => e.inner().size(), - None => None, + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + metadata.size() + } else { + None } } fn is_dir(&self) -> bool { - match self.entry { - Some(ref e) => e.inner().is_dir(), - None => true, // root directory + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + metadata.entry_data.is_dir() + } else { + true // root directory } } @@ -199,14 +292,21 @@ impl File { } fn set_first_cluster(&mut self, cluster: u32) { - self.first_cluster = Some(cluster); - if let Some(ref mut e) = self.entry { - e.set_first_cluster(self.first_cluster, self.fs.fat_type()); + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + metadata.set_first_cluster(Some(cluster), self.fs.fat_type()); } } pub(crate) fn first_cluster(&self) -> Option { - self.first_cluster + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + metadata.first_cluster + } else { + None + } } fn flush(&mut self) -> Result<(), Error> { @@ -218,18 +318,20 @@ impl File { } pub(crate) fn is_root_dir(&self) -> bool { - self.entry.is_none() + self.metadata.is_none() } } impl File { fn update_dir_entry_after_write(&mut self) { let offset = self.offset; - if let Some(ref mut e) = self.entry { + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); let now = self.fs.options.time_provider.get_current_date_time(); - e.set_modified(now); - if e.inner().size().is_some_and(|s| offset > s) { - e.set_size(offset); + metadata.set_modified(now); + if metadata.size().is_some_and(|s| offset > s) { + metadata.set_size(offset); } } } @@ -247,10 +349,9 @@ impl Drop for File { impl Clone for File { fn clone(&self) -> Self { File { - first_cluster: self.first_cluster, + metadata: self.metadata.clone(), current_cluster: self.current_cluster, offset: self.offset, - entry: self.entry.clone(), fs: Arc::clone(&self.fs), } } @@ -267,7 +368,7 @@ impl Read for File self.first_cluster, + None => self.first_cluster(), 
Some(n) => { let r = FileSystem::cluster_iter(&self.fs, n).next(); match r { @@ -305,10 +406,12 @@ impl Read for File Write for File self.first_cluster, + None => self.first_cluster(), Some(n) => { let r = FileSystem::cluster_iter(&self.fs, n).next(); match r { @@ -353,7 +456,7 @@ impl Write for File Seek for File { } } log::trace!( - "file seek {} -> {} - entry {:?}", + "file seek {} -> {} - metadata {:?}", self.offset, new_offset, - self.entry + self.metadata.is_some() ); if new_offset == self.offset { // position is the same - nothing to do @@ -428,7 +531,7 @@ impl Seek for File { None } else if new_offset_in_clusters == old_offset_in_clusters { self.current_cluster - } else if let Some(first_cluster) = self.first_cluster { + } else if let Some(first_cluster) = self.first_cluster() { // calculate number of clusters to skip // return the previous cluster if the offset points to the cluster boundary // Note: new_offset_in_clusters cannot be 0 here because new_offset is not 0 diff --git a/awkernel_lib/src/file/fatfs/fs.rs b/awkernel_lib/src/file/fatfs/fs.rs index 7ec65f942..ec1660536 100644 --- a/awkernel_lib/src/file/fatfs/fs.rs +++ b/awkernel_lib/src/file/fatfs/fs.rs @@ -1,5 +1,6 @@ use alloc::string::String; use alloc::sync::Arc; +use alloc::collections::BTreeMap; use core::borrow::BorrowMut; use core::convert::TryFrom; use core::fmt::Debug; @@ -10,7 +11,7 @@ use super::boot_sector::{format_boot_sector, BiosParameterBlock, BootSector}; use super::dir::{Dir, DirRawStream}; use super::dir_entry::{DirFileEntryData, FileAttributes, SFN_PADDING, SFN_SIZE}; use super::error::Error; -use super::file::File; +use super::file::{File, FileMetadata}; use super::table::{ alloc_cluster, count_free_clusters, format_fat, read_fat_flags, ClusterIterator, RESERVED_FAT_ENTRIES, @@ -342,6 +343,8 @@ pub struct FileSystem< total_clusters: u32, fs_info: Mutex, current_status_flags: Mutex, + // Metadata cache for open files - maps entry position to shared metadata + pub(crate) metadata_cache: Mutex>>>, } impl Debug for FileSystem @@ -361,6 +364,7 @@ where .field("total_clusters", &self.total_clusters) .field("fs_info", &" fs_info") .field("current_status_flags", &" status flags") + .field("metadata_cache", &" metadata cache") .finish() } } @@ -448,6 +452,7 @@ impl FileSystem { total_clusters, fs_info: Mutex::new(fs_info), current_status_flags: Mutex::new(status_flags), + metadata_cache: Mutex::new(BTreeMap::new()), }) } @@ -700,7 +705,6 @@ impl FileSystem { FsIoAdapter { fs: Arc::clone(fs) }, )), FatType::Fat32 => DirRawStream::File(File::new( - Some(fs.bpb.root_dir_first_cluster), None, Arc::clone(fs), )), @@ -725,6 +729,27 @@ impl FileSystem + FileSystem +{ + /// Get or create shared metadata for a file + pub(crate) fn get_or_create_metadata( + &self, + entry_pos: u64, + entry_data: DirFileEntryData, + first_cluster: Option, + ) -> Arc> { + let mut node = MCSNode::new(); + let mut cache = self.metadata_cache.lock(&mut node); + + cache.entry(entry_pos) + .or_insert_with(|| { + Arc::new(Mutex::new(FileMetadata::new(first_cluster, entry_data, entry_pos))) + }) + .clone() + } +} + impl FileSystem { diff --git a/userland/Cargo.toml b/userland/Cargo.toml index f418ef706..9bc923290 100644 --- a/userland/Cargo.toml +++ b/userland/Cargo.toml @@ -86,8 +86,16 @@ optional = true path = "../applications/tests/test_memfatfs" optional = true +[dependencies.test_fatfs_consistency] +path = "../applications/tests/test_fatfs_consistency" +optional = true + +[dependencies.test_fatfs_simple_consistency] +path = 
"../applications/tests/test_fatfs_simple_consistency" +optional = true + [features] -default = ["test_memfatfs"] +default = ["test_fatfs_consistency"] perf = ["awkernel_services/perf"] # Evaluation applications @@ -109,4 +117,6 @@ test_measure_channel_heavy = ["dep:test_measure_channel_heavy"] test_sched_preempt = ["dep:test_sched_preempt"] test_dag = ["dep:test_dag"] test_voluntary_preemption = ["dep:test_voluntary_preemption"] -test_memfatfs = ["dep:test_memfatfs"] \ No newline at end of file +test_memfatfs = ["dep:test_memfatfs"] +test_fatfs_consistency = ["dep:test_fatfs_consistency"] +test_fatfs_simple_consistency = ["dep:test_fatfs_simple_consistency"] \ No newline at end of file diff --git a/userland/src/lib.rs b/userland/src/lib.rs index 464fce029..74c90f4e4 100644 --- a/userland/src/lib.rs +++ b/userland/src/lib.rs @@ -61,5 +61,11 @@ pub async fn main() -> Result<(), Cow<'static, str>> { #[cfg(feature = "test_memfatfs")] test_memfatfs::run().await; // test for filesystem + #[cfg(feature = "test_fatfs_consistency")] + test_fatfs_consistency::run().await; // test for filesystem consistency + + #[cfg(feature = "test_fatfs_simple_consistency")] + test_fatfs_simple_consistency::run().await; // test for simple filesystem consistency + Ok(()) } From bc4cce0a4f9d02c7aebe631318411724758c7e3f Mon Sep 17 00:00:00 2001 From: Koichi Imai Date: Thu, 10 Jul 2025 15:14:04 +0900 Subject: [PATCH 02/27] delete unnecessary comment & cargo fmt Signed-off-by: Koichi Imai --- awkernel_lib/src/file/fatfs/file.rs | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 0400704a5..9a78e5954 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -5,7 +5,7 @@ use core::fmt::Debug; use super::super::io::{IoBase, Read, Seek, SeekFrom, Write}; use super::dir_entry::DirFileEntryData; use super::error::Error; -use super::fs::{FileSystem, ReadWriteSeek, FatType}; +use super::fs::{FatType, FileSystem, ReadWriteSeek}; use super::time::{Date, DateTime, TimeProvider}; use awkernel_sync::mcs::MCSNode; @@ -25,7 +25,11 @@ pub(crate) struct FileMetadata { } impl FileMetadata { - pub(crate) fn new(first_cluster: Option, entry_data: DirFileEntryData, entry_pos: u64) -> Self { + pub(crate) fn new( + first_cluster: Option, + entry_data: DirFileEntryData, + entry_pos: u64, + ) -> Self { let size = entry_data.size().unwrap_or(0); Self { first_cluster, @@ -35,7 +39,7 @@ impl FileMetadata { dirty: false, } } - + pub(crate) fn size(&self) -> Option { if self.entry_data.is_dir() { None @@ -43,14 +47,14 @@ impl FileMetadata { Some(self.size) } } - + pub(crate) fn set_size(&mut self, size: u32) { if self.size != size { self.size = size; self.dirty = true; } } - + pub(crate) fn set_first_cluster(&mut self, cluster: Option, fat_type: FatType) { if cluster != self.first_cluster { self.first_cluster = cluster; @@ -58,18 +62,21 @@ impl FileMetadata { self.dirty = true; } } - + pub(crate) fn set_modified(&mut self, date_time: DateTime) { self.entry_data.set_modified(date_time); self.dirty = true; } - + pub(crate) fn set_accessed(&mut self, date: Date) { self.entry_data.set_accessed(date); self.dirty = true; } - - pub(crate) fn flush(&mut self, disk: &mut IO) -> Result<(), IO::Error> { + + pub(crate) fn flush( + &mut self, + disk: &mut IO, + ) -> Result<(), IO::Error> { if self.dirty { // Update size before serializing self.entry_data.set_size(self.size); @@ -139,7 +146,7 @@ impl File 
{ } let first_cluster = metadata.first_cluster; drop(metadata); - + if let Some(current_cluster) = self.current_cluster { // current cluster is none only if offset is 0 debug_assert!(self.offset > 0); @@ -152,7 +159,6 @@ impl File { Ok(()) } } else { - // Note: we cannot handle this case because there is no size field panic!("Trying to truncate a file without metadata"); } } From 9f184c8e0ecaafa68665c686cd3148dc372c959e Mon Sep 17 00:00:00 2001 From: Koichi Imai Date: Thu, 10 Jul 2025 21:41:36 +0900 Subject: [PATCH 03/27] minor fix Signed-off-by: Koichi Imai --- awkernel_lib/src/file/fatfs/file.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 9a78e5954..5f0a27551 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -63,6 +63,11 @@ impl FileMetadata { } } + pub(crate) fn set_created(&mut self, date_time: DateTime) { + self.entry_data.set_created(date_time); + self.dirty = true; + } + pub(crate) fn set_modified(&mut self, date_time: DateTime) { self.entry_data.set_modified(date_time); self.dirty = true; @@ -159,7 +164,7 @@ impl File { Ok(()) } } else { - panic!("Trying to truncate a file without metadata"); + panic!("Trying to truncate a file without metadata (root directory?)"); } } @@ -243,8 +248,7 @@ impl File { if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); let mut metadata = metadata_arc.lock(&mut node); - metadata.entry_data.set_created(date_time); - metadata.dirty = true; + metadata.set_created(date_time); } } From e120ad7b90ca3d470428fd911d0c57fabd6c67bc Mon Sep 17 00:00:00 2001 From: Koichi Date: Mon, 14 Jul 2025 22:53:11 +0900 Subject: [PATCH 04/27] format Signed-off-by: Koichi --- .../tests/test_fatfs_consistency/src/lib.rs | 98 ++++++++++--------- .../test_fatfs_simple_consistency/src/lib.rs | 84 +++++++++++----- userland/Cargo.toml | 2 +- 3 files changed, 110 insertions(+), 74 deletions(-) diff --git a/applications/tests/test_fatfs_consistency/src/lib.rs b/applications/tests/test_fatfs_consistency/src/lib.rs index 3e3e39961..e88579a6a 100644 --- a/applications/tests/test_fatfs_consistency/src/lib.rs +++ b/applications/tests/test_fatfs_consistency/src/lib.rs @@ -2,12 +2,12 @@ extern crate alloc; +use alloc::borrow::Cow; use alloc::format; -use alloc::vec::Vec; use alloc::vec; -use alloc::borrow::Cow; -use awkernel_async_lib::scheduler::SchedulerType; +use alloc::vec::Vec; use awkernel_async_lib::file::path::AsyncVfsPath; +use awkernel_async_lib::scheduler::SchedulerType; use awkernel_lib::file::fatfs::init_memory_fatfs; use core::sync::atomic::{AtomicBool, AtomicU32, Ordering}; use log::info; @@ -29,7 +29,7 @@ pub async fn run() { return; } } - + awkernel_async_lib::spawn( "fatfs consistency test".into(), consistency_test(), @@ -44,34 +44,34 @@ async fn writer_task(id: usize) -> usize { info!("Writer {} starting", id); let root_path = AsyncVfsPath::new_in_memory_fatfs(); let file_path = root_path.join(TEST_FILE_PATH).unwrap(); - + // Keep file handle open for all writes let mut file = match file_path.create_file().await { Ok(f) => { info!("Writer {} created/opened file", id); f - }, + } Err(e) => { info!("Writer {} failed to create file: {:?}", id, e); return 0; } }; - + let mut lines_written = 0; - + for i in 0..WRITES_PER_TASK { // Seek to end to append use awkernel_lib::file::io::SeekFrom; match file.seek(SeekFrom::End(0)).await { Ok(pos) => { info!("Writer {} at position {} for iteration {}", 
id, pos, i); - }, + } Err(e) => { info!("Writer {} failed to seek: {:?}", id, e); continue; } } - + // Write data unique to this writer let data = format!("Writer {} iteration {}\n", id, i); match file.write(data.as_bytes()).await { @@ -79,23 +79,23 @@ async fn writer_task(id: usize) -> usize { lines_written += 1; WRITE_COUNT.fetch_add(1, Ordering::Relaxed); info!("Writer {} wrote iteration {}", id, i); - }, + } Err(e) => { info!("Writer {} failed to write: {:?}", id, e); continue; } } - + // Yield to allow other tasks to run for _ in 0..3 { awkernel_async_lib::r#yield().await; } } - + // Explicitly drop the file drop(file); info!("Writer {} finished, wrote {} lines", id, lines_written); - + lines_written } @@ -103,12 +103,12 @@ async fn monitor_task() { info!("Monitor starting"); let root_path = AsyncVfsPath::new_in_memory_fatfs(); let file_path = root_path.join(TEST_FILE_PATH).unwrap(); - + // Wait a bit for writers to start for _ in 0..20 { awkernel_async_lib::r#yield().await; } - + let mut checks = 0; loop { match file_path.open_file().await { @@ -117,7 +117,8 @@ async fn monitor_task() { match file.read(&mut buffer).await { Ok(bytes_read) => { if let Ok(content) = core::str::from_utf8(&buffer[..bytes_read]) { - let lines: Vec<&str> = content.lines().filter(|l| !l.is_empty()).collect(); + let lines: Vec<&str> = + content.lines().filter(|l| !l.is_empty()).collect(); info!("Monitor: File has {} lines", lines.len()); } } @@ -130,24 +131,24 @@ async fn monitor_task() { info!("Monitor: File not found yet"); } } - + checks += 1; if checks > 20 { break; } - + // Check periodically for _ in 0..10 { awkernel_async_lib::r#yield().await; } } - + info!("Monitor finished"); } async fn consistency_test() { info!("Starting FatFS consistency test"); - + // First, ensure we can create a file info!("Pre-test: Creating initial file"); { @@ -155,7 +156,9 @@ async fn consistency_test() { let file_path = root_path.join(TEST_FILE_PATH).unwrap(); match file_path.create_file().await { Ok(mut f) => { - f.write(b"Initial line\n").await.expect("Initial write failed"); + f.write(b"Initial line\n") + .await + .expect("Initial write failed"); EXPECTED_LINES.store(1, Ordering::Relaxed); info!("Pre-test: Initial file created successfully"); } @@ -165,7 +168,7 @@ async fn consistency_test() { } } } - + // Spawn writer tasks let mut writer_handles = Vec::new(); for i in 0..NUM_WRITERS { @@ -176,61 +179,64 @@ async fn consistency_test() { ) .await; writer_handles.push(handle); - + // Small delay between spawning writers for _ in 0..2 { awkernel_async_lib::r#yield().await; } } - + // Spawn monitor task - let monitor_handle = awkernel_async_lib::spawn( - "monitor".into(), - monitor_task(), - SchedulerType::FIFO, - ) - .await; - + let monitor_handle = + awkernel_async_lib::spawn("monitor".into(), monitor_task(), SchedulerType::FIFO).await; + // Wait for all writers to complete let mut total_lines_written = 0; for handle in writer_handles { match handle.join().await { Ok(lines) => total_lines_written += lines, - Err(_) => info!("Writer task was cancelled") + Err(_) => info!("Writer task was cancelled"), } } - - info!("All writers completed, total lines written: {}", total_lines_written); + + info!( + "All writers completed, total lines written: {}", + total_lines_written + ); EXPECTED_LINES.fetch_add(total_lines_written as u32, Ordering::Relaxed); - + // Wait for monitor to finish let _ = monitor_handle.join().await; - + // Give some time for file system to settle for _ in 0..10 { awkernel_async_lib::r#yield().await; } - + 
// Final check - read the file let root_path = AsyncVfsPath::new_in_memory_fatfs(); let file_path = root_path.join(TEST_FILE_PATH).unwrap(); - + match file_path.open_file().await { Ok(mut file) => { let mut buffer = vec![0u8; 4096]; let bytes_read = file.read(&mut buffer).await.unwrap_or(0); - + if let Ok(content) = core::str::from_utf8(&buffer[..bytes_read]) { let lines: Vec<&str> = content.lines().filter(|l| !l.is_empty()).collect(); let expected = EXPECTED_LINES.load(Ordering::Relaxed) as usize; - - info!("Final check: Found {} lines, expected {}", lines.len(), expected); - + + info!( + "Final check: Found {} lines, expected {}", + lines.len(), + expected + ); + // Print first few lines for (i, line) in lines.iter().take(5).enumerate() { info!(" Line {}: {}", i, line); } - + if lines.len() == expected { info!("SUCCESS: File consistency maintained!"); TEST_PASSED.store(true, Ordering::Relaxed); @@ -243,10 +249,10 @@ async fn consistency_test() { info!("Final check: Failed to open file: {:?}", e); } } - + if TEST_PASSED.load(Ordering::Relaxed) { info!("FatFS consistency test PASSED!"); } else { info!("FatFS consistency test FAILED!"); } -} \ No newline at end of file +} diff --git a/applications/tests/test_fatfs_simple_consistency/src/lib.rs b/applications/tests/test_fatfs_simple_consistency/src/lib.rs index 91f9273e9..5d63670eb 100644 --- a/applications/tests/test_fatfs_simple_consistency/src/lib.rs +++ b/applications/tests/test_fatfs_simple_consistency/src/lib.rs @@ -3,8 +3,8 @@ extern crate alloc; use alloc::vec; -use awkernel_async_lib::scheduler::SchedulerType; use awkernel_async_lib::file::path::AsyncVfsPath; +use awkernel_async_lib::scheduler::SchedulerType; use awkernel_lib::file::fatfs::init_memory_fatfs; use log::info; @@ -17,7 +17,7 @@ pub async fn run() { return; } } - + awkernel_async_lib::spawn( "fatfs simple consistency test".into(), simple_consistency_test(), @@ -28,42 +28,56 @@ pub async fn run() { async fn simple_consistency_test() { info!("Starting simple FatFS consistency test"); - + let root_path = AsyncVfsPath::new_in_memory_fatfs(); - + // Test 1: Simple create, write, close, read (known to work) info!("Test 1: Simple sequential test"); { let file_path = root_path.join("test1.txt").unwrap(); - + // Create and write - let mut file = file_path.create_file().await.expect("Failed to create test1"); - file.write(b"Test 1 data").await.expect("Failed to write test1"); + let mut file = file_path + .create_file() + .await + .expect("Failed to create test1"); + file.write(b"Test 1 data") + .await + .expect("Failed to write test1"); drop(file); - + // Read back let mut file = file_path.open_file().await.expect("Failed to open test1"); let mut buf = vec![0u8; 100]; let read = file.read(&mut buf).await.expect("Failed to read test1"); info!("Test 1: Read {} bytes", read); } - + // Test 2: Create file with one handle, then create another handle before closing first info!("Test 2: Two handles, close in order"); { let file_path = root_path.join("test2.txt").unwrap(); - + // Create first handle and write - let mut file1 = file_path.create_file().await.expect("Failed to create test2 handle 1"); - file1.write(b"Handle 1 data").await.expect("Failed to write with handle 1"); - + let mut file1 = file_path + .create_file() + .await + .expect("Failed to create test2 handle 1"); + file1 + .write(b"Handle 1 data") + .await + .expect("Failed to write with handle 1"); + // Create second handle while first is still open - let file2 = file_path.create_file().await.expect("Failed to create test2 
handle 2"); - + let file2 = file_path + .create_file() + .await + .expect("Failed to create test2 handle 2"); + // Close both drop(file1); drop(file2); - + // Try to read match file_path.open_file().await { Ok(mut file) => { @@ -76,36 +90,52 @@ async fn simple_consistency_test() { } } } - + // Test 3: Create handle, close it, then create another handle info!("Test 3: Sequential handles"); { let file_path = root_path.join("test3.txt").unwrap(); - + // Create first handle, write and close - let mut file1 = file_path.create_file().await.expect("Failed to create test3 handle 1"); - file1.write(b"First write").await.expect("Failed to write first"); + let mut file1 = file_path + .create_file() + .await + .expect("Failed to create test3 handle 1"); + file1 + .write(b"First write") + .await + .expect("Failed to write first"); drop(file1); - + // Create second handle, write and close - let mut file2 = file_path.create_file().await.expect("Failed to create test3 handle 2"); + let mut file2 = file_path + .create_file() + .await + .expect("Failed to create test3 handle 2"); use awkernel_lib::file::io::SeekFrom; file2.seek(SeekFrom::End(0)).await.expect("Failed to seek"); - file2.write(b" Second write").await.expect("Failed to write second"); + file2 + .write(b" Second write") + .await + .expect("Failed to write second"); drop(file2); - + // Try to read match file_path.open_file().await { Ok(mut file) => { let mut buf = vec![0u8; 100]; let read = file.read(&mut buf).await.expect("Failed to read test3"); - info!("Test 3: Read {} bytes: {:?}", read, core::str::from_utf8(&buf[..read])); + info!( + "Test 3: Read {} bytes: {:?}", + read, + core::str::from_utf8(&buf[..read]) + ); } Err(e) => { info!("Test 3: Could not open file: {:?}", e); } } } - + info!("Simple FatFS consistency test completed"); -} \ No newline at end of file +} diff --git a/userland/Cargo.toml b/userland/Cargo.toml index 9bc923290..66457d7bb 100644 --- a/userland/Cargo.toml +++ b/userland/Cargo.toml @@ -119,4 +119,4 @@ test_dag = ["dep:test_dag"] test_voluntary_preemption = ["dep:test_voluntary_preemption"] test_memfatfs = ["dep:test_memfatfs"] test_fatfs_consistency = ["dep:test_fatfs_consistency"] -test_fatfs_simple_consistency = ["dep:test_fatfs_simple_consistency"] \ No newline at end of file +test_fatfs_simple_consistency = ["dep:test_fatfs_simple_consistency"] From e123bf25be2e3265d7c5826b60cf9e273026d564 Mon Sep 17 00:00:00 2001 From: Koichi Date: Tue, 15 Jul 2025 10:19:01 +0900 Subject: [PATCH 05/27] delete FileMetadata and use DirEntryEditor Signed-off-by: Koichi --- awkernel_lib/src/file/fatfs/dir_entry.rs | 6 +- awkernel_lib/src/file/fatfs/file.rs | 103 +++-------------------- awkernel_lib/src/file/fatfs/fs.rs | 12 +-- 3 files changed, 18 insertions(+), 103 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/dir_entry.rs b/awkernel_lib/src/file/fatfs/dir_entry.rs index 2be4cffe8..ad9913511 100644 --- a/awkernel_lib/src/file/fatfs/dir_entry.rs +++ b/awkernel_lib/src/file/fatfs/dir_entry.rs @@ -473,7 +473,7 @@ pub(crate) struct DirEntryEditor { } impl DirEntryEditor { - fn new(data: DirFileEntryData, pos: u64) -> Self { + pub(crate) fn new(data: DirFileEntryData, pos: u64) -> Self { Self { data, pos, @@ -628,10 +628,6 @@ impl DirEntry DirEntryEditor { - DirEntryEditor::new(self.data.clone(), self.entry_pos) - } - pub(crate) fn is_same_entry(&self, other: &DirEntry) -> bool { self.entry_pos == other.entry_pos } diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 5f0a27551..052838f51 
100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -3,9 +3,9 @@ use core::convert::TryFrom; use core::fmt::Debug; use super::super::io::{IoBase, Read, Seek, SeekFrom, Write}; -use super::dir_entry::DirFileEntryData; +use super::dir_entry::DirEntryEditor; use super::error::Error; -use super::fs::{FatType, FileSystem, ReadWriteSeek}; +use super::fs::{FileSystem, ReadWriteSeek}; use super::time::{Date, DateTime, TimeProvider}; use awkernel_sync::mcs::MCSNode; @@ -13,92 +13,13 @@ use awkernel_sync::mutex::Mutex; const MAX_FILE_SIZE: u32 = u32::MAX; -/// Shared file metadata that is consistent across all file handles -#[derive(Debug)] -pub(crate) struct FileMetadata { - // Core file information - pub(crate) first_cluster: Option, - pub(crate) entry_data: DirFileEntryData, - pub(crate) entry_pos: u64, - pub(crate) size: u32, - pub(crate) dirty: bool, -} - -impl FileMetadata { - pub(crate) fn new( - first_cluster: Option, - entry_data: DirFileEntryData, - entry_pos: u64, - ) -> Self { - let size = entry_data.size().unwrap_or(0); - Self { - first_cluster, - entry_data, - entry_pos, - size, - dirty: false, - } - } - - pub(crate) fn size(&self) -> Option { - if self.entry_data.is_dir() { - None - } else { - Some(self.size) - } - } - - pub(crate) fn set_size(&mut self, size: u32) { - if self.size != size { - self.size = size; - self.dirty = true; - } - } - - pub(crate) fn set_first_cluster(&mut self, cluster: Option, fat_type: FatType) { - if cluster != self.first_cluster { - self.first_cluster = cluster; - self.entry_data.set_first_cluster(cluster, fat_type); - self.dirty = true; - } - } - - pub(crate) fn set_created(&mut self, date_time: DateTime) { - self.entry_data.set_created(date_time); - self.dirty = true; - } - - pub(crate) fn set_modified(&mut self, date_time: DateTime) { - self.entry_data.set_modified(date_time); - self.dirty = true; - } - - pub(crate) fn set_accessed(&mut self, date: Date) { - self.entry_data.set_accessed(date); - self.dirty = true; - } - - pub(crate) fn flush( - &mut self, - disk: &mut IO, - ) -> Result<(), IO::Error> { - if self.dirty { - // Update size before serializing - self.entry_data.set_size(self.size); - disk.seek(SeekFrom::Start(self.entry_pos))?; - self.entry_data.serialize(disk)?; - self.dirty = false; - } - Ok(()) - } -} /// A FAT filesystem file object used for reading and writing data. /// /// This struct is created by the `open_file` or `create_file` methods on `Dir`. 
pub struct File { // Shared metadata for this file (None for root dir) - metadata: Option>>, + metadata: Option>>, // Note: if offset points between clusters current_cluster is the previous cluster current_cluster: Option, // current position in this file @@ -120,7 +41,7 @@ pub struct Extent { impl File { pub(crate) fn new( - metadata: Option>>, + metadata: Option>>, fs: Arc>, ) -> Self { File { @@ -149,7 +70,7 @@ impl File { if self.offset == 0 { metadata.set_first_cluster(None, self.fs.fat_type()); } - let first_cluster = metadata.first_cluster; + let first_cluster = metadata.inner().first_cluster(self.fs.fat_type()); drop(metadata); if let Some(current_cluster) = self.current_cluster { @@ -181,7 +102,7 @@ impl File { let first_cluster = if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); let metadata = metadata_arc.lock(&mut node); - metadata.first_cluster + metadata.inner().first_cluster(self.fs.fat_type()) } else { None }; @@ -233,9 +154,7 @@ impl File { if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); let mut metadata = metadata_arc.lock(&mut node); - let mut disk_node = MCSNode::new(); - let mut disk = self.fs.disk.lock(&mut disk_node); - metadata.flush(&mut *disk)?; + metadata.flush(&self.fs)?; } Ok(()) } @@ -280,7 +199,7 @@ impl File { if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); let metadata = metadata_arc.lock(&mut node); - metadata.size() + metadata.inner().size() } else { None } @@ -290,7 +209,7 @@ impl File { if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); let metadata = metadata_arc.lock(&mut node); - metadata.entry_data.is_dir() + metadata.inner().is_dir() } else { true // root directory } @@ -313,7 +232,7 @@ impl File { if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); let metadata = metadata_arc.lock(&mut node); - metadata.first_cluster + metadata.inner().first_cluster(self.fs.fat_type()) } else { None } @@ -340,7 +259,7 @@ impl File let mut metadata = metadata_arc.lock(&mut node); let now = self.fs.options.time_provider.get_current_date_time(); metadata.set_modified(now); - if metadata.size().is_some_and(|s| offset > s) { + if metadata.inner().size().is_some_and(|s| offset > s) { metadata.set_size(offset); } } diff --git a/awkernel_lib/src/file/fatfs/fs.rs b/awkernel_lib/src/file/fatfs/fs.rs index ec1660536..305a73643 100644 --- a/awkernel_lib/src/file/fatfs/fs.rs +++ b/awkernel_lib/src/file/fatfs/fs.rs @@ -9,9 +9,9 @@ use core::marker::PhantomData; use super::super::io::{self, IoBase, Read, ReadLeExt, Seek, SeekFrom, Write, WriteLeExt}; use super::boot_sector::{format_boot_sector, BiosParameterBlock, BootSector}; use super::dir::{Dir, DirRawStream}; -use super::dir_entry::{DirFileEntryData, FileAttributes, SFN_PADDING, SFN_SIZE}; +use super::dir_entry::{DirFileEntryData, DirEntryEditor, FileAttributes, SFN_PADDING, SFN_SIZE}; use super::error::Error; -use super::file::{File, FileMetadata}; +use super::file::File; use super::table::{ alloc_cluster, count_free_clusters, format_fat, read_fat_flags, ClusterIterator, RESERVED_FAT_ENTRIES, @@ -344,7 +344,7 @@ pub struct FileSystem< fs_info: Mutex, current_status_flags: Mutex, // Metadata cache for open files - maps entry position to shared metadata - pub(crate) metadata_cache: Mutex>>>, + pub(crate) metadata_cache: Mutex>>>, } impl Debug for FileSystem @@ -737,14 +737,14 @@ impl &self, entry_pos: u64, entry_data: DirFileEntryData, - first_cluster: Option, - ) -> Arc> { + 
_first_cluster: Option, + ) -> Arc> { let mut node = MCSNode::new(); let mut cache = self.metadata_cache.lock(&mut node); cache.entry(entry_pos) .or_insert_with(|| { - Arc::new(Mutex::new(FileMetadata::new(first_cluster, entry_data, entry_pos))) + Arc::new(Mutex::new(DirEntryEditor::new(entry_data, entry_pos))) }) .clone() } From 67916e4f884b8dfb6caa67c5c3882935097e7cc2 Mon Sep 17 00:00:00 2001 From: Koichi Date: Tue, 15 Jul 2025 11:45:24 +0900 Subject: [PATCH 06/27] DirEntryEditor with generation Signed-off-by: Koichi --- .../tests/test_fatfs_consistency/src/lib.rs | 221 ++++++++++++++++++ awkernel_lib/src/file/fatfs/dir_entry.rs | 8 + awkernel_lib/src/file/fatfs/file.rs | 98 ++++++++ 3 files changed, 327 insertions(+) diff --git a/applications/tests/test_fatfs_consistency/src/lib.rs b/applications/tests/test_fatfs_consistency/src/lib.rs index e88579a6a..b464ebc0f 100644 --- a/applications/tests/test_fatfs_consistency/src/lib.rs +++ b/applications/tests/test_fatfs_consistency/src/lib.rs @@ -255,4 +255,225 @@ async fn consistency_test() { } else { info!("FatFS consistency test FAILED!"); } + + // Run concurrent truncation test + concurrent_truncation_test().await; +} + +async fn truncation_writer_task(id: usize) -> usize { + info!("Truncation writer {} starting", id); + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join("truncation_test.txt").unwrap(); + + let mut lines_written = 0; + + for i in 0..5 { + // Open file for each iteration to get fresh handle + let mut file = match file_path.create_file().await { + Ok(f) => f, + Err(e) => { + info!("Truncation writer {} failed to open file: {:?}", id, e); + continue; + } + }; + + // Write some data + let data = format!("Truncation writer {} iteration {} - this is a longer line to test cluster allocation\n", id, i); + + // Seek to end to append + use awkernel_lib::file::io::SeekFrom; + if let Err(e) = file.seek(SeekFrom::End(0)).await { + info!("Truncation writer {} failed to seek: {:?}", id, e); + continue; + } + + match file.write(data.as_bytes()).await { + Ok(_) => { + lines_written += 1; + info!("Truncation writer {} wrote iteration {}", id, i); + } + Err(e) => { + info!("Truncation writer {} failed to write: {:?}", id, e); + } + } + + // Yield to allow truncation + for _ in 0..5 { + awkernel_async_lib::r#yield().await; + } + } + + info!("Truncation writer {} finished, wrote {} lines", id, lines_written); + lines_written +} + +async fn truncation_task() { + info!("Truncation task starting"); + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join("truncation_test.txt").unwrap(); + + // Wait for writers to write some data + for _ in 0..10 { + awkernel_async_lib::r#yield().await; + } + + for i in 0..3 { + match file_path.open_file().await { + Ok(mut file) => { + // Read current size + use awkernel_lib::file::io::SeekFrom; + let size = match file.seek(SeekFrom::End(0)).await { + Ok(pos) => pos as u32, + Err(e) => { + info!("Truncation task: Failed to seek: {:?}", e); + continue; + } + }; + + if size > 100 { + // Truncate to smaller size + let new_size = size / 2; + if let Err(e) = file.seek(SeekFrom::Start(new_size as u64)).await { + info!("Truncation task: Failed to seek for truncate: {:?}", e); + continue; + } + + // Truncate is not available on async file interface, simulate by writing + // For now just log that we would truncate + info!("Truncation task: Would truncate file from {} to {} bytes", size, new_size); + } + } + Err(e) => { + info!("Truncation task: Failed to open 
file: {:?}", e); + } + } + + // Wait before next truncation + for _ in 0..20 { + awkernel_async_lib::r#yield().await; + } + } + + info!("Truncation task finished"); +} + +async fn reader_with_position_task(id: usize) { + info!("Position reader {} starting", id); + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join("truncation_test.txt").unwrap(); + + // Wait for file to be created + for _ in 0..5 { + awkernel_async_lib::r#yield().await; + } + + // Open file once and keep reading + let mut file = match file_path.open_file().await { + Ok(f) => f, + Err(e) => { + info!("Position reader {} failed to open file: {:?}", id, e); + return; + } + }; + + let mut last_pos = 0u64; + + for i in 0..10 { + // Try to read from current position + let mut buffer = vec![0u8; 256]; + match file.read(&mut buffer).await { + Ok(bytes_read) => { + use awkernel_lib::file::io::SeekFrom; + let current_pos = file.seek(SeekFrom::Current(0)).await.unwrap_or(0); + info!("Position reader {}: iteration {}, read {} bytes, position {} -> {}", + id, i, bytes_read, last_pos, current_pos); + last_pos = current_pos; + } + Err(e) => { + info!("Position reader {}: Failed to read at iteration {}: {:?}", id, i, e); + // Try to recover by seeking to start + use awkernel_lib::file::io::SeekFrom; + if let Ok(pos) = file.seek(SeekFrom::Start(0)).await { + info!("Position reader {}: Recovered by seeking to start ({})", id, pos); + last_pos = pos; + } + } + } + + // Yield to allow truncation/writing + for _ in 0..3 { + awkernel_async_lib::r#yield().await; + } + } + + info!("Position reader {} finished", id); +} + +async fn concurrent_truncation_test() { + info!("Starting concurrent truncation test"); + + // Create initial file + { + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join("truncation_test.txt").unwrap(); + match file_path.create_file().await { + Ok(mut f) => { + f.write(b"Initial content for truncation test\n") + .await + .expect("Initial write failed"); + info!("Truncation test: Initial file created"); + } + Err(e) => { + info!("Truncation test: Failed to create initial file: {:?}", e); + return; + } + } + } + + // Spawn writers, truncator, and readers + let mut writer_handles = Vec::new(); + let mut reader_handles = Vec::new(); + + // Spawn writers + for i in 0..2 { + let handle = awkernel_async_lib::spawn( + Cow::Owned(format!("truncation_writer_{}", i)), + truncation_writer_task(i), + SchedulerType::FIFO, + ) + .await; + writer_handles.push(handle); + } + + // Spawn truncation task + let truncation_handle = awkernel_async_lib::spawn( + "truncation_task".into(), + truncation_task(), + SchedulerType::FIFO, + ) + .await; + + // Spawn readers + for i in 0..2 { + let handle = awkernel_async_lib::spawn( + Cow::Owned(format!("position_reader_{}", i)), + reader_with_position_task(i), + SchedulerType::FIFO, + ) + .await; + reader_handles.push(handle); + } + + // Wait for all writer tasks to complete + for handle in writer_handles { + let _ = handle.join().await; + } + + // Wait for all reader tasks to complete + for handle in reader_handles { + let _ = handle.join().await; + } + let _ = truncation_handle.join().await; + + info!("Concurrent truncation test completed"); } diff --git a/awkernel_lib/src/file/fatfs/dir_entry.rs b/awkernel_lib/src/file/fatfs/dir_entry.rs index ad9913511..5789ee0ea 100644 --- a/awkernel_lib/src/file/fatfs/dir_entry.rs +++ b/awkernel_lib/src/file/fatfs/dir_entry.rs @@ -470,6 +470,7 @@ pub(crate) struct DirEntryEditor { data: 
DirFileEntryData, pos: u64, dirty: bool, + generation: u64, } impl DirEntryEditor { @@ -478,9 +479,14 @@ impl DirEntryEditor { data, pos, dirty: false, + generation: 0, } } + pub(crate) fn generation(&self) -> u64 { + self.generation + } + pub(crate) fn inner(&self) -> &DirFileEntryData { &self.data } @@ -489,6 +495,7 @@ impl DirEntryEditor { if first_cluster != self.data.first_cluster(fat_type) { self.data.set_first_cluster(first_cluster, fat_type); self.dirty = true; + self.generation += 1; } } @@ -497,6 +504,7 @@ impl DirEntryEditor { Some(n) if size != n => { self.data.set_size(size); self.dirty = true; + self.generation += 1; } _ => {} } diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 052838f51..d59330a48 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -24,6 +24,8 @@ pub struct File { current_cluster: Option, // current position in this file offset: u32, + // Generation number last seen from metadata - used to detect structural changes + cached_generation: u64, // file-system reference fs: Arc>, } @@ -44,11 +46,19 @@ impl File { metadata: Option>>, fs: Arc>, ) -> Self { + let cached_generation = if let Some(ref m) = metadata { + let mut node = MCSNode::new(); + let guard = m.lock(&mut node); + guard.generation() + } else { + 0 + }; File { metadata, fs, current_cluster: None, // cluster before first one offset: 0, + cached_generation, } } @@ -249,6 +259,87 @@ impl File { pub(crate) fn is_root_dir(&self) -> bool { self.metadata.is_none() } + + /// Validates that the cached position (current_cluster, offset) is still valid + /// after potential concurrent modifications. Returns true if position is valid. + fn validate_position(&mut self) -> Result> { + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + let current_gen = metadata.generation(); + + // If generation hasn't changed, position is still valid + if current_gen == self.cached_generation { + return Ok(true); + } + + // Generation changed - need to validate our position + let file_size = metadata.inner().size().unwrap_or(0); + let first_cluster = metadata.inner().first_cluster(self.fs.fat_type()); + drop(metadata); + + // Check if our offset is still within file bounds + if self.offset > file_size { + // File was truncated - reset to end of file + self.offset = file_size; + self.current_cluster = None; + self.cached_generation = current_gen; + + // Re-seek to the new end position + if self.offset > 0 { + let saved_offset = self.offset; + self.offset = 0; + self.current_cluster = None; + self.seek(SeekFrom::Start(u64::from(saved_offset)))?; + } + return Ok(false); + } + + // If we have a current cluster, validate it's still part of the file + if let Some(current) = self.current_cluster { + if let Some(first) = first_cluster { + // Walk the cluster chain to verify our cluster is still valid + let clusters_before = self.fs.clusters_from_bytes(u64::from(self.offset)); + let mut found = false; + + if clusters_before == 0 && current == first { + found = true; + } else { + let mut iter = FileSystem::cluster_iter(&self.fs, first); + for _ in 0..clusters_before { + match iter.next() { + Some(Ok(c)) => { + if c == current { + found = true; + break; + } + } + _ => break, + } + } + } + + if !found { + // Current cluster is no longer valid - need to re-seek + let saved_offset = self.offset; + self.offset = 0; + self.current_cluster = None; + self.cached_generation = 
current_gen; + self.seek(SeekFrom::Start(u64::from(saved_offset)))?; + return Ok(false); + } + } else { + // File is now empty but we had a cluster - reset position + self.offset = 0; + self.current_cluster = None; + } + } + + // Update cached generation + self.cached_generation = current_gen; + } + Ok(true) + } } impl File { @@ -281,6 +372,7 @@ impl Clone for File { metadata: self.metadata.clone(), current_cluster: self.current_cluster, offset: self.offset, + cached_generation: self.cached_generation, fs: Arc::clone(&self.fs), } } @@ -293,6 +385,8 @@ impl IoBase for File { impl Read for File { fn read(&mut self, buf: &mut [u8]) -> Result { log::trace!("File::read"); + // Validate position before reading + self.validate_position()?; let cluster_size = self.fs.cluster_size(); let current_cluster_opt = if self.offset % cluster_size == 0 { // next cluster @@ -350,6 +444,8 @@ impl Read for File Write for File { fn write(&mut self, buf: &[u8]) -> Result { log::trace!("File::write"); + // Validate position before writing + self.validate_position()?; let cluster_size = self.fs.cluster_size(); let offset_in_cluster = self.offset % cluster_size; let bytes_left_in_cluster = (cluster_size - offset_in_cluster) as usize; @@ -424,6 +520,8 @@ impl Write for File Seek for File { fn seek(&mut self, pos: SeekFrom) -> Result { log::trace!("File::seek"); + // Validate position before seeking + self.validate_position()?; let size_opt = self.size(); let new_offset_opt: Option = match pos { SeekFrom::Current(x) => i64::from(self.offset) From 5e0c7e501e78357819835b22cd3aac86f2cdb15b Mon Sep 17 00:00:00 2001 From: Koichi Date: Tue, 15 Jul 2025 14:43:30 +0900 Subject: [PATCH 07/27] delete generation Signed-off-by: Koichi --- awkernel_lib/src/file/fatfs/dir_entry.rs | 8 -- awkernel_lib/src/file/fatfs/file.rs | 98 ------------------------ 2 files changed, 106 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/dir_entry.rs b/awkernel_lib/src/file/fatfs/dir_entry.rs index 5789ee0ea..ad9913511 100644 --- a/awkernel_lib/src/file/fatfs/dir_entry.rs +++ b/awkernel_lib/src/file/fatfs/dir_entry.rs @@ -470,7 +470,6 @@ pub(crate) struct DirEntryEditor { data: DirFileEntryData, pos: u64, dirty: bool, - generation: u64, } impl DirEntryEditor { @@ -479,14 +478,9 @@ impl DirEntryEditor { data, pos, dirty: false, - generation: 0, } } - pub(crate) fn generation(&self) -> u64 { - self.generation - } - pub(crate) fn inner(&self) -> &DirFileEntryData { &self.data } @@ -495,7 +489,6 @@ impl DirEntryEditor { if first_cluster != self.data.first_cluster(fat_type) { self.data.set_first_cluster(first_cluster, fat_type); self.dirty = true; - self.generation += 1; } } @@ -504,7 +497,6 @@ impl DirEntryEditor { Some(n) if size != n => { self.data.set_size(size); self.dirty = true; - self.generation += 1; } _ => {} } diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index d59330a48..052838f51 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -24,8 +24,6 @@ pub struct File { current_cluster: Option, // current position in this file offset: u32, - // Generation number last seen from metadata - used to detect structural changes - cached_generation: u64, // file-system reference fs: Arc>, } @@ -46,19 +44,11 @@ impl File { metadata: Option>>, fs: Arc>, ) -> Self { - let cached_generation = if let Some(ref m) = metadata { - let mut node = MCSNode::new(); - let guard = m.lock(&mut node); - guard.generation() - } else { - 0 - }; File { metadata, fs, current_cluster: None, 
// cluster before first one offset: 0, - cached_generation, } } @@ -259,87 +249,6 @@ impl File { pub(crate) fn is_root_dir(&self) -> bool { self.metadata.is_none() } - - /// Validates that the cached position (current_cluster, offset) is still valid - /// after potential concurrent modifications. Returns true if position is valid. - fn validate_position(&mut self) -> Result> { - if let Some(ref metadata_arc) = self.metadata { - let mut node = MCSNode::new(); - let metadata = metadata_arc.lock(&mut node); - let current_gen = metadata.generation(); - - // If generation hasn't changed, position is still valid - if current_gen == self.cached_generation { - return Ok(true); - } - - // Generation changed - need to validate our position - let file_size = metadata.inner().size().unwrap_or(0); - let first_cluster = metadata.inner().first_cluster(self.fs.fat_type()); - drop(metadata); - - // Check if our offset is still within file bounds - if self.offset > file_size { - // File was truncated - reset to end of file - self.offset = file_size; - self.current_cluster = None; - self.cached_generation = current_gen; - - // Re-seek to the new end position - if self.offset > 0 { - let saved_offset = self.offset; - self.offset = 0; - self.current_cluster = None; - self.seek(SeekFrom::Start(u64::from(saved_offset)))?; - } - return Ok(false); - } - - // If we have a current cluster, validate it's still part of the file - if let Some(current) = self.current_cluster { - if let Some(first) = first_cluster { - // Walk the cluster chain to verify our cluster is still valid - let clusters_before = self.fs.clusters_from_bytes(u64::from(self.offset)); - let mut found = false; - - if clusters_before == 0 && current == first { - found = true; - } else { - let mut iter = FileSystem::cluster_iter(&self.fs, first); - for _ in 0..clusters_before { - match iter.next() { - Some(Ok(c)) => { - if c == current { - found = true; - break; - } - } - _ => break, - } - } - } - - if !found { - // Current cluster is no longer valid - need to re-seek - let saved_offset = self.offset; - self.offset = 0; - self.current_cluster = None; - self.cached_generation = current_gen; - self.seek(SeekFrom::Start(u64::from(saved_offset)))?; - return Ok(false); - } - } else { - // File is now empty but we had a cluster - reset position - self.offset = 0; - self.current_cluster = None; - } - } - - // Update cached generation - self.cached_generation = current_gen; - } - Ok(true) - } } impl File { @@ -372,7 +281,6 @@ impl Clone for File { metadata: self.metadata.clone(), current_cluster: self.current_cluster, offset: self.offset, - cached_generation: self.cached_generation, fs: Arc::clone(&self.fs), } } @@ -385,8 +293,6 @@ impl IoBase for File { impl Read for File { fn read(&mut self, buf: &mut [u8]) -> Result { log::trace!("File::read"); - // Validate position before reading - self.validate_position()?; let cluster_size = self.fs.cluster_size(); let current_cluster_opt = if self.offset % cluster_size == 0 { // next cluster @@ -444,8 +350,6 @@ impl Read for File Write for File { fn write(&mut self, buf: &[u8]) -> Result { log::trace!("File::write"); - // Validate position before writing - self.validate_position()?; let cluster_size = self.fs.cluster_size(); let offset_in_cluster = self.offset % cluster_size; let bytes_left_in_cluster = (cluster_size - offset_in_cluster) as usize; @@ -520,8 +424,6 @@ impl Write for File Seek for File { fn seek(&mut self, pos: SeekFrom) -> Result { log::trace!("File::seek"); - // Validate position before seeking - 
self.validate_position()?; let size_opt = self.size(); let new_offset_opt: Option = match pos { SeekFrom::Current(x) => i64::from(self.offset) From cb0c21343d19121db56fa13c58139f32784f07c9 Mon Sep 17 00:00:00 2001 From: Koichi Date: Tue, 15 Jul 2025 17:01:49 +0900 Subject: [PATCH 08/27] remove cache when there is no corresponding file handle Signed-off-by: Koichi --- .../tests/test_fatfs_consistency/src/lib.rs | 62 +++++++++++++++++++ awkernel_lib/src/file/fatfs/dir_entry.rs | 2 +- awkernel_lib/src/file/fatfs/file.rs | 9 +++ awkernel_lib/src/file/fatfs/fs.rs | 15 +++++ 4 files changed, 87 insertions(+), 1 deletion(-) diff --git a/applications/tests/test_fatfs_consistency/src/lib.rs b/applications/tests/test_fatfs_consistency/src/lib.rs index b464ebc0f..c3b7e1289 100644 --- a/applications/tests/test_fatfs_consistency/src/lib.rs +++ b/applications/tests/test_fatfs_consistency/src/lib.rs @@ -38,6 +38,16 @@ pub async fn run() { .await .join() .await; + + // Run metadata cache cleanup test + awkernel_async_lib::spawn( + "metadata cache cleanup test".into(), + metadata_cache_cleanup_test(), + SchedulerType::FIFO, + ) + .await + .join() + .await; } async fn writer_task(id: usize) -> usize { @@ -477,3 +487,55 @@ async fn concurrent_truncation_test() { info!("Concurrent truncation test completed"); } + +// Test metadata cache cleanup +async fn metadata_cache_cleanup_test() { + info!("Starting metadata cache cleanup test"); + + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join("cache_test.txt").unwrap(); + + // Create and drop multiple file handles to test cache cleanup + for i in 0..5 { + // Create a file + let mut file = match file_path.create_file().await { + Ok(f) => f, + Err(e) => { + info!("Failed to create file in iteration {}: {:?}", i, e); + continue; + } + }; + + // Write some data + let data = format!("Test data for iteration {}\n", i); + if let Err(e) = file.write(data.as_bytes()).await { + info!("Failed to write in iteration {}: {:?}", i, e); + } + + // Open the same file multiple times to test shared metadata + let _file2 = file_path.open_file().await.ok(); + let _file3 = file_path.open_file().await.ok(); + + // All handles will be dropped here, should clean up cache + } + + // Open the file again to verify it still works + match file_path.open_file().await { + Ok(mut file) => { + let mut buffer = vec![0u8; 100]; + match file.read(&mut buffer).await { + Ok(bytes_read) => { + info!("Successfully read {} bytes after cache cleanup", bytes_read); + } + Err(e) => { + info!("Failed to read after cache cleanup: {:?}", e); + } + } + } + Err(e) => { + info!("Failed to open file after cache cleanup: {:?}", e); + } + } + + info!("Metadata cache cleanup test completed"); +} diff --git a/awkernel_lib/src/file/fatfs/dir_entry.rs b/awkernel_lib/src/file/fatfs/dir_entry.rs index ad9913511..a1e671755 100644 --- a/awkernel_lib/src/file/fatfs/dir_entry.rs +++ b/awkernel_lib/src/file/fatfs/dir_entry.rs @@ -468,7 +468,7 @@ impl DirEntryData { #[derive(Clone, Debug)] pub(crate) struct DirEntryEditor { data: DirFileEntryData, - pos: u64, + pub(crate) pos: u64, dirty: bool, } diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 052838f51..e0c4387c8 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -271,6 +271,15 @@ impl Drop for File { if let Err(err) = self.flush() { log::error!("flush failed {err:?}"); } + + // Clean up metadata cache if this is the last reference + if let Some(ref 
metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + let entry_pos = metadata.pos; + drop(metadata); // Release the lock before calling cleanup + self.fs.cleanup_metadata_if_unused(entry_pos); + } } } diff --git a/awkernel_lib/src/file/fatfs/fs.rs b/awkernel_lib/src/file/fatfs/fs.rs index 305a73643..d067a4752 100644 --- a/awkernel_lib/src/file/fatfs/fs.rs +++ b/awkernel_lib/src/file/fatfs/fs.rs @@ -748,6 +748,21 @@ impl }) .clone() } + + /// Clean up metadata cache entry if it's no longer referenced by any file handles + pub(crate) fn cleanup_metadata_if_unused(&self, entry_pos: u64) { + let mut node = MCSNode::new(); + let mut cache = self.metadata_cache.lock(&mut node); + + // Check if the entry exists and if we're the only one holding a reference + // Arc::strong_count == 2 means only the cache and the caller hold references + if let Some(metadata_arc) = cache.get(&entry_pos) { + if Arc::strong_count(metadata_arc) <= 2 { + // Remove from cache since no other file handles are using it + cache.remove(&entry_pos); + } + } + } } impl From 78b10cdb56341150459372a1f675a41898815288 Mon Sep 17 00:00:00 2001 From: Koichi Imai Date: Tue, 15 Jul 2025 17:13:08 +0900 Subject: [PATCH 09/27] minor fix Signed-off-by: Koichi Imai --- awkernel_lib/src/file/fatfs/file.rs | 9 +++------ awkernel_lib/src/file/fatfs/fs.rs | 29 +++++++++-------------------- 2 files changed, 12 insertions(+), 26 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index e0c4387c8..9472f143d 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -8,12 +8,10 @@ use super::error::Error; use super::fs::{FileSystem, ReadWriteSeek}; use super::time::{Date, DateTime, TimeProvider}; -use awkernel_sync::mcs::MCSNode; -use awkernel_sync::mutex::Mutex; +use awkernel_sync::{mcs::MCSNode, mutex::Mutex}; const MAX_FILE_SIZE: u32 = u32::MAX; - /// A FAT filesystem file object used for reading and writing data. /// /// This struct is created by the `open_file` or `create_file` methods on `Dir`. 
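
For reference, the refcount-based eviction that cleanup_metadata_if_unused performs can be sketched with plain std::sync types as follows. This is a minimal illustration rather than the kernel code: MetadataCache and Meta are made-up names, the real implementation uses awkernel_sync::mutex::Mutex with an MCSNode instead of std::sync::Mutex, and the strong_count == 1 check corresponds to the variant this series settles on once the file handle drops its own Arc before asking the cache to clean up.

use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};

#[derive(Debug, Default)]
struct Meta; // stand-in for DirEntryEditor (size, first cluster, dirty flag, ...)

#[derive(Default)]
struct MetadataCache {
    // directory-entry position -> shared, lockable metadata
    entries: Mutex<BTreeMap<u64, Arc<Mutex<Meta>>>>,
}

impl MetadataCache {
    // Return the shared metadata for `entry_pos`, creating it on first use,
    // so every handle opened on the same file sees the same state.
    fn get_or_create(&self, entry_pos: u64) -> Arc<Mutex<Meta>> {
        let mut map = self.entries.lock().unwrap();
        map.entry(entry_pos)
            .or_insert_with(|| Arc::new(Mutex::new(Meta)))
            .clone()
    }

    // Evict the entry once no file handle references it any more. A handle
    // calls this from Drop *after* releasing its own Arc, so a strong count
    // of 1 means only the cache itself still holds the metadata.
    fn cleanup_if_unused(&self, entry_pos: u64) {
        let mut map = self.entries.lock().unwrap();
        if let Some(meta) = map.get(&entry_pos) {
            if Arc::strong_count(meta) == 1 {
                map.remove(&entry_pos);
            }
        }
    }
}

fn main() {
    let cache = MetadataCache::default();
    let a = cache.get_or_create(42);
    let b = cache.get_or_create(42);
    assert!(Arc::ptr_eq(&a, &b)); // both handles share one metadata object

    drop(a);
    drop(b);
    cache.cleanup_if_unused(42); // last handle gone -> entry is removed
    assert!(cache.entries.lock().unwrap().is_empty());
}
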
@@ -271,13 +269,12 @@ impl Drop for File { if let Err(err) = self.flush() { log::error!("flush failed {err:?}"); } - - // Clean up metadata cache if this is the last reference + if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); let metadata = metadata_arc.lock(&mut node); let entry_pos = metadata.pos; - drop(metadata); // Release the lock before calling cleanup + drop(metadata); self.fs.cleanup_metadata_if_unused(entry_pos); } } diff --git a/awkernel_lib/src/file/fatfs/fs.rs b/awkernel_lib/src/file/fatfs/fs.rs index d067a4752..aa3110fa9 100644 --- a/awkernel_lib/src/file/fatfs/fs.rs +++ b/awkernel_lib/src/file/fatfs/fs.rs @@ -1,6 +1,6 @@ +use alloc::collections::BTreeMap; use alloc::string::String; use alloc::sync::Arc; -use alloc::collections::BTreeMap; use core::borrow::BorrowMut; use core::convert::TryFrom; use core::fmt::Debug; @@ -9,7 +9,7 @@ use core::marker::PhantomData; use super::super::io::{self, IoBase, Read, ReadLeExt, Seek, SeekFrom, Write, WriteLeExt}; use super::boot_sector::{format_boot_sector, BiosParameterBlock, BootSector}; use super::dir::{Dir, DirRawStream}; -use super::dir_entry::{DirFileEntryData, DirEntryEditor, FileAttributes, SFN_PADDING, SFN_SIZE}; +use super::dir_entry::{DirEntryEditor, DirFileEntryData, FileAttributes, SFN_PADDING, SFN_SIZE}; use super::error::Error; use super::file::File; use super::table::{ @@ -704,10 +704,7 @@ impl FileSystem { &fs.bpb, FsIoAdapter { fs: Arc::clone(fs) }, )), - FatType::Fat32 => DirRawStream::File(File::new( - None, - Arc::clone(fs), - )), + FatType::Fat32 => DirRawStream::File(File::new(None, Arc::clone(fs))), } }; Dir::new(root_rdr, Arc::clone(fs)) @@ -729,10 +726,7 @@ impl FileSystem - FileSystem -{ - /// Get or create shared metadata for a file +impl FileSystem { pub(crate) fn get_or_create_metadata( &self, entry_pos: u64, @@ -741,24 +735,19 @@ impl ) -> Arc> { let mut node = MCSNode::new(); let mut cache = self.metadata_cache.lock(&mut node); - - cache.entry(entry_pos) - .or_insert_with(|| { - Arc::new(Mutex::new(DirEntryEditor::new(entry_data, entry_pos))) - }) + + cache + .entry(entry_pos) + .or_insert_with(|| Arc::new(Mutex::new(DirEntryEditor::new(entry_data, entry_pos)))) .clone() } - /// Clean up metadata cache entry if it's no longer referenced by any file handles pub(crate) fn cleanup_metadata_if_unused(&self, entry_pos: u64) { let mut node = MCSNode::new(); let mut cache = self.metadata_cache.lock(&mut node); - - // Check if the entry exists and if we're the only one holding a reference - // Arc::strong_count == 2 means only the cache and the caller hold references + if let Some(metadata_arc) = cache.get(&entry_pos) { if Arc::strong_count(metadata_arc) <= 2 { - // Remove from cache since no other file handles are using it cache.remove(&entry_pos); } } From 38f91f4dbbac19d5b43fcfaefaff6d205e4cfed9 Mon Sep 17 00:00:00 2001 From: Koichi Imai Date: Tue, 15 Jul 2025 17:28:51 +0900 Subject: [PATCH 10/27] cargo format for test Signed-off-by: Koichi Imai --- .../tests/test_fatfs_consistency/src/lib.rs | 108 ++++++++++-------- 1 file changed, 60 insertions(+), 48 deletions(-) diff --git a/applications/tests/test_fatfs_consistency/src/lib.rs b/applications/tests/test_fatfs_consistency/src/lib.rs index c3b7e1289..1444d6758 100644 --- a/applications/tests/test_fatfs_consistency/src/lib.rs +++ b/applications/tests/test_fatfs_consistency/src/lib.rs @@ -38,7 +38,7 @@ pub async fn run() { .await .join() .await; - + // Run metadata cache cleanup test awkernel_async_lib::spawn( "metadata cache 
cleanup test".into(), @@ -189,11 +189,6 @@ async fn consistency_test() { ) .await; writer_handles.push(handle); - - // Small delay between spawning writers - for _ in 0..2 { - awkernel_async_lib::r#yield().await; - } } // Spawn monitor task @@ -265,7 +260,7 @@ async fn consistency_test() { } else { info!("FatFS consistency test FAILED!"); } - + // Run concurrent truncation test concurrent_truncation_test().await; } @@ -274,9 +269,9 @@ async fn truncation_writer_task(id: usize) -> usize { info!("Truncation writer {} starting", id); let root_path = AsyncVfsPath::new_in_memory_fatfs(); let file_path = root_path.join("truncation_test.txt").unwrap(); - + let mut lines_written = 0; - + for i in 0..5 { // Open file for each iteration to get fresh handle let mut file = match file_path.create_file().await { @@ -286,17 +281,20 @@ async fn truncation_writer_task(id: usize) -> usize { continue; } }; - + // Write some data - let data = format!("Truncation writer {} iteration {} - this is a longer line to test cluster allocation\n", id, i); - + let data = format!( + "Truncation writer {} iteration {} - this is a longer line to test cluster allocation\n", + id, i + ); + // Seek to end to append use awkernel_lib::file::io::SeekFrom; if let Err(e) = file.seek(SeekFrom::End(0)).await { info!("Truncation writer {} failed to seek: {:?}", id, e); continue; } - + match file.write(data.as_bytes()).await { Ok(_) => { lines_written += 1; @@ -306,14 +304,17 @@ async fn truncation_writer_task(id: usize) -> usize { info!("Truncation writer {} failed to write: {:?}", id, e); } } - + // Yield to allow truncation for _ in 0..5 { awkernel_async_lib::r#yield().await; } } - - info!("Truncation writer {} finished, wrote {} lines", id, lines_written); + + info!( + "Truncation writer {} finished, wrote {} lines", + id, lines_written + ); lines_written } @@ -321,12 +322,12 @@ async fn truncation_task() { info!("Truncation task starting"); let root_path = AsyncVfsPath::new_in_memory_fatfs(); let file_path = root_path.join("truncation_test.txt").unwrap(); - + // Wait for writers to write some data for _ in 0..10 { awkernel_async_lib::r#yield().await; } - + for i in 0..3 { match file_path.open_file().await { Ok(mut file) => { @@ -339,7 +340,7 @@ async fn truncation_task() { continue; } }; - + if size > 100 { // Truncate to smaller size let new_size = size / 2; @@ -347,23 +348,26 @@ async fn truncation_task() { info!("Truncation task: Failed to seek for truncate: {:?}", e); continue; } - + // Truncate is not available on async file interface, simulate by writing // For now just log that we would truncate - info!("Truncation task: Would truncate file from {} to {} bytes", size, new_size); + info!( + "Truncation task: Would truncate file from {} to {} bytes", + size, new_size + ); } } Err(e) => { info!("Truncation task: Failed to open file: {:?}", e); } } - + // Wait before next truncation for _ in 0..20 { awkernel_async_lib::r#yield().await; } } - + info!("Truncation task finished"); } @@ -371,12 +375,12 @@ async fn reader_with_position_task(id: usize) { info!("Position reader {} starting", id); let root_path = AsyncVfsPath::new_in_memory_fatfs(); let file_path = root_path.join("truncation_test.txt").unwrap(); - + // Wait for file to be created for _ in 0..5 { awkernel_async_lib::r#yield().await; } - + // Open file once and keep reading let mut file = match file_path.open_file().await { Ok(f) => f, @@ -385,9 +389,9 @@ async fn reader_with_position_task(id: usize) { return; } }; - + let mut last_pos = 0u64; - + for i in 0..10 { // 
Try to read from current position let mut buffer = vec![0u8; 256]; @@ -395,33 +399,41 @@ async fn reader_with_position_task(id: usize) { Ok(bytes_read) => { use awkernel_lib::file::io::SeekFrom; let current_pos = file.seek(SeekFrom::Current(0)).await.unwrap_or(0); - info!("Position reader {}: iteration {}, read {} bytes, position {} -> {}", - id, i, bytes_read, last_pos, current_pos); + info!( + "Position reader {}: iteration {}, read {} bytes, position {} -> {}", + id, i, bytes_read, last_pos, current_pos + ); last_pos = current_pos; } Err(e) => { - info!("Position reader {}: Failed to read at iteration {}: {:?}", id, i, e); + info!( + "Position reader {}: Failed to read at iteration {}: {:?}", + id, i, e + ); // Try to recover by seeking to start use awkernel_lib::file::io::SeekFrom; if let Ok(pos) = file.seek(SeekFrom::Start(0)).await { - info!("Position reader {}: Recovered by seeking to start ({})", id, pos); + info!( + "Position reader {}: Recovered by seeking to start ({})", + id, pos + ); last_pos = pos; } } } - + // Yield to allow truncation/writing for _ in 0..3 { awkernel_async_lib::r#yield().await; } } - + info!("Position reader {} finished", id); } async fn concurrent_truncation_test() { info!("Starting concurrent truncation test"); - + // Create initial file { let root_path = AsyncVfsPath::new_in_memory_fatfs(); @@ -439,11 +451,11 @@ async fn concurrent_truncation_test() { } } } - + // Spawn writers, truncator, and readers let mut writer_handles = Vec::new(); let mut reader_handles = Vec::new(); - + // Spawn writers for i in 0..2 { let handle = awkernel_async_lib::spawn( @@ -454,7 +466,7 @@ async fn concurrent_truncation_test() { .await; writer_handles.push(handle); } - + // Spawn truncation task let truncation_handle = awkernel_async_lib::spawn( "truncation_task".into(), @@ -462,7 +474,7 @@ async fn concurrent_truncation_test() { SchedulerType::FIFO, ) .await; - + // Spawn readers for i in 0..2 { let handle = awkernel_async_lib::spawn( @@ -473,28 +485,28 @@ async fn concurrent_truncation_test() { .await; reader_handles.push(handle); } - + // Wait for all writer tasks to complete for handle in writer_handles { let _ = handle.join().await; } - + // Wait for all reader tasks to complete for handle in reader_handles { let _ = handle.join().await; } let _ = truncation_handle.join().await; - + info!("Concurrent truncation test completed"); } // Test metadata cache cleanup async fn metadata_cache_cleanup_test() { info!("Starting metadata cache cleanup test"); - + let root_path = AsyncVfsPath::new_in_memory_fatfs(); let file_path = root_path.join("cache_test.txt").unwrap(); - + // Create and drop multiple file handles to test cache cleanup for i in 0..5 { // Create a file @@ -505,20 +517,20 @@ async fn metadata_cache_cleanup_test() { continue; } }; - + // Write some data let data = format!("Test data for iteration {}\n", i); if let Err(e) = file.write(data.as_bytes()).await { info!("Failed to write in iteration {}: {:?}", i, e); } - + // Open the same file multiple times to test shared metadata let _file2 = file_path.open_file().await.ok(); let _file3 = file_path.open_file().await.ok(); - + // All handles will be dropped here, should clean up cache } - + // Open the file again to verify it still works match file_path.open_file().await { Ok(mut file) => { @@ -536,6 +548,6 @@ async fn metadata_cache_cleanup_test() { info!("Failed to open file after cache cleanup: {:?}", e); } } - + info!("Metadata cache cleanup test completed"); } From c22fc89ee6ec55434abcec1a4711c6b0bf215cf7 Mon Sep 
17 00:00:00 2001 From: Koichi Date: Tue, 15 Jul 2025 19:18:00 +0900 Subject: [PATCH 11/27] delete current_cluster from File Signed-off-by: Koichi --- awkernel_lib/src/file/fatfs/file.rs | 118 +++++++++++++--------------- 1 file changed, 55 insertions(+), 63 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 9472f143d..b821fae9e 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -18,8 +18,6 @@ const MAX_FILE_SIZE: u32 = u32::MAX; pub struct File { // Shared metadata for this file (None for root dir) metadata: Option>>, - // Note: if offset points between clusters current_cluster is the previous cluster - current_cluster: Option, // current position in this file offset: u32, // file-system reference @@ -45,7 +43,6 @@ impl File { File { metadata, fs, - current_cluster: None, // cluster before first one offset: 0, } } @@ -71,10 +68,11 @@ impl File { let first_cluster = metadata.inner().first_cluster(self.fs.fat_type()); drop(metadata); - if let Some(current_cluster) = self.current_cluster { + let current_cluster = self.get_current_cluster()?; + if let Some(cluster) = current_cluster { // current cluster is none only if offset is 0 debug_assert!(self.offset > 0); - FileSystem::truncate_cluster_chain(&self.fs, current_cluster) + FileSystem::truncate_cluster_chain(&self.fs, cluster) } else { debug_assert!(self.offset == 0); if let Some(n) = first_cluster { @@ -130,7 +128,7 @@ impl File { pub(crate) fn abs_pos(&self) -> Option { // Returns current position relative to filesystem start // Note: when between clusters it returns position after previous cluster - match self.current_cluster { + match self.get_current_cluster().ok()? { Some(n) => { let cluster_size = self.fs.cluster_size(); let offset_mod_cluster_size = self.offset % cluster_size; @@ -236,6 +234,34 @@ impl File { } } + fn get_current_cluster(&self) -> Result, Error> { + if self.offset == 0 { + return Ok(None); + } + + let Some(first_cluster) = self.first_cluster() else { + return Ok(None); // empty file + }; + + let offset_in_clusters = self.fs.clusters_from_bytes(u64::from(self.offset)); + debug_assert!(offset_in_clusters > 0); + + let clusters_to_skip = offset_in_clusters - 1; + let mut cluster = first_cluster; + let mut iter = FileSystem::cluster_iter(&self.fs, first_cluster); + + for _i in 0..clusters_to_skip { + cluster = if let Some(r) = iter.next() { + r? + } else { + // cluster chain ends before the current position + return Ok(None); + }; + } + + Ok(Some(cluster)) + } + fn flush(&mut self) -> Result<(), Error> { self.flush_dir_entry()?; let mut node = MCSNode::new(); @@ -285,7 +311,6 @@ impl Clone for File { fn clone(&self) -> Self { File { metadata: self.metadata.clone(), - current_cluster: self.current_cluster, offset: self.offset, fs: Arc::clone(&self.fs), } @@ -301,20 +326,19 @@ impl Read for File self.first_cluster(), - Some(n) => { - let r = FileSystem::cluster_iter(&self.fs, n).next(); - match r { - Some(Err(err)) => return Err(err), - Some(Ok(n)) => Some(n), - None => None, - } + // at cluster boundary - get next cluster + if let Some(current) = self.get_current_cluster()? { + let r = FileSystem::cluster_iter(&self.fs, current).next(); + match r { + Some(Err(err)) => return Err(err), + Some(Ok(n)) => Some(n), + None => None, } + } else { + self.first_cluster() } } else { - self.current_cluster + self.get_current_cluster()? 
}; let Some(current_cluster) = current_cluster_opt else { return Ok(0); @@ -339,7 +363,6 @@ impl Read for File Write for File self.first_cluster(), - Some(n) => { - let r = FileSystem::cluster_iter(&self.fs, n).next(); - match r { - Some(Err(err)) => return Err(err), - Some(Ok(n)) => Some(n), - None => None, - } + // at cluster boundary - get next cluster or allocate + let next_cluster = if let Some(current) = self.get_current_cluster()? { + let r = FileSystem::cluster_iter(&self.fs, current).next(); + match r { + Some(Err(err)) => return Err(err), + Some(Ok(n)) => Some(n), + None => None, } + } else { + self.first_cluster() }; + if let Some(n) = next_cluster { n } else { // end of chain reached - allocate new cluster let new_cluster = - FileSystem::alloc_cluster(&self.fs, self.current_cluster, self.is_dir())?; + FileSystem::alloc_cluster(&self.fs, self.get_current_cluster()?, self.is_dir())?; log::trace!("allocated cluster {new_cluster}"); if self.first_cluster().is_none() { self.set_first_cluster(new_cluster); @@ -397,8 +420,8 @@ impl Write for File n, None => panic!("Offset inside cluster but no cluster allocated"), } @@ -417,7 +440,6 @@ impl Write for File Seek for File { // position is the same - nothing to do return Ok(u64::from(self.offset)); } - let new_offset_in_clusters = self.fs.clusters_from_bytes(u64::from(new_offset)); - let old_offset_in_clusters = self.fs.clusters_from_bytes(u64::from(self.offset)); - let new_cluster = if new_offset == 0 { - None - } else if new_offset_in_clusters == old_offset_in_clusters { - self.current_cluster - } else if let Some(first_cluster) = self.first_cluster() { - // calculate number of clusters to skip - // return the previous cluster if the offset points to the cluster boundary - // Note: new_offset_in_clusters cannot be 0 here because new_offset is not 0 - debug_assert!(new_offset_in_clusters > 0); - let clusters_to_skip = new_offset_in_clusters - 1; - let mut cluster = first_cluster; - let mut iter = FileSystem::cluster_iter(&self.fs, first_cluster); - for i in 0..clusters_to_skip { - cluster = if let Some(r) = iter.next() { - r? - } else { - // cluster chain ends before the new position - seek to the end of the last cluster - new_offset = self.fs.bytes_from_clusters(i + 1) as u32; - break; - }; - } - Some(cluster) - } else { - // empty file - always seek to 0 - new_offset = 0; - None - }; self.offset = new_offset; - self.current_cluster = new_cluster; Ok(u64::from(self.offset)) } } From 74628efd1af75e3aa893dbab6d843f8080c5a3ce Mon Sep 17 00:00:00 2001 From: Koichi Date: Wed, 16 Jul 2025 00:15:16 +0900 Subject: [PATCH 12/27] add comments and Signed-off-by: Koichi --- awkernel_lib/src/file/fatfs/file.rs | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index b821fae9e..843e3a35d 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -234,6 +234,17 @@ impl File { } } + /// Get the cluster that contains the current file position. + /// + /// Returns the cluster number that contains the byte at the current offset. + /// Special case: when offset is at a cluster boundary (offset % cluster_size == 0), + /// this returns the PREVIOUS cluster, not the next one. This matches the original + /// FatFS semantics where current_cluster represents "the last accessed cluster". 
+ /// + /// Returns: + /// - Ok(Some(cluster)) - The cluster containing the current position + /// - Ok(None) - When offset is 0 or beyond the file's cluster chain + /// - Err - On I/O errors during cluster chain traversal fn get_current_cluster(&self) -> Result, Error> { if self.offset == 0 { return Ok(None); @@ -334,8 +345,12 @@ impl Read for File Some(n), None => None, } - } else { + } else if self.offset == 0 { + // at offset 0, use first cluster self.first_cluster() + } else { + // beyond end of file + None } } else { self.get_current_cluster()? @@ -403,8 +418,12 @@ impl Write for File Some(n), None => None, } - } else { + } else if self.offset == 0 { + // at offset 0, use first cluster self.first_cluster() + } else { + // beyond end of file - need to allocate + None }; if let Some(n) = next_cluster { From cede1940ea75287a735dd2316f5cc0bc7ed96e0c Mon Sep 17 00:00:00 2001 From: Koichi Date: Wed, 16 Jul 2025 13:08:19 +0900 Subject: [PATCH 13/27] fix about cleanup_metadata_if_unused Signed-off-by: Koichi --- awkernel_lib/src/file/fatfs/file.rs | 3 ++- awkernel_lib/src/file/fatfs/fs.rs | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 843e3a35d..60ebac063 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -307,11 +307,12 @@ impl Drop for File { log::error!("flush failed {err:?}"); } - if let Some(ref metadata_arc) = self.metadata { + if let Some(metadata_arc) = self.metadata.take() { let mut node = MCSNode::new(); let metadata = metadata_arc.lock(&mut node); let entry_pos = metadata.pos; drop(metadata); + drop(metadata_arc); // Drop our Arc reference self.fs.cleanup_metadata_if_unused(entry_pos); } } diff --git a/awkernel_lib/src/file/fatfs/fs.rs b/awkernel_lib/src/file/fatfs/fs.rs index aa3110fa9..0995cd77b 100644 --- a/awkernel_lib/src/file/fatfs/fs.rs +++ b/awkernel_lib/src/file/fatfs/fs.rs @@ -747,7 +747,8 @@ impl FileSystem { let mut cache = self.metadata_cache.lock(&mut node); if let Some(metadata_arc) = cache.get(&entry_pos) { - if Arc::strong_count(metadata_arc) <= 2 { + // Only the cache holds a reference - safe to remove + if Arc::strong_count(metadata_arc) == 1 { cache.remove(&entry_pos); } } From 463f1b423990b98c93405d9215ef5d457ee9ee64 Mon Sep 17 00:00:00 2001 From: Koichi Date: Wed, 16 Jul 2025 17:46:43 +0900 Subject: [PATCH 14/27] fix truncate&read&write atomicity Signed-off-by: Koichi --- awkernel_lib/src/file/fatfs/file.rs | 306 +++++++++++++++++++++------- awkernel_lib/src/file/fatfs/fs.rs | 21 +- 2 files changed, 250 insertions(+), 77 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 60ebac063..b56b95858 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -1,3 +1,42 @@ +//! FAT filesystem file implementation. +//! +//! # Concurrency Limitations +//! +//! The FAT filesystem was not designed for concurrent access. While this implementation +//! provides some consistency guarantees through shared metadata and atomic operations, +//! there are inherent limitations: +//! +//! ## Known Limitations: +//! +//! 1. **Reader-Truncate Race**: A reader may access clusters that are being freed by a +//! concurrent truncate operation. This can lead to reading stale or corrupted data. +//! +//! 2. **Byte-Level Write Interleaving**: Multiple concurrent writers to the same file +//! may have their writes interleaved at the byte level within a cluster. 
+//! +//! 3. **No Transactional Guarantees**: Operations like file extension (allocating new +//! clusters and updating size) are not fully transactional. A crash or error during +//! these operations may leave the file in an inconsistent state. +//! +//! ## Consistency Guarantees: +//! +//! 1. **Atomic Truncation**: The truncate operation holds the metadata lock while updating +//! size and freeing clusters, preventing truncate-truncate races. +//! +//! 2. **Atomic First Cluster Allocation**: The first write to an empty file atomically +//! allocates and sets the first cluster, preventing lost cluster allocations. +//! +//! 3. **Shared Metadata**: Multiple file handles to the same file share the same metadata +//! (size, timestamps, first cluster), ensuring a consistent view. +//! +//! ## Recommendations: +//! +//! For applications requiring strong consistency guarantees: +//! - Use file-level locking (e.g., flock) to coordinate access +//! - Avoid concurrent reads during file truncation +//! - Design applications with single-writer, multiple-reader patterns +//! - Use fsync() to ensure data is written to disk before critical operations + use alloc::sync::Arc; use core::convert::TryFrom; use core::fmt::Debug; @@ -49,6 +88,11 @@ impl File { /// Truncate file in current position. /// + /// This operation is atomic - it holds the metadata lock while updating + /// size and freeing clusters, preventing truncate-truncate races. + /// + /// Lock ordering: metadata → disk (via FAT operations) + /// /// # Errors /// /// `Error::Io` will be returned if the underlying storage object returned an I/O error. @@ -59,16 +103,30 @@ impl File { pub fn truncate(&mut self) -> Result<(), Error> { log::trace!("File::truncate"); if let Some(ref metadata_arc) = self.metadata { + // IMPORTANT: We hold the metadata lock during the entire truncate operation + // to ensure atomicity. This follows the lock ordering: metadata → disk let mut node = MCSNode::new(); let mut metadata = metadata_arc.lock(&mut node); + + // Update metadata metadata.set_size(self.offset); if self.offset == 0 { metadata.set_first_cluster(None, self.fs.fat_type()); } + + // Get cluster info while holding lock let first_cluster = metadata.inner().first_cluster(self.fs.fat_type()); - drop(metadata); - - let current_cluster = self.get_current_cluster()?; + + // Calculate current cluster while still holding metadata lock + // This ensures consistent view of the file state + let current_cluster = if self.offset == 0 { + None + } else { + self.get_current_cluster_with_metadata(&metadata)? + }; + + // Free clusters while still holding metadata lock + // Metadata lock released here when metadata goes out of scope if let Some(cluster) = current_cluster { // current cluster is none only if offset is 0 debug_assert!(self.offset > 0); @@ -216,13 +274,6 @@ impl File { self.size().map(|s| (s - self.offset) as usize) } - fn set_first_cluster(&mut self, cluster: u32) { - if let Some(ref metadata_arc) = self.metadata { - let mut node = MCSNode::new(); - let mut metadata = metadata_arc.lock(&mut node); - metadata.set_first_cluster(Some(cluster), self.fs.fat_type()); - } - } pub(crate) fn first_cluster(&self) -> Option { if let Some(ref metadata_arc) = self.metadata { @@ -234,23 +285,18 @@ impl File { } } - /// Get the cluster that contains the current file position. - /// - /// Returns the cluster number that contains the byte at the current offset. 
- /// Special case: when offset is at a cluster boundary (offset % cluster_size == 0), - /// this returns the PREVIOUS cluster, not the next one. This matches the original - /// FatFS semantics where current_cluster represents "the last accessed cluster". - /// - /// Returns: - /// - Ok(Some(cluster)) - The cluster containing the current position - /// - Ok(None) - When offset is 0 or beyond the file's cluster chain - /// - Err - On I/O errors during cluster chain traversal - fn get_current_cluster(&self) -> Result, Error> { + /// Calculate current cluster with an existing metadata lock held. + /// This avoids code duplication between get_current_cluster() and operations + /// that need to hold the metadata lock for consistency. + fn get_current_cluster_with_metadata( + &self, + metadata: &impl core::ops::Deref + ) -> Result, Error> { if self.offset == 0 { return Ok(None); } - let Some(first_cluster) = self.first_cluster() else { + let Some(first_cluster) = metadata.inner().first_cluster(self.fs.fat_type()) else { return Ok(None); // empty file }; @@ -273,6 +319,31 @@ impl File { Ok(Some(cluster)) } + /// Get the cluster that contains the current file position. + /// + /// Returns the cluster number that contains the byte at the current offset. + /// Special case: when offset is at a cluster boundary (offset % cluster_size == 0), + /// this returns the PREVIOUS cluster, not the next one. This matches the original + /// FatFS semantics where current_cluster represents "the last accessed cluster". + /// + /// Returns: + /// - Ok(Some(cluster)) - The cluster containing the current position + /// - Ok(None) - When offset is 0 or beyond the file's cluster chain + /// - Err - On I/O errors during cluster chain traversal + fn get_current_cluster(&self) -> Result, Error> { + if self.offset == 0 { + return Ok(None); + } + + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + self.get_current_cluster_with_metadata(&metadata) + } else { + Ok(None) + } + } + fn flush(&mut self) -> Result<(), Error> { self.flush_dir_entry()?; let mut node = MCSNode::new(); @@ -337,58 +408,127 @@ impl Read for File Result { log::trace!("File::read"); let cluster_size = self.fs.cluster_size(); - let current_cluster_opt = if self.offset % cluster_size == 0 { - // at cluster boundary - get next cluster - if let Some(current) = self.get_current_cluster()? { - let r = FileSystem::cluster_iter(&self.fs, current).next(); - match r { - Some(Err(err)) => return Err(err), - Some(Ok(n)) => Some(n), - None => None, + + // Hold metadata lock during entire read operation to prevent truncate races + // Lock ordering: metadata → disk (allowed) + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + + // Check file size under lock + let size = metadata.inner().size(); + if let Some(s) = size { + if self.offset >= s { + return Ok(0); + } + } + + // Calculate current cluster under lock + let current_cluster_opt = if self.offset % cluster_size == 0 { + // at cluster boundary - get next cluster + if let Some(current) = self.get_current_cluster_with_metadata(&metadata)? 
{ + let r = FileSystem::cluster_iter(&self.fs, current).next(); + match r { + Some(Err(err)) => return Err(err), + Some(Ok(n)) => Some(n), + None => None, + } + } else if self.offset == 0 { + metadata.inner().first_cluster(self.fs.fat_type()) + } else { + None } - } else if self.offset == 0 { - // at offset 0, use first cluster - self.first_cluster() } else { - // beyond end of file - None + self.get_current_cluster_with_metadata(&metadata)? + }; + + let Some(current_cluster) = current_cluster_opt else { + return Ok(0); + }; + + // Calculate read parameters + let offset_in_cluster = self.offset % cluster_size; + let bytes_left_in_cluster = (cluster_size - offset_in_cluster) as usize; + let bytes_left_in_file = size.map(|s| ((s - self.offset) as usize).min(bytes_left_in_cluster)) + .unwrap_or(bytes_left_in_cluster); + let read_size = buf.len().min(bytes_left_in_cluster).min(bytes_left_in_file); + if read_size == 0 { + return Ok(0); } - } else { - self.get_current_cluster()? - }; - let Some(current_cluster) = current_cluster_opt else { - return Ok(0); - }; - let offset_in_cluster = self.offset % cluster_size; - let bytes_left_in_cluster = (cluster_size - offset_in_cluster) as usize; - let bytes_left_in_file = self.bytes_left_in_file().unwrap_or(bytes_left_in_cluster); - let read_size = buf.len().min(bytes_left_in_cluster).min(bytes_left_in_file); - if read_size == 0 { - return Ok(0); - } - log::trace!("read {read_size} bytes in cluster {current_cluster}"); - let offset_in_fs = - self.fs.offset_from_cluster(current_cluster) + u64::from(offset_in_cluster); - let read_bytes = { - let mut node = MCSNode::new(); - let mut disk_guard = self.fs.disk.lock(&mut node); - disk_guard.seek(SeekFrom::Start(offset_in_fs))?; - disk_guard.read(&mut buf[..read_size])? - }; - if read_bytes == 0 { - return Ok(0); - } - self.offset += read_bytes as u32; - - if let Some(ref metadata_arc) = self.metadata { + + log::trace!("read {read_size} bytes in cluster {current_cluster}"); + let offset_in_fs = self.fs.offset_from_cluster(current_cluster) + u64::from(offset_in_cluster); + + // Perform I/O while holding metadata lock + // This is lock ordering: metadata → disk (allowed) + let read_bytes = { + let mut node_disk = MCSNode::new(); + let mut disk_guard = self.fs.disk.lock(&mut node_disk); + disk_guard.seek(SeekFrom::Start(offset_in_fs))?; + disk_guard.read(&mut buf[..read_size])? + }; + + if read_bytes == 0 { + return Ok(0); + } + + self.offset += read_bytes as u32; + + // Update accessed date while still holding metadata lock if self.fs.options.update_accessed_date { - let mut node = MCSNode::new(); - let mut metadata = metadata_arc.lock(&mut node); let now = self.fs.options.time_provider.get_current_date(); metadata.set_accessed(now); } + + Ok(read_bytes) + } else { + // Root directory has no metadata + // For root dir, we can proceed with the original logic since it can't be truncated + let current_cluster_opt = if self.offset % cluster_size == 0 { + if let Some(current) = self.get_current_cluster()? { + let r = FileSystem::cluster_iter(&self.fs, current).next(); + match r { + Some(Err(err)) => return Err(err), + Some(Ok(n)) => Some(n), + None => None, + } + } else if self.offset == 0 { + self.first_cluster() + } else { + None + } + } else { + self.get_current_cluster()? 
+ }; + + let Some(current_cluster) = current_cluster_opt else { + return Ok(0); + }; + + let offset_in_cluster = self.offset % cluster_size; + let bytes_left_in_cluster = (cluster_size - offset_in_cluster) as usize; + let bytes_left_in_file = self.bytes_left_in_file().unwrap_or(bytes_left_in_cluster); + let read_size = buf.len().min(bytes_left_in_cluster).min(bytes_left_in_file); + if read_size == 0 { + return Ok(0); + } + + log::trace!("read {read_size} bytes in cluster {current_cluster}"); + let offset_in_fs = self.fs.offset_from_cluster(current_cluster) + u64::from(offset_in_cluster); + let read_bytes = { + let mut node = MCSNode::new(); + let mut disk_guard = self.fs.disk.lock(&mut node); + disk_guard.seek(SeekFrom::Start(offset_in_fs))?; + disk_guard.read(&mut buf[..read_size])? + }; + + if read_bytes == 0 { + return Ok(0); + } + + self.offset += read_bytes as u32; + Ok(read_bytes) } - Ok(read_bytes) } } @@ -431,13 +571,35 @@ impl Write for File FileSystem { fn flush_fs_info(&self) -> Result<(), Error> { let mut node = MCSNode::new(); - let fs_info_guard = self.fs_info.lock(&mut node); + let mut fs_info_guard = self.fs_info.lock(&mut node); if self.fat_type == FatType::Fat32 && fs_info_guard.dirty { - drop(fs_info_guard); let fs_info_sector_offset = self.offset_from_sector(u32::from(self.bpb.fs_info_sector)); + + // Acquire disk lock while holding fs_info lock (allowed by lock ordering: fs_info → disk) + // This prevents race conditions where fs_info could be modified between checking + // dirty flag and writing the data let mut node_disk = MCSNode::new(); let mut disk_guard = self.disk.lock(&mut node_disk); disk_guard.seek(SeekFrom::Start(fs_info_sector_offset))?; - - let mut node = MCSNode::new(); - let mut fs_info_guard = self.fs_info.lock(&mut node); fs_info_guard.serialize(&mut *disk_guard)?; fs_info_guard.dirty = false; } From cadebb8fd3892a29281f9deefd0edaa0c4eb8b71 Mon Sep 17 00:00:00 2001 From: Koichi Date: Wed, 16 Jul 2025 19:11:42 +0900 Subject: [PATCH 15/27] fix comments Signed-off-by: Koichi --- awkernel_lib/src/file/fatfs/file.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index b56b95858..85e728d08 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -8,13 +8,10 @@ //! //! ## Known Limitations: //! -//! 1. **Reader-Truncate Race**: A reader may access clusters that are being freed by a -//! concurrent truncate operation. This can lead to reading stale or corrupted data. -//! -//! 2. **Byte-Level Write Interleaving**: Multiple concurrent writers to the same file +//! 1. **Byte-Level Write Interleaving**: Multiple concurrent writers to the same file //! may have their writes interleaved at the byte level within a cluster. //! -//! 3. **No Transactional Guarantees**: Operations like file extension (allocating new +//! 2. **No Transactional Guarantees**: Operations like file extension (allocating new //! clusters and updating size) are not fully transactional. A crash or error during //! these operations may leave the file in an inconsistent state. //! @@ -29,11 +26,14 @@ //! 3. **Shared Metadata**: Multiple file handles to the same file share the same metadata //! (size, timestamps, first cluster), ensuring a consistent view. //! +//! 4. **Reader-Truncate Protection**: Read operations hold the metadata lock during cluster +//! chain traversal, preventing concurrent truncate operations from freeing clusters +//! 
while they're being read. +//! //! ## Recommendations: //! //! For applications requiring strong consistency guarantees: //! - Use file-level locking (e.g., flock) to coordinate access -//! - Avoid concurrent reads during file truncation //! - Design applications with single-writer, multiple-reader patterns //! - Use fsync() to ensure data is written to disk before critical operations From e04c9e5c0fafea84886f34da8b6f491d6c8a6ed8 Mon Sep 17 00:00:00 2001 From: Koichi Date: Wed, 16 Jul 2025 21:47:28 +0900 Subject: [PATCH 16/27] fix Signed-off-by: Koichi --- awkernel_lib/src/file/fatfs/file.rs | 175 ++++++++++------------------ 1 file changed, 60 insertions(+), 115 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 85e728d08..026c07995 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -7,7 +7,7 @@ //! there are inherent limitations: //! //! ## Known Limitations: -//! +//! //! 1. **Byte-Level Write Interleaving**: Multiple concurrent writers to the same file //! may have their writes interleaved at the byte level within a cluster. //! @@ -90,7 +90,7 @@ impl File { /// /// This operation is atomic - it holds the metadata lock while updating /// size and freeing clusters, preventing truncate-truncate races. - /// + /// /// Lock ordering: metadata → disk (via FAT operations) /// /// # Errors @@ -107,32 +107,23 @@ impl File { // to ensure atomicity. This follows the lock ordering: metadata → disk let mut node = MCSNode::new(); let mut metadata = metadata_arc.lock(&mut node); - - // Update metadata + metadata.set_size(self.offset); if self.offset == 0 { metadata.set_first_cluster(None, self.fs.fat_type()); } - - // Get cluster info while holding lock + let first_cluster = metadata.inner().first_cluster(self.fs.fat_type()); - - // Calculate current cluster while still holding metadata lock - // This ensures consistent view of the file state - let current_cluster = if self.offset == 0 { - None - } else { - self.get_current_cluster_with_metadata(&metadata)? - }; - - // Free clusters while still holding metadata lock - // Metadata lock released here when metadata goes out of scope + + let current_cluster = self.get_current_cluster_with_metadata(&metadata)?; + if let Some(cluster) = current_cluster { // current cluster is none only if offset is 0 debug_assert!(self.offset > 0); FileSystem::truncate_cluster_chain(&self.fs, cluster) } else { debug_assert!(self.offset == 0); + // TODO - better handle when get_current_cluster returns None especially when "cluster chain ends before the current position". if let Some(n) = first_cluster { FileSystem::free_cluster_chain(&self.fs, n)?; } @@ -274,7 +265,6 @@ impl File { self.size().map(|s| (s - self.offset) as usize) } - pub(crate) fn first_cluster(&self) -> Option { if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); @@ -286,27 +276,25 @@ impl File { } /// Calculate current cluster with an existing metadata lock held. - /// This avoids code duplication between get_current_cluster() and operations - /// that need to hold the metadata lock for consistency. 
fn get_current_cluster_with_metadata( &self, - metadata: &impl core::ops::Deref + metadata: &impl core::ops::Deref, ) -> Result, Error> { if self.offset == 0 { return Ok(None); } - + let Some(first_cluster) = metadata.inner().first_cluster(self.fs.fat_type()) else { return Ok(None); // empty file }; - + let offset_in_clusters = self.fs.clusters_from_bytes(u64::from(self.offset)); debug_assert!(offset_in_clusters > 0); - + let clusters_to_skip = offset_in_clusters - 1; let mut cluster = first_cluster; let mut iter = FileSystem::cluster_iter(&self.fs, first_cluster); - + for _i in 0..clusters_to_skip { cluster = if let Some(r) = iter.next() { r? @@ -315,26 +303,19 @@ impl File { return Ok(None); }; } - + Ok(Some(cluster)) } /// Get the cluster that contains the current file position. - /// - /// Returns the cluster number that contains the byte at the current offset. + /// /// Special case: when offset is at a cluster boundary (offset % cluster_size == 0), - /// this returns the PREVIOUS cluster, not the next one. This matches the original - /// FatFS semantics where current_cluster represents "the last accessed cluster". - /// - /// Returns: - /// - Ok(Some(cluster)) - The cluster containing the current position - /// - Ok(None) - When offset is 0 or beyond the file's cluster chain - /// - Err - On I/O errors during cluster chain traversal + /// this returns the PREVIOUS cluster, not the next one. fn get_current_cluster(&self) -> Result, Error> { if self.offset == 0 { return Ok(None); } - + if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); let metadata = metadata_arc.lock(&mut node); @@ -383,7 +364,7 @@ impl Drop for File { let metadata = metadata_arc.lock(&mut node); let entry_pos = metadata.pos; drop(metadata); - drop(metadata_arc); // Drop our Arc reference + drop(metadata_arc); self.fs.cleanup_metadata_if_unused(entry_pos); } } @@ -408,57 +389,62 @@ impl Read for File Result { log::trace!("File::read"); let cluster_size = self.fs.cluster_size(); - + // Hold metadata lock during entire read operation to prevent truncate races // Lock ordering: metadata → disk (allowed) if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); let mut metadata = metadata_arc.lock(&mut node); - - // Check file size under lock + let size = metadata.inner().size(); if let Some(s) = size { if self.offset >= s { return Ok(0); } } - - // Calculate current cluster under lock + let current_cluster_opt = if self.offset % cluster_size == 0 { - // at cluster boundary - get next cluster - if let Some(current) = self.get_current_cluster_with_metadata(&metadata)? { - let r = FileSystem::cluster_iter(&self.fs, current).next(); - match r { - Some(Err(err)) => return Err(err), - Some(Ok(n)) => Some(n), - None => None, + // next cluster + match self.get_current_cluster_with_metadata(&metadata)? { + None => { + if self.offset == 0 { + metadata.inner().first_cluster(self.fs.fat_type()) + } else { + // TODO - better handle when get_current_cluster returns None especially when "cluster chain ends before the current position". + None + } + } + Some(n) => { + let r = FileSystem::cluster_iter(&self.fs, n).next(); + match r { + Some(Err(err)) => return Err(err), + Some(Ok(n)) => Some(n), + None => None, + } } - } else if self.offset == 0 { - metadata.inner().first_cluster(self.fs.fat_type()) - } else { - None } } else { self.get_current_cluster_with_metadata(&metadata)? 
}; - + let Some(current_cluster) = current_cluster_opt else { return Ok(0); }; - - // Calculate read parameters + let offset_in_cluster = self.offset % cluster_size; let bytes_left_in_cluster = (cluster_size - offset_in_cluster) as usize; - let bytes_left_in_file = size.map(|s| ((s - self.offset) as usize).min(bytes_left_in_cluster)) + let bytes_left_in_file = size + .map(|s| (s - self.offset) as usize) .unwrap_or(bytes_left_in_cluster); let read_size = buf.len().min(bytes_left_in_cluster).min(bytes_left_in_file); if read_size == 0 { return Ok(0); } - + log::trace!("read {read_size} bytes in cluster {current_cluster}"); - let offset_in_fs = self.fs.offset_from_cluster(current_cluster) + u64::from(offset_in_cluster); - + let offset_in_fs = + self.fs.offset_from_cluster(current_cluster) + u64::from(offset_in_cluster); + // Perform I/O while holding metadata lock // This is lock ordering: metadata → disk (allowed) let read_bytes = { @@ -467,67 +453,22 @@ impl Read for File return Err(err), - Some(Ok(n)) => Some(n), - None => None, - } - } else if self.offset == 0 { - self.first_cluster() - } else { - None - } - } else { - self.get_current_cluster()? - }; - - let Some(current_cluster) = current_cluster_opt else { - return Ok(0); - }; - - let offset_in_cluster = self.offset % cluster_size; - let bytes_left_in_cluster = (cluster_size - offset_in_cluster) as usize; - let bytes_left_in_file = self.bytes_left_in_file().unwrap_or(bytes_left_in_cluster); - let read_size = buf.len().min(bytes_left_in_cluster).min(bytes_left_in_file); - if read_size == 0 { - return Ok(0); - } - - log::trace!("read {read_size} bytes in cluster {current_cluster}"); - let offset_in_fs = self.fs.offset_from_cluster(current_cluster) + u64::from(offset_in_cluster); - let read_bytes = { - let mut node = MCSNode::new(); - let mut disk_guard = self.fs.disk.lock(&mut node); - disk_guard.seek(SeekFrom::Start(offset_in_fs))?; - disk_guard.read(&mut buf[..read_size])? - }; - - if read_bytes == 0 { - return Ok(0); - } - - self.offset += read_bytes as u32; - Ok(read_bytes) + unreachable!("Root directory should not be a file"); } } } @@ -566,7 +507,7 @@ impl Write for File Write for File Write for File Date: Thu, 17 Jul 2025 00:05:20 +0900 Subject: [PATCH 17/27] fix seek Signed-off-by: Koichi --- awkernel_lib/src/file/fatfs/file.rs | 87 +++++++++++++++++++---------- 1 file changed, 59 insertions(+), 28 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 026c07995..062903edf 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -91,7 +91,7 @@ impl File { /// This operation is atomic - it holds the metadata lock while updating /// size and freeing clusters, preventing truncate-truncate races. /// - /// Lock ordering: metadata → disk (via FAT operations) + /// Lock ordering: metadata → disk /// /// # Errors /// @@ -260,10 +260,6 @@ impl File { } } - fn bytes_left_in_file(&self) -> Option { - // Note: seeking beyond end of file is not allowed so overflow is impossible - self.size().map(|s| (s - self.offset) as usize) - } pub(crate) fn first_cluster(&self) -> Option { if let Some(ref metadata_arc) = self.metadata { @@ -391,7 +387,7 @@ impl Read for File Read for File Read for File Write for File return Err(err), - Some(Ok(n)) => Some(n), - None => None, + // next cluster + let next_cluster = match self.get_current_cluster()? 
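// Illustrative sketch, not part of the patch: how a single read call is bounded. A
// read never crosses a cluster boundary and never passes the recorded size, so the
// returned count is the minimum of three limits (hypothetical free function):
fn read_size_for(buf_len: usize, offset: u32, cluster_size: u32, file_size: Option<u32>) -> usize {
    let offset_in_cluster = offset % cluster_size;
    let bytes_left_in_cluster = (cluster_size - offset_in_cluster) as usize;
    let bytes_left_in_file = file_size
        .map(|s| s.saturating_sub(offset) as usize)
        .unwrap_or(bytes_left_in_cluster); // directories carry no size, as above
    buf_len.min(bytes_left_in_cluster).min(bytes_left_in_file)
}

// e.g. a 4096-byte buffer at offset 500 in 512-byte clusters with file size 600:
// min(4096, 12, 100) = 12 bytes, so callers loop to read across cluster boundaries.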
{ + None => { + if self.offset == 0 { + self.first_cluster() + } else { + None + } + } + Some(n) => { + let r = FileSystem::cluster_iter(&self.fs, n).next(); + match r { + Some(Err(err)) => return Err(err), + Some(Ok(n)) => Some(n), + None => None, + } } - } else if self.offset == 0 { - // at offset 0, use first cluster - self.first_cluster() - } else { - // beyond end of file - need to allocate - None }; if let Some(n) = next_cluster { n } else { // end of chain reached - allocate new cluster - // For first cluster allocation, we need to hold metadata lock to prevent races if self.first_cluster().is_none() { if let Some(ref metadata_arc) = self.metadata { - // IMPORTANT: Lock ordering - metadata → fs_info → disk (via alloc_cluster) - // We must acquire metadata lock before calling alloc_cluster + // IMPORTANT: Lock ordering - metadata held while calling alloc_cluster internally acquires: fs_info, disk (one each) let mut node = MCSNode::new(); let mut metadata = metadata_arc.lock(&mut node); - // Double-check under lock in case another thread allocated first cluster if metadata.inner().first_cluster(self.fs.fat_type()).is_none() { let new_cluster = @@ -533,10 +527,9 @@ impl Write for File Seek for File { log::error!("Invalid seek offset"); return Err(Error::InvalidInput); }; + + // First, clamp to file size if known if let Some(size) = size_opt { if new_offset > size { log::warn!("Seek beyond the end of the file"); new_offset = size; } } + + // Then, clamp to existing cluster chain to prevent write allocation bugs + if new_offset > 0 { + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + let first_cluster = metadata.inner().first_cluster(self.fs.fat_type()); + + if let Some(first) = first_cluster { + // Calculate how many clusters are needed for the target offset + let offset_in_clusters = self.fs.clusters_from_bytes(u64::from(new_offset)); + if offset_in_clusters > 0 { + let clusters_to_skip = offset_in_clusters - 1; + let mut _cluster = first; + let mut iter = FileSystem::cluster_iter(&self.fs, first); + + for i in 0..clusters_to_skip { + _cluster = if let Some(r) = iter.next() { + r? 
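// Illustrative sketch, not part of the patch: the first-cluster allocation above is a
// double-checked pattern. The first cluster is re-read after taking the metadata lock
// because another writer may have allocated it in the meantime. Hypothetical types;
// std::sync::Mutex stands in for awkernel's Mutex.
use std::sync::Mutex;

struct Meta { first_cluster: Option<u32> }

fn first_cluster_for_write(meta: &Mutex<Meta>, alloc_cluster: impl Fn() -> u32) -> u32 {
    let mut md = meta.lock().unwrap();
    match md.first_cluster {
        Some(c) => c, // another task won the race; reuse its cluster
        None => {
            let c = alloc_cluster(); // would take the fs_info and disk locks internally
            md.first_cluster = Some(c);
            c
        }
    }
}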
+ } else { + // Cluster chain ends before the target position + // Seek to the end of the last allocated cluster + new_offset = self.fs.bytes_from_clusters(i + 1) as u32; + log::warn!("Seek beyond allocated clusters, clamped to {}", new_offset); + break; + }; + } + } + } else { + // No clusters allocated yet - can only seek to position 0 + new_offset = 0; + log::warn!("Seek in empty file, clamped to 0"); + } + } + } + log::trace!( "file seek {} -> {} - metadata {:?}", self.offset, From d167e8f7f6d6bcd4c4e24326774445822a3bab56 Mon Sep 17 00:00:00 2001 From: Koichi Imai Date: Thu, 17 Jul 2025 09:29:14 +0900 Subject: [PATCH 18/27] delete unnecessary codes Signed-off-by: Koichi Imai --- awkernel_lib/src/file/fatfs/file.rs | 44 +++++++++-------------------- 1 file changed, 13 insertions(+), 31 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 062903edf..70572aadf 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -260,7 +260,6 @@ impl File { } } - pub(crate) fn first_cluster(&self) -> Option { if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); @@ -392,13 +391,6 @@ impl Read for File= s { - return Ok(0); - } - } - let current_cluster_opt = if self.offset % cluster_size == 0 { // next cluster match self.get_current_cluster_with_metadata(&metadata)? { @@ -427,6 +419,12 @@ impl Read for File= s { + return Ok(0); + } + } let offset_in_cluster = self.offset % cluster_size; let bytes_left_in_cluster = (cluster_size - offset_in_cluster) as usize; let bytes_left_in_file = size @@ -540,7 +538,6 @@ impl Write for File n, None => panic!("Offset inside cluster but no cluster allocated"), @@ -586,22 +583,20 @@ impl Seek for File { log::error!("Invalid seek offset"); return Err(Error::InvalidInput); }; - - // First, clamp to file size if known + if let Some(size) = size_opt { if new_offset > size { log::warn!("Seek beyond the end of the file"); new_offset = size; } } - - // Then, clamp to existing cluster chain to prevent write allocation bugs + if new_offset > 0 { if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); let metadata = metadata_arc.lock(&mut node); let first_cluster = metadata.inner().first_cluster(self.fs.fat_type()); - + if let Some(first) = first_cluster { // Calculate how many clusters are needed for the target offset let offset_in_clusters = self.fs.clusters_from_bytes(u64::from(new_offset)); @@ -609,37 +604,24 @@ impl Seek for File { let clusters_to_skip = offset_in_clusters - 1; let mut _cluster = first; let mut iter = FileSystem::cluster_iter(&self.fs, first); - + for i in 0..clusters_to_skip { _cluster = if let Some(r) = iter.next() { r? 
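// Illustrative sketch, not part of the patch: the clamping policy Seek applies above.
// The target is clamped to the recorded size first, then to what the cluster chain can
// actually back, so a later write never starts beyond allocated space.
fn clamp_seek(target: u64, file_size: u64, allocated_clusters: u64, cluster_size: u64) -> u64 {
    let mut new_offset = target.min(file_size);     // 1. never past the recorded EOF
    let backed = allocated_clusters * cluster_size; // bytes the existing chain can hold
    if new_offset > backed {
        new_offset = backed;                        // 2. never past the chain's end
    }
    new_offset
}

// e.g. seeking to 5000 in a file whose size says 4096 but whose chain holds only two
// 512-byte clusters yields 1024, the end of the last allocated cluster.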
} else { - // Cluster chain ends before the target position - // Seek to the end of the last allocated cluster + // Cluster chain ends before the new position seek to the end of the last cluster new_offset = self.fs.bytes_from_clusters(i + 1) as u32; - log::warn!("Seek beyond allocated clusters, clamped to {}", new_offset); break; }; } } } else { - // No clusters allocated yet - can only seek to position 0 + // empty file - always seek to 0 new_offset = 0; - log::warn!("Seek in empty file, clamped to 0"); } } } - - log::trace!( - "file seek {} -> {} - metadata {:?}", - self.offset, - new_offset, - self.metadata.is_some() - ); - if new_offset == self.offset { - // position is the same - nothing to do - return Ok(u64::from(self.offset)); - } + self.offset = new_offset; Ok(u64::from(self.offset)) } From 748c0359da49ec3f24e053007466cf2302387b22 Mon Sep 17 00:00:00 2001 From: Koichi Imai Date: Thu, 17 Jul 2025 10:04:07 +0900 Subject: [PATCH 19/27] fix err mngment get_current_cluster Signed-off-by: Koichi Imai --- awkernel_lib/src/file/fatfs/file.rs | 33 ++--------------------------- 1 file changed, 2 insertions(+), 31 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 70572aadf..23c669a52 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -14,28 +14,6 @@ //! 2. **No Transactional Guarantees**: Operations like file extension (allocating new //! clusters and updating size) are not fully transactional. A crash or error during //! these operations may leave the file in an inconsistent state. -//! -//! ## Consistency Guarantees: -//! -//! 1. **Atomic Truncation**: The truncate operation holds the metadata lock while updating -//! size and freeing clusters, preventing truncate-truncate races. -//! -//! 2. **Atomic First Cluster Allocation**: The first write to an empty file atomically -//! allocates and sets the first cluster, preventing lost cluster allocations. -//! -//! 3. **Shared Metadata**: Multiple file handles to the same file share the same metadata -//! (size, timestamps, first cluster), ensuring a consistent view. -//! -//! 4. **Reader-Truncate Protection**: Read operations hold the metadata lock during cluster -//! chain traversal, preventing concurrent truncate operations from freeing clusters -//! while they're being read. -//! -//! ## Recommendations: -//! -//! For applications requiring strong consistency guarantees: -//! - Use file-level locking (e.g., flock) to coordinate access -//! - Design applications with single-writer, multiple-reader patterns -//! - Use fsync() to ensure data is written to disk before critical operations use alloc::sync::Arc; use core::convert::TryFrom; @@ -88,11 +66,6 @@ impl File { /// Truncate file in current position. /// - /// This operation is atomic - it holds the metadata lock while updating - /// size and freeing clusters, preventing truncate-truncate races. - /// - /// Lock ordering: metadata → disk - /// /// # Errors /// /// `Error::Io` will be returned if the underlying storage object returned an I/O error. @@ -104,7 +77,7 @@ impl File { log::trace!("File::truncate"); if let Some(ref metadata_arc) = self.metadata { // IMPORTANT: We hold the metadata lock during the entire truncate operation - // to ensure atomicity. This follows the lock ordering: metadata → disk + // to ensure atomicity. 
Lock ordering: metadata → disk let mut node = MCSNode::new(); let mut metadata = metadata_arc.lock(&mut node); @@ -123,7 +96,6 @@ impl File { FileSystem::truncate_cluster_chain(&self.fs, cluster) } else { debug_assert!(self.offset == 0); - // TODO - better handle when get_current_cluster returns None especially when "cluster chain ends before the current position". if let Some(n) = first_cluster { FileSystem::free_cluster_chain(&self.fs, n)?; } @@ -295,7 +267,7 @@ impl File { r? } else { // cluster chain ends before the current position - return Ok(None); + return Err(Error::CorruptedFileSystem); }; } @@ -398,7 +370,6 @@ impl Read for File Date: Thu, 17 Jul 2025 11:49:50 +0900 Subject: [PATCH 20/27] fix comments Signed-off-by: Koichi Imai --- awkernel_lib/src/file/fatfs/file.rs | 38 +++++------------------------ 1 file changed, 6 insertions(+), 32 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 23c669a52..77ff72052 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -1,20 +1,3 @@ -//! FAT filesystem file implementation. -//! -//! # Concurrency Limitations -//! -//! The FAT filesystem was not designed for concurrent access. While this implementation -//! provides some consistency guarantees through shared metadata and atomic operations, -//! there are inherent limitations: -//! -//! ## Known Limitations: -//! -//! 1. **Byte-Level Write Interleaving**: Multiple concurrent writers to the same file -//! may have their writes interleaved at the byte level within a cluster. -//! -//! 2. **No Transactional Guarantees**: Operations like file extension (allocating new -//! clusters and updating size) are not fully transactional. A crash or error during -//! these operations may leave the file in an inconsistent state. - use alloc::sync::Arc; use core::convert::TryFrom; use core::fmt::Debug; @@ -69,10 +52,6 @@ impl File { /// # Errors /// /// `Error::Io` will be returned if the underlying storage object returned an I/O error. - /// - /// # Panics - /// - /// Will panic if this is the root directory. pub fn truncate(&mut self) -> Result<(), Error> { log::trace!("File::truncate"); if let Some(ref metadata_arc) = self.metadata { @@ -102,7 +81,7 @@ impl File { Ok(()) } } else { - panic!("Trying to truncate a file without metadata (root directory?)"); + unreachable!("Root directory should not be a file"); } } @@ -276,7 +255,7 @@ impl File { /// Get the cluster that contains the current file position. /// - /// Special case: when offset is at a cluster boundary (offset % cluster_size == 0), + /// NOTE: when offset is at a cluster boundary (offset % cluster_size == 0), /// this returns the PREVIOUS cluster, not the next one. 
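// Illustrative sketch, not part of the patch: because the "current" cluster at an
// exact boundary is the previous one, the read and write paths follow one extra FAT
// link before touching data. A standalone model over an in-memory chain:
fn cluster_for_io(chain: &[u32], offset: u64, cluster_size: u64) -> Option<u32> {
    if offset == 0 {
        return chain.first().copied(); // start of the file, if a cluster exists at all
    }
    let last_touched = ((offset + cluster_size - 1) / cluster_size - 1) as usize;
    if offset % cluster_size == 0 {
        chain.get(last_touched + 1).copied() // boundary: step to the next cluster
    } else {
        chain.get(last_touched).copied()
    }
}

// chain [5, 9, 12] with 512-byte clusters: offset 512 -> Some(9), offset 700 -> Some(9),
// offset 1536 -> None, which is where the write path allocates a new cluster.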
fn get_current_cluster(&self) -> Result, Error> { if self.offset == 0 { @@ -487,7 +466,7 @@ impl Write for File Write for File n, - None => panic!("Offset inside cluster but no cluster allocated"), + None => return Err(Error::CorruptedFileSystem), } }; log::trace!("write {write_size} bytes in cluster {current_cluster}"); @@ -569,18 +548,13 @@ impl Seek for File { let first_cluster = metadata.inner().first_cluster(self.fs.fat_type()); if let Some(first) = first_cluster { - // Calculate how many clusters are needed for the target offset let offset_in_clusters = self.fs.clusters_from_bytes(u64::from(new_offset)); if offset_in_clusters > 0 { let clusters_to_skip = offset_in_clusters - 1; - let mut _cluster = first; let mut iter = FileSystem::cluster_iter(&self.fs, first); - for i in 0..clusters_to_skip { - _cluster = if let Some(r) = iter.next() { - r? - } else { - // Cluster chain ends before the new position seek to the end of the last cluster + if iter.next().is_none() { + // cluster chain ends before the new position - seek to the end of the last cluster new_offset = self.fs.bytes_from_clusters(i + 1) as u32; break; }; From 4533cee64ac82c6126c06c8de9de35e63bebbad8 Mon Sep 17 00:00:00 2001 From: Koichi Imai Date: Thu, 17 Jul 2025 15:25:20 +0900 Subject: [PATCH 21/27] organize comments Signed-off-by: Koichi Imai --- awkernel_lib/src/file/fatfs/file.rs | 4 ++-- awkernel_lib/src/file/fatfs/fs.rs | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 77ff72052..339b91e01 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -462,7 +462,7 @@ impl Write for File Write for File FileSystem { let mut fs_info_guard = self.fs_info.lock(&mut node); if self.fat_type == FatType::Fat32 && fs_info_guard.dirty { let fs_info_sector_offset = self.offset_from_sector(u32::from(self.bpb.fs_info_sector)); - - // Acquire disk lock while holding fs_info lock (allowed by lock ordering: fs_info → disk) + // This prevents race conditions where fs_info could be modified between checking // dirty flag and writing the data + // Lock ordering: fs_info → disk let mut node_disk = MCSNode::new(); let mut disk_guard = self.disk.lock(&mut node_disk); disk_guard.seek(SeekFrom::Start(fs_info_sector_offset))?; From ec4999aa45f7d2b83c134e17ddac1c1c5023e1d7 Mon Sep 17 00:00:00 2001 From: Koichi Imai Date: Thu, 17 Jul 2025 18:03:34 +0900 Subject: [PATCH 22/27] fix get_current_cluster_with_metadatas Signed-off-by: Koichi Imai --- awkernel_lib/src/file/fatfs/file.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 339b91e01..620f13c4b 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -231,7 +231,7 @@ impl File { } let Some(first_cluster) = metadata.inner().first_cluster(self.fs.fat_type()) else { - return Ok(None); // empty file + return Err(Error::CorruptedFileSystem); }; let offset_in_clusters = self.fs.clusters_from_bytes(u64::from(self.offset)); @@ -346,11 +346,8 @@ impl Read for File { - if self.offset == 0 { - metadata.inner().first_cluster(self.fs.fat_type()) - } else { - None - } + // only if offset is 0 + metadata.inner().first_cluster(self.fs.fat_type()) } Some(n) => { let r = FileSystem::cluster_iter(&self.fs, n).next(); @@ -437,11 +434,8 @@ impl Write for File { - if self.offset == 0 { - self.first_cluster() - } else { - None - 
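// Illustrative sketch, not part of the patch: the flush pattern referenced by the
// fs_info comment above. The dirty flag is checked and cleared while the fs_info lock
// is held, and the disk lock is taken inside that critical section (fs_info -> disk),
// so no update can slip in between the check and the write-back. Hypothetical types.
use std::sync::Mutex;

struct FsInfo { dirty: bool, free_cluster_count: u32 }
struct Disk;

struct Fat32 {
    fs_info: Mutex<FsInfo>,
    disk: Mutex<Disk>,
}

impl Fat32 {
    fn flush_fs_info(&self) {
        let mut info = self.fs_info.lock().unwrap(); // 1. fs_info
        if info.dirty {
            let _disk = self.disk.lock().unwrap();   // 2. disk, still under fs_info
            // serialize `info.free_cluster_count` into the FSInfo sector here
            info.dirty = false;
        }
    }
}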
} + // only if offset is 0 + self.first_cluster() } Some(n) => { let r = FileSystem::cluster_iter(&self.fs, n).next(); @@ -459,7 +453,7 @@ impl Write for File Date: Thu, 17 Jul 2025 18:16:33 +0900 Subject: [PATCH 23/27] fix write function Signed-off-by: Koichi --- awkernel_lib/src/file/fatfs/file.rs | 113 +++++++++++++++------------- 1 file changed, 60 insertions(+), 53 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 620f13c4b..b5ec16292 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -423,83 +423,90 @@ impl Write for File { - // only if offset is 0 - self.first_cluster() - } - Some(n) => { - let r = FileSystem::cluster_iter(&self.fs, n).next(); - match r { - Some(Err(err)) => return Err(err), - Some(Ok(n)) => Some(n), - None => None, + + // Hold metadata lock during cluster calculation and allocation + // Lock ordering: metadata → fs_info → disk + let current_cluster = if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + + if self.offset % cluster_size == 0 { + // At cluster boundary - need next cluster + let current_cluster_opt = self.get_current_cluster_with_metadata(&metadata)?; + + // Check if next cluster exists + let next_cluster = match current_cluster_opt { + None => { + // Only if offset is 0 + metadata.inner().first_cluster(self.fs.fat_type()) } - } - }; - - if let Some(n) = next_cluster { - n - } else { - // end of chain reached - allocate new cluster - if self.first_cluster().is_none() { - if let Some(ref metadata_arc) = self.metadata { - // Lock ordering - metadata held while calling alloc_cluster internally acquires: fs_info, disk (one each) - let mut node = MCSNode::new(); - let mut metadata = metadata_arc.lock(&mut node); - // Double-check under lock in case another task allocated first cluster - if metadata.inner().first_cluster(self.fs.fat_type()).is_none() { - let new_cluster = - FileSystem::alloc_cluster(&self.fs, None, self.is_dir())?; - log::trace!("allocated cluster {new_cluster}"); - metadata.set_first_cluster(Some(new_cluster), self.fs.fat_type()); - new_cluster - } else { - // Another task allocated first cluster, try again - drop(metadata); - return self.write(buf); + Some(n) => { + let r = FileSystem::cluster_iter(&self.fs, n).next(); + match r { + Some(Err(err)) => return Err(err), + Some(Ok(n)) => Some(n), + None => None, } - } else { - unreachable!("Root directory should not be a file"); } + }; + + if let Some(n) = next_cluster { + n } else { - let new_cluster = FileSystem::alloc_cluster( - &self.fs, - self.get_current_cluster()?, - self.is_dir(), - )?; - log::trace!("allocated cluster {new_cluster}"); - new_cluster + // Need to allocate new cluster + let first_cluster_opt = metadata.inner().first_cluster(self.fs.fat_type()); + + if first_cluster_opt.is_none() { + // Allocate first cluster + let new_cluster = FileSystem::alloc_cluster(&self.fs, None, self.is_dir())?; + log::trace!("allocated first cluster {new_cluster}"); + metadata.set_first_cluster(Some(new_cluster), self.fs.fat_type()); + new_cluster + } else { + // Allocate subsequent cluster + let new_cluster = FileSystem::alloc_cluster( + &self.fs, + current_cluster_opt, + self.is_dir(), + )?; + log::trace!("allocated cluster {new_cluster}"); + new_cluster + } + } + } else { + // Within cluster - get current cluster + match self.get_current_cluster_with_metadata(&metadata)? 
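// Illustrative sketch, not part of the patch: how a (cluster, offset) pair becomes an
// absolute byte position for the disk I/O below. Assumes the usual FAT layout where
// data clusters are numbered from 2 and the data area starts at `data_start`.
fn offset_from_cluster(cluster: u32, data_start: u64, cluster_size: u64) -> u64 {
    data_start + u64::from(cluster - 2) * cluster_size
}

fn offset_in_fs(cluster: u32, offset: u32, cluster_size: u32, data_start: u64) -> u64 {
    let offset_in_cluster = u64::from(offset % cluster_size);
    offset_from_cluster(cluster, data_start, u64::from(cluster_size)) + offset_in_cluster
}

// e.g. cluster 3, file offset 1030, 512-byte clusters, data area at byte 16384:
// 16384 + 1 * 512 + 6 = 16902.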
{ + Some(n) => n, + None => return Err(Error::CorruptedFileSystem), } } } else { - match self.get_current_cluster()? { - Some(n) => n, - None => return Err(Error::CorruptedFileSystem), - } + unreachable!("Root directory should not be a file"); }; + log::trace!("write {write_size} bytes in cluster {current_cluster}"); - let offset_in_fs = - self.fs.offset_from_cluster(current_cluster) + u64::from(offset_in_cluster); + let offset_in_fs = self.fs.offset_from_cluster(current_cluster) + u64::from(offset_in_cluster); + let written_bytes = { let mut node = MCSNode::new(); let mut disk_guard = self.fs.disk.lock(&mut node); disk_guard.seek(SeekFrom::Start(offset_in_fs))?; disk_guard.write(&buf[..write_size])? }; + if written_bytes == 0 { return Ok(0); } - // some bytes were writter - update position and optionally size + + // Update position and optionally size self.offset += written_bytes as u32; self.update_dir_entry_after_write(); Ok(written_bytes) From ee6663e04af692ec813c7df73f4eb46d4a213eca Mon Sep 17 00:00:00 2001 From: Koichi Imai Date: Thu, 17 Jul 2025 18:31:38 +0900 Subject: [PATCH 24/27] refactor write Signed-off-by: Koichi Imai --- awkernel_lib/src/file/fatfs/file.rs | 46 ++++++++++------------------- 1 file changed, 16 insertions(+), 30 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index b5ec16292..3d8906c48 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -423,26 +423,20 @@ impl Write for File { // Only if offset is 0 @@ -457,29 +451,20 @@ impl Write for File Write for File Date: Thu, 17 Jul 2025 18:39:18 +0900 Subject: [PATCH 25/27] minor fix Signed-off-by: Koichi Imai --- awkernel_lib/src/file/fatfs/file.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 3d8906c48..6e56afb6d 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -366,14 +366,14 @@ impl Read for File= s { return Ok(0); } } - let offset_in_cluster = self.offset % cluster_size; - let bytes_left_in_cluster = (cluster_size - offset_in_cluster) as usize; let bytes_left_in_file = size .map(|s| (s - self.offset) as usize) .unwrap_or(bytes_left_in_cluster); @@ -458,16 +458,15 @@ impl Write for File n, None => return Err(Error::CorruptedFileSystem), @@ -480,19 +479,16 @@ impl Write for File Date: Thu, 17 Jul 2025 18:48:25 +0900 Subject: [PATCH 26/27] fix seek Signed-off-by: Koichi Imai --- awkernel_lib/src/file/fatfs/file.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 6e56afb6d..110bfcc98 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -516,7 +516,6 @@ impl Seek for File { log::error!("Invalid seek offset"); return Err(Error::InvalidInput); }; - if let Some(size) = size_opt { if new_offset > size { log::warn!("Seek beyond the end of the file"); @@ -528,13 +527,13 @@ impl Seek for File { if let Some(ref metadata_arc) = self.metadata { let mut node = MCSNode::new(); let metadata = metadata_arc.lock(&mut node); - let first_cluster = metadata.inner().first_cluster(self.fs.fat_type()); + let first_cluster_opt = metadata.inner().first_cluster(self.fs.fat_type()); - if let Some(first) = first_cluster { - let offset_in_clusters = self.fs.clusters_from_bytes(u64::from(new_offset)); - if offset_in_clusters > 0 { - let 
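// Illustrative sketch, not part of the patch: a single write call stops at the next
// cluster boundary and returns the number of bytes it actually wrote, so callers that
// need the whole buffer on disk loop until it is consumed. Hypothetical trait name.
#[derive(Debug)]
enum WriteError { NoProgress }

trait ClusterWrite {
    // Writes at most up to the next cluster boundary; returns bytes actually written.
    fn write(&mut self, buf: &[u8]) -> Result<usize, WriteError>;
}

fn write_all<W: ClusterWrite>(w: &mut W, mut buf: &[u8]) -> Result<(), WriteError> {
    while !buf.is_empty() {
        let n = w.write(buf)?;
        if n == 0 {
            return Err(WriteError::NoProgress); // e.g. the disk is full
        }
        buf = &buf[n..];
    }
    Ok(())
}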
clusters_to_skip = offset_in_clusters - 1; - let mut iter = FileSystem::cluster_iter(&self.fs, first); + if let Some(first_cluster) = first_cluster_opt { + let new_offset_in_clusters = self.fs.clusters_from_bytes(u64::from(new_offset)); + if new_offset_in_clusters > 0 { + let clusters_to_skip = new_offset_in_clusters - 1; + let mut iter = FileSystem::cluster_iter(&self.fs, first_cluster); for i in 0..clusters_to_skip { if iter.next().is_none() { // cluster chain ends before the new position - seek to the end of the last cluster From c71071f9a27988a65d9b34e252735cb630152124 Mon Sep 17 00:00:00 2001 From: Koichi Imai Date: Thu, 17 Jul 2025 20:39:37 +0900 Subject: [PATCH 27/27] fix root_dir metadata bug Signed-off-by: Koichi Imai --- awkernel_lib/src/file/fatfs/dir_entry.rs | 23 +++++++++++++++-------- awkernel_lib/src/file/fatfs/file.rs | 23 +++++++---------------- awkernel_lib/src/file/fatfs/fs.rs | 14 +++++++++++--- 3 files changed, 33 insertions(+), 27 deletions(-) diff --git a/awkernel_lib/src/file/fatfs/dir_entry.rs b/awkernel_lib/src/file/fatfs/dir_entry.rs index a1e671755..4718e05e3 100644 --- a/awkernel_lib/src/file/fatfs/dir_entry.rs +++ b/awkernel_lib/src/file/fatfs/dir_entry.rs @@ -131,16 +131,16 @@ impl ShortName { #[derive(Clone, Debug, Default)] pub(crate) struct DirFileEntryData { name: [u8; SFN_SIZE], - attrs: FileAttributes, + pub attrs: FileAttributes, reserved_0: u8, create_time_0: u8, create_time_1: u16, create_date: u16, access_date: u16, - first_cluster_hi: u16, + pub first_cluster_hi: u16, modify_time: u16, modify_date: u16, - first_cluster_lo: u16, + pub first_cluster_lo: u16, size: u32, } @@ -153,6 +153,15 @@ impl DirFileEntryData { } } + pub(crate) fn new_for_rootdir(first_cluster: u32) -> Self { + Self { + attrs: FileAttributes::DIRECTORY, + first_cluster_hi: (first_cluster >> 16) as u16, + first_cluster_lo: (first_cluster & 0xFFFF) as u16, + ..Self::default() + } + } + pub(crate) fn renamed(&self, new_name: [u8; SFN_SIZE]) -> Self { let mut sfn_entry = self.clone(); sfn_entry.name = new_name; @@ -640,11 +649,9 @@ impl DirEntry File { assert!(!self.is_dir(), "Not a file entry"); - let metadata = self.fs.get_or_create_metadata( - self.entry_pos, - self.data.clone(), - self.first_cluster(), - ); + let metadata = + self.fs + .get_or_create_metadata(self.entry_pos, self.data.clone(), self.first_cluster()); File::new(Some(metadata), self.fs.clone()) } diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 110bfcc98..ff5a37beb 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -55,8 +55,7 @@ impl File { pub fn truncate(&mut self) -> Result<(), Error> { log::trace!("File::truncate"); if let Some(ref metadata_arc) = self.metadata { - // IMPORTANT: We hold the metadata lock during the entire truncate operation - // to ensure atomicity. 
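// Illustrative sketch, not part of the patch: the first-cluster packing used by
// new_for_rootdir above. FAT directory entries store the 32-bit first cluster as two
// 16-bit halves (high word and low word).
fn pack_first_cluster(first_cluster: u32) -> (u16, u16) {
    let hi = (first_cluster >> 16) as u16;
    let lo = (first_cluster & 0xFFFF) as u16;
    (hi, lo)
}

fn unpack_first_cluster(hi: u16, lo: u16) -> u32 {
    (u32::from(hi) << 16) | u32::from(lo)
}

// For FAT32 the root directory usually starts at cluster 2, so pack_first_cluster(2)
// yields (0, 2), matching the fields written by new_for_rootdir.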
Lock ordering: metadata → disk + // Lock ordering: metadata → disk let mut node = MCSNode::new(); let mut metadata = metadata_arc.lock(&mut node); @@ -66,13 +65,12 @@ impl File { } let first_cluster = metadata.inner().first_cluster(self.fs.fat_type()); + let current_cluster_opt = self.get_current_cluster_with_metadata(&metadata)?; - let current_cluster = self.get_current_cluster_with_metadata(&metadata)?; - - if let Some(cluster) = current_cluster { + if let Some(current_cluster) = current_cluster_opt { // current cluster is none only if offset is 0 debug_assert!(self.offset > 0); - FileSystem::truncate_cluster_chain(&self.fs, cluster) + FileSystem::truncate_cluster_chain(&self.fs, current_cluster) } else { debug_assert!(self.offset == 0); if let Some(n) = first_cluster { @@ -81,7 +79,7 @@ impl File { Ok(()) } } else { - unreachable!("Root directory should not be a file"); + unreachable!("File must have metadata before it is dropped"); } } @@ -245,7 +243,6 @@ impl File { cluster = if let Some(r) = iter.next() { r? } else { - // cluster chain ends before the current position return Err(Error::CorruptedFileSystem); }; } @@ -365,7 +362,6 @@ impl Read for File Read for File Read for File Write for File FileSystem { &fs.bpb, FsIoAdapter { fs: Arc::clone(fs) }, )), - FatType::Fat32 => DirRawStream::File(File::new(None, Arc::clone(fs))), + FatType::Fat32 => { + let metadata = DirEntryEditor::new( + DirFileEntryData::new_for_rootdir(fs.bpb.root_dir_first_cluster), + 0, + ); + DirRawStream::File(File::new( + Some(Arc::new(Mutex::new(metadata))), + Arc::clone(fs), + )) + } } }; Dir::new(root_rdr, Arc::clone(fs)) @@ -758,7 +767,6 @@ impl FileSystem { let mut cache = self.metadata_cache.lock(&mut node); if let Some(metadata_arc) = cache.get(&entry_pos) { - // Only the cache holds a reference - safe to remove if Arc::strong_count(metadata_arc) == 1 { cache.remove(&entry_pos); }
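// Illustrative sketch, not part of the patch: the shared-metadata cache pattern the
// series converges on. Open handles share one Arc<Mutex<Metadata>> per directory-entry
// position; an entry is evicted only when the cache itself holds the last reference.
// Hypothetical minimal types; std collections stand in for the kernel's own.
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};

struct Metadata { size: u32 }

struct MetadataCache {
    by_entry_pos: Mutex<BTreeMap<u64, Arc<Mutex<Metadata>>>>,
}

impl MetadataCache {
    fn get_or_create(&self, entry_pos: u64, size: u32) -> Arc<Mutex<Metadata>> {
        let mut cache = self.by_entry_pos.lock().unwrap();
        cache
            .entry(entry_pos)
            .or_insert_with(|| Arc::new(Mutex::new(Metadata { size })))
            .clone()
    }

    fn cleanup_if_unused(&self, entry_pos: u64) {
        let mut cache = self.by_entry_pos.lock().unwrap();
        if let Some(md) = cache.get(&entry_pos) {
            if Arc::strong_count(md) == 1 {
                cache.remove(&entry_pos); // only the cache holds it; safe to evict
            }
        }
    }
}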