Add tracking functionality for memory management in allocators #1273
    @@ -12,6 +12,7 @@ use crate::util::metadata;
     use crate::util::object_enum::ClosureObjectEnumerator;
     use crate::util::object_enum::ObjectEnumerator;
     use crate::util::opaque_pointer::*;
    +use crate::util::track::track_free;
     use crate::util::treadmill::TreadMill;
     use crate::util::{Address, ObjectReference};
     use crate::vm::ObjectModel;
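The `util::track` module itself is not part of the visible diff, so the following is only a reconstruction: a minimal sketch of the module's surface, with signatures inferred from the call sites in this PR and no-op bodies standing in for the Valgrind client requests (MALLOCLIKE_BLOCK, FREELIKE_BLOCK, CREATE_MEMPOOL, MEMPOOL_ALLOC, MEMPOOL_FREE, DESTROY_MEMPOOL) that the real module presumably issues through the `crabgrind` crate.

```rust
use crate::util::Address;

/// Whether a tracking backend is compiled in. Assumption: gated on the same
/// "crabgrind" cargo feature used by `panic_if_unmapped` later in this diff.
pub fn tracking_enabled() -> bool {
    cfg!(feature = "crabgrind")
}

/// Report one heap object at `addr` (cf. VALGRIND_MALLOCLIKE_BLOCK).
pub fn track_malloc(_addr: Address, _size: usize, _is_zeroed: bool) {}

/// Report that the object at `addr` is gone (cf. VALGRIND_FREELIKE_BLOCK).
pub fn track_free(_addr: Address, _size: usize) {}

/// Register `pool` as a memory pool, identified by its own address
/// (cf. VALGRIND_CREATE_MEMPOOL).
pub fn track_mempool<T>(_pool: &T, _redzone: usize, _is_zeroed: bool) {}

/// Report `size` bytes at `addr` carved out of `pool` (cf. VALGRIND_MEMPOOL_ALLOC).
pub fn track_mempool_alloc<T>(_pool: &T, _addr: Address, _size: usize) {}

/// Return the chunk at `addr` to `pool` (cf. VALGRIND_MEMPOOL_FREE).
pub fn track_mempool_free<T>(_pool: &T, _addr: Address) {}

/// Deregister `pool` entirely (cf. VALGRIND_DESTROY_MEMPOOL).
pub fn untrack_mempool<T>(_pool: &T) {}
```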
    @@ -352,10 +353,7 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
         let sweep = |object: ObjectReference| {
             #[cfg(feature = "vo_bit")]
             crate::util::metadata::vo_bit::unset_vo_bit(object);
    -        // Clear log bits for dead objects to prevent a new nursery object having the unlog bit set
    -        if self.clear_log_bit_on_sweep {
    -            VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::<VM>(object, Ordering::SeqCst);
    -        }
    +        track_free(object.to_object_start::<VM>(), 0 /* TODO: Size */);
             self.pr
                 .release_pages(get_super_page(object.to_object_start::<VM>()));
         };

Review comment: The original code was accidentally removed?
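If the deletion was indeed accidental, the fixed sweep closure would keep the log-bit clearing and add the tracking call after it. A sketch, built entirely from the lines above:

```rust
let sweep = |object: ObjectReference| {
    #[cfg(feature = "vo_bit")]
    crate::util::metadata::vo_bit::unset_vo_bit(object);
    // Keep the original behavior: clear log bits for dead objects so a new
    // nursery object never starts life with the unlog bit set.
    if self.clear_log_bit_on_sweep {
        VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::<VM>(object, Ordering::SeqCst);
    }
    // Then report the free. Passing the real size instead of 0 (still a TODO
    // in this diff) would let the tracker check byte-accurate bounds.
    track_free(object.to_object_start::<VM>(), 0 /* TODO: Size */);
    self.pr
        .release_pages(get_super_page(object.to_object_start::<VM>()));
};
```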
    @@ -1,5 +1,6 @@
     use std::sync::Arc;

    +use crate::util::track::track_malloc;
     use crate::util::Address;

     use crate::util::alloc::Allocator;
    @@ -116,13 +117,16 @@ impl<VM: VMBinding> Allocator<VM> for BumpAllocator<VM> {
                 self.bump_pointer.cursor,
                 self.bump_pointer.limit
             );
    +        track_malloc(result, size, false);
             result
         }
     }

     fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
         trace!("alloc_slow");
    -    self.acquire_block(size, align, offset, false)
    +    let block = self.acquire_block(size, align, offset, false);
    +    track_malloc(block, size, false);
    +    block
     }

     /// Slow path for allocation if precise stress testing has been enabled.

Review comment: This doesn't look right: the change above marks each allocation/object as 'malloc', but this marks the entire thread-local buffer as 'malloc'.

Review comment: What happens if you mark overlapping memory? E.g. marking the buffer first, then marking objects in it.
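One way to address both comments, sketched under the assumption that `acquire_block` re-enters the fast-path `alloc` once the new thread-local buffer is installed (so the retried allocation is already reported there): leave the slow path untracked and report allocations only at object granularity.

```rust
fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
    trace!("alloc_slow");
    // No track_malloc here: acquiring a fresh thread-local buffer is not an
    // object allocation. If acquire_block retries the fast-path alloc after
    // installing the new buffer, that retry reports the object, so tracking
    // here would mark overlapping ranges (the buffer, then objects in it).
    self.acquire_block(size, align, offset, false)
}
```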
    @@ -9,6 +9,7 @@ use crate::util::heap::space_descriptor::SpaceDescriptor;
     use crate::util::linear_scan::Region;
     use crate::util::opaque_pointer::*;
     use crate::util::rust_util::zeroed_alloc::new_zeroed_vec;
    +use crate::util::track::{track_mempool, track_mempool_alloc, track_mempool_free, untrack_mempool};
     use crate::vm::*;
     use atomic::Ordering;
     use spin::RwLock;

    @@ -30,6 +31,10 @@ pub struct BlockPageResource<VM: VMBinding, B: Region + 'static> {
     }

     impl<VM: VMBinding, B: Region> PageResource<VM> for BlockPageResource<VM, B> {
    +    fn track(&self) {
    +        track_mempool(self, 0, false);
    +    }
    +
         fn common(&self) -> &CommonPageResource {
             self.flpr.common()
         }

    @@ -58,6 +63,12 @@ impl<VM: VMBinding, B: Region> PageResource<VM> for BlockPageResource<VM, B> {
         }
     }

    +impl<VM: VMBinding, B: Region> Drop for BlockPageResource<VM, B> {
    +    fn drop(&mut self) {
    +        untrack_mempool(self);
    +    }
    +}
    +
     impl<VM: VMBinding, B: Region> BlockPageResource<VM, B> {
         /// Block granularity in pages
         const LOG_PAGES: usize = B::LOG_BYTES - LOG_BYTES_IN_PAGE as usize;
    @@ -136,6 +147,7 @@ impl<VM: VMBinding, B: Region> BlockPageResource<VM, B> {
         self.block_queue.add_global_array(array);
         // Finish slow-allocation
         self.commit_pages(reserved_pages, required_pages, tls);
    +    track_mempool_alloc(self, first_block, required_pages * BYTES_IN_PAGE);
         Result::Ok(PRAllocResult {
             start: first_block,
             pages: required_pages,

Review comment: This creates a chunk for valgrind that starts at … If not, we could just call `track_mempool_alloc` for each block, and free each block separately.

Review comment: This also applies to other …
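The per-block alternative the reviewer suggests could look like the sketch below, placed right after `commit_pages`. `B::BYTES` (the block size implied by `B::LOG_BYTES`) and deriving the block count from `required_pages >> Self::LOG_PAGES` are assumptions; note that `release_block` further down already frees at block granularity, so this would make the two sides symmetric.

```rust
// Register each block with the pool individually, so the per-block
// track_mempool_free in release_block has a matching alloc to cancel out.
let num_blocks = required_pages >> Self::LOG_PAGES;
for i in 0..num_blocks {
    let block_start = first_block + (i << B::LOG_BYTES);
    track_mempool_alloc(self, block_start, B::BYTES);
}
```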
    @@ -156,6 +168,7 @@ impl<VM: VMBinding, B: Region> BlockPageResource<VM, B> {
         // Fast allocate from the blocks list
         if let Some(block) = self.block_queue.pop() {
             self.commit_pages(reserved_pages, required_pages, tls);
    +        track_mempool_alloc(self, block.start(), required_pages * BYTES_IN_PAGE);
             return Result::Ok(PRAllocResult {
                 start: block.start(),
                 pages: required_pages,

    @@ -170,6 +183,7 @@ impl<VM: VMBinding, B: Region> BlockPageResource<VM, B> {
         let pages = 1 << Self::LOG_PAGES;
         debug_assert!(pages as usize <= self.common().accounting.get_committed_pages());
         self.common().accounting.release(pages as _);
    +    track_mempool_free(self, block.start());
         self.block_queue.push(block)
     }
    @@ -28,6 +28,10 @@ pub struct ExternalPages {
     }

     impl<VM: VMBinding> PageResource<VM> for ExternalPageResource<VM> {
    +    fn track(&self) {
    +        /* cannot track external pages reliably? */
    +    }
    +
         fn common(&self) -> &CommonPageResource {
             &self.common
         }

Review comment: Use …
    @@ -2,7 +2,8 @@ use super::layout::vm_layout::{BYTES_IN_CHUNK, PAGES_IN_CHUNK};
     use crate::policy::space::required_chunks;
     use crate::util::address::Address;
     use crate::util::constants::BYTES_IN_PAGE;
    -use crate::util::conversions::*;
    +use crate::util::conversions::{self, *};
    +use crate::util::track::{track_mempool, track_mempool_alloc};
     use std::ops::Range;
     use std::sync::{Mutex, MutexGuard};

    @@ -45,6 +46,10 @@ pub enum MonotonePageResourceConditional {
         Discontiguous,
     }
     impl<VM: VMBinding> PageResource<VM> for MonotonePageResource<VM> {
    +    fn track(&self) {
    +        track_mempool(self, 0, true);
    +    }
    +
         fn common(&self) -> &CommonPageResource {
             &self.common
         }
    @@ -149,7 +154,7 @@ impl<VM: VMBinding> PageResource<VM> for MonotonePageResource<VM> {
             sync.current_chunk = chunk_align_down(sync.cursor);
         }
         self.commit_pages(reserved_pages, required_pages, tls);
    -
    +    track_mempool_alloc(self, rtn, conversions::pages_to_bytes(required_pages));
         Result::Ok(PRAllocResult {
             start: rtn,
             pages: required_pages,

Review comment: This is never freed. Pages can be freed when the page resource releases the pages.
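A balancing free would hang off whatever path hands the pages back. `release_range` below is hypothetical; the real hook would be MonotonePageResource's reset/release logic, which this diff does not show.

```rust
// Hypothetical hook: called when the monotone page resource returns a range
// it previously handed out, e.g. when a copying space is released after GC.
fn release_range(&self, start: Address, _bytes: usize) {
    // One mempool-free per earlier track_mempool_alloc, keyed by the same
    // start address; the tracker remembers the size from the alloc side.
    track_mempool_free(self, start);
}
```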
    @@ -353,6 +353,21 @@ pub fn handle_mmap_error<VM: VMBinding>(
     /// This function is currently left empty for non-linux, and should be implemented in the future.
     /// As the function is only used for assertions, MMTk will still run even if we never panic.
     pub(crate) fn panic_if_unmapped(_start: Address, _size: usize, _anno: &MmapAnnotation) {
    +    #[cfg(feature = "crabgrind")]
    +    {
    +        use crabgrind::memcheck::Error;
    +        let result = crabgrind::memcheck::is_defined(_start.to_mut_ptr(), _size);
    +
    +        match result {
    +            Ok(_) => panic!("{} of size {} is not mapped", _start, _size),
    +            Err(err) => match err {
    +                Error::NotAddressable(addr) => {
    +                    panic!("Address {addr:x} is not addressable, start={_start}");
    +                }
    +                _ => (),
    +            },
    +        }
    +    }
         #[cfg(target_os = "linux")]
         {
             let flags = MMAP_FLAGS;

Review comment: We may use this function for side metadata memory. That memory is not annotated for valgrind. Will this work?
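One possible answer, sketched: annotate side-metadata ranges when they are mmapped, so the probe above has something to find. `track_mmap` is hypothetical and not part of this diff.

```rust
/// Hypothetical helper: mark a freshly mmapped side-metadata range for the
/// tracker, so panic_if_unmapped can tell mapped-but-unannotated metadata
/// apart from genuinely unmapped memory.
pub fn track_mmap(_start: Address, _size: usize) {
    #[cfg(feature = "crabgrind")]
    {
        // Would issue a make-mem-defined client request over the range here.
    }
}
```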
    @@ -29,6 +29,7 @@ use crate::{
         util::{
             linear_scan::Region,
             metadata::{vo_bit, MetadataSpec},
    +        track::{track_free, tracking_enabled},
             ObjectReference,
         },
         vm::{ObjectModel, VMBinding},

    @@ -184,6 +185,18 @@ pub(crate) fn on_object_forwarded<VM: VMBinding>(new_object: ObjectReference) {
     }

     pub(crate) fn on_region_swept<VM: VMBinding, R: Region>(region: &R, is_occupied: bool) {
    +    if tracking_enabled() {
    +        let mut cursor = region.start();
    +        while cursor < region.end() {
    +            if let Some(object) = vo_bit::is_vo_bit_set_for_addr(cursor) {
    +                if object.is_live() {
    +                    track_free(object.to_object_start::<VM>(), 0);
    +                }
    +            }
    +            cursor += VM::MIN_ALIGNMENT;
    +        }
    +    }
    +
         match strategy::<VM>() {
             VOBitUpdateStrategy::ClearAndReconstruct => {
                 // Do nothing. The VO bit metadata is already reconstructed.

Review comment: This will not work reliably, depending on how we maintain VO bits (see the code below it). This is the issue I explained: MMTk as a GC usually does not know which objects are dead, only which are alive. We may not be able to reliably tell which objects are freed; we can only tell valgrind when a page is empty, and let valgrind automatically free the objects in it.
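The reviewer's alternative, sketched: report frees only at region granularity, once the sweep has proven a region empty. `track_free_region` is a hypothetical wrapper for a range-level free (Valgrind's mempool API can discard all chunks in a range), and `R::BYTES` is the region size implied by `Region::LOG_BYTES`.

```rust
pub(crate) fn on_region_swept<VM: VMBinding, R: Region>(region: &R, is_occupied: bool) {
    if tracking_enabled() && !is_occupied {
        // The whole region is empty: one range-level free lets the tracker
        // drop every object it still has recorded inside the region, without
        // MMTk having to enumerate dead objects it cannot reliably identify.
        track_free_region(region.start(), R::BYTES);
    }
    // The existing VO-bit maintenance (match strategy::<VM>() { ... })
    // continues unchanged below.
}
```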
Review comment: I guess that you use the address of `PageResource` to register as a mempool, and the address is only fixed here. If this is the case, just make this explicit in the comments.
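What that could look like in `BlockPageResource::track`, as a sketch; the lifetime claim in the comment is the assumption the reviewer wants spelled out.

```rust
fn track(&self) {
    // `self`'s address doubles as the mempool handle. This is only sound
    // because the page resource is never moved after registration: it stays
    // at a fixed address inside its space for the lifetime of the MMTk
    // instance. If it were ever moved, the pool handle would dangle.
    track_mempool(self, 0, false);
}
```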