From 039fe176b9bee0e78a685aab6f1d4ceb796b82d1 Mon Sep 17 00:00:00 2001
From: Bobby Holley <bobbyholley@gmail.com>
Date: Wed, 27 Sep 2017 17:01:34 -0700
Subject: Protect the hashmaps outside of rebuilds.

MozReview-Commit-ID: KACmfw4pZY2
---
 components/hashglobe/src/hash_map.rs  |  6 ++++
 components/hashglobe/src/protected.rs | 59 +++++++++++++++++++++++++++++++++--
 components/hashglobe/src/table.rs     | 18 +++++++++++
 3 files changed, 81 insertions(+), 2 deletions(-)

(limited to 'components/hashglobe')

diff --git a/components/hashglobe/src/hash_map.rs b/components/hashglobe/src/hash_map.rs
index 69bd06344e0..c1c4db4d33e 100644
--- a/components/hashglobe/src/hash_map.rs
+++ b/components/hashglobe/src/hash_map.rs
@@ -1027,6 +1027,12 @@ impl<K, V, S> HashMap<K, V, S>
         self.table.size()
     }
 
+    /// Access to the raw buffer backing this hashmap.
+    pub fn raw_buffer(&self) -> (*const (), usize) {
+        assert!(self.raw_capacity() != 0);
+        self.table.raw_buffer()
+    }
+
     /// Returns true if the map contains no elements.
     ///
     /// # Examples
diff --git a/components/hashglobe/src/protected.rs b/components/hashglobe/src/protected.rs
index 99b0879c276..cd23aed78fe 100644
--- a/components/hashglobe/src/protected.rs
+++ b/components/hashglobe/src/protected.rs
@@ -25,12 +25,14 @@ impl<K, V, S> ProtectedHashMap<K, V, S>
     #[inline(always)]
     pub fn begin_mutation(&mut self) {
         assert!(self.readonly);
+        self.unprotect();
         self.readonly = false;
     }
 
     #[inline(always)]
     pub fn end_mutation(&mut self) {
         assert!(!self.readonly);
+        self.protect();
         self.readonly = true;
     }
 
@@ -130,6 +132,36 @@ impl<K, V, S> ProtectedHashMap<K, V, S>
         self.map.clear();
         self.end_mutation();
     }
+
+    fn protect(&mut self) {
+        if self.map.capacity() == 0 {
+            return;
+        }
+        let buff = self.map.raw_buffer();
+        if buff.0 as usize % ::SYSTEM_PAGE_SIZE.load(::std::sync::atomic::Ordering::Relaxed) != 0 {
+            // Safely handle weird allocators like ASAN that return
+            // non-page-aligned buffers to page-sized allocations.
+            return;
+        }
+        unsafe {
+            Gecko_ProtectBuffer(buff.0 as *mut _, buff.1);
+        }
+    }
+
+    fn unprotect(&mut self) {
+        if self.map.capacity() == 0 {
+            return;
+        }
+        let buff = self.map.raw_buffer();
+        if buff.0 as usize % ::SYSTEM_PAGE_SIZE.load(::std::sync::atomic::Ordering::Relaxed) != 0 {
+            // Safely handle weird allocators like ASAN that return
+            // non-page-aligned buffers to page-sized allocations.
+            return;
+        }
+        unsafe {
+            Gecko_UnprotectBuffer(buff.0 as *mut _, buff.1);
+        }
+    }
 }
 
 impl<K, V> ProtectedHashMap<K, V, RandomState>
@@ -143,10 +175,12 @@ impl<K, V> ProtectedHashMap<K, V, RandomState>
     }
 
     pub fn with_capacity(capacity: usize) -> Self {
-        Self {
+        let mut result = Self {
             map: HashMap::with_capacity(capacity),
             readonly: true,
-        }
+        };
+        result.protect();
+        result
     }
 }
 
@@ -178,3 +212,24 @@ impl<K, V, S> Default for ProtectedHashMap<K, V, S>
         }
     }
 }
+
+impl<K, V, S> Drop for ProtectedHashMap<K, V, S>
+    where K: Eq + Hash,
+          S: BuildHasher
+{
+    fn drop(&mut self) {
+        debug_assert!(self.readonly, "Dropped while mutating");
+        self.unprotect();
+    }
+}
+
+// Manually declare the FFI functions since we don't depend on the crate with
+// the bindings.
+extern "C" {
+    pub fn Gecko_ProtectBuffer(buffer: *mut ::std::os::raw::c_void,
+                               size: usize);
+}
+extern "C" {
+    pub fn Gecko_UnprotectBuffer(buffer: *mut ::std::os::raw::c_void,
+                                 size: usize);
+}
diff --git a/components/hashglobe/src/table.rs b/components/hashglobe/src/table.rs
index 602cd2131b4..23b2ea00f16 100644
--- a/components/hashglobe/src/table.rs
+++ b/components/hashglobe/src/table.rs
@@ -813,6 +813,24 @@ impl<K, V> RawTable<K, V> {
         }
     }
 
+    /// Access to the raw buffer backing this table.
+    pub fn raw_buffer(&self) -> (*const (), usize) {
+        debug_assert!(self.capacity() != 0);
+
+        let buffer = self.hashes.ptr() as *const ();
+        let size = {
+            let hashes_size = self.capacity() * size_of::<HashUint>();
+            let pairs_size = self.capacity() * size_of::<(K, V)>();
+            let (_, _, size, _) = calculate_allocation(hashes_size,
+                                                       align_of::<HashUint>(),
+                                                       pairs_size,
+                                                       align_of::<(K, V)>());
+            round_up_to_page_size(size)
+        };
+        (buffer, size)
+    }
+
+
     /// Creates a new raw table from a given capacity. All buckets are
     /// initially empty.
     pub fn new(capacity: usize) -> Result<RawTable<K, V>, FailedAllocationError> {
-- 
cgit v1.2.3