about summary refs log tree commit diff stats
path: root/components/hashglobe
diff options
context:
space:
mode:
authorBobby Holley <bobbyholley@gmail.com>2017-09-27 17:01:34 -0700
committerBobby Holley <bobbyholley@gmail.com>2017-09-28 15:06:49 -0700
commit039fe176b9bee0e78a685aab6f1d4ceb796b82d1 (patch)
tree6b1e5774bfa8115ead7b15b7b8a29580e7e2788c /components/hashglobe
parentef042899d25428fc675525ad2b3f81da86f61849 (diff)
downloadservo-039fe176b9bee0e78a685aab6f1d4ceb796b82d1.tar.gz
servo-039fe176b9bee0e78a685aab6f1d4ceb796b82d1.zip
Protect the hashmaps outside of rebuilds.
MozReview-Commit-ID: KACmfw4pZY2
Diffstat (limited to 'components/hashglobe')
-rw-r--r-- components/hashglobe/src/hash_map.rs  |  6
-rw-r--r-- components/hashglobe/src/protected.rs | 59
-rw-r--r-- components/hashglobe/src/table.rs     | 18
3 files changed, 81 insertions, 2 deletions
diff --git a/components/hashglobe/src/hash_map.rs b/components/hashglobe/src/hash_map.rs
index 69bd06344e0..c1c4db4d33e 100644
--- a/components/hashglobe/src/hash_map.rs
+++ b/components/hashglobe/src/hash_map.rs
@@ -1027,6 +1027,12 @@ impl<K, V, S> HashMap<K, V, S>
self.table.size()
}
+ /// Access to the raw buffer backing this hashmap.
+ pub fn raw_buffer(&self) -> (*const (), usize) {
+ assert!(self.raw_capacity() != 0);
+ self.table.raw_buffer()
+ }
+
/// Returns true if the map contains no elements.
///
/// # Examples
diff --git a/components/hashglobe/src/protected.rs b/components/hashglobe/src/protected.rs
index 99b0879c276..cd23aed78fe 100644
--- a/components/hashglobe/src/protected.rs
+++ b/components/hashglobe/src/protected.rs
@@ -25,12 +25,14 @@ impl<K: Hash + Eq, V, S: BuildHasher> ProtectedHashMap<K, V, S>
#[inline(always)]
pub fn begin_mutation(&mut self) {
assert!(self.readonly);
+ self.unprotect();
self.readonly = false;
}
#[inline(always)]
pub fn end_mutation(&mut self) {
assert!(!self.readonly);
+ self.protect();
self.readonly = true;
}
@@ -130,6 +132,36 @@ impl<K: Hash + Eq, V, S: BuildHasher> ProtectedHashMap<K, V, S>
self.map.clear();
self.end_mutation();
}
+
+ fn protect(&mut self) {
+ if self.map.capacity() == 0 {
+ return;
+ }
+ let buff = self.map.raw_buffer();
+ if buff.0 as usize % ::SYSTEM_PAGE_SIZE.load(::std::sync::atomic::Ordering::Relaxed) != 0 {
+ // Safely handle weird allocators like ASAN that return
+ // non-page-aligned buffers to page-sized allocations.
+ return;
+ }
+ unsafe {
+ Gecko_ProtectBuffer(buff.0 as *mut _, buff.1);
+ }
+ }
+
+ fn unprotect(&mut self) {
+ if self.map.capacity() == 0 {
+ return;
+ }
+ let buff = self.map.raw_buffer();
+ if buff.0 as usize % ::SYSTEM_PAGE_SIZE.load(::std::sync::atomic::Ordering::Relaxed) != 0 {
+ // Safely handle weird allocators like ASAN that return
+ // non-page-aligned buffers to page-sized allocations.
+ return;
+ }
+ unsafe {
+ Gecko_UnprotectBuffer(buff.0 as *mut _, buff.1);
+ }
+ }
}
impl<K, V> ProtectedHashMap<K, V, RandomState>
@@ -143,10 +175,12 @@ impl<K, V> ProtectedHashMap<K, V, RandomState>
}
pub fn with_capacity(capacity: usize) -> Self {
- Self {
+ let mut result = Self {
map: HashMap::with_capacity(capacity),
readonly: true,
- }
+ };
+ result.protect();
+ result
}
}
@@ -178,3 +212,24 @@ impl<K, V, S> Default for ProtectedHashMap<K, V, S>
}
}
}
+
+impl<K: Hash + Eq, V, S: BuildHasher> Drop for ProtectedHashMap<K, V, S>
+ where K: Eq + Hash,
+ S: BuildHasher
+{
+ fn drop(&mut self) {
+ debug_assert!(self.readonly, "Dropped while mutating");
+ self.unprotect();
+ }
+}
+
+// Manually declare the FFI functions since we don't depend on the crate with
+// the bindings.
+extern "C" {
+ pub fn Gecko_ProtectBuffer(buffer: *mut ::std::os::raw::c_void,
+ size: usize);
+}
+extern "C" {
+ pub fn Gecko_UnprotectBuffer(buffer: *mut ::std::os::raw::c_void,
+ size: usize);
+}
diff --git a/components/hashglobe/src/table.rs b/components/hashglobe/src/table.rs
index 602cd2131b4..23b2ea00f16 100644
--- a/components/hashglobe/src/table.rs
+++ b/components/hashglobe/src/table.rs
@@ -813,6 +813,24 @@ impl<K, V> RawTable<K, V> {
}
}
+ /// Access to the raw buffer backing this table.
+ pub fn raw_buffer(&self) -> (*const (), usize) {
+ debug_assert!(self.capacity() != 0);
+
+ let buffer = self.hashes.ptr() as *const ();
+ let size = {
+ let hashes_size = self.capacity() * size_of::<HashUint>();
+ let pairs_size = self.capacity() * size_of::<(K, V)>();
+ let (_, _, size, _) = calculate_allocation(hashes_size,
+ align_of::<HashUint>(),
+ pairs_size,
+ align_of::<(K, V)>());
+ round_up_to_page_size(size)
+ };
+ (buffer, size)
+ }
+
+
/// Creates a new raw table from a given capacity. All buckets are
/// initially empty.
pub fn new(capacity: usize) -> Result<RawTable<K, V>, FailedAllocationError> {