-rw-r--r-- | components/layout_thread/lib.rs   |  7 |
-rw-r--r-- | components/style/rule_tree/mod.rs | 34 |
2 files changed, 34 insertions, 7 deletions
diff --git a/components/layout_thread/lib.rs b/components/layout_thread/lib.rs
index 1d0ba9728cb..add38e67dc3 100644
--- a/components/layout_thread/lib.rs
+++ b/components/layout_thread/lib.rs
@@ -1180,11 +1180,8 @@ impl LayoutThread {
             shared_layout_context.style_context.stylist.rule_tree.dump_stdout();
         }
 
-        // GC The rule tree.
-        //
-        // FIXME(emilio): The whole point of the free list is not always freeing
-        // the list, find a good heuristic here for that.
-        unsafe { shared_layout_context.style_context.stylist.rule_tree.gc() }
+        // GC the rule tree if some heuristics are met.
+        unsafe { shared_layout_context.style_context.stylist.rule_tree.maybe_gc(); }
 
         // Perform post-style recalculation layout passes.
         self.perform_post_style_recalc_layout_passes(&data.reflow_info,
diff --git a/components/style/rule_tree/mod.rs b/components/style/rule_tree/mod.rs
index 90afc23c337..394e1f88d60 100644
--- a/components/style/rule_tree/mod.rs
+++ b/components/style/rule_tree/mod.rs
@@ -124,8 +124,18 @@ impl RuleTree {
     pub unsafe fn gc(&self) {
         self.root.gc();
     }
+
+    /// This can only be called when no other thread is accessing this tree.
+    pub unsafe fn maybe_gc(&self) {
+        self.root.maybe_gc();
+    }
 }
 
+/// The number of RuleNodes added to the free list before we will consider
+/// doing a GC when calling maybe_gc(). (The value is copied from Gecko,
+/// where it likely did not result from a rigorous performance analysis.)
+const RULE_TREE_GC_INTERVAL: usize = 300;
+
 struct RuleNode {
     /// The root node. Only the root has no root pointer, for obvious reasons.
     root: Option<WeakRuleNode>,
@@ -148,6 +158,14 @@ struct RuleNode {
 
     /// The next item in the rule tree free list, that starts on the root node.
     next_free: AtomicPtr<RuleNode>,
+
+    /// Number of RuleNodes we have added to the free list since the last GC.
+    /// (We don't update this if we rescue a RuleNode from the free list. It's
+    /// just used as a heuristic to decide when to run GC.)
+    ///
+    /// Only used on the root RuleNode. (We could probably re-use one of the
+    /// sibling pointers to save space.)
+    free_count: AtomicUsize,
 }
 
 unsafe impl Sync for RuleTree {}
@@ -169,6 +187,7 @@ impl RuleNode {
             next_sibling: AtomicPtr::new(ptr::null_mut()),
             prev_sibling: AtomicPtr::new(ptr::null_mut()),
             next_free: AtomicPtr::new(ptr::null_mut()),
+            free_count: AtomicUsize::new(0),
         }
     }
 
@@ -183,6 +202,7 @@ impl RuleNode {
             next_sibling: AtomicPtr::new(ptr::null_mut()),
             prev_sibling: AtomicPtr::new(ptr::null_mut()),
             next_free: AtomicPtr::new(FREE_LIST_SENTINEL),
+            free_count: AtomicUsize::new(0),
         }
     }
 
@@ -455,8 +475,17 @@ impl StrongRuleNode {
             }
         }
 
+        me.free_count.store(0, Ordering::SeqCst);
+
         debug_assert!(me.next_free.load(Ordering::SeqCst) == FREE_LIST_SENTINEL);
     }
+
+    unsafe fn maybe_gc(&self) {
+        debug_assert!(self.get().is_root(), "Can't call GC on a non-root node!");
+        if self.get().free_count.load(Ordering::SeqCst) > RULE_TREE_GC_INTERVAL {
+            self.gc();
+        }
+    }
 }
 
 #[derive(Clone)]
@@ -521,8 +550,8 @@ impl Drop for StrongRuleNode {
             return;
         }
 
-        let free_list =
-            &unsafe { &*node.root.as_ref().unwrap().ptr() }.next_free;
+        let root = unsafe { &*node.root.as_ref().unwrap().ptr() };
+        let free_list = &root.next_free;
         loop {
             let next_free = free_list.load(Ordering::SeqCst);
             debug_assert!(!next_free.is_null());
@@ -535,6 +564,7 @@
                                            Ordering::SeqCst);
             if existing == next_free {
                 // Successfully inserted, yay! Otherwise try again.
+                root.free_count.fetch_add(1, Ordering::Relaxed);
                 break;
             }
         }
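
For context, the heuristic this patch introduces boils down to a per-root counter that is bumped on every free-list insertion and checked before paying for a full sweep. Below is a minimal, standalone Rust sketch of that pattern; FreeListStats, note_freed, and the collect closure are illustrative names only, not part of Servo's rule tree API, and the threshold simply mirrors the RULE_TREE_GC_INTERVAL constant added above.

use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative threshold; the patch uses RULE_TREE_GC_INTERVAL = 300.
const GC_INTERVAL: usize = 300;

// Hypothetical stand-in for the root node's bookkeeping: every node that
// lands on the free list bumps free_count, and maybe_gc only runs a full
// collection once the counter crosses the threshold.
struct FreeListStats {
    free_count: AtomicUsize,
}

impl FreeListStats {
    fn new() -> Self {
        FreeListStats { free_count: AtomicUsize::new(0) }
    }

    // Called when a node is pushed onto the free list (cheap, relaxed add).
    fn note_freed(&self) {
        self.free_count.fetch_add(1, Ordering::Relaxed);
    }

    // Runs `collect` only when enough garbage has accumulated, then resets
    // the counter, mirroring the maybe_gc() / gc() split in the patch.
    fn maybe_gc<F: FnOnce()>(&self, collect: F) {
        if self.free_count.load(Ordering::SeqCst) > GC_INTERVAL {
            collect();
            self.free_count.store(0, Ordering::SeqCst);
        }
    }
}

fn main() {
    let stats = FreeListStats::new();
    for _ in 0..301 {
        stats.note_freed();
    }
    // Threshold exceeded, so the sweep actually runs here.
    stats.maybe_gc(|| println!("running rule tree GC"));
}

Using Relaxed for the increment and SeqCst for the check follows the ordering choices in the patch: the counter is only a heuristic, so an imprecise count never affects correctness, only when the next GC happens.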