author     Emilio Cobos Álvarez <emilio@crisal.io>    2023-06-08 08:29:55 +0000
committer  Martin Robinson <mrobinson@igalia.com>     2023-11-24 08:57:14 +0100
commit     23d60c21951af2211dd6c3dd4203c78b19add193 (patch)
tree       8bf9ca6d20691e4dd0d3d30749f1d0a59a39f241 /components/style/parallel.rs
parent     7771cf25a867338b0e3adc1ef64ba8d2e69e4339 (diff)
style: Unify parallel and sequential traversal scheduling
Use in_place_scope_fifo to spawn work into the thread pool while doing work in the main thread.

Differential Revision: https://phabricator.services.mozilla.com/D179492
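As a rough illustration of the scheduling pattern the commit message describes, the sketch below (not Servo code) uses rayon's in_place_scope_fifo so the calling thread keeps draining its own queue while spawn_fifo hands full chunks to the pool's worker threads. process_chunk, the queue contents, and the chunk size of 16 are hypothetical stand-ins; Gecko reads the real work-unit size from a pref.

use std::collections::VecDeque;

// Placeholder for per-node styling work; in the real traversal this would be
// the preorder/postorder processing of DOM nodes.
fn process_chunk(chunk: &[u32]) {
    let checksum: u32 = chunk.iter().sum();
    println!("styled {} items (checksum {})", chunk.len(), checksum);
}

fn main() {
    let pool = rayon::ThreadPoolBuilder::new().build().unwrap();
    let mut queue: VecDeque<u32> = (0..1024).collect();
    let work_unit_max = 16; // hypothetical; the real value comes from a pref

    // in_place_scope_fifo runs the closure on the calling thread, so the main
    // thread keeps doing work while spawn_fifo hands excess chunks to the pool.
    pool.in_place_scope_fifo(|scope| {
        while queue.len() > work_unit_max {
            let chunk: Vec<u32> = queue.drain(..work_unit_max).collect();
            scope.spawn_fifo(move |_| process_chunk(&chunk));
        }
        // Whatever remains is handled right here, in place.
        let rest: Vec<u32> = queue.drain(..).collect();
        process_chunk(&rest);
    });
}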
Diffstat (limited to 'components/style/parallel.rs')
-rw-r--r--    components/style/parallel.rs    296
1 file changed, 97 insertions, 199 deletions
diff --git a/components/style/parallel.rs b/components/style/parallel.rs
index d6045ff3802..08f081b30e1 100644
--- a/components/style/parallel.rs
+++ b/components/style/parallel.rs
@@ -27,7 +27,7 @@ use crate::dom::{OpaqueNode, SendNode, TElement};
use crate::scoped_tls::ScopedTLS;
use crate::traversal::{DomTraversal, PerLevelTraversalData};
use rayon;
-use smallvec::SmallVec;
+use std::collections::VecDeque;
/// The minimum stack size for a thread in the styling pool, in kilobytes.
#[cfg(feature = "gecko")]
@@ -54,17 +54,8 @@ pub const STYLE_THREAD_STACK_SIZE_KB: usize = 512;
///
/// [1] https://bugzilla.mozilla.org/show_bug.cgi?id=1395708#c15
/// [2] See Gecko bug 1376883 for more discussion on the measurements.
-///
pub const STACK_SAFETY_MARGIN_KB: usize = 168;
-/// See documentation of the pref for performance characteristics.
-pub fn work_unit_max() -> usize {
- #[cfg(feature = "gecko")]
- return static_prefs::pref!("layout.css.stylo-work-unit-size") as usize;
- #[cfg(feature = "servo")]
- return 16;
-}
-
/// A callback to create our thread local context. This needs to be
/// out of line so we don't allocate stack space for the entire struct
/// in the caller.
@@ -76,223 +67,130 @@ where
*slot = Some(ThreadLocalStyleContext::new());
}
-/// A parallel top-down DOM traversal.
-///
-/// This algorithm traverses the DOM in a breadth-first, top-down manner. The
-/// goals are:
-/// * Never process a child before its parent (since child style depends on
-/// parent style). If this were to happen, the styling algorithm would panic.
-/// * Prioritize discovering nodes as quickly as possible to maximize
-/// opportunities for parallelism. But this needs to be weighed against
-/// styling cousins on a single thread to improve sharing.
-/// * Style all the children of a given node (i.e. all sibling nodes) on
-/// a single thread (with an upper bound to handle nodes with an
-/// abnormally large number of children). This is important because we use
-/// a thread-local cache to share styles between siblings.
-#[inline(always)]
-#[allow(unsafe_code)]
-fn top_down_dom<'a, 'scope, E, D>(
- nodes: &'a [SendNode<E::ConcreteNode>],
- root: OpaqueNode,
- mut traversal_data: PerLevelTraversalData,
+// Sends one chunk of work to the thread-pool.
+fn distribute_one_chunk<'a, 'scope, E, D>(
+ items: VecDeque<SendNode<E::ConcreteNode>>,
+ traversal_root: OpaqueNode,
+ work_unit_max: usize,
+ traversal_data: PerLevelTraversalData,
scope: &'a rayon::ScopeFifo<'scope>,
- pool: &'scope rayon::ThreadPool,
traversal: &'scope D,
tls: &'scope ScopedTLS<'scope, ThreadLocalStyleContext<E>>,
) where
E: TElement + 'scope,
D: DomTraversal<E>,
{
- let work_unit_max = work_unit_max();
- debug_assert!(nodes.len() <= work_unit_max);
-
- // We set this below, when we have a borrow of the thread-local-context
- // available.
- let recursion_ok;
-
- // Collect all the children of the elements in our work unit. This will
- // contain the combined children of up to work_unit_max nodes, which may
- // be numerous. As such, we store it in a large SmallVec to minimize heap-
- // spilling, and never move it.
- let mut discovered_child_nodes = SmallVec::<[SendNode<E::ConcreteNode>; 128]>::new();
- {
- // Scope the borrow of the TLS so that the borrow is dropped before
- // a potential recursive call when we pass TailCall.
- let mut tlc = tls.ensure(|slot: &mut Option<ThreadLocalStyleContext<E>>| {
- create_thread_local_context(slot)
- });
-
- // Check that we're not in danger of running out of stack.
- recursion_ok = !tlc.stack_limit_checker.limit_exceeded();
-
+ scope.spawn_fifo(move |scope| {
+ gecko_profiler_label!(Layout, StyleComputation);
+ let mut tlc = tls.ensure(create_thread_local_context);
let mut context = StyleContext {
shared: traversal.shared_context(),
thread_local: &mut *tlc,
};
+ style_trees(
+ &mut context,
+ items,
+ traversal_root,
+ work_unit_max,
+ static_prefs::pref!("layout.css.stylo-local-work-queue.in-worker") as usize,
+ traversal_data,
+ Some(scope),
+ traversal,
+ Some(tls),
+ );
+ })
+}
- for n in nodes {
- // If the last node we processed produced children, we may want to
- // spawn them off into a work item. We do this at the beginning of
- // the loop (rather than at the end) so that we can traverse our
- // last bits of work directly on this thread without a spawn call.
- //
- // This has the important effect of removing the allocation and
- // context-switching overhead of the parallel traversal for perfectly
- // linear regions of the DOM, i.e.:
- //
- // <russian><doll><tag><nesting></nesting></tag></doll></russian>
- //
- // which are not at all uncommon.
- //
- // There's a tension here between spawning off a work item as soon
- // as discovered_child_nodes is nonempty and waiting until we have a
- // full work item to do so. The former optimizes for speed of
- // discovery (we'll start discovering the kids of the things in
- // "nodes" ASAP). The latter gives us better sharing (e.g. we can
- // share between cousins much better, because we don't hand them off
- // as separate work items, which are likely to end up on separate
- // threads) and gives us a chance to just handle everything on this
- // thread for small DOM subtrees, as in the linear example above.
- //
- // There are performance and "number of ComputedValues"
- // measurements for various testcases in
- // https://bugzilla.mozilla.org/show_bug.cgi?id=1385982#c10 and
- // following.
- //
- // The worst case behavior for waiting until we have a full work
- // item is a deep tree which has work_unit_max "linear" branches,
- // hence work_unit_max elements at each level. Such a tree would
- // end up getting processed entirely sequentially, because we would
- // process each level one at a time as a single work unit, whether
- // via our end-of-loop tail call or not. If we kicked off a
- // traversal as soon as we discovered kids, we would instead
- // process such a tree more or less with a thread-per-branch,
- // multiplexed across our actual threadpool.
- if discovered_child_nodes.len() >= work_unit_max {
- let mut traversal_data_copy = traversal_data.clone();
- traversal_data_copy.current_dom_depth += 1;
- traverse_nodes(
- &discovered_child_nodes,
- DispatchMode::NotTailCall,
- recursion_ok,
- root,
- traversal_data_copy,
- scope,
- pool,
- traversal,
- tls,
- );
- discovered_child_nodes.clear();
- }
-
- let node = **n;
- let mut children_to_process = 0isize;
- traversal.process_preorder(&traversal_data, &mut context, node, |n| {
- children_to_process += 1;
- let send_n = unsafe { SendNode::new(n) };
- discovered_child_nodes.push(send_n);
- });
-
- traversal.handle_postorder_traversal(&mut context, root, node, children_to_process);
- }
- }
-
- // Handle whatever elements we have queued up but not kicked off traversals
- // for yet. If any exist, we can process them (or at least one work unit's
- // worth of them) directly on this thread by passing TailCall.
- if !discovered_child_nodes.is_empty() {
- traversal_data.current_dom_depth += 1;
- traverse_nodes(
- &discovered_child_nodes,
- DispatchMode::TailCall,
- recursion_ok,
- root,
+/// Distributes all items into the thread pool, in `work_unit_max` chunks.
+fn distribute_work<'a, 'scope, E, D>(
+ mut items: VecDeque<SendNode<E::ConcreteNode>>,
+ traversal_root: OpaqueNode,
+ work_unit_max: usize,
+ traversal_data: PerLevelTraversalData,
+ scope: &'a rayon::ScopeFifo<'scope>,
+ traversal: &'scope D,
+ tls: &'scope ScopedTLS<'scope, ThreadLocalStyleContext<E>>,
+) where
+ E: TElement + 'scope,
+ D: DomTraversal<E>,
+{
+ while items.len() > work_unit_max {
+ let rest = items.split_off(work_unit_max);
+ distribute_one_chunk(
+ items,
+ traversal_root,
+ work_unit_max,
traversal_data,
scope,
- pool,
traversal,
tls,
);
+ items = rest;
}
+ distribute_one_chunk(
+ items,
+ traversal_root,
+ work_unit_max,
+ traversal_data,
+ scope,
+ traversal,
+ tls,
+ );
}
-/// Controls whether traverse_nodes may make a recursive call to continue
-/// doing work, or whether it should always dispatch work asynchronously.
-#[derive(Clone, Copy, PartialEq)]
-pub enum DispatchMode {
- /// This is the last operation by the caller.
- TailCall,
- /// This is not the last operation by the caller.
- NotTailCall,
-}
-
-impl DispatchMode {
- fn is_tail_call(&self) -> bool {
- matches!(*self, DispatchMode::TailCall)
- }
-}
-
-/// Enqueues |nodes| for processing, possibly on this thread if the tail call
-/// conditions are met.
+/// Processes `discovered` items, possibly spawning work in other threads as needed.
#[inline]
-pub fn traverse_nodes<'a, 'scope, E, D>(
- nodes: &[SendNode<E::ConcreteNode>],
- mode: DispatchMode,
- recursion_ok: bool,
- root: OpaqueNode,
- traversal_data: PerLevelTraversalData,
- scope: &'a rayon::ScopeFifo<'scope>,
- pool: &'scope rayon::ThreadPool,
+pub fn style_trees<'a, 'scope, E, D>(
+ context: &mut StyleContext<E>,
+ mut discovered: VecDeque<SendNode<E::ConcreteNode>>,
+ traversal_root: OpaqueNode,
+ work_unit_max: usize,
+ local_queue_size: usize,
+ mut traversal_data: PerLevelTraversalData,
+ scope: Option<&'a rayon::ScopeFifo<'scope>>,
traversal: &'scope D,
- tls: &'scope ScopedTLS<'scope, ThreadLocalStyleContext<E>>,
+ tls: Option<&'scope ScopedTLS<'scope, ThreadLocalStyleContext<E>>>,
) where
E: TElement + 'scope,
D: DomTraversal<E>,
{
- debug_assert_ne!(nodes.len(), 0);
-
- // This is a tail call from the perspective of the caller. However, we only
- // want to actually dispatch the job as a tail call if there's nothing left
- // in our local queue. Otherwise we need to return to it to maintain proper
- // breadth-first ordering. We also need to take care to avoid stack
- // overflow due to excessive tail recursion. The stack overflow avoidance
- // isn't observable to content -- we're still completely correct, just not
- // using tail recursion any more. See Gecko bugs 1368302 and 1376883.
- let may_dispatch_tail =
- mode.is_tail_call() && recursion_ok && !pool.current_thread_has_pending_tasks().unwrap();
+ let mut nodes_remaining_at_current_depth = discovered.len();
+ while let Some(node) = discovered.pop_front() {
+ let mut children_to_process = 0isize;
+ traversal.process_preorder(&traversal_data, context, *node, |n| {
+ children_to_process += 1;
+ discovered.push_back(unsafe { SendNode::new(n) });
+ });
- let work_unit_max = work_unit_max();
- // In the common case, our children fit within a single work unit, in which case we can pass
- // the nodes directly and avoid extra allocation.
- if nodes.len() <= work_unit_max {
- if may_dispatch_tail {
- top_down_dom(&nodes, root, traversal_data, scope, pool, traversal, tls);
- } else {
- let work = nodes.to_vec();
- scope.spawn_fifo(move |scope| {
- #[cfg(feature = "gecko")]
- gecko_profiler_label!(Layout, StyleComputation);
- top_down_dom(&work, root, traversal_data, scope, pool, traversal, tls);
- });
+ traversal.handle_postorder_traversal(context, traversal_root, *node, children_to_process);
+
+ nodes_remaining_at_current_depth -= 1;
+
+ // If we have enough children at the next depth in the DOM, spawn them to a different job
+ // relatively soon, while keeping always at least `local_queue_size` worth of work for
+ // ourselves.
+ let discovered_children = discovered.len() - nodes_remaining_at_current_depth;
+ if discovered_children >= work_unit_max &&
+ discovered.len() >= local_queue_size + work_unit_max &&
+ scope.is_some()
+ {
+ let kept_work = std::cmp::max(nodes_remaining_at_current_depth, local_queue_size);
+ let mut traversal_data_copy = traversal_data.clone();
+ traversal_data_copy.current_dom_depth += 1;
+ distribute_work(
+ discovered.split_off(kept_work),
+ traversal_root,
+ work_unit_max,
+ traversal_data_copy,
+ scope.unwrap(),
+ traversal,
+ tls.unwrap(),
+ );
}
- } else {
- for chunk in nodes.chunks(work_unit_max) {
- let work = chunk.to_vec();
- let traversal_data_copy = traversal_data.clone();
- scope.spawn_fifo(move |scope| {
- #[cfg(feature = "gecko")]
- gecko_profiler_label!(Layout, StyleComputation);
- let work = work;
- top_down_dom(
- &work,
- root,
- traversal_data_copy,
- scope,
- pool,
- traversal,
- tls,
- )
- });
+
+ if nodes_remaining_at_current_depth == 0 {
+ traversal_data.current_dom_depth += 1;
+ nodes_remaining_at_current_depth = discovered.len();
}
}
}
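The depth bookkeeping in the new style_trees loop can be hard to follow inside the diff, so here is a minimal standalone sketch (not Servo code; Node and its children field are hypothetical) of the same idea: a single VecDeque serves as the breadth-first queue, and a counter of nodes remaining at the current depth tells us exactly when to bump the depth. In the real code the same counter also feeds the condition for splitting off chunks to distribute_work; the sketch shows only the depth tracking.

use std::collections::VecDeque;

// Hypothetical stand-in for a DOM node.
struct Node {
    children: Vec<Node>,
}

fn breadth_first(root: &Node) {
    let mut queue: VecDeque<&Node> = VecDeque::new();
    queue.push_back(root);

    let mut depth = 0usize;
    // How many queued nodes still belong to the current level; when it hits
    // zero, everything left in the queue was discovered at the next level.
    let mut remaining_at_depth = queue.len();

    while let Some(node) = queue.pop_front() {
        // "Preorder" step: visit the node and discover its children.
        println!("visited a node at depth {depth}");
        for child in &node.children {
            queue.push_back(child);
        }

        remaining_at_depth -= 1;
        if remaining_at_depth == 0 {
            depth += 1;
            remaining_at_depth = queue.len();
        }
    }
}

fn main() {
    let tree = Node {
        children: vec![
            Node { children: vec![] },
            Node { children: vec![Node { children: vec![] }] },
        ],
    };
    breadth_first(&tree);
}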