about summary refs log tree commit diff stats
path: root/components/style/parallel.rs
diff options
context:
space:
mode:
Diffstat (limited to 'components/style/parallel.rs')
-rw-r--r--  components/style/parallel.rs  79
1 file changed, 39 insertions(+), 40 deletions(-)
diff --git a/components/style/parallel.rs b/components/style/parallel.rs
index fd2ca7bc3a9..25aaed5f3a3 100644
--- a/components/style/parallel.rs
+++ b/components/style/parallel.rs
@@ -78,8 +78,7 @@ type WorkUnit<N> = ArrayVec<[SendNode<N>; WORK_UNIT_MAX]>;
fn create_thread_local_context<'scope, E, D>(
traversal: &'scope D,
slot: &mut Option<ThreadLocalStyleContext<E>>,
-)
-where
+) where
E: TElement + 'scope,
D: DomTraversal<E>,
{
@@ -109,8 +108,7 @@ fn top_down_dom<'a, 'scope, E, D>(
pool: &'scope rayon::ThreadPool,
traversal: &'scope D,
tls: &'scope ScopedTLS<'scope, ThreadLocalStyleContext<E>>,
-)
-where
+) where
E: TElement + 'scope,
D: DomTraversal<E>,
{
@@ -128,8 +126,9 @@ where
{
// Scope the borrow of the TLS so that the borrow is dropped before
// a potential recursive call when we pass TailCall.
- let mut tlc = tls.ensure(
- |slot: &mut Option<ThreadLocalStyleContext<E>>| create_thread_local_context(traversal, slot));
+ let mut tlc = tls.ensure(|slot: &mut Option<ThreadLocalStyleContext<E>>| {
+ create_thread_local_context(traversal, slot)
+ });
// Check that we're not in danger of running out of stack.
recursion_ok = !tlc.stack_limit_checker.limit_exceeded();
@@ -180,15 +179,17 @@ where
if discovered_child_nodes.len() >= WORK_UNIT_MAX {
let mut traversal_data_copy = traversal_data.clone();
traversal_data_copy.current_dom_depth += 1;
- traverse_nodes(discovered_child_nodes.drain(),
- DispatchMode::NotTailCall,
- recursion_ok,
- root,
- traversal_data_copy,
- scope,
- pool,
- traversal,
- tls);
+ traverse_nodes(
+ discovered_child_nodes.drain(),
+ DispatchMode::NotTailCall,
+ recursion_ok,
+ root,
+ traversal_data_copy,
+ scope,
+ pool,
+ traversal,
+ tls,
+ );
}
let node = **n;
@@ -199,8 +200,7 @@ where
discovered_child_nodes.push(send_n);
});
- traversal.handle_postorder_traversal(&mut context, root, node,
- children_to_process);
+ traversal.handle_postorder_traversal(&mut context, root, node, children_to_process);
}
}
@@ -209,15 +209,17 @@ where
// worth of them) directly on this thread by passing TailCall.
if !discovered_child_nodes.is_empty() {
traversal_data.current_dom_depth += 1;
- traverse_nodes(discovered_child_nodes.drain(),
- DispatchMode::TailCall,
- recursion_ok,
- root,
- traversal_data,
- scope,
- pool,
- traversal,
- tls);
+ traverse_nodes(
+ discovered_child_nodes.drain(),
+ DispatchMode::TailCall,
+ recursion_ok,
+ root,
+ traversal_data,
+ scope,
+ pool,
+ traversal,
+ tls,
+ );
}
}
@@ -232,7 +234,9 @@ pub enum DispatchMode {
}
impl DispatchMode {
- fn is_tail_call(&self) -> bool { matches!(*self, DispatchMode::TailCall) }
+ fn is_tail_call(&self) -> bool {
+ matches!(*self, DispatchMode::TailCall)
+ }
}
/// Enqueues |nodes| for processing, possibly on this thread if the tail call
@@ -247,12 +251,11 @@ pub fn traverse_nodes<'a, 'scope, E, D, I>(
scope: &'a rayon::Scope<'scope>,
pool: &'scope rayon::ThreadPool,
traversal: &'scope D,
- tls: &'scope ScopedTLS<'scope, ThreadLocalStyleContext<E>>
-)
-where
+ tls: &'scope ScopedTLS<'scope, ThreadLocalStyleContext<E>>,
+) where
E: TElement + 'scope,
D: DomTraversal<E>,
- I: ExactSizeIterator<Item = SendNode<E::ConcreteNode>>
+ I: ExactSizeIterator<Item = SendNode<E::ConcreteNode>>,
{
debug_assert_ne!(nodes.len(), 0);
@@ -263,22 +266,19 @@ where
// overflow due to excessive tail recursion. The stack overflow avoidance
// isn't observable to content -- we're still completely correct, just not
// using tail recursion any more. See Gecko bugs 1368302 and 1376883.
- let may_dispatch_tail = mode.is_tail_call() &&
- recursion_ok &&
- !pool.current_thread_has_pending_tasks().unwrap();
+ let may_dispatch_tail =
+ mode.is_tail_call() && recursion_ok && !pool.current_thread_has_pending_tasks().unwrap();
// In the common case, our children fit within a single work unit, in which
// case we can pass the SmallVec directly and avoid extra allocation.
if nodes.len() <= WORK_UNIT_MAX {
let work: WorkUnit<E::ConcreteNode> = nodes.collect();
if may_dispatch_tail {
- top_down_dom(&work, root,
- traversal_data, scope, pool, traversal, tls);
+ top_down_dom(&work, root, traversal_data, scope, pool, traversal, tls);
} else {
scope.spawn(move |scope| {
let work = work;
- top_down_dom(&work, root,
- traversal_data, scope, pool, traversal, tls);
+ top_down_dom(&work, root, traversal_data, scope, pool, traversal, tls);
});
}
} else {
@@ -287,8 +287,7 @@ where
let traversal_data_copy = traversal_data.clone();
scope.spawn(move |scope| {
let n = nodes;
- top_down_dom(&*n, root,
- traversal_data_copy, scope, pool, traversal, tls)
+ top_down_dom(&*n, root, traversal_data_copy, scope, pool, traversal, tls)
});
}
}