-rw-r--r--  components/layout/layout_task.rs |  3
-rw-r--r--  components/layout/parallel.rs    |  7
-rw-r--r--  components/util/workqueue.rs     | 10
3 files changed, 5 insertions, 15 deletions
diff --git a/components/layout/layout_task.rs b/components/layout/layout_task.rs
index e3349ef19de..172fcf2ed3f 100644
--- a/components/layout/layout_task.rs
+++ b/components/layout/layout_task.rs
@@ -300,8 +300,7 @@ impl LayoutTask {
             opts::get().initial_window_size.as_f32() * ScaleFactor::new(1.0));
         let parallel_traversal = if opts::get().layout_threads != 1 {
             Some(WorkQueue::new("LayoutWorker", task_state::LAYOUT,
-                                opts::get().layout_threads,
-                                SharedLayoutContextWrapper(ptr::null())))
+                                opts::get().layout_threads))
         } else {
             None
         };
diff --git a/components/layout/parallel.rs b/components/layout/parallel.rs
index 99bed12911e..65bf7fb0eae 100644
--- a/components/layout/parallel.rs
+++ b/components/layout/parallel.rs
@@ -21,7 +21,6 @@ use wrapper::{PreorderDomTraversal, PostorderDomTraversal};
 
 use profile_traits::time::{self, ProfilerMetadata, profile};
 use std::mem;
-use std::ptr;
 use std::sync::atomic::{AtomicIsize, Ordering};
 use util::opts;
 use util::workqueue::{WorkQueue, WorkUnit, WorkerProxy};
@@ -449,15 +448,11 @@ fn run_queue_with_custom_work_data_type<To,F>(
         callback: F,
         shared_layout_context: &SharedLayoutContext)
         where To: 'static + Send, F: FnOnce(&mut WorkQueue<SharedLayoutContextWrapper,To>) {
-    queue.data = SharedLayoutContextWrapper(shared_layout_context as *const _);
-
     let queue: &mut WorkQueue<SharedLayoutContextWrapper,To> = unsafe {
         mem::transmute(queue)
     };
     callback(queue);
-    queue.run();
-
-    queue.data = SharedLayoutContextWrapper(ptr::null());
+    queue.run(SharedLayoutContextWrapper(shared_layout_context as *const _));
 }
 
 pub fn traverse_dom_preorder(root: LayoutNode,
diff --git a/components/util/workqueue.rs b/components/util/workqueue.rs
index 2f0523a09ef..e8d9e408f00 100644
--- a/components/util/workqueue.rs
+++ b/components/util/workqueue.rs
@@ -244,8 +244,6 @@ pub struct WorkQueue<QueueData: 'static, WorkData: 'static> {
     port: Receiver<SupervisorMsg<QueueData, WorkData>>,
     /// The amount of work that has been enqueued.
     work_count: usize,
-    /// Arbitrary user data.
-    pub data: QueueData,
 }
 
 impl<QueueData: Send, WorkData: Send> WorkQueue<QueueData, WorkData> {
@@ -253,8 +251,7 @@ impl<QueueData: Send, WorkData: Send> WorkQueue<QueueData, WorkData> {
     /// it.
     pub fn new(task_name: &'static str,
                state: task_state::TaskState,
-               thread_count: usize,
-               user_data: QueueData) -> WorkQueue<QueueData, WorkData> {
+               thread_count: usize) -> WorkQueue<QueueData, WorkData> {
         // Set up data structures.
         let (supervisor_chan, supervisor_port) = channel();
         let (mut infos, mut threads) = (vec!(), vec!());
@@ -302,7 +299,6 @@ impl<QueueData: Send, WorkData: Send> WorkQueue<QueueData, WorkData> {
             workers: infos,
             port: supervisor_port,
             work_count: 0,
-            data: user_data,
         }
     }
 
@@ -320,13 +316,13 @@ impl<QueueData: Send, WorkData: Send> WorkQueue<QueueData, WorkData> {
     }
 
     /// Synchronously runs all the enqueued tasks and waits for them to complete.
-    pub fn run(&mut self) {
+    pub fn run(&mut self, data: QueueData) {
         // Tell the workers to start.
         let mut work_count = AtomicUsize::new(self.work_count);
         for worker in self.workers.iter_mut() {
             worker.chan.send(WorkerMsg::Start(worker.deque.take().unwrap(),
                                               &mut work_count,
-                                              &self.data)).unwrap()
+                                              &data)).unwrap()
         }
 
         // Wait for the work to finish.
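
Summary of the API change shown above: previously the shared layout context was stored on the queue at construction time (WorkQueue::new(..., user_data)) as a raw pointer that had to be reset to null after every run; now the caller passes the data to run() for the duration of a single run. The sketch below illustrates only this revised calling convention. It is a minimal, single-threaded stand-in, not Servo's work-stealing implementation; the names mirror the diff, but the internals are simplified assumptions.

// Minimal sketch of the new calling convention: per-run data is an argument
// to `run`, not a field stored on the queue between runs.
struct WorkUnit<QueueData, WorkData> {
    fun: fn(WorkData, &QueueData),
    data: WorkData,
}

struct WorkQueue<QueueData, WorkData> {
    work: Vec<WorkUnit<QueueData, WorkData>>,
}

impl<QueueData, WorkData> WorkQueue<QueueData, WorkData> {
    /// After this change, `new` no longer takes an initial `QueueData` value.
    fn new(_task_name: &'static str, _thread_count: usize) -> Self {
        WorkQueue { work: Vec::new() }
    }

    fn push(&mut self, unit: WorkUnit<QueueData, WorkData>) {
        self.work.push(unit);
    }

    /// `run` borrows the shared data only for the duration of one run, so the
    /// queue never holds a (possibly dangling) pointer to it afterwards.
    fn run(&mut self, data: QueueData) {
        for unit in self.work.drain(..) {
            (unit.fun)(unit.data, &data);
        }
    }
}

fn main() {
    let mut queue: WorkQueue<String, u32> = WorkQueue::new("LayoutWorker", 4);
    queue.push(WorkUnit { fun: |n, ctx| println!("{}: {}", ctx, n), data: 1 });
    queue.push(WorkUnit { fun: |n, ctx| println!("{}: {}", ctx, n), data: 2 });
    // Shared data is supplied per call to `run`, not at construction time.
    queue.run("shared context".to_string());
}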