-rw-r--r--  src/components/script/script_task.rs | 44
-rw-r--r--  src/components/util/workqueue.rs     | 75
2 files changed, 57 insertions, 62 deletions
diff --git a/src/components/script/script_task.rs b/src/components/script/script_task.rs
index 1b459ca7f05..12035753527 100644
--- a/src/components/script/script_task.rs
+++ b/src/components/script/script_task.rs
@@ -131,7 +131,7 @@ pub struct Page {
     pub subpage_id: Option<SubpageId>,
 
     /// Unique id for last reflow request; used for confirming completion reply.
-    pub last_reflow_id: Traceable<Cell<uint>>,
+    last_reflow_id: Traceable<Cell<uint>>,
 
     /// The outermost frame containing the document, window, and page URL.
     pub frame: Traceable<RefCell<Option<Frame>>>,
@@ -140,29 +140,29 @@ pub struct Page {
     pub layout_chan: Untraceable<LayoutChan>,
 
     /// The port that we will use to join layout. If this is `None`, then layout is not running.
-    pub layout_join_port: Untraceable<RefCell<Option<Receiver<()>>>>,
+    layout_join_port: Untraceable<RefCell<Option<Receiver<()>>>>,
 
     /// What parts of the document are dirty, if any.
-    pub damage: Traceable<RefCell<Option<DocumentDamage>>>,
+    damage: Traceable<RefCell<Option<DocumentDamage>>>,
 
     /// The current size of the window, in pixels.
-    pub window_size: Untraceable<Cell<Size2D<uint>>>,
+    window_size: Untraceable<Cell<Size2D<uint>>>,
 
-    pub js_info: Traceable<RefCell<Option<JSPageInfo>>>,
+    js_info: Traceable<RefCell<Option<JSPageInfo>>>,
 
     /// Cached copy of the most recent url loaded by the script
     /// TODO(tkuehn): this currently does not follow any particular caching policy
     /// and simply caches pages forever (!). The bool indicates if reflow is required
     /// when reloading.
-    pub url: Untraceable<RefCell<Option<(Url, bool)>>>,
+    url: Untraceable<RefCell<Option<(Url, bool)>>>,
 
-    pub next_subpage_id: Untraceable<Cell<SubpageId>>,
+    next_subpage_id: Untraceable<Cell<SubpageId>>,
 
     /// Pending resize event, if any.
-    pub resize_event: Untraceable<Cell<Option<Size2D<uint>>>>,
+    resize_event: Untraceable<Cell<Option<Size2D<uint>>>>,
 
     /// Pending scroll to fragment event, if any
-    pub fragment_node: Traceable<RefCell<Option<JS<Element>>>>,
+    fragment_node: Traceable<RefCell<Option<JS<Element>>>>,
 
     /// Associated resource task for use by DOM objects like XMLHttpRequest
     pub resource_task: Untraceable<ResourceTask>,
@@ -229,10 +229,6 @@ impl Page {
         }
     }
 
-    fn id(&self) -> PipelineId {
-        self.id
-    }
-
     // must handle root case separately
     pub fn remove(&self, id: PipelineId) -> Option<Rc<Page>> {
         let remove_idx = {
@@ -246,7 +242,7 @@ impl Page {
                     let page_tree = unsafe {
                         cast::transmute_lifetime(page_tree)
                     };
-                    page_tree.id() == id
+                    page_tree.id == id
                 })
                 .map(|(idx, _)| idx)
         };
@@ -545,29 +541,29 @@ impl Drop for StackRootTLS {
 /// FIXME: Rename to `Page`, following WebKit?
 pub struct ScriptTask {
     /// A handle to the information pertaining to page layout
-    pub page: RefCell<Rc<Page>>,
+    page: RefCell<Rc<Page>>,
     /// A handle to the image cache task.
-    pub image_cache_task: ImageCacheTask,
+    image_cache_task: ImageCacheTask,
     /// A handle to the resource task.
-    pub resource_task: ResourceTask,
+    resource_task: ResourceTask,
 
     /// The port on which the script task receives messages (load URL, exit, etc.)
-    pub port: Receiver<ScriptMsg>,
+    port: Receiver<ScriptMsg>,
     /// A channel to hand out when some other task needs to be able to respond to a message from
     /// the script task.
-    pub chan: ScriptChan,
+    chan: ScriptChan,
 
     /// For communicating load url messages to the constellation
-    pub constellation_chan: ConstellationChan,
+    constellation_chan: ConstellationChan,
     /// A handle to the compositor for communicating ready state messages.
-    pub compositor: Box<ScriptListener>,
+    compositor: Box<ScriptListener>,
 
     /// The JavaScript runtime.
-    pub js_runtime: js::rust::rt,
+    js_runtime: js::rust::rt,
     /// The JSContext.
-    pub js_context: RefCell<Option<Rc<Cx>>>,
+    js_context: RefCell<Option<Rc<Cx>>>,
 
-    pub mouse_over_targets: RefCell<Option<Vec<JS<Node>>>>
+    mouse_over_targets: RefCell<Option<Vec<JS<Node>>>>
 }
 
 /// In the event of task failure, all data on the stack runs its destructor. However, there
diff --git a/src/components/util/workqueue.rs b/src/components/util/workqueue.rs
index b2b0f7d844e..7c28abbb821 100644
--- a/src/components/util/workqueue.rs
+++ b/src/components/util/workqueue.rs
@@ -18,66 +18,65 @@ use std::task::TaskOpts;
 
 /// A unit of work.
 ///
-/// The type parameter `QUD` stands for "queue user data" and represents global custom data for the
-/// entire work queue, and the type parameter `WUD` stands for "work user data" and represents
-/// custom data specific to each unit of work.
-pub struct WorkUnit<QUD,WUD> {
+/// # Type parameters
+///
+/// - `QueueData`: global custom data for the entire work queue.
+/// - `WorkData`: custom data specific to each unit of work.
+pub struct WorkUnit<QueueData, WorkData> {
     /// The function to execute.
-    pub fun: extern "Rust" fn(WUD, &mut WorkerProxy<QUD,WUD>),
+    pub fun: extern "Rust" fn(WorkData, &mut WorkerProxy<QueueData, WorkData>),
     /// Arbitrary data.
-    pub data: WUD,
+    pub data: WorkData,
 }
 
 /// Messages from the supervisor to the worker.
-enum WorkerMsg<QUD,WUD> {
+enum WorkerMsg<QueueData, WorkData> {
     /// Tells the worker to start work.
-    StartMsg(Worker<WorkUnit<QUD,WUD>>, *mut AtomicUint, *QUD),
-
+    StartMsg(Worker<WorkUnit<QueueData, WorkData>>, *mut AtomicUint, *QueueData),
     /// Tells the worker to stop. It can be restarted again with a `StartMsg`.
     StopMsg,
-
     /// Tells the worker thread to terminate.
     ExitMsg,
 }
 
 /// Messages to the supervisor.
-enum SupervisorMsg<QUD,WUD> {
+enum SupervisorMsg<QueueData, WorkData> {
     FinishedMsg,
-    ReturnDequeMsg(uint, Worker<WorkUnit<QUD,WUD>>),
+    ReturnDequeMsg(uint, Worker<WorkUnit<QueueData, WorkData>>),
 }
 
 /// Information that the supervisor thread keeps about the worker threads.
-struct WorkerInfo<QUD,WUD> {
+struct WorkerInfo<QueueData, WorkData> {
     /// The communication channel to the workers.
-    chan: Sender<WorkerMsg<QUD,WUD>>,
+    chan: Sender<WorkerMsg<QueueData, WorkData>>,
     /// The buffer pool for this deque.
-    pool: BufferPool<WorkUnit<QUD,WUD>>,
+    pool: BufferPool<WorkUnit<QueueData, WorkData>>,
     /// The worker end of the deque, if we have it.
-    deque: Option<Worker<WorkUnit<QUD,WUD>>>,
+    deque: Option<Worker<WorkUnit<QueueData, WorkData>>>,
     /// The thief end of the work-stealing deque.
-    thief: Stealer<WorkUnit<QUD,WUD>>,
+    thief: Stealer<WorkUnit<QueueData, WorkData>>,
 }
 
 /// Information specific to each worker thread that the thread keeps.
-struct WorkerThread<QUD,WUD> {
+struct WorkerThread<QueueData, WorkData> {
     /// The index of this worker.
     index: uint,
     /// The communication port from the supervisor.
-    port: Receiver<WorkerMsg<QUD,WUD>>,
+    port: Receiver<WorkerMsg<QueueData, WorkData>>,
     /// The communication channel on which messages are sent to the supervisor.
-    chan: Sender<SupervisorMsg<QUD,WUD>>,
+    chan: Sender<SupervisorMsg<QueueData, WorkData>>,
     /// The thief end of the work-stealing deque for all other workers.
-    other_deques: Vec<Stealer<WorkUnit<QUD,WUD>>>,
+    other_deques: Vec<Stealer<WorkUnit<QueueData, WorkData>>>,
     /// The random number generator for this worker.
     rng: XorShiftRng,
 }
 
 static SPIN_COUNT: uint = 1000;
 
-impl<QUD:Send,WUD:Send> WorkerThread<QUD,WUD> {
+impl<QueueData: Send, WorkData: Send> WorkerThread<QueueData, WorkData> {
     /// The main logic. This function starts up the worker and listens for
     /// messages.
-    pub fn start(&mut self) {
+    fn start(&mut self) {
         loop {
             // Wait for a start message.
             let (mut deque, ref_count, queue_data) = match self.port.recv() {
@@ -160,16 +159,16 @@ impl<QUD:Send,WUD:Send> WorkerThread<QUD,WUD> {
 }
 
 /// A handle to the work queue that individual work units have.
-pub struct WorkerProxy<'a,QUD,WUD> {
-    pub worker: &'a mut Worker<WorkUnit<QUD,WUD>>,
-    pub ref_count: *mut AtomicUint,
-    pub queue_data: *QUD,
+pub struct WorkerProxy<'a, QueueData, WorkData> {
+    worker: &'a mut Worker<WorkUnit<QueueData, WorkData>>,
+    ref_count: *mut AtomicUint,
+    queue_data: *QueueData,
 }
 
-impl<'a,QUD,WUD:Send> WorkerProxy<'a,QUD,WUD> {
+impl<'a, QueueData, WorkData: Send> WorkerProxy<'a, QueueData, WorkData> {
     /// Enqueues a block into the work queue.
     #[inline]
-    pub fn push(&mut self, work_unit: WorkUnit<QUD,WUD>) {
+    pub fn push(&mut self, work_unit: WorkUnit<QueueData, WorkData>) {
         unsafe {
             drop((*self.ref_count).fetch_add(1, SeqCst));
         }
@@ -178,7 +177,7 @@ impl<'a,QUD,WUD:Send> WorkerProxy<'a,QUD,WUD> {
 
     /// Retrieves the queue user data.
     #[inline]
-    pub fn user_data<'a>(&'a self) -> &'a QUD {
+    pub fn user_data<'a>(&'a self) -> &'a QueueData {
         unsafe {
             cast::transmute(self.queue_data)
         }
@@ -186,21 +185,21 @@
 }
 
 /// A work queue on which units of work can be submitted.
-pub struct WorkQueue<QUD,WUD> {
+pub struct WorkQueue<QueueData, WorkData> {
     /// Information about each of the workers.
-    workers: Vec<WorkerInfo<QUD,WUD>>,
+    workers: Vec<WorkerInfo<QueueData, WorkData>>,
     /// A port on which deques can be received from the workers.
-    port: Receiver<SupervisorMsg<QUD,WUD>>,
+    port: Receiver<SupervisorMsg<QueueData, WorkData>>,
     /// The amount of work that has been enqueued.
-    pub work_count: uint,
+    work_count: uint,
     /// Arbitrary user data.
-    pub data: QUD,
+    pub data: QueueData,
 }
 
-impl<QUD:Send,WUD:Send> WorkQueue<QUD,WUD> {
+impl<QueueData: Send, WorkData: Send> WorkQueue<QueueData, WorkData> {
     /// Creates a new work queue and spawns all the threads associated with
     /// it.
-    pub fn new(task_name: &'static str, thread_count: uint, user_data: QUD) -> WorkQueue<QUD,WUD> {
+    pub fn new(task_name: &'static str, thread_count: uint, user_data: QueueData) -> WorkQueue<QueueData, WorkData> {
         // Set up data structures.
         let (supervisor_chan, supervisor_port) = channel();
         let (mut infos, mut threads) = (vec!(), vec!());
@@ -253,7 +252,7 @@ impl<QUD:Send,WUD:Send> WorkQueue<QUD,WUD> {
 
     /// Enqueues a block into the work queue.
    #[inline]
-    pub fn push(&mut self, work_unit: WorkUnit<QUD,WUD>) {
+    pub fn push(&mut self, work_unit: WorkUnit<QueueData, WorkData>) {
         match self.workers.get_mut(0).deque {
             None => {
                 fail!("tried to push a block but we don't have the deque?!")
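Taken together, the hunks above do two things: they make internal fields of Page, ScriptTask, WorkerProxy, and WorkQueue private, and they replace the terse QUD/WUD type parameters with the descriptive QueueData/WorkData. For readers who want to see how the renamed parameters read in practice, here is a minimal standalone sketch in present-day Rust. It is not Servo code: the toy WorkQueue/WorkUnit below only mirror the shape of the types in the diff (no work-stealing deques, supervisor channels, or worker threads), and print_item and the single-threaded run method are invented purely for illustration.

// Standalone illustration only: these toy types mirror the *shape* of the
// `WorkUnit`/`WorkQueue` definitions in the diff above, not Servo's actual API.

/// A unit of work. `QueueData` is data shared by the whole queue; `WorkData`
/// is data specific to this unit (the roles the old `QUD`/`WUD` names played).
struct WorkUnit<QueueData, WorkData> {
    /// The function to execute against this unit's data.
    fun: fn(WorkData, &QueueData),
    /// Arbitrary per-unit data.
    data: WorkData,
}

/// A toy, single-threaded queue that simply runs its units in order.
struct WorkQueue<QueueData, WorkData> {
    units: Vec<WorkUnit<QueueData, WorkData>>,
    /// Arbitrary user data shared by every unit of work.
    data: QueueData,
}

impl<QueueData, WorkData> WorkQueue<QueueData, WorkData> {
    fn new(data: QueueData) -> Self {
        WorkQueue { units: Vec::new(), data }
    }

    /// Enqueues a unit of work.
    fn push(&mut self, work_unit: WorkUnit<QueueData, WorkData>) {
        self.units.push(work_unit);
    }

    /// Runs every queued unit against the shared queue data.
    fn run(&mut self) {
        for unit in self.units.drain(..) {
            (unit.fun)(unit.data, &self.data);
        }
    }
}

/// A hypothetical work function used only for this example.
fn print_item(n: u32, shared: &String) {
    println!("{shared}: processing item {n}");
}

fn main() {
    // With descriptive parameter names, the declaration documents itself:
    // the queue shares a String and each unit carries a u32.
    let mut queue: WorkQueue<String, u32> = WorkQueue::new(String::from("demo queue"));
    queue.push(WorkUnit { fun: print_item, data: 42 });
    queue.push(WorkUnit { fun: print_item, data: 7 });
    queue.run();
}

The renamed parameters carry their own documentation at every use site (for example, WorkerProxy<QueueData, WorkData> in the real module), which is why the commit can also drop the explanatory sentence from the WorkUnit doc comment in favor of a short type-parameter list.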