Diffstat (limited to 'components/constellation/constellation.rs')
-rw-r--r--  components/constellation/constellation.rs  66
1 file changed, 51 insertions(+), 15 deletions(-)
diff --git a/components/constellation/constellation.rs b/components/constellation/constellation.rs
index 9cd1008a177..7e60dc869af 100644
--- a/components/constellation/constellation.rs
+++ b/components/constellation/constellation.rs
@@ -124,7 +124,7 @@ use keyboard_types::webdriver::Event as WebDriverInputEvent;
use keyboard_types::KeyboardEvent;
use layout_traits::LayoutThreadFactory;
use log::{Level, LevelFilter, Log, Metadata, Record};
-use msg::constellation_msg::{BackgroundHangMonitorRegister, HangAlert};
+use msg::constellation_msg::{BackgroundHangMonitorRegister, HangMonitorAlert, SamplerControlMsg};
use msg::constellation_msg::{
BrowsingContextId, HistoryStateId, PipelineId, TopLevelBrowsingContextId,
};
@@ -207,13 +207,16 @@ pub struct Constellation<Message, LTF, STF> {
/// None when in multiprocess mode.
background_monitor_register: Option<Box<BackgroundHangMonitorRegister>>,
+ /// Channels to control all sampling profilers.
+ sampling_profiler_control: Vec<IpcSender<SamplerControlMsg>>,
+
/// A channel for the background hang monitor to send messages
/// to the constellation.
- background_hang_monitor_sender: IpcSender<HangAlert>,
+ background_hang_monitor_sender: IpcSender<HangMonitorAlert>,
/// A channel for the constellation to receive messages
/// from the background hang monitor.
- background_hang_monitor_receiver: Receiver<Result<HangAlert, IpcError>>,
+ background_hang_monitor_receiver: Receiver<Result<HangMonitorAlert, IpcError>>,
/// An IPC channel for layout threads to send messages to the constellation.
/// This is the layout threads' view of `layout_receiver`.
@@ -613,12 +616,19 @@ where
// If we are in multiprocess mode,
// a dedicated per-process hang monitor will be initialized later inside the content process.
// See run_content_process in servo/lib.rs
- let background_monitor_register = if opts::multiprocess() {
- None
+ let (background_monitor_register, sampler_chan) = if opts::multiprocess() {
+ (None, vec![])
} else {
- Some(HangMonitorRegister::init(
- background_hang_monitor_sender.clone(),
- ))
+ let (sampling_profiler_control, sampling_profiler_port) =
+ ipc::channel().expect("ipc channel failure");
+
+ (
+ Some(HangMonitorRegister::init(
+ background_hang_monitor_sender.clone(),
+ sampling_profiler_port,
+ )),
+ vec![sampling_profiler_control],
+ )
};
let (ipc_layout_sender, ipc_layout_receiver) =
@@ -639,6 +649,7 @@ where
background_hang_monitor_sender,
background_hang_monitor_receiver,
background_monitor_register,
+ sampling_profiler_control: sampler_chan,
layout_sender: ipc_layout_sender,
script_receiver: script_receiver,
compositor_receiver: compositor_receiver,
@@ -840,6 +851,10 @@ where
Err(e) => return self.handle_send_error(pipeline_id, e),
};
+ if let Some(sampler_chan) = pipeline.sampler_control_chan {
+ self.sampling_profiler_control.push(sampler_chan);
+ }
+
if let Some(host) = host {
debug!(
"Adding new host entry {} for top-level browsing context {}.",
@@ -847,11 +862,11 @@ where
);
let _ = self
.event_loops
- .insert(host, Rc::downgrade(&pipeline.event_loop));
+ .insert(host, Rc::downgrade(&pipeline.pipeline.event_loop));
}
assert!(!self.pipelines.contains_key(&pipeline_id));
- self.pipelines.insert(pipeline_id, pipeline);
+ self.pipelines.insert(pipeline_id, pipeline.pipeline);
}
/// Get an iterator for the fully active browsing contexts in a subtree.
@@ -941,7 +956,7 @@ where
#[derive(Debug)]
enum Request {
Script((PipelineId, FromScriptMsg)),
- BackgroundHangMonitor(HangAlert),
+ BackgroundHangMonitor(HangMonitorAlert),
Compositor(FromCompositorMsg),
Layout(FromLayoutMsg),
NetworkListener((PipelineId, FetchResponseMsg)),
@@ -1007,10 +1022,17 @@ where
}
}
- fn handle_request_from_background_hang_monitor(&self, message: HangAlert) {
- // TODO: In case of a permanent hang being reported, add a "kill script" workflow,
- // via the embedder?
- warn!("Component hang alert: {:?}", message);
+ fn handle_request_from_background_hang_monitor(&self, message: HangMonitorAlert) {
+ match message {
+ HangMonitorAlert::Profile(bytes) => self
+ .embedder_proxy
+ .send((None, EmbedderMsg::ReportProfile(bytes))),
+ HangMonitorAlert::Hang(hang) => {
+ // TODO: In case of a permanent hang being reported, add a "kill script" workflow,
+ // via the embedder?
+ warn!("Component hang alert: {:?}", hang);
+ },
+ }
}
fn handle_request_from_network_listener(&mut self, message: (PipelineId, FetchResponseMsg)) {
@@ -1194,6 +1216,20 @@ where
self.forward_event(destination_pipeline_id, event);
},
FromCompositorMsg::SetCursor(cursor) => self.handle_set_cursor_msg(cursor),
+ FromCompositorMsg::EnableProfiler(rate) => {
+ for chan in &self.sampling_profiler_control {
+ if let Err(e) = chan.send(SamplerControlMsg::Enable(rate)) {
+ warn!("error communicating with sampling profiler: {}", e);
+ }
+ }
+ },
+ FromCompositorMsg::DisableProfiler => {
+ for chan in &self.sampling_profiler_control {
+ if let Err(e) = chan.send(SamplerControlMsg::Disable) {
+ warn!("error communicating with sampling profiler: {}", e);
+ }
+ }
+ },
}
}
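
The pattern introduced by this diff is a simple fan-out: the constellation collects one control sender per sampling profiler (one up front in single-process mode, plus one per new pipeline) and broadcasts Enable/Disable to all of them. Below is a minimal, self-contained sketch of that pattern, not Servo's actual implementation: std::sync::mpsc stands in for ipc-channel's IpcSender so it compiles on its own, and the Duration payload of SamplerControlMsg::Enable is an assumption about the `rate` parameter shown in the diff.

```rust
use std::sync::mpsc::{channel, Sender};
use std::time::Duration;

// Mirrors the enum name referenced in the diff; the payload type is assumed.
#[derive(Debug, Clone)]
enum SamplerControlMsg {
    Enable(Duration),
    Disable,
}

struct Constellation {
    // One sender per background hang monitor / sampling profiler,
    // analogous to `sampling_profiler_control` in the diff.
    sampling_profiler_control: Vec<Sender<SamplerControlMsg>>,
}

impl Constellation {
    // Broadcast a control message to every registered profiler,
    // logging (rather than propagating) send failures.
    fn broadcast(&self, msg: SamplerControlMsg) {
        for chan in &self.sampling_profiler_control {
            if let Err(e) = chan.send(msg.clone()) {
                eprintln!("error communicating with sampling profiler: {}", e);
            }
        }
    }
}

fn main() {
    let (tx, rx) = channel();
    let constellation = Constellation {
        sampling_profiler_control: vec![tx],
    };

    // Equivalent to handling FromCompositorMsg::EnableProfiler / DisableProfiler.
    constellation.broadcast(SamplerControlMsg::Enable(Duration::from_millis(10)));
    constellation.broadcast(SamplerControlMsg::Disable);

    // The (stub) profiler side receives both messages in order.
    while let Ok(msg) = rx.try_recv() {
        println!("profiler received: {:?}", msg);
    }
}
```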