author    Gregory Terzian <gterzian@users.noreply.github.com>  2020-06-24 15:07:48 +0800
committer Gregory Terzian <gterzian@users.noreply.github.com>  2020-06-30 13:22:38 +0800
commit    44ebca72da45575df4e5970e25920c46c14aa0cb (patch)
tree      88a9ed7d5529b9b83425d7b7cdd9b86f18ec12c2 /components/script
parent    0b61cfc3ae803ac0f9deef937f890f83b24c9a35 (diff)
ensure clean shutdown of all threads running JS
Diffstat (limited to 'components/script')
-rw-r--r--  components/script/dom/bindings/trace.rs               3
-rw-r--r--  components/script/dom/dedicatedworkerglobalscope.rs   8
-rw-r--r--  components/script/dom/globalscope.rs                 42
-rw-r--r--  components/script/dom/serviceworkerglobalscope.rs    14
-rw-r--r--  components/script/dom/worker.rs                       8
-rw-r--r--  components/script/dom/workerglobalscope.rs           23
-rw-r--r--  components/script/dom/xmlhttprequest.rs               5
-rw-r--r--  components/script/microtask.rs                        4
-rw-r--r--  components/script/script_runtime.rs                  33
-rw-r--r--  components/script/script_thread.rs                  167
-rw-r--r--  components/script/serviceworker_manager.rs           42
-rw-r--r--  components/script/task.rs                             8
-rw-r--r--  components/script/task_manager.rs                     2
-rw-r--r--  components/script/timers.rs                           7
14 files changed, 293 insertions, 73 deletions
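
The core mechanism of this patch is the ContextForRequestInterrupt wrapper added in script_runtime.rs together with the interrupt callback registered in script_thread.rs: the raw *mut JSContext is wrapped in a Send newtype so that another thread (the parent of a worker, or the background hang monitor) can ask SpiderMonkey to interrupt the JS currently running on that context, and the interrupt callback then consults a shared closing flag to decide whether the script may keep running. The following is a minimal sketch distilled from the hunks below; the single CLOSING static is a simplification of the per-thread closing flags used in the actual patch, and the setup shown in the trailing comments is a simplified stand-in for the real registration sites.

use std::sync::atomic::{AtomicBool, Ordering};
use js::jsapi::{
    JSContext as RawJSContext, JS_AddInterruptCallback, JS_RequestInterruptCallback,
};

// Simplified stand-in for the per-thread `closing` flags in the patch.
static CLOSING: AtomicBool = AtomicBool::new(false);

/// Send-able handle to a JSContext, used only to ask SpiderMonkey to
/// interrupt JS running on that context from another thread.
#[repr(transparent)]
pub struct ContextForRequestInterrupt(*mut RawJSContext);

// JS_RequestInterruptCallback is callable from any thread
// (see mozjs/js/src/vm/JSContext.h), which is what makes this Send.
unsafe impl Send for ContextForRequestInterrupt {}

impl ContextForRequestInterrupt {
    pub fn request_interrupt(&self) {
        unsafe { JS_RequestInterruptCallback(self.0) }
    }
}

// Returning false from the callback aborts the script currently running
// on this thread, letting its event loop observe the flag and shut down.
unsafe extern "C" fn interrupt_callback(_cx: *mut RawJSContext) -> bool {
    !CLOSING.load(Ordering::SeqCst)
}

// On the thread that owns the context, once at startup:
//     unsafe { JS_AddInterruptCallback(cx, Some(interrupt_callback)) };
// From any other thread holding a ContextForRequestInterrupt:
//     CLOSING.store(true, Ordering::SeqCst);
//     context.request_interrupt();

In the diff below, the same pattern shows up three times: the ScriptThread registers the callback and hands a BHMExitSignal (closing flag plus context) to the background hang monitor; dedicated workers send their ContextForRequestInterrupt back to the parent, which stores it in AutoCloseWorker and interrupts on Drop; and the service worker manager does the equivalent in ServiceWorkerRegistration.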
diff --git a/components/script/dom/bindings/trace.rs b/components/script/dom/bindings/trace.rs
index 9d7b385adc3..411327bca39 100644
--- a/components/script/dom/bindings/trace.rs
+++ b/components/script/dom/bindings/trace.rs
@@ -42,7 +42,7 @@ use crate::dom::gpucommandencoder::GPUCommandEncoderState;
use crate::dom::htmlimageelement::SourceSet;
use crate::dom::htmlmediaelement::{HTMLMediaElementFetchContext, MediaFrameRenderer};
use crate::dom::identityhub::Identities;
-use crate::script_runtime::StreamConsumer;
+use crate::script_runtime::{ContextForRequestInterrupt, StreamConsumer};
use crate::script_thread::IncompleteParserContexts;
use crate::task::TaskBox;
use app_units::Au;
@@ -502,6 +502,7 @@ unsafe_no_jsmanaged_fields!(TimelineMarkerType);
unsafe_no_jsmanaged_fields!(WorkerId);
unsafe_no_jsmanaged_fields!(BufferQueue, QuirksMode, StrTendril);
unsafe_no_jsmanaged_fields!(Runtime);
+unsafe_no_jsmanaged_fields!(ContextForRequestInterrupt);
unsafe_no_jsmanaged_fields!(HeaderMap, Method);
unsafe_no_jsmanaged_fields!(WindowProxyHandler);
unsafe_no_jsmanaged_fields!(UntrustedNodeAddress, OpaqueNode);
diff --git a/components/script/dom/dedicatedworkerglobalscope.rs b/components/script/dom/dedicatedworkerglobalscope.rs
index 0095429b571..1bc71fad4d5 100644
--- a/components/script/dom/dedicatedworkerglobalscope.rs
+++ b/components/script/dom/dedicatedworkerglobalscope.rs
@@ -30,7 +30,8 @@ use crate::fetch::load_whole_resource;
use crate::realms::{enter_realm, AlreadyInRealm, InRealm};
use crate::script_runtime::ScriptThreadEventCategory::WorkerEvent;
use crate::script_runtime::{
- new_child_runtime, CommonScriptMsg, JSContext as SafeJSContext, Runtime, ScriptChan, ScriptPort,
+ new_child_runtime, CommonScriptMsg, ContextForRequestInterrupt, JSContext as SafeJSContext,
+ Runtime, ScriptChan, ScriptPort,
};
use crate::task_queue::{QueuedTask, QueuedTaskConversion, TaskQueue};
use crate::task_source::networking::NetworkingTaskSource;
@@ -256,7 +257,7 @@ impl DedicatedWorkerGlobalScope {
worker_url,
runtime,
from_devtools_receiver,
- Some(closing),
+ closing,
gpu_id_hub,
),
task_queue: TaskQueue::new(receiver, own_sender.clone()),
@@ -324,6 +325,7 @@ impl DedicatedWorkerGlobalScope {
browsing_context: Option<BrowsingContextId>,
gpu_id_hub: Arc<Mutex<Identities>>,
control_receiver: Receiver<DedicatedWorkerControlMsg>,
+ context_sender: Sender<ContextForRequestInterrupt>,
) -> JoinHandle<()> {
let serialized_worker_url = worker_url.to_string();
let name = format!("WebWorker for {}", serialized_worker_url);
@@ -377,6 +379,8 @@ impl DedicatedWorkerGlobalScope {
new_child_runtime(parent, Some(task_source))
};
+ let _ = context_sender.send(ContextForRequestInterrupt::new(runtime.cx()));
+
let (devtools_mpsc_chan, devtools_mpsc_port) = unbounded();
ROUTER.route_ipc_receiver_to_crossbeam_sender(
from_devtools_receiver,
diff --git a/components/script/dom/globalscope.rs b/components/script/dom/globalscope.rs
index 433a24ae786..eec99116bcf 100644
--- a/components/script/dom/globalscope.rs
+++ b/components/script/dom/globalscope.rs
@@ -52,7 +52,9 @@ use crate::dom::workletglobalscope::WorkletGlobalScope;
use crate::microtask::{Microtask, MicrotaskQueue, UserMicrotask};
use crate::realms::{enter_realm, AlreadyInRealm, InRealm};
use crate::script_module::ModuleTree;
-use crate::script_runtime::{CommonScriptMsg, JSContext as SafeJSContext, ScriptChan, ScriptPort};
+use crate::script_runtime::{
+ CommonScriptMsg, ContextForRequestInterrupt, JSContext as SafeJSContext, ScriptChan, ScriptPort,
+};
use crate::script_thread::{MainThreadScriptChan, ScriptThread};
use crate::task::TaskCanceller;
use crate::task_source::dom_manipulation::DOMManipulationTaskSource;
@@ -130,6 +132,8 @@ pub struct AutoCloseWorker {
/// A sender of control messages,
/// currently only used to signal shutdown.
control_sender: Sender<DedicatedWorkerControlMsg>,
+ /// The context to request an interrupt on the worker thread.
+ context: ContextForRequestInterrupt,
}
impl Drop for AutoCloseWorker {
@@ -146,6 +150,8 @@ impl Drop for AutoCloseWorker {
warn!("Couldn't send an exit message to a dedicated worker.");
}
+ self.context.request_interrupt();
+
// TODO: step 2 and 3.
// Step 4 is unnecessary since we don't use actual ports for dedicated workers.
if self
@@ -2049,6 +2055,7 @@ impl GlobalScope {
closing: Arc<AtomicBool>,
join_handle: JoinHandle<()>,
control_sender: Sender<DedicatedWorkerControlMsg>,
+ context: ContextForRequestInterrupt,
) {
self.list_auto_close_worker
.borrow_mut()
@@ -2056,6 +2063,7 @@ impl GlobalScope {
closing,
join_handle: Some(join_handle),
control_sender: control_sender,
+ context,
});
}
@@ -2713,6 +2721,20 @@ impl GlobalScope {
unreachable!();
}
+ /// Returns a boolean indicating whether the event-loop
+ /// where this global is running on can continue running JS.
+ pub fn can_continue_running(&self) -> bool {
+ if self.downcast::<Window>().is_some() {
+ return ScriptThread::can_continue_running();
+ }
+ if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
+ return !worker.is_closing();
+ }
+
+ // TODO: plug worklets into this.
+ true
+ }
+
/// Returns the task canceller of this global to ensure that everything is
/// properly cancelled when the global scope is destroyed.
pub fn task_canceller(&self, name: TaskSourceName) -> TaskCanceller {
@@ -2730,11 +2752,14 @@ impl GlobalScope {
/// Perform a microtask checkpoint.
pub fn perform_a_microtask_checkpoint(&self) {
- self.microtask_queue.checkpoint(
- self.get_cx(),
- |_| Some(DomRoot::from_ref(self)),
- vec![DomRoot::from_ref(self)],
- );
+ // Only perform the checkpoint if we're not shutting down.
+ if self.can_continue_running() {
+ self.microtask_queue.checkpoint(
+ self.get_cx(),
+ |_| Some(DomRoot::from_ref(self)),
+ vec![DomRoot::from_ref(self)],
+ );
+ }
}
/// Enqueue a microtask for subsequent execution.
@@ -2761,8 +2786,9 @@ impl GlobalScope {
}
/// Process a single event as if it were the next event
- /// in the thread queue for this global scope.
- pub fn process_event(&self, msg: CommonScriptMsg) {
+ /// in the queue for the event-loop where this global scope is running on.
+ /// Returns a boolean indicating whether further events should be processed.
+ pub fn process_event(&self, msg: CommonScriptMsg) -> bool {
if self.is::<Window>() {
return ScriptThread::process_event(msg);
}
diff --git a/components/script/dom/serviceworkerglobalscope.rs b/components/script/dom/serviceworkerglobalscope.rs
index 4bea29cda50..2e742ec9360 100644
--- a/components/script/dom/serviceworkerglobalscope.rs
+++ b/components/script/dom/serviceworkerglobalscope.rs
@@ -25,7 +25,8 @@ use crate::dom::workerglobalscope::WorkerGlobalScope;
use crate::fetch::load_whole_resource;
use crate::realms::{enter_realm, AlreadyInRealm, InRealm};
use crate::script_runtime::{
- new_rt_and_cx, CommonScriptMsg, JSContext as SafeJSContext, Runtime, ScriptChan,
+ new_rt_and_cx, CommonScriptMsg, ContextForRequestInterrupt, JSContext as SafeJSContext,
+ Runtime, ScriptChan,
};
use crate::task_queue::{QueuedTask, QueuedTaskConversion, TaskQueue};
use crate::task_source::TaskSourceName;
@@ -44,6 +45,7 @@ use script_traits::{ScopeThings, ServiceWorkerMsg, WorkerGlobalScopeInit, Worker
use servo_config::pref;
use servo_rand::random;
use servo_url::ServoUrl;
+use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::thread::{self, JoinHandle};
use std::time::{Duration, Instant};
@@ -225,6 +227,7 @@ impl ServiceWorkerGlobalScope {
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl,
control_receiver: Receiver<ServiceWorkerControlMsg>,
+ closing: Arc<AtomicBool>,
) -> ServiceWorkerGlobalScope {
ServiceWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope::new_inherited(
@@ -234,7 +237,7 @@ impl ServiceWorkerGlobalScope {
worker_url,
runtime,
from_devtools_receiver,
- None,
+ closing,
Arc::new(Mutex::new(Identities::new())),
),
task_queue: TaskQueue::new(receiver, own_sender.clone()),
@@ -258,6 +261,7 @@ impl ServiceWorkerGlobalScope {
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl,
control_receiver: Receiver<ServiceWorkerControlMsg>,
+ closing: Arc<AtomicBool>,
) -> DomRoot<ServiceWorkerGlobalScope> {
let cx = runtime.cx();
let scope = Box::new(ServiceWorkerGlobalScope::new_inherited(
@@ -271,6 +275,7 @@ impl ServiceWorkerGlobalScope {
swmanager_sender,
scope_url,
control_receiver,
+ closing,
));
unsafe { ServiceWorkerGlobalScopeBinding::Wrap(SafeJSContext::from_ptr(cx), scope) }
}
@@ -285,6 +290,8 @@ impl ServiceWorkerGlobalScope {
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl,
control_receiver: Receiver<ServiceWorkerControlMsg>,
+ context_sender: Sender<ContextForRequestInterrupt>,
+ closing: Arc<AtomicBool>,
) -> JoinHandle<()> {
let ScopeThings {
script_url,
@@ -300,6 +307,8 @@ impl ServiceWorkerGlobalScope {
.spawn(move || {
thread_state::initialize(ThreadState::SCRIPT | ThreadState::IN_WORKER);
let runtime = new_rt_and_cx(None);
+ let _ = context_sender.send(ContextForRequestInterrupt::new(runtime.cx()));
+
let roots = RootCollection::new();
let _stack_roots = ThreadLocalStackRoots::new(&roots);
@@ -330,6 +339,7 @@ impl ServiceWorkerGlobalScope {
swmanager_sender,
scope_url,
control_receiver,
+ closing,
);
let referrer = referrer_url
diff --git a/components/script/dom/worker.rs b/components/script/dom/worker.rs
index ca3b811b042..563a04679ea 100644
--- a/components/script/dom/worker.rs
+++ b/components/script/dom/worker.rs
@@ -125,6 +125,7 @@ impl Worker {
let init = prepare_workerscope_init(global, Some(devtools_sender), Some(worker_id));
let (control_sender, control_receiver) = unbounded();
+ let (context_sender, context_receiver) = unbounded();
let join_handle = DedicatedWorkerGlobalScope::run_worker_scope(
init,
@@ -142,9 +143,14 @@ impl Worker {
browsing_context,
global.wgpu_id_hub(),
control_receiver,
+ context_sender,
);
- global.track_worker(closing, join_handle, control_sender);
+ let context = context_receiver
+ .recv()
+ .expect("Couldn't receive a context for worker.");
+
+ global.track_worker(closing, join_handle, control_sender, context);
Ok(worker)
}
diff --git a/components/script/dom/workerglobalscope.rs b/components/script/dom/workerglobalscope.rs
index 3ff31542b56..aabda0ab248 100644
--- a/components/script/dom/workerglobalscope.rs
+++ b/components/script/dom/workerglobalscope.rs
@@ -98,7 +98,7 @@ pub struct WorkerGlobalScope {
worker_id: WorkerId,
worker_url: DomRefCell<ServoUrl>,
#[ignore_malloc_size_of = "Arc"]
- closing: Option<Arc<AtomicBool>>,
+ closing: Arc<AtomicBool>,
#[ignore_malloc_size_of = "Defined in js"]
runtime: DomRefCell<Option<Runtime>>,
location: MutNullableDom<WorkerLocation>,
@@ -126,7 +126,7 @@ impl WorkerGlobalScope {
worker_url: ServoUrl,
runtime: Runtime,
from_devtools_receiver: Receiver<DevtoolScriptControlMsg>,
- closing: Option<Arc<AtomicBool>>,
+ closing: Arc<AtomicBool>,
gpu_id_hub: Arc<Mutex<Identities>>,
) -> Self {
// Install a pipeline-namespace in the current thread.
@@ -193,11 +193,7 @@ impl WorkerGlobalScope {
}
pub fn is_closing(&self) -> bool {
- if let Some(ref closing) = self.closing {
- closing.load(Ordering::SeqCst)
- } else {
- false
- }
+ self.closing.load(Ordering::SeqCst)
}
pub fn get_url(&self) -> Ref<ServoUrl> {
@@ -494,7 +490,13 @@ impl WorkerGlobalScope {
}
}
- pub fn process_event(&self, msg: CommonScriptMsg) {
+ /// Process a single event as if it were the next event
+ /// in the queue for this worker event-loop.
+ /// Returns a boolean indicating whether further events should be processed.
+ pub fn process_event(&self, msg: CommonScriptMsg) -> bool {
+ if self.is_closing() {
+ return false;
+ }
match msg {
CommonScriptMsg::Task(_, task, _, _) => task.run_box(),
CommonScriptMsg::CollectReports(reports_chan) => {
@@ -504,11 +506,10 @@ impl WorkerGlobalScope {
reports_chan.send(reports);
},
}
+ true
}
pub fn close(&self) {
- if let Some(ref closing) = self.closing {
- closing.store(true, Ordering::SeqCst);
- }
+ self.closing.store(true, Ordering::SeqCst);
}
}
diff --git a/components/script/dom/xmlhttprequest.rs b/components/script/dom/xmlhttprequest.rs
index 06b6699d3a9..842bf5f9441 100644
--- a/components/script/dom/xmlhttprequest.rs
+++ b/components/script/dom/xmlhttprequest.rs
@@ -1544,7 +1544,10 @@ impl XMLHttpRequest {
if let Some(script_port) = script_port {
loop {
- global.process_event(script_port.recv().unwrap());
+ if !global.process_event(script_port.recv().unwrap()) {
+ // We're exiting.
+ return Err(Error::Abort);
+ }
let context = context.lock().unwrap();
let sync_status = context.sync_status.borrow();
if let Some(ref status) = *sync_status {
diff --git a/components/script/microtask.rs b/components/script/microtask.rs
index ae4d3c6944e..b97368fcad5 100644
--- a/components/script/microtask.rs
+++ b/components/script/microtask.rs
@@ -147,4 +147,8 @@ impl MicrotaskQueue {
pub fn empty(&self) -> bool {
self.microtask_queue.borrow().is_empty()
}
+
+ pub fn clear(&self) {
+ self.microtask_queue.borrow_mut().clear();
+ }
}
diff --git a/components/script/script_runtime.rs b/components/script/script_runtime.rs
index 4016f3b9858..f2d8bb73947 100644
--- a/components/script/script_runtime.rs
+++ b/components/script/script_runtime.rs
@@ -52,7 +52,10 @@ use js::jsapi::{BuildIdCharVector, DisableIncrementalGC, GCDescription, GCProgre
use js::jsapi::{Dispatchable as JSRunnable, Dispatchable_MaybeShuttingDown};
use js::jsapi::{HandleObject, Heap, JobQueue};
use js::jsapi::{JSContext as RawJSContext, JSTracer, SetDOMCallbacks, SetGCSliceCallback};
-use js::jsapi::{JSGCInvocationKind, JSGCStatus, JS_AddExtraGCRootsTracer, JS_SetGCCallback};
+use js::jsapi::{
+ JSGCInvocationKind, JSGCStatus, JS_AddExtraGCRootsTracer, JS_RequestInterruptCallback,
+ JS_SetGCCallback,
+};
use js::jsapi::{JSGCMode, JSGCParamKey, JS_SetGCParameter, JS_SetGlobalJitCompilerOption};
use js::jsapi::{
JSJitCompilerOption, JS_SetOffthreadIonCompilationEnabled, JS_SetParallelParsingEnabled,
@@ -845,6 +848,34 @@ unsafe fn set_gc_zeal_options(cx: *mut RawJSContext) {
#[cfg(not(feature = "debugmozjs"))]
unsafe fn set_gc_zeal_options(_: *mut RawJSContext) {}
+#[repr(transparent)]
+/// A wrapper around a JSContext that is Send,
+/// enabling an interrupt to be requested
+/// from a thread other than the one running JS using that context.
+pub struct ContextForRequestInterrupt(*mut RawJSContext);
+
+impl ContextForRequestInterrupt {
+ pub fn new(context: *mut RawJSContext) -> ContextForRequestInterrupt {
+ ContextForRequestInterrupt(context)
+ }
+
+ #[allow(unsafe_code)]
+ /// Can be called from any thread, to request the callback set by
+ /// JS_AddInterruptCallback to be called
+ /// on the thread where that context is running.
+ pub fn request_interrupt(&self) {
+ unsafe {
+ JS_RequestInterruptCallback(self.0);
+ }
+ }
+}
+
+#[allow(unsafe_code)]
+/// It is safe to call `JS_RequestInterruptCallback(cx)` from any thread.
+/// See the docs for the corresponding `requestInterrupt` method,
+/// at `mozjs/js/src/vm/JSContext.h`.
+unsafe impl Send for ContextForRequestInterrupt {}
+
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct JSContext(*mut RawJSContext);
diff --git a/components/script/script_thread.rs b/components/script/script_thread.rs
index 89acc25e720..0ef48e78b17 100644
--- a/components/script/script_thread.rs
+++ b/components/script/script_thread.rs
@@ -63,7 +63,9 @@ use crate::dom::workletglobalscope::WorkletGlobalScopeInit;
use crate::fetch::FetchCanceller;
use crate::microtask::{Microtask, MicrotaskQueue};
use crate::realms::enter_realm;
-use crate::script_runtime::{get_reports, new_rt_and_cx, JSContext, Runtime, ScriptPort};
+use crate::script_runtime::{
+ get_reports, new_rt_and_cx, ContextForRequestInterrupt, JSContext, Runtime, ScriptPort,
+};
use crate::script_runtime::{CommonScriptMsg, ScriptChan, ScriptThreadEventCategory};
use crate::task_manager::TaskManager;
use crate::task_queue::{QueuedTask, QueuedTaskConversion, TaskQueue};
@@ -97,14 +99,17 @@ use ipc_channel::ipc::{self, IpcSender};
use ipc_channel::router::ROUTER;
use js::glue::GetWindowProxyClass;
use js::jsapi::JS_SetWrapObjectCallbacks;
-use js::jsapi::{JSTracer, SetWindowProxyClass};
+use js::jsapi::{
+ JSContext as UnsafeJSContext, JSTracer, JS_AddInterruptCallback, SetWindowProxyClass,
+};
use js::jsval::UndefinedValue;
use js::rust::ParentRuntime;
use media::WindowGLContext;
use metrics::{PaintTimeMetrics, MAX_TASK_NS};
use mime::{self, Mime};
use msg::constellation_msg::{
- BackgroundHangMonitor, BackgroundHangMonitorRegister, ScriptHangAnnotation,
+ BackgroundHangMonitor, BackgroundHangMonitorExitSignal, BackgroundHangMonitorRegister,
+ ScriptHangAnnotation,
};
use msg::constellation_msg::{BrowsingContextId, HistoryStateId, PipelineId};
use msg::constellation_msg::{HangAnnotation, MonitoredComponentId, MonitoredComponentType};
@@ -149,7 +154,7 @@ use std::option::Option;
use std::ptr;
use std::rc::Rc;
use std::result::Result;
-use std::sync::atomic::AtomicBool;
+use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, SystemTime};
@@ -531,9 +536,11 @@ pub struct ScriptThread {
task_queue: TaskQueue<MainThreadScriptMsg>,
/// A handle to register associated layout threads for hang-monitoring.
- background_hang_monitor_register: Option<Box<dyn BackgroundHangMonitorRegister>>,
+ background_hang_monitor_register: Box<dyn BackgroundHangMonitorRegister>,
/// The dedicated means of communication with the background-hang-monitor for this script-thread.
- background_hang_monitor: Option<Box<dyn BackgroundHangMonitor>>,
+ background_hang_monitor: Box<dyn BackgroundHangMonitor>,
+ /// A flag set to `true` by the BHM on exit, and checked from within the interrupt handler.
+ closing: Arc<AtomicBool>,
/// A channel to hand out to script thread-based entities that need to be able to enqueue
/// events in the event queue.
@@ -686,6 +693,27 @@ pub struct ScriptThread {
webgpu_port: RefCell<Option<Receiver<WebGPUMsg>>>,
}
+struct BHMExitSignal {
+ closing: Arc<AtomicBool>,
+ js_context: ContextForRequestInterrupt,
+}
+
+impl BackgroundHangMonitorExitSignal for BHMExitSignal {
+ fn signal_to_exit(&self) {
+ self.closing.store(true, Ordering::SeqCst);
+ self.js_context.request_interrupt();
+ }
+}
+
+#[allow(unsafe_code)]
+unsafe extern "C" fn interrupt_callback(_cx: *mut UnsafeJSContext) -> bool {
+ let res = ScriptThread::can_continue_running();
+ if !res {
+ ScriptThread::prepare_for_shutdown();
+ }
+ res
+}
+
/// In the event of thread panic, all data on the stack runs its destructor. However, there
/// are no reachable, owning pointers to the DOM memory, so it never gets freed by default
/// when the script thread fails. The ScriptMemoryFailsafe uses the destructor bomb pattern
@@ -818,6 +846,20 @@ impl ScriptThread {
})
}
+ pub fn can_continue_running() -> bool {
+ SCRIPT_THREAD_ROOT.with(|root| {
+ let script_thread = unsafe { &*root.get().unwrap() };
+ script_thread.can_continue_running_inner()
+ })
+ }
+
+ pub fn prepare_for_shutdown() {
+ SCRIPT_THREAD_ROOT.with(|root| {
+ let script_thread = unsafe { &*root.get().unwrap() };
+ script_thread.prepare_for_shutdown_inner();
+ })
+ }
+
pub fn set_mutation_observer_microtask_queued(value: bool) {
SCRIPT_THREAD_ROOT.with(|root| {
let script_thread = unsafe { &*root.get().unwrap() };
@@ -876,13 +918,22 @@ impl ScriptThread {
})
}
- pub fn process_event(msg: CommonScriptMsg) {
+ /// Process a single event as if it were the next event
+ /// in the queue for this window event-loop.
+ /// Returns a boolean indicating whether further events should be processed.
+ pub fn process_event(msg: CommonScriptMsg) -> bool {
SCRIPT_THREAD_ROOT.with(|root| {
if let Some(script_thread) = root.get() {
let script_thread = unsafe { &*script_thread };
- script_thread.handle_msg_from_script(MainThreadScriptMsg::Common(msg));
+ if !script_thread.can_continue_running_inner() {
+ return false;
+ } else {
+ script_thread.handle_msg_from_script(MainThreadScriptMsg::Common(msg));
+ return true;
+ }
}
- });
+ false
+ })
}
// https://html.spec.whatwg.org/multipage/#await-a-stable-state
@@ -1231,6 +1282,7 @@ impl ScriptThread {
unsafe {
JS_SetWrapObjectCallbacks(cx, &WRAP_CALLBACKS);
SetWindowProxyClass(cx, GetWindowProxyClass());
+ JS_AddInterruptCallback(cx, Some(interrupt_callback));
}
// Ask the router to proxy IPC messages from the devtools to us.
@@ -1242,13 +1294,18 @@ impl ScriptThread {
let task_queue = TaskQueue::new(port, chan.clone());
- let background_hang_monitor = state.background_hang_monitor_register.clone().map(|bhm| {
- bhm.register_component(
- MonitoredComponentId(state.id.clone(), MonitoredComponentType::Script),
- Duration::from_millis(1000),
- Duration::from_millis(5000),
- )
- });
+ let closing = Arc::new(AtomicBool::new(false));
+ let background_hang_monitor_exit_signal = BHMExitSignal {
+ closing: closing.clone(),
+ js_context: ContextForRequestInterrupt::new(cx),
+ };
+
+ let background_hang_monitor = state.background_hang_monitor_register.register_component(
+ MonitoredComponentId(state.id.clone(), MonitoredComponentType::Script),
+ Duration::from_millis(1000),
+ Duration::from_millis(5000),
+ Some(Box::new(background_hang_monitor_exit_signal)),
+ );
// Ask the router to proxy IPC messages from the control port to us.
let control_port = ROUTER.route_ipc_receiver_to_new_crossbeam_receiver(state.control_port);
@@ -1270,6 +1327,7 @@ impl ScriptThread {
background_hang_monitor_register: state.background_hang_monitor_register,
background_hang_monitor,
+ closing,
chan: MainThreadScriptChan(chan.clone()),
dom_manipulation_task_sender: boxed_script_sender.clone(),
@@ -1349,6 +1407,23 @@ impl ScriptThread {
unsafe { JSContext::from_ptr(self.js_runtime.cx()) }
}
+ /// Check if we are closing.
+ fn can_continue_running_inner(&self) -> bool {
+ if self.closing.load(Ordering::SeqCst) {
+ return false;
+ }
+ true
+ }
+
+ /// We are closing, ensure no script can run and potentially hang.
+ fn prepare_for_shutdown_inner(&self) {
+ let docs = self.documents.borrow();
+ for (_, document) in docs.iter() {
+ let window = document.window();
+ window.ignore_all_tasks();
+ }
+ }
+
/// Starts the script thread. After calling this method, the script thread will loop receiving
/// messages on its port.
pub fn start(&self) {
@@ -1386,9 +1461,7 @@ impl ScriptThread {
let mut sequential = vec![];
// Notify the background-hang-monitor we are waiting for an event.
- self.background_hang_monitor
- .as_ref()
- .map(|bhm| bhm.notify_wait());
+ self.background_hang_monitor.notify_wait();
// Receive at least one message so we don't spinloop.
debug!("Waiting for event.");
@@ -1530,6 +1603,24 @@ impl ScriptThread {
let category = self.categorize_msg(&msg);
let pipeline_id = self.message_to_pipeline(&msg);
+ if self.closing.load(Ordering::SeqCst) {
+ // If we've received the closed signal from the BHM, only handle exit messages.
+ match msg {
+ FromConstellation(ConstellationControlMsg::ExitScriptThread) => {
+ self.handle_exit_script_thread_msg();
+ return false;
+ },
+ FromConstellation(ConstellationControlMsg::ExitPipeline(
+ pipeline_id,
+ discard_browsing_context,
+ )) => {
+ self.handle_exit_pipeline_msg(pipeline_id, discard_browsing_context);
+ },
+ _ => {},
+ }
+ continue;
+ }
+
let result = self.profile_event(category, pipeline_id, move || {
match msg {
FromConstellation(ConstellationControlMsg::ExitScriptThread) => {
@@ -1546,12 +1637,12 @@ impl ScriptThread {
None
});
- // https://html.spec.whatwg.org/multipage/#event-loop-processing-model step 6
- self.perform_a_microtask_checkpoint();
-
if let Some(retval) = result {
return retval;
}
+
+ // https://html.spec.whatwg.org/multipage/#event-loop-processing-model step 6
+ self.perform_a_microtask_checkpoint();
}
{
@@ -1665,8 +1756,7 @@ impl ScriptThread {
ScriptThreadEventCategory::WebGPUMsg => ScriptHangAnnotation::WebGPUMsg,
};
self.background_hang_monitor
- .as_ref()
- .map(|bhm| bhm.notify_activity(HangAnnotation::Script(hang_annotation)));
+ .notify_activity(HangAnnotation::Script(hang_annotation));
}
fn message_to_pipeline(&self, msg: &MixedMessage) -> Option<PipelineId> {
@@ -2855,9 +2945,7 @@ impl ScriptThread {
self.handle_exit_pipeline_msg(pipeline_id, DiscardBrowsingContext::Yes);
}
- self.background_hang_monitor
- .as_ref()
- .map(|bhm| bhm.unregister());
+ self.background_hang_monitor.unregister();
// If we're in multiprocess mode, shut-down the IPC router for this process.
if opts::multiprocess() {
@@ -3867,18 +3955,21 @@ impl ScriptThread {
}
fn perform_a_microtask_checkpoint(&self) {
- let globals = self
- .documents
- .borrow()
- .iter()
- .map(|(_id, document)| document.global())
- .collect();
+ // Only perform the checkpoint if we're not shutting down.
+ if self.can_continue_running_inner() {
+ let globals = self
+ .documents
+ .borrow()
+ .iter()
+ .map(|(_id, document)| document.global())
+ .collect();
- self.microtask_queue.checkpoint(
- self.get_cx(),
- |id| self.documents.borrow().find_global(id),
- globals,
- )
+ self.microtask_queue.checkpoint(
+ self.get_cx(),
+ |id| self.documents.borrow().find_global(id),
+ globals,
+ )
+ }
}
}
diff --git a/components/script/serviceworker_manager.rs b/components/script/serviceworker_manager.rs
index 1cbca40e8cc..46cf22531c8 100644
--- a/components/script/serviceworker_manager.rs
+++ b/components/script/serviceworker_manager.rs
@@ -12,6 +12,7 @@ use crate::dom::serviceworkerglobalscope::{
ServiceWorkerControlMsg, ServiceWorkerGlobalScope, ServiceWorkerScriptMsg,
};
use crate::dom::serviceworkerregistration::longest_prefix_match;
+use crate::script_runtime::ContextForRequestInterrupt;
use crossbeam_channel::{unbounded, Receiver, RecvError, Sender};
use ipc_channel::ipc::{self, IpcSender};
use ipc_channel::router::ROUTER;
@@ -26,6 +27,8 @@ use servo_config::pref;
use servo_url::ImmutableOrigin;
use servo_url::ServoUrl;
use std::collections::HashMap;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Arc;
use std::thread::{self, JoinHandle};
enum Message {
@@ -93,6 +96,15 @@ impl Drop for ServiceWorkerRegistration {
warn!("Failed to send exit message to service worker scope.");
}
+ self.closing
+ .take()
+ .expect("No close flag for worker")
+ .store(true, Ordering::SeqCst);
+ self.context
+ .take()
+ .expect("No context to request interrupt.")
+ .request_interrupt();
+
// TODO: Step 1, 2 and 3.
if self
.join_handle
@@ -121,6 +133,10 @@ struct ServiceWorkerRegistration {
control_sender: Option<Sender<ServiceWorkerControlMsg>>,
/// A handle to join on the worker thread.
join_handle: Option<JoinHandle<()>>,
+ /// A context to request an interrupt.
+ context: Option<ContextForRequestInterrupt>,
+ /// The closing flag for the worker.
+ closing: Option<Arc<AtomicBool>>,
}
impl ServiceWorkerRegistration {
@@ -132,6 +148,8 @@ impl ServiceWorkerRegistration {
installing_worker: None,
join_handle: None,
control_sender: None,
+ context: None,
+ closing: None,
}
}
@@ -139,12 +157,20 @@ impl ServiceWorkerRegistration {
&mut self,
join_handle: JoinHandle<()>,
control_sender: Sender<ServiceWorkerControlMsg>,
+ context: ContextForRequestInterrupt,
+ closing: Arc<AtomicBool>,
) {
assert!(self.join_handle.is_none());
self.join_handle = Some(join_handle);
assert!(self.control_sender.is_none());
self.control_sender = Some(control_sender);
+
+ assert!(self.context.is_none());
+ self.context = Some(context);
+
+ assert!(self.closing.is_none());
+ self.closing = Some(closing);
}
/// <https://w3c.github.io/ServiceWorker/#get-newest-worker>
@@ -378,11 +404,11 @@ impl ServiceWorkerManager {
// Very roughly steps 5 to 18.
// TODO: implement all steps precisely.
- let (new_worker, join_handle, control_sender) =
+ let (new_worker, join_handle, control_sender, context, closing) =
update_serviceworker(self.own_sender.clone(), job.scope_url.clone(), scope_things);
// Since we've just started the worker thread, ensure we can shut it down later.
- registration.note_worker_thread(join_handle, control_sender);
+ registration.note_worker_thread(join_handle, control_sender, context, closing);
// Step 19, run Install.
@@ -422,12 +448,16 @@ fn update_serviceworker(
ServiceWorker,
JoinHandle<()>,
Sender<ServiceWorkerControlMsg>,
+ ContextForRequestInterrupt,
+ Arc<AtomicBool>,
) {
let (sender, receiver) = unbounded();
let (_devtools_sender, devtools_receiver) = ipc::channel().unwrap();
let worker_id = ServiceWorkerId::new();
let (control_sender, control_receiver) = unbounded();
+ let (context_sender, context_receiver) = unbounded();
+ let closing = Arc::new(AtomicBool::new(false));
let join_handle = ServiceWorkerGlobalScope::run_serviceworker_scope(
scope_things.clone(),
@@ -437,12 +467,20 @@ fn update_serviceworker(
own_sender,
scope_url.clone(),
control_receiver,
+ context_sender,
+ closing.clone(),
);
+ let context = context_receiver
+ .recv()
+ .expect("Couldn't receive a context for worker.");
+
(
ServiceWorker::new(scope_things.script_url, sender, worker_id),
join_handle,
control_sender,
+ context,
+ closing,
)
}
diff --git a/components/script/task.rs b/components/script/task.rs
index 27600fdf8d7..b3200e23afe 100644
--- a/components/script/task.rs
+++ b/components/script/task.rs
@@ -69,7 +69,7 @@ impl fmt::Debug for dyn TaskBox {
/// Encapsulated state required to create cancellable tasks from non-script threads.
#[derive(Clone)]
pub struct TaskCanceller {
- pub cancelled: Option<Arc<AtomicBool>>,
+ pub cancelled: Arc<AtomicBool>,
}
impl TaskCanceller {
@@ -88,7 +88,7 @@ impl TaskCanceller {
/// A task that can be cancelled by toggling a shared flag.
pub struct CancellableTask<T: TaskOnce> {
- cancelled: Option<Arc<AtomicBool>>,
+ cancelled: Arc<AtomicBool>,
inner: T,
}
@@ -97,9 +97,7 @@ where
T: TaskOnce,
{
fn is_cancelled(&self) -> bool {
- self.cancelled
- .as_ref()
- .map_or(false, |cancelled| cancelled.load(Ordering::SeqCst))
+ self.cancelled.load(Ordering::SeqCst)
}
}
diff --git a/components/script/task_manager.rs b/components/script/task_manager.rs
index 063b5700484..184b427cfcb 100644
--- a/components/script/task_manager.rs
+++ b/components/script/task_manager.rs
@@ -182,7 +182,7 @@ impl TaskManager {
let mut flags = self.task_cancellers.borrow_mut();
let cancel_flag = flags.entry(name).or_insert(Default::default());
TaskCanceller {
- cancelled: Some(cancel_flag.clone()),
+ cancelled: cancel_flag.clone(),
}
}
}
diff --git a/components/script/timers.rs b/components/script/timers.rs
index 5d8893c3055..5e236f07494 100644
--- a/components/script/timers.rs
+++ b/components/script/timers.rs
@@ -218,6 +218,13 @@ impl OneshotTimers {
}
for timer in timers_to_run {
+ // Since timers can be coalesced together inside a task,
+ // this loop can keep running, including after an interrupt of the JS,
+ // and prevent a clean-shutdown of a JS-running thread.
+ // This check prevents such a situation.
+ if !global.can_continue_running() {
+ return;
+ }
let callback = timer.callback;
callback.invoke(global, &self.js_timers);
}