path: root/components/webgpu
author     Samson <16504129+sagudev@users.noreply.github.com>  2024-05-22 18:47:35 +0200
committer  GitHub <noreply@github.com>  2024-05-22 16:47:35 +0000
commit     794110ebe58ad72d809291e9feb3f2cc92819941 (patch)
tree       9c01451fa022f433fa8a305d58c2a79da747e838 /components/webgpu
parent     9f32809671c8c8e79d59c95194dcc466452299fc (diff)
webgpu: Move errorscopes to WGPU thread (#32304)
* Prepare errorscopes logic in wgpu_thread
* Remove scope_id from IPC
* New GPUErrors per spec
* Remove content timeline error_scope
* Fix up popErrorScope types
* device_scope -> gpu_error and nice errors
* Handle error detection more elegantly
* Good expectations
* New expectations
* Make error_scope.errors a Vec, as per spec
Diffstat (limited to 'components/webgpu')
-rw-r--r--  components/webgpu/dom_messages.rs      15
-rw-r--r--  components/webgpu/gpu_error.rs        100
-rw-r--r--  components/webgpu/lib.rs                9
-rw-r--r--  components/webgpu/script_messages.rs   16
-rw-r--r--  components/webgpu/wgpu_thread.rs      275
5 files changed, 297 insertions, 118 deletions
diff --git a/components/webgpu/dom_messages.rs b/components/webgpu/dom_messages.rs
index 1bf37233ad6..f9f0fc0ffe5 100644
--- a/components/webgpu/dom_messages.rs
+++ b/components/webgpu/dom_messages.rs
@@ -29,7 +29,7 @@ use wgc::resource::{
pub use {wgpu_core as wgc, wgpu_types as wgt};
use crate::identity::*;
-use crate::{WebGPU, PRESENTATION_BUFFER_COUNT};
+use crate::{Error, ErrorFilter, PopError, WebGPU, PRESENTATION_BUFFER_COUNT};
#[derive(Debug, Deserialize, Serialize)]
#[allow(clippy::large_enum_variant)]
@@ -48,6 +48,7 @@ pub enum WebGPUResponse {
},
BufferMapAsync(IpcSharedMemory),
SubmittedWorkDone,
+ PoppedErrorScope(Result<Option<Error>, PopError>),
}
pub type WebGPUResponseResult = Result<WebGPUResponse, String>;
@@ -251,4 +252,16 @@ pub enum WebGPURequest {
sender: IpcSender<Option<WebGPUResponseResult>>,
queue_id: id::QueueId,
},
+ PushErrorScope {
+ device_id: id::DeviceId,
+ filter: ErrorFilter,
+ },
+ DispatchError {
+ device_id: id::DeviceId,
+ error: Error,
+ },
+ PopErrorScope {
+ device_id: id::DeviceId,
+ sender: IpcSender<Option<WebGPUResponseResult>>,
+ },
}
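The three new request variants let the DOM side push a scope, inject an error, or pop a scope without carrying a scope_id over IPC. Below is a minimal caller-side sketch (not part of this patch; the helper name, error strings, and control flow are illustrative, and it assumes the crate's WebGPU, WebGPURequest, WebGPUResponse, Error, and ErrorFilter types are in scope):

use ipc_channel::ipc;

// Hypothetical helper: open a validation scope, later pop it and inspect the reply.
fn pop_validation_scope(
    webgpu: &WebGPU,
    device_id: wgc::id::DeviceId,
) -> Result<Option<Error>, &'static str> {
    // Open a scope that captures validation errors only.
    webgpu
        .0
        .send(WebGPURequest::PushErrorScope {
            device_id,
            filter: ErrorFilter::Validation,
        })
        .map_err(|_| "Failed to send PushErrorScope")?;

    // ... record work against the device here ...

    // Pop the scope; the WGPU thread replies with PoppedErrorScope.
    let (sender, receiver) = ipc::channel().map_err(|_| "Failed to create IPC channel")?;
    webgpu
        .0
        .send(WebGPURequest::PopErrorScope { device_id, sender })
        .map_err(|_| "Failed to send PopErrorScope")?;

    match receiver.recv() {
        Ok(Some(Ok(WebGPUResponse::PoppedErrorScope(Ok(error))))) => Ok(error),
        Ok(Some(Ok(WebGPUResponse::PoppedErrorScope(Err(_pop_error))))) => {
            Err("Error scope stack empty or device lost")
        },
        _ => Err("Unexpected response"),
    }
}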
diff --git a/components/webgpu/gpu_error.rs b/components/webgpu/gpu_error.rs
new file mode 100644
index 00000000000..173598e1c8e
--- /dev/null
+++ b/components/webgpu/gpu_error.rs
@@ -0,0 +1,100 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+//! Error scopes and GPUError types
+
+use std::fmt;
+
+use serde::{Deserialize, Serialize};
+
+use crate::wgc;
+
+/// <https://www.w3.org/TR/webgpu/#gpu-error-scope>
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+pub(crate) struct ErrorScope {
+ pub errors: Vec<Error>,
+ pub filter: ErrorFilter,
+}
+
+impl ErrorScope {
+ pub fn new(filter: ErrorFilter) -> Self {
+ Self {
+ filter,
+ errors: Vec::new(),
+ }
+ }
+}
+
+/// <https://www.w3.org/TR/webgpu/#enumdef-gpuerrorfilter>
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
+pub enum ErrorFilter {
+ Validation,
+ OutOfMemory,
+ Internal,
+}
+
+/// <https://www.w3.org/TR/webgpu/#gpuerror>
+#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
+pub enum Error {
+ Validation(String),
+ OutOfMemory(String),
+ Internal(String),
+}
+
+impl std::error::Error for Error {
+ fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+ None
+ }
+}
+
+impl fmt::Display for Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(self.message())
+ }
+}
+
+impl Error {
+ pub fn filter(&self) -> ErrorFilter {
+ match self {
+ Error::Validation(_) => ErrorFilter::Validation,
+ Error::OutOfMemory(_) => ErrorFilter::OutOfMemory,
+ Error::Internal(_) => ErrorFilter::Internal,
+ }
+ }
+
+ pub fn message(&self) -> &str {
+ match self {
+ Error::Validation(m) => m,
+ Error::OutOfMemory(m) => m,
+ Error::Internal(m) => m,
+ }
+ }
+
+ // TODO: labels
+ // based on https://github.com/gfx-rs/wgpu/blob/trunk/wgpu/src/backend/wgpu_core.rs#L289
+ pub fn from_error<E: std::error::Error + 'static>(error: E) -> Self {
+ let mut source_opt: Option<&(dyn std::error::Error + 'static)> = Some(&error);
+ while let Some(source) = source_opt {
+ if let Some(wgc::device::DeviceError::OutOfMemory) =
+ source.downcast_ref::<wgc::device::DeviceError>()
+ {
+ return Self::OutOfMemory(error.to_string());
+ }
+ source_opt = source.source();
+ }
+ // TODO: This hack is needed because there are
+        // multiple OutOfMemory error variants in wgpu-core
+ // and even upstream does not handle them correctly
+ if format!("{error:?}").contains("OutOfMemory") {
+ return Self::OutOfMemory(error.to_string());
+ }
+ Self::Validation(error.to_string())
+ }
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
+pub enum PopError {
+ Lost,
+ Empty,
+}
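A small usage sketch (not in the patch; `classify` is illustrative) showing how the helpers on the new Error type are meant to be read: filter() maps an error back to its GPUErrorFilter category, and message() exposes the text later surfaced as GPUError.message:

// Illustrative only; assumes the Error and ErrorFilter types above are in scope.
fn classify(error: &Error) -> String {
    match error.filter() {
        ErrorFilter::Validation => format!("validation error: {}", error.message()),
        ErrorFilter::OutOfMemory => format!("out-of-memory error: {}", error.message()),
        ErrorFilter::Internal => format!("internal error: {}", error.message()),
    }
}

fn classify_example() {
    let error = Error::Validation(String::from("invalid bind group"));
    assert_eq!(classify(&error), "validation error: invalid bind group");
    // Display delegates to message(), so the error also formats cleanly in logs.
    assert_eq!(error.to_string(), "invalid bind group");
}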
diff --git a/components/webgpu/lib.rs b/components/webgpu/lib.rs
index 2006e9a477b..d3fc3068b97 100644
--- a/components/webgpu/lib.rs
+++ b/components/webgpu/lib.rs
@@ -13,11 +13,11 @@ mod wgpu_thread;
use std::borrow::Cow;
use std::collections::HashMap;
-use std::num::NonZeroU64;
use std::sync::{Arc, Mutex};
use arrayvec::ArrayVec;
use euclid::default::Size2D;
+pub use gpu_error::{Error, ErrorFilter, PopError};
use ipc_channel::ipc::{self, IpcReceiver, IpcSender};
use serde::{Deserialize, Serialize};
use servo_config::pref;
@@ -28,16 +28,15 @@ use webrender_traits::{
use wgc::id;
mod dom_messages;
+mod gpu_error;
mod script_messages;
pub use dom_messages::*;
pub use identity::*;
pub use script_messages::*;
-
-pub type ErrorScopeId = NonZeroU64;
pub use wgpu_thread::PRESENTATION_BUFFER_COUNT;
#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct WebGPU(pub IpcSender<(Option<ErrorScopeId>, WebGPURequest)>);
+pub struct WebGPU(pub IpcSender<WebGPURequest>);
impl WebGPU {
pub fn new(
@@ -95,7 +94,7 @@ impl WebGPU {
pub fn exit(&self, sender: IpcSender<()>) -> Result<(), &'static str> {
self.0
- .send((None, WebGPURequest::Exit(sender)))
+ .send(WebGPURequest::Exit(sender))
.map_err(|_| "Failed to send Exit message")
}
}
diff --git a/components/webgpu/script_messages.rs b/components/webgpu/script_messages.rs
index 74a827d6dd1..7142641a466 100644
--- a/components/webgpu/script_messages.rs
+++ b/components/webgpu/script_messages.rs
@@ -7,20 +7,13 @@
use base::id::PipelineId;
use serde::{Deserialize, Serialize};
+use crate::gpu_error::Error;
use crate::identity::WebGPUDevice;
use crate::wgc::id::{
AdapterId, BindGroupId, BindGroupLayoutId, BufferId, CommandBufferId, ComputePipelineId,
DeviceId, PipelineLayoutId, QuerySetId, RenderBundleId, RenderPipelineId, SamplerId,
ShaderModuleId, StagingBufferId, SurfaceId, TextureId, TextureViewId,
};
-use crate::ErrorScopeId;
-
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub enum WebGPUOpResult {
- ValidationError(String),
- OutOfMemoryError,
- Success,
-}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum WebGPUMsg {
@@ -41,15 +34,14 @@ pub enum WebGPUMsg {
FreeRenderBundle(RenderBundleId),
FreeStagingBuffer(StagingBufferId),
FreeQuerySet(QuerySetId),
- WebGPUOpResult {
+ CleanDevice {
device: WebGPUDevice,
- scope_id: Option<ErrorScopeId>,
pipeline_id: PipelineId,
- result: WebGPUOpResult,
},
- CleanDevice {
+ UncapturedError {
device: WebGPUDevice,
pipeline_id: PipelineId,
+ error: Error,
},
Exit,
}
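With WebGPUOpResult gone, the script thread now receives a structured Error only for uncaptured cases. A hedged sketch (the handler name and logging are illustrative, not from this patch; only the WebGPUMsg variants and their fields come from the code above) of how a receiver might route the new message:

// Illustrative handler for the renamed script messages.
fn handle_webgpu_msg(msg: WebGPUMsg) {
    match msg {
        WebGPUMsg::UncapturedError {
            device,
            pipeline_id,
            error,
        } => {
            // The structured error keeps its filter and message, so the DOM side
            // can fire a GPUUncapturedErrorEvent without re-parsing strings.
            log::warn!(
                "Uncaptured {:?} error on {:?} (pipeline {:?}): {}",
                error.filter(),
                device,
                pipeline_id,
                error.message()
            );
        },
        WebGPUMsg::CleanDevice { device, .. } => {
            log::debug!("Cleaning up {:?}", device);
        },
        _ => {},
    }
}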
diff --git a/components/webgpu/wgpu_thread.rs b/components/webgpu/wgpu_thread.rs
index 9faec2eb5a4..51778693d1d 100644
--- a/components/webgpu/wgpu_thread.rs
+++ b/components/webgpu/wgpu_thread.rs
@@ -20,28 +20,53 @@ use webrender_traits::{WebrenderExternalImageRegistry, WebrenderImageHandlerType
use wgc::command::{ImageCopyBuffer, ImageCopyTexture};
use wgc::device::queue::SubmittedWorkDoneClosure;
use wgc::device::{DeviceDescriptor, HostMap, ImplicitPipelineIds};
+use wgc::id::DeviceId;
use wgc::pipeline::ShaderModuleDescriptor;
use wgc::resource::{BufferMapCallback, BufferMapOperation};
use wgc::{gfx_select, id};
use wgt::InstanceDescriptor;
pub use {wgpu_core as wgc, wgpu_types as wgt};
+use crate::gpu_error::ErrorScope;
use crate::poll_thread::Poller;
use crate::{
- ErrorScopeId, PresentationData, Transmute, WebGPU, WebGPUAdapter, WebGPUDevice, WebGPUMsg,
- WebGPUOpResult, WebGPUQueue, WebGPURequest, WebGPUResponse,
+ Error, PopError, PresentationData, Transmute, WebGPU, WebGPUAdapter, WebGPUDevice, WebGPUMsg,
+ WebGPUQueue, WebGPURequest, WebGPUResponse,
};
pub const PRESENTATION_BUFFER_COUNT: usize = 10;
+#[derive(Eq, Hash, PartialEq)]
+pub(crate) struct DeviceScope {
+ pub device_id: DeviceId,
+ pub pipeline_id: PipelineId,
+ /// <https://www.w3.org/TR/webgpu/#dom-gpudevice-errorscopestack-slot>
+ pub error_scope_stack: Vec<ErrorScope>,
+ // TODO:
+ // Queue for this device (to remove transmutes)
+ // queue_id: QueueId,
+ // Poller for this device
+ // poller: Poller,
+}
+
+impl DeviceScope {
+ pub fn new(device_id: DeviceId, pipeline_id: PipelineId) -> Self {
+ Self {
+ device_id,
+ pipeline_id,
+ error_scope_stack: Vec::new(),
+ }
+ }
+}
+
#[allow(clippy::upper_case_acronyms)] // Name of the library
pub(crate) struct WGPU {
- receiver: IpcReceiver<(Option<ErrorScopeId>, WebGPURequest)>,
- sender: IpcSender<(Option<ErrorScopeId>, WebGPURequest)>,
+ receiver: IpcReceiver<WebGPURequest>,
+ sender: IpcSender<WebGPURequest>,
script_sender: IpcSender<WebGPUMsg>,
global: Arc<wgc::global::Global>,
adapters: Vec<WebGPUAdapter>,
- devices: HashMap<WebGPUDevice, PipelineId>,
+ devices: HashMap<DeviceId, DeviceScope>,
// Track invalid adapters https://gpuweb.github.io/gpuweb/#invalid
_invalid_adapters: Vec<WebGPUAdapter>,
//TODO: Remove this (https://github.com/gfx-rs/wgpu/issues/867)
@@ -55,8 +80,8 @@ pub(crate) struct WGPU {
impl WGPU {
pub(crate) fn new(
- receiver: IpcReceiver<(Option<ErrorScopeId>, WebGPURequest)>,
- sender: IpcSender<(Option<ErrorScopeId>, WebGPURequest)>,
+ receiver: IpcReceiver<WebGPURequest>,
+ sender: IpcSender<WebGPURequest>,
script_sender: IpcSender<WebGPUMsg>,
webrender_api_sender: RenderApiSender,
webrender_document: DocumentId,
@@ -89,7 +114,7 @@ impl WGPU {
pub(crate) fn run(&mut self) {
loop {
- if let Ok((scope_id, msg)) = self.receiver.recv() {
+ if let Ok(msg) = self.receiver.recv() {
match msg {
WebGPURequest::BufferMapAsync {
sender,
@@ -151,7 +176,7 @@ impl WGPU {
warn!("Failed to send BufferMapAsync Response ({:?})", w);
}
}
- self.send_result(device_id, scope_id, result);
+ self.maybe_dispatch_wgpu_error(device_id, result.err());
},
WebGPURequest::CommandEncoderFinish {
command_encoder_id,
@@ -160,24 +185,28 @@ impl WGPU {
} => {
let global = &self.global;
let result = if is_error {
- Err(String::from("Invalid GPUCommandEncoder"))
+ Err(Error::Internal(String::from("Invalid GPUCommandEncoder")))
} else if let Some(err) = self
.error_command_encoders
.borrow()
.get(&command_encoder_id)
{
- Err(err.clone())
+ Err(Error::Internal(err.clone()))
} else {
- tuple_to_result(
+ if let Some(error) =
gfx_select!(command_encoder_id => global.command_encoder_finish(
command_encoder_id,
&wgt::CommandBufferDescriptor::default()
- )),
- )
- .map_err(|e| format!("{:?}", e))
+ ))
+ .1
+ {
+ Err(Error::from_error(error))
+ } else {
+ Ok(())
+ }
};
self.encoder_record_error(command_encoder_id, &result);
- self.send_result(device_id, scope_id, result);
+ self.maybe_dispatch_error(device_id, result.err());
},
WebGPURequest::CopyBufferToBuffer {
command_encoder_id,
@@ -249,9 +278,9 @@ impl WGPU {
descriptor,
} => {
let global = &self.global;
- let result = tuple_to_result(gfx_select!(bind_group_id =>
- global.device_create_bind_group(device_id, &descriptor, Some(bind_group_id))));
- self.send_result(device_id, scope_id, result);
+ let (_, error) = gfx_select!(bind_group_id =>
+ global.device_create_bind_group(device_id, &descriptor, Some(bind_group_id)));
+ self.maybe_dispatch_wgpu_error(device_id, error);
},
WebGPURequest::CreateBindGroupLayout {
device_id,
@@ -260,10 +289,10 @@ impl WGPU {
} => {
let global = &self.global;
if let Some(desc) = descriptor {
- let result = tuple_to_result(gfx_select!(bind_group_layout_id =>
- global.device_create_bind_group_layout(device_id, &desc, Some(bind_group_layout_id))));
+ let (_, error) = gfx_select!(bind_group_layout_id =>
+ global.device_create_bind_group_layout(device_id, &desc, Some(bind_group_layout_id)));
- self.send_result(device_id, scope_id, result);
+ self.maybe_dispatch_wgpu_error(device_id, error);
}
},
WebGPURequest::CreateBuffer {
@@ -273,10 +302,10 @@ impl WGPU {
} => {
let global = &self.global;
if let Some(desc) = descriptor {
- let result = tuple_to_result(gfx_select!(buffer_id =>
- global.device_create_buffer(device_id, &desc, Some(buffer_id))));
+ let (_, error) = gfx_select!(buffer_id =>
+ global.device_create_buffer(device_id, &desc, Some(buffer_id)));
- self.send_result(device_id, scope_id, result);
+ self.maybe_dispatch_wgpu_error(device_id, error);
}
},
WebGPURequest::CreateCommandEncoder {
@@ -286,10 +315,10 @@ impl WGPU {
} => {
let global = &self.global;
let desc = wgt::CommandEncoderDescriptor { label };
- let result = tuple_to_result(gfx_select!(command_encoder_id =>
- global.device_create_command_encoder(device_id, &desc, Some(command_encoder_id))));
+ let (_, error) = gfx_select!(command_encoder_id =>
+ global.device_create_command_encoder(device_id, &desc, Some(command_encoder_id)));
- self.send_result(device_id, scope_id, result);
+ self.maybe_dispatch_wgpu_error(device_id, error);
},
WebGPURequest::CreateComputePipeline {
device_id,
@@ -310,15 +339,14 @@ impl WGPU {
root_id: Some(*layout),
group_ids: bgls.as_slice(),
});
- let result = tuple_to_result(
- gfx_select!(compute_pipeline_id => global.device_create_compute_pipeline(
+ let (_, error) = gfx_select!(compute_pipeline_id => global.device_create_compute_pipeline(
device_id,
&descriptor,
Some(compute_pipeline_id),
implicit
- )),
+ )
);
- self.send_result(device_id, scope_id, result);
+ self.maybe_dispatch_wgpu_error(device_id, error);
},
WebGPURequest::CreateContext(sender) => {
let id = self
@@ -336,9 +364,9 @@ impl WGPU {
descriptor,
} => {
let global = &self.global;
- let result = tuple_to_result(gfx_select!(pipeline_layout_id =>
- global.device_create_pipeline_layout(device_id, &descriptor, Some(pipeline_layout_id))));
- self.send_result(device_id, scope_id, result);
+ let (_, error) = gfx_select!(pipeline_layout_id =>
+ global.device_create_pipeline_layout(device_id, &descriptor, Some(pipeline_layout_id)));
+ self.maybe_dispatch_wgpu_error(device_id, error);
},
WebGPURequest::CreateRenderPipeline {
device_id,
@@ -360,14 +388,14 @@ impl WGPU {
group_ids: bgls.as_slice(),
});
if let Some(desc) = descriptor {
- let result = tuple_to_result(gfx_select!(render_pipeline_id =>
+ let (_, error) = gfx_select!(render_pipeline_id =>
global.device_create_render_pipeline(
device_id,
&desc,
Some(render_pipeline_id),
implicit)
- ));
- self.send_result(device_id, scope_id, result);
+ );
+ self.maybe_dispatch_wgpu_error(device_id, error);
}
},
WebGPURequest::CreateSampler {
@@ -376,14 +404,12 @@ impl WGPU {
descriptor,
} => {
let global = &self.global;
- let result = tuple_to_result(
- gfx_select!(sampler_id => global.device_create_sampler(
- device_id,
- &descriptor,
- Some(sampler_id)
- )),
- );
- self.send_result(device_id, scope_id, result);
+ let (_, error) = gfx_select!(sampler_id => global.device_create_sampler(
+ device_id,
+ &descriptor,
+ Some(sampler_id)
+ ));
+ self.maybe_dispatch_wgpu_error(device_id, error);
},
WebGPURequest::CreateShaderModule {
device_id,
@@ -399,9 +425,9 @@ impl WGPU {
label: label.map(|s| s.into()),
shader_bound_checks: wgt::ShaderBoundChecks::default(),
};
- let result = tuple_to_result(gfx_select!(program_id =>
- global.device_create_shader_module(device_id, &desc, source, Some(program_id))));
- self.send_result(device_id, scope_id, result);
+ let (_, error) = gfx_select!(program_id =>
+ global.device_create_shader_module(device_id, &desc, source, Some(program_id)));
+ self.maybe_dispatch_wgpu_error(device_id, error);
},
WebGPURequest::CreateSwapChain {
device_id,
@@ -454,14 +480,13 @@ impl WGPU {
} => {
let global = &self.global;
if let Some(desc) = descriptor {
- let result = tuple_to_result(
- gfx_select!(texture_id => global.device_create_texture(
+ let (_, error) = gfx_select!(texture_id => global.device_create_texture(
device_id,
&desc,
Some(texture_id)
- )),
+ )
);
- self.send_result(device_id, scope_id, result);
+ self.maybe_dispatch_wgpu_error(device_id, error);
}
},
WebGPURequest::CreateTextureView {
@@ -472,15 +497,14 @@ impl WGPU {
} => {
let global = &self.global;
if let Some(desc) = descriptor {
- let result = tuple_to_result(
- gfx_select!(texture_view_id => global.texture_create_view(
+ let (_, error) = gfx_select!(texture_view_id => global.texture_create_view(
texture_id,
&desc,
Some(texture_view_id)
- )),
+ )
);
- self.send_result(device_id, scope_id, result);
+ self.maybe_dispatch_wgpu_error(device_id, error);
}
},
WebGPURequest::DestroyBuffer(buffer) => {
@@ -526,7 +550,7 @@ impl WGPU {
} => {
let global = &self.global;
let result = gfx_select!(texture_id => global.texture_destroy(texture_id));
- self.send_result(device_id, scope_id, result);
+ self.maybe_dispatch_wgpu_error(device_id, result.err());
},
WebGPURequest::Exit(sender) => {
if let Err(e) = sender.send(()) {
@@ -546,7 +570,7 @@ impl WGPU {
},
WebGPURequest::DropDevice(device_id) => {
let device = WebGPUDevice(device_id);
- let pipeline_id = self.devices.remove(&device).unwrap();
+ let pipeline_id = self.devices.remove(&device_id).unwrap().pipeline_id;
if let Err(e) = self.script_sender.send(WebGPUMsg::CleanDevice {
device,
pipeline_id,
@@ -566,15 +590,14 @@ impl WGPU {
device_id,
} => {
let global = &self.global;
- let result = tuple_to_result(
- gfx_select!(render_bundle_id => global.render_bundle_encoder_finish(
+ let (_, error) = gfx_select!(render_bundle_id => global.render_bundle_encoder_finish(
render_bundle_encoder,
&descriptor,
Some(render_bundle_id)
- )),
+ )
);
- self.send_result(device_id, scope_id, result);
+ self.maybe_dispatch_wgpu_error(device_id, error);
},
WebGPURequest::RequestAdapter {
sender,
@@ -652,7 +675,8 @@ impl WGPU {
};
let device = WebGPUDevice(device_id);
let queue = WebGPUQueue(queue_id);
- self.devices.insert(device, pipeline_id);
+ self.devices
+ .insert(device_id, DeviceScope::new(device_id, pipeline_id));
if let Err(e) = sender.send(Some(Ok(WebGPUResponse::RequestDevice {
device_id: device,
queue_id: queue,
@@ -705,12 +729,14 @@ impl WGPU {
.contains_key(&id.into_command_encoder_id())
});
let result = if cmd_id.is_some() {
- Err(String::from("Invalid command buffer submitted"))
+ Err(Error::Internal(String::from(
+ "Invalid command buffer submitted",
+ )))
} else {
gfx_select!(queue_id => global.queue_submit(queue_id, &command_buffers))
- .map_err(|e| format!("{:?}", e))
+ .map_err(|e| Error::from_error(e))
};
- self.send_result(queue_id.transmute(), scope_id, result);
+ self.maybe_dispatch_error(queue_id.transmute(), result.err());
},
WebGPURequest::SwapChainPresent {
external_id,
@@ -888,7 +914,7 @@ impl WGPU {
.copy_from_slice(&array_buffer);
}
let result = gfx_select!(buffer_id => global.buffer_unmap(buffer_id));
- self.send_result(device_id, scope_id, result);
+ self.maybe_dispatch_wgpu_error(device_id, result.err());
},
WebGPURequest::WriteBuffer {
queue_id,
@@ -904,7 +930,7 @@ impl WGPU {
buffer_offset as wgt::BufferAddress,
&data
));
- self.send_result(queue_id.transmute(), scope_id, result);
+ self.maybe_dispatch_wgpu_error(queue_id.transmute(), result.err());
},
WebGPURequest::WriteTexture {
queue_id,
@@ -922,7 +948,7 @@ impl WGPU {
&data_layout,
&size
));
- self.send_result(queue_id.transmute(), scope_id, result);
+ self.maybe_dispatch_wgpu_error(queue_id.transmute(), result.err());
},
WebGPURequest::QueueOnSubmittedWorkDone { sender, queue_id } => {
let global = &self.global;
@@ -936,7 +962,7 @@ impl WGPU {
}));
let result = gfx_select!(queue_id => global.queue_on_submitted_work_done(queue_id, callback));
self.poller.wake();
- self.send_result(queue_id.transmute(), scope_id, result);
+ self.maybe_dispatch_wgpu_error(queue_id.transmute(), result.err());
},
WebGPURequest::DropTexture(id) => {
let global = &self.global;
@@ -1031,6 +1057,48 @@ impl WGPU {
warn!("Unable to send FreeQuerySet({:?}) ({:?})", id, e);
};
},
+ WebGPURequest::PushErrorScope { device_id, filter } => {
+ // <https://www.w3.org/TR/webgpu/#dom-gpudevice-pusherrorscope>
+ let device_scope = self
+ .devices
+ .get_mut(&device_id)
+ .expect("Using invalid device");
+ device_scope.error_scope_stack.push(ErrorScope::new(filter));
+ },
+ WebGPURequest::DispatchError { device_id, error } => {
+ self.dispatch_error(device_id, error);
+ },
+ WebGPURequest::PopErrorScope { device_id, sender } => {
+ // <https://www.w3.org/TR/webgpu/#dom-gpudevice-poperrorscope>
+ if let Some(device_scope) = self.devices.get_mut(&device_id) {
+ if let Some(error_scope) = device_scope.error_scope_stack.pop() {
+ if let Err(e) =
+ sender.send(Some(Ok(WebGPUResponse::PoppedErrorScope(Ok(
+ // TODO: Do actual selection instead of selecting first error
+ error_scope.errors.first().cloned(),
+ )))))
+ {
+ warn!(
+ "Unable to send {:?} to poperrorscope: {e:?}",
+ error_scope.errors
+ );
+ }
+ } else {
+ if let Err(e) = sender.send(Some(Ok(
+ WebGPUResponse::PoppedErrorScope(Err(PopError::Empty)),
+ ))) {
+ warn!("Unable to send PopError::Empty: {e:?}");
+ }
+ }
+ } else {
+ // device lost
+ if let Err(e) = sender.send(Some(Ok(WebGPUResponse::PoppedErrorScope(
+ Err(PopError::Lost),
+ )))) {
+ warn!("Unable to send PopError::Lost due {e:?}");
+ }
+ }
+ },
}
}
}
@@ -1039,32 +1107,47 @@ impl WGPU {
}
}
- fn send_result<U, T: std::fmt::Debug>(
- &self,
+ fn maybe_dispatch_wgpu_error<E: std::error::Error + 'static>(
+ &mut self,
device_id: id::DeviceId,
- scope_id: Option<ErrorScopeId>,
- result: Result<U, T>,
+ error: Option<E>,
) {
- let &pipeline_id = self.devices.get(&WebGPUDevice(device_id)).unwrap();
- if let Err(w) = self.script_sender.send(WebGPUMsg::WebGPUOpResult {
- device: WebGPUDevice(device_id),
- scope_id,
- pipeline_id,
- result: if let Err(e) = result {
- let err = format!("{:?}", e);
- if err.contains("OutOfMemory") {
- WebGPUOpResult::OutOfMemoryError
- } else {
- WebGPUOpResult::ValidationError(err)
- }
- } else {
- WebGPUOpResult::Success
- },
- }) {
- warn!("Failed to send WebGPUOpResult ({})", w);
+ self.maybe_dispatch_error(device_id, error.map(|e| Error::from_error(e)))
+ }
+
+ /// Dispatches error (if there is any)
+ fn maybe_dispatch_error(&mut self, device_id: id::DeviceId, error: Option<Error>) {
+ if let Some(error) = error {
+ self.dispatch_error(device_id, error);
}
}
+ /// <https://www.w3.org/TR/webgpu/#abstract-opdef-dispatch-error>
+ fn dispatch_error(&mut self, device_id: id::DeviceId, error: Error) {
+ if let Some(device_scope) = self.devices.get_mut(&device_id) {
+ if let Some(error_scope) = device_scope
+ .error_scope_stack
+ .iter_mut()
+ .rev()
+ .find(|error_scope| error_scope.filter == error.filter())
+ {
+ error_scope.errors.push(error);
+ } else {
+ if self
+ .script_sender
+ .send(WebGPUMsg::UncapturedError {
+ device: WebGPUDevice(device_id),
+ pipeline_id: device_scope.pipeline_id,
+ error: error.clone(),
+ })
+ .is_err()
+ {
+ warn!("Failed to send WebGPUMsg::UncapturedError: {error:?}");
+ }
+ }
+ } // else device is lost
+ }
+
fn encoder_record_error<U, T: std::fmt::Debug>(
&self,
encoder_id: id::CommandEncoderId,
@@ -1078,11 +1161,3 @@ impl WGPU {
}
}
}
-
-fn tuple_to_result<T, E>(res: (T, Option<E>)) -> Result<T, E> {
- if let Some(err) = res.1 {
- Err(err)
- } else {
- Ok(res.0)
- }
-}
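The spec's dispatch-error algorithm now lives in dispatch_error above: walk the device's error_scope_stack from the innermost scope outwards and stop at the first scope whose filter matches the error. A standalone sketch of that stack search (illustrative only; it assumes the ErrorScope, Error, and ErrorFilter types from gpu_error.rs and mirrors the iter_mut().rev().find(..) in dispatch_error):

// Find the innermost scope that can capture the given error, if any.
fn find_capturing_scope<'a>(
    stack: &'a mut [ErrorScope],
    error: &Error,
) -> Option<&'a mut ErrorScope> {
    stack
        .iter_mut()
        .rev()
        .find(|scope| scope.filter == error.filter())
}

fn stack_example() {
    let mut stack = vec![
        ErrorScope::new(ErrorFilter::OutOfMemory),
        ErrorScope::new(ErrorFilter::Validation),
    ];
    let error = Error::Validation(String::from("bad descriptor"));
    if let Some(scope) = find_capturing_scope(&mut stack, &error) {
        scope.errors.push(error);
    } else {
        // No matching scope on the stack: the real thread reports this
        // to script as WebGPUMsg::UncapturedError instead.
    }
    // The innermost Validation scope captured the error.
    assert_eq!(stack[1].errors.len(), 1);
}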