author     Zhen Zhang <izgzhen@gmail.com>   2016-07-11 08:33:55 +0800
committer  Zhen Zhang <izgzhen@gmail.com>   2016-07-11 10:51:55 +0800
commit     0ff6f313e881279c75daa0d6c974e726ef2759f3 (patch)
tree       3bc9d920e4c3bfdf8e9acf5c03395d346ffe9d41
parent     c6827a9e66585c6d8bd9b02b77cab66b860d0af2 (diff)
Add FileID validity setting/checking logic to Blob URL implementation
-rw-r--r--  components/net/filemanager_thread.rs         104
-rw-r--r--  components/net_traits/filemanager_thread.rs   18
-rw-r--r--  components/script/dom/blob.rs                 62
-rw-r--r--  components/script/dom/url.rs                   9
4 files changed, 149 insertions, 44 deletions
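
The core of the patch is an extra atomic flag on every file-manager entry recording whether its UUID may currently be dereferenced as a Blob URL. A rough sketch of that idea, using the field names from the hunks below (an illustration, not the exact Servo code):

    use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

    struct FileStoreEntry {
        origin: String,           // owning origin, checked on every access
        refs: AtomicUsize,        // reference count
        is_valid_url: AtomicBool, // may this UUID back a Blob URL right now?
    }

    impl FileStoreEntry {
        // Content is handed out when the caller skips the URL check
        // (internal reads) or when the flag is still set (Blob URL loads).
        fn accessible(&self, check_url_validity: bool) -> bool {
            !check_url_validity || self.is_valid_url.load(Ordering::Acquire)
        }
    }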
diff --git a/components/net/filemanager_thread.rs b/components/net/filemanager_thread.rs
index 627dcced0e9..4be5b6a010d 100644
--- a/components/net/filemanager_thread.rs
+++ b/components/net/filemanager_thread.rs
@@ -16,7 +16,7 @@ use std::fs::File;
use std::io::Read;
use std::ops::Index;
use std::path::{Path, PathBuf};
-use std::sync::atomic::{self, AtomicUsize, Ordering};
+use std::sync::atomic::{self, AtomicUsize, AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
#[cfg(any(target_os = "macos", target_os = "linux"))]
use tinyfiledialogs;
@@ -99,6 +99,8 @@ struct FileStoreEntry {
file_impl: FileImpl,
/// Reference counting
refs: AtomicUsize,
+ /// UUID key's validity as Blob URL
+ is_valid_url: AtomicBool
}
/// File backend implementation
@@ -147,14 +149,14 @@ impl<UI: 'static + UIProvider> FileManager<UI> {
}
})
}
- FileManagerThreadMsg::TransferMemory(entry, sender, origin) => {
+ FileManagerThreadMsg::PromoteMemory(entry, sender, origin) => {
spawn_named("transfer memory".to_owned(), move || {
- store.transfer_memory(entry, sender, origin);
+ store.promote_memory(entry, sender, origin);
})
}
- FileManagerThreadMsg::AddSlicedEntry(id, rel_pos, sender, origin) =>{
- spawn_named("add sliced entry".to_owned(), move || {
- store.add_sliced_entry(id, rel_pos, sender, origin);
+ FileManagerThreadMsg::AddSlicedURLEntry(id, rel_pos, sender, origin) =>{
+ spawn_named("add sliced URL entry".to_owned(), move || {
+ store.add_sliced_url_entry(id, rel_pos, sender, origin);
})
}
FileManagerThreadMsg::LoadBlob(load_data, consumer) => {
@@ -165,15 +167,30 @@ impl<UI: 'static + UIProvider> FileManager<UI> {
send_error(load_data.url.clone(), format_err, consumer);
}
Some((id, _fragment)) => {
- self.process_request(load_data, consumer, RelativePos::full_range(), id);
+ // check_url_validity is true since content is requested by this URL
+ self.process_request(load_data, consumer, RelativePos::full_range(), id, true);
}
}
},
+ FileManagerThreadMsg::RevokeBlobURL(id, origin, sender) => {
+ if let Ok(id) = Uuid::parse_str(&id.0) {
+ spawn_named("revoke blob url".to_owned(), move || {
+ // Since it is revocation, unset_url_validity is true
+ let _ = sender.send(store.dec_ref(&id, &origin, true));
+ })
+ } else {
+ let _ = sender.send(Err(BlobURLStoreError::InvalidFileID));
+ }
+ }
FileManagerThreadMsg::DecRef(id, origin, sender) => {
if let Ok(id) = Uuid::parse_str(&id.0) {
spawn_named("dec ref".to_owned(), move || {
- let _ = sender.send(store.dec_ref(&id, &origin));
+ // Since it is simple DecRef (possibly caused by close/drop),
+ // unset_url_validity is false
+ let _ = sender.send(store.dec_ref(&id, &origin, false));
})
+ } else {
+ let _ = sender.send(Err(BlobURLStoreError::InvalidFileID));
}
}
FileManagerThreadMsg::IncRef(id, origin) => {
@@ -183,15 +200,24 @@ impl<UI: 'static + UIProvider> FileManager<UI> {
})
}
}
+ FileManagerThreadMsg::ActivateBlobURL(id, sender, origin) => {
+ if let Ok(id) = Uuid::parse_str(&id.0) {
+ spawn_named("activate blob url".to_owned(), move || {
+ let _ = sender.send(store.activate_blob_url(&id, &origin));
+ });
+ } else {
+ let _ = sender.send(Err(BlobURLStoreError::InvalidFileID));
+ }
+ }
FileManagerThreadMsg::Exit => break,
};
}
}
fn process_request(&self, load_data: LoadData, consumer: LoadConsumer,
- rel_pos: RelativePos, id: Uuid) {
+ rel_pos: RelativePos, id: Uuid, check_url_validity: bool) {
let origin_in = load_data.url.origin().unicode_serialization();
- match self.store.get_impl(&id, &origin_in) {
+ match self.store.get_impl(&id, &origin_in, check_url_validity) {
Ok(file_impl) => {
match file_impl {
FileImpl::Memory(buffered) => {
@@ -224,7 +250,9 @@ impl<UI: 'static + UIProvider> FileManager<UI> {
opt_filename, rel_pos, entry));
},
FileImpl::Sliced(id, rel_pos) => {
- self.process_request(load_data, consumer, rel_pos, id);
+ // Next time we don't need to check validity since
+ // we have already done that for requesting URL
+ self.process_request(load_data, consumer, rel_pos, id, false);
}
}
}
@@ -249,13 +277,18 @@ impl <UI: 'static + UIProvider> FileManagerStore<UI> {
}
/// Copy out the file backend implementation content
- fn get_impl(&self, id: &Uuid, origin_in: &FileOrigin) -> Result<FileImpl, BlobURLStoreError> {
+ fn get_impl(&self, id: &Uuid, origin_in: &FileOrigin,
+ check_url_validity: bool) -> Result<FileImpl, BlobURLStoreError> {
match self.entries.read().unwrap().get(id) {
- Some(ref e) => {
- if *origin_in != *e.origin {
+ Some(ref entry) => {
+ if *origin_in != *entry.origin {
Err(BlobURLStoreError::InvalidOrigin)
} else {
- Ok(e.file_impl.clone())
+ if check_url_validity && !entry.is_valid_url.load(Ordering::Acquire) {
+ Err(BlobURLStoreError::InvalidFileID)
+ } else {
+ Ok(entry.file_impl.clone())
+ }
}
}
None => Err(BlobURLStoreError::InvalidFileID),
@@ -284,9 +317,9 @@ impl <UI: 'static + UIProvider> FileManagerStore<UI> {
}
}
- fn add_sliced_entry(&self, parent_id: SelectedFileId, rel_pos: RelativePos,
- sender: IpcSender<Result<SelectedFileId, BlobURLStoreError>>,
- origin_in: FileOrigin) {
+ fn add_sliced_url_entry(&self, parent_id: SelectedFileId, rel_pos: RelativePos,
+ sender: IpcSender<Result<SelectedFileId, BlobURLStoreError>>,
+ origin_in: FileOrigin) {
if let Ok(parent_id) = Uuid::parse_str(&parent_id.0) {
match self.inc_ref(&parent_id, &origin_in) {
Ok(_) => {
@@ -295,6 +328,8 @@ impl <UI: 'static + UIProvider> FileManagerStore<UI> {
origin: origin_in,
file_impl: FileImpl::Sliced(parent_id, rel_pos),
refs: AtomicUsize::new(1),
+ // Valid here since AddSlicedURLEntry implies URL creation
+ is_valid_url: AtomicBool::new(true),
});
let _ = sender.send(Ok(SelectedFileId(new_id.simple().to_string())));
@@ -384,6 +419,8 @@ impl <UI: 'static + UIProvider> FileManagerStore<UI> {
origin: origin.to_string(),
file_impl: file_impl,
refs: AtomicUsize::new(1),
+ // Invalid here since create_entry is called by file selection
+ is_valid_url: AtomicBool::new(false),
});
// Unix Epoch: https://doc.servo.org/std/time/constant.UNIX_EPOCH.html
@@ -422,7 +459,7 @@ impl <UI: 'static + UIProvider> FileManagerStore<UI> {
fn try_read_file(&self, id: SelectedFileId, origin_in: FileOrigin) -> Result<Vec<u8>, BlobURLStoreError> {
let id = try!(Uuid::parse_str(&id.0).map_err(|_| BlobURLStoreError::InvalidFileID));
- match self.get_impl(&id, &origin_in) {
+ match self.get_impl(&id, &origin_in, false) {
Ok(file_impl) => {
match file_impl {
FileImpl::PathOnly(filepath) => {
@@ -446,13 +483,18 @@ impl <UI: 'static + UIProvider> FileManagerStore<UI> {
}
}
- fn dec_ref(&self, id: &Uuid, origin_in: &FileOrigin) -> Result<(), BlobURLStoreError> {
+ fn dec_ref(&self, id: &Uuid, origin_in: &FileOrigin,
+ unset_url_validity: bool) -> Result<(), BlobURLStoreError> {
let (is_last_ref, opt_parent_id) = match self.entries.read().unwrap().get(id) {
Some(entry) => {
if *entry.origin == *origin_in {
let old_refs = entry.refs.fetch_sub(1, Ordering::Release);
if old_refs > 1 {
+ if unset_url_validity {
+ entry.is_valid_url.store(false, Ordering::Release);
+ }
+
(false, None)
} else {
if let FileImpl::Sliced(ref parent_id, _) = entry.file_impl {
@@ -474,14 +516,16 @@ impl <UI: 'static + UIProvider> FileManagerStore<UI> {
self.remove(id);
if let Some(parent_id) = opt_parent_id {
- return self.dec_ref(&parent_id, origin_in);
+ // unset_url_validity for parent is false since we only need
+ // to unset the initial requesting URL
+ return self.dec_ref(&parent_id, origin_in, false);
}
}
Ok(())
}
- fn transfer_memory(&self, entry: BlobURLStoreEntry,
+ fn promote_memory(&self, entry: BlobURLStoreEntry,
sender: IpcSender<Result<SelectedFileId, BlobURLStoreError>>, origin: FileOrigin) {
match Url::parse(&origin) { // parse to check sanity
Ok(_) => {
@@ -490,6 +534,8 @@ impl <UI: 'static + UIProvider> FileManagerStore<UI> {
origin: origin.clone(),
file_impl: FileImpl::Memory(entry),
refs: AtomicUsize::new(1),
+ // Valid here since PromoteMemory implies URL creation
+ is_valid_url: AtomicBool::new(true),
});
let _ = sender.send(Ok(SelectedFileId(id.simple().to_string())));
@@ -499,6 +545,20 @@ impl <UI: 'static + UIProvider> FileManagerStore<UI> {
}
}
}
+
+ fn activate_blob_url(&self, id: &Uuid, origin_in: &FileOrigin) -> Result<(), BlobURLStoreError> {
+ match self.entries.read().unwrap().get(id) {
+ Some(entry) => {
+ if *entry.origin == *origin_in {
+ entry.is_valid_url.store(true, Ordering::Release);
+ Ok(())
+ } else {
+ Err(BlobURLStoreError::InvalidOrigin)
+ }
+ }
+ None => Err(BlobURLStoreError::InvalidFileID)
+ }
+ }
}
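
RevokeBlobURL and plain DecRef now funnel into the same dec_ref, distinguished only by the unset_url_validity flag. A condensed model of that contract (illustrative only, reusing the refs and is_valid_url fields sketched above):

    use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

    // Returns true when the last reference is gone and the caller should
    // remove the entry from the store.
    fn dec_ref_model(refs: &AtomicUsize, is_valid_url: &AtomicBool,
                     unset_url_validity: bool) -> bool {
        let old_refs = refs.fetch_sub(1, Ordering::Release);
        if old_refs > 1 {
            // Revocation clears the URL flag; close/drop leaves it untouched,
            // so other live references keep working until an explicit revoke.
            if unset_url_validity {
                is_valid_url.store(false, Ordering::Release);
            }
            false
        } else {
            true
        }
    }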
diff --git a/components/net_traits/filemanager_thread.rs b/components/net_traits/filemanager_thread.rs
index 88f1b147302..6bbf8b59c9f 100644
--- a/components/net_traits/filemanager_thread.rs
+++ b/components/net_traits/filemanager_thread.rs
@@ -129,18 +129,26 @@ pub enum FileManagerThreadMsg {
/// Load resource by Blob URL
LoadBlob(LoadData, LoadConsumer),
- /// Add an entry and send back the associated uuid
- TransferMemory(BlobURLStoreEntry, IpcSender<Result<SelectedFileId, BlobURLStoreError>>, FileOrigin),
+ /// Add an entry as promoted memory-based blob and send back the associated FileID
+ /// as part of a valid Blob URL
+ PromoteMemory(BlobURLStoreEntry, IpcSender<Result<SelectedFileId, BlobURLStoreError>>, FileOrigin),
- /// Add a sliced entry pointing to the parent id with a relative slicing positing
- AddSlicedEntry(SelectedFileId, RelativePos, IpcSender<Result<SelectedFileId, BlobURLStoreError>>, FileOrigin),
+ /// Add a sliced entry pointing to the parent FileID, and send back the associated FileID
+ /// as part of a valid Blob URL
+ AddSlicedURLEntry(SelectedFileId, RelativePos, IpcSender<Result<SelectedFileId, BlobURLStoreError>>, FileOrigin),
- /// Decrease reference count
+ /// Revoke Blob URL and send back the acknowledgement
+ RevokeBlobURL(SelectedFileId, FileOrigin, IpcSender<Result<(), BlobURLStoreError>>),
+
+ /// Decrease reference count and send back the acknowledgement
DecRef(SelectedFileId, FileOrigin, IpcSender<Result<(), BlobURLStoreError>>),
/// Increase reference count
IncRef(SelectedFileId, FileOrigin),
+ /// Activate an internal FileID so it becomes valid as part of a Blob URL
+ ActivateBlobURL(SelectedFileId, IpcSender<Result<(), BlobURLStoreError>>, FileOrigin),
+
/// Shut down this thread
Exit,
}
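
From the script side, each of the new messages is a simple IPC round trip. The ActivateBlobURL case, mirroring the blob.rs hunk below (a sketch; `filemanager`, `id`, and `origin` are assumed to be in scope):

    let (tx, rx) = ipc::channel().unwrap();
    let _ = filemanager.send(FileManagerThreadMsg::ActivateBlobURL(id.clone(), tx, origin.clone()));
    match rx.recv().unwrap() {
        Ok(()) => { /* the FileID is now valid as part of a Blob URL */ }
        Err(_) => { /* unknown FileID or mismatched origin */ }
    }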
diff --git a/components/script/dom/blob.rs b/components/script/dom/blob.rs
index 8e9e96322d9..1b114e90652 100644
--- a/components/script/dom/blob.rs
+++ b/components/script/dom/blob.rs
@@ -151,9 +151,23 @@ impl Blob {
}
}
- pub fn get_id(&self) -> SelectedFileId {
+ /// Get a FileID representing the Blob content,
+ /// used by URL.createObjectURL
+ pub fn get_blob_url_id(&self) -> SelectedFileId {
match *self.blob_impl.borrow() {
- BlobImpl::File(ref id, _) => id.clone(),
+ BlobImpl::File(ref id, _) => {
+ let global = self.global();
+ let origin = global.r().get_url().origin().unicode_serialization();
+ let filemanager = global.r().resource_threads().sender();
+ let (tx, rx) = ipc::channel().unwrap();
+
+ let _ = filemanager.send(FileManagerThreadMsg::ActivateBlobURL(id.clone(), tx, origin.clone()));
+
+ match rx.recv().unwrap() {
+ Ok(_) => id.clone(),
+ Err(_) => SelectedFileId("".to_string()) // Return a dummy id on error
+ }
+ }
BlobImpl::Memory(ref slice) => self.promote_to_file(slice),
BlobImpl::Sliced(ref parent, ref rel_pos) => {
match *parent.blob_impl.borrow() {
@@ -163,11 +177,11 @@ impl Blob {
SelectedFileId("".to_string())
}
BlobImpl::File(ref parent_id, _) =>
- self.create_sliced_id(parent_id, rel_pos),
+ self.create_sliced_url_id(parent_id, rel_pos),
BlobImpl::Memory(ref bytes) => {
let parent_id = parent.promote_to_file(bytes);
*self.blob_impl.borrow_mut() = BlobImpl::Sliced(parent.clone(), rel_pos.clone());
- self.create_sliced_id(&parent_id, rel_pos)
+ self.create_sliced_url_id(&parent_id, rel_pos)
}
}
}
@@ -188,7 +202,7 @@ impl Blob {
};
let (tx, rx) = ipc::channel().unwrap();
- let _ = filemanager.send(FileManagerThreadMsg::TransferMemory(entry, tx, origin.clone()));
+ let _ = filemanager.send(FileManagerThreadMsg::PromoteMemory(entry, tx, origin.clone()));
match rx.recv().unwrap() {
Ok(new_id) => SelectedFileId(new_id.0),
@@ -197,23 +211,47 @@ impl Blob {
}
}
- fn create_sliced_id(&self, parent_id: &SelectedFileId,
- rel_pos: &RelativePos) -> SelectedFileId {
+ /// Get a FileID representing sliced parent-blob content
+ fn create_sliced_url_id(&self, parent_id: &SelectedFileId,
+ rel_pos: &RelativePos) -> SelectedFileId {
let global = self.global();
let origin = global.r().get_url().origin().unicode_serialization();
let filemanager = global.r().resource_threads().sender();
let (tx, rx) = ipc::channel().unwrap();
- let msg = FileManagerThreadMsg::AddSlicedEntry(parent_id.clone(),
- rel_pos.clone(),
- tx, origin.clone());
+ let msg = FileManagerThreadMsg::AddSlicedURLEntry(parent_id.clone(),
+ rel_pos.clone(),
+ tx, origin.clone());
let _ = filemanager.send(msg);
let new_id = rx.recv().unwrap().unwrap();
// Return the indirect id reference
SelectedFileId(new_id.0)
}
+
+ /// Cleanups at the time of destruction/closing
+ fn clean_up_file_resource(&self) {
+ if let BlobImpl::File(ref id, _) = *self.blob_impl.borrow() {
+ let global = self.global();
+ let origin = global.r().get_url().origin().unicode_serialization();
+
+ let filemanager = global.r().resource_threads().sender();
+ let (tx, rx) = ipc::channel().unwrap();
+
+ let msg = FileManagerThreadMsg::DecRef(id.clone(), origin, tx);
+ let _ = filemanager.send(msg);
+ let _ = rx.recv().unwrap();
+ }
+ }
+}
+
+impl Drop for Blob {
+ fn drop(&mut self) {
+ if !self.IsClosed() {
+ self.clean_up_file_resource();
+ }
+ }
}
fn read_file(global: GlobalRef, id: SelectedFileId) -> Result<Vec<u8>, ()> {
@@ -307,8 +345,8 @@ impl BlobMethods for Blob {
// Step 2
self.isClosed_.set(true);
- // TODO Step 3 if Blob URL Store is implemented
-
+ // Step 3
+ self.clean_up_file_resource();
}
}
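
Taken together with the net-side changes, the validity flag follows a simple lifecycle (a summary of the hunks above, not code in the tree):

    // create_entry (file selection)          -> is_valid_url = false
    // promote_memory / add_sliced_url_entry  -> is_valid_url = true (URL creation)
    // ActivateBlobURL (createObjectURL on an
    //   existing file-backed Blob)           -> is_valid_url = true
    // RevokeBlobURL (revokeObjectURL)        -> is_valid_url = false
    // DecRef (Blob close()/Drop)             -> flag untouched, refcount drops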
diff --git a/components/script/dom/url.rs b/components/script/dom/url.rs
index c2d7516dd6f..961d3c2fd29 100644
--- a/components/script/dom/url.rs
+++ b/components/script/dom/url.rs
@@ -16,7 +16,7 @@ use dom::urlsearchparams::URLSearchParams;
use ipc_channel::ipc;
use net_traits::IpcSend;
use net_traits::blob_url_store::parse_blob_url;
-use net_traits::filemanager_thread::{SelectedFileId, FileManagerThreadMsg};
+use net_traits::filemanager_thread::{FileOrigin, SelectedFileId, FileManagerThreadMsg};
use std::borrow::ToOwned;
use std::default::Default;
use url::quirks::domain_to_unicode;
@@ -125,7 +125,7 @@ impl URL {
return DOMString::from(URL::unicode_serialization_blob_url(&origin, &id));
}
- let id = blob.get_id();
+ let id = blob.get_blob_url_id();
DOMString::from(URL::unicode_serialization_blob_url(&origin, &id.0))
}
@@ -148,7 +148,7 @@ impl URL {
let filemanager = global.resource_threads().sender();
let id = SelectedFileId(id.simple().to_string());
let (tx, rx) = ipc::channel().unwrap();
- let msg = FileManagerThreadMsg::DecRef(id, origin, tx);
+ let msg = FileManagerThreadMsg::RevokeBlobURL(id, origin, tx);
let _ = filemanager.send(msg);
let _ = rx.recv().unwrap();
@@ -173,12 +173,11 @@ impl URL {
result
}
- // XXX: change String to FileOrigin
/* NOTE(izgzhen): WebKit will return things like blob:file:///XXX
while Chrome will return blob:null/XXX
This is not well-specified, and I prefer the WebKit way here
*/
- fn get_blob_origin(url: &Url) -> String {
+ fn get_blob_origin(url: &Url) -> FileOrigin {
if url.scheme() == "file" {
"file://".to_string()
} else {