Mirror of https://github.com/spacedriveapp/spacedrive.git (synced 2025-12-11 20:15:30 +01:00)
A bunch of minor fixes
This commit is contained in:
parent d91bb9aa1d
commit c1944005b7
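Most of the hunks below apply the same two rewrites: a `match` on a `bool` becomes a plain `if`/`else`, and a `match` on an `Option` with a `None => default` arm becomes `if let ... else`. A minimal standalone sketch of the before/after shape (the `is_dir` flag and `cas_id` value are invented for illustration):

fn sketch(is_dir: bool, cas_id: Option<&str>) {
	// Before: a bool drives a two-armed match.
	let _label = match is_dir {
		true => "directory",
		false => "file",
	};
	// After: plain if/else says the same thing more directly.
	let _label = if is_dir { "directory" } else { "file" };

	// Before: match on an Option with a `None => default` arm.
	let _has_thumbnail = match cas_id {
		None => false,
		Some(id) => !id.is_empty(),
	};
	// After: if let, with the default in the else arm.
	let _has_thumbnail = if let Some(id) = cas_id {
		!id.is_empty()
	} else {
		false
	};
}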
@@ -136,12 +136,13 @@ pub(crate) fn mount() -> rspc::RouterBuilder<
 			let mut items = Vec::with_capacity(file_paths.len());
 
 			for file_path in file_paths {
-				let has_thumbnail = match &file_path.cas_id {
-					None => false,
-					Some(cas_id) => library
+				let has_thumbnail = if let Some(cas_id) = &file_path.cas_id {
+					library
 						.thumbnail_exists(cas_id)
 						.await
-						.map_err(LocationError::IOError)?,
+						.map_err(LocationError::IOError)?
+				} else {
+					false
 				};
 
 				items.push(ExplorerItem::Path {
@@ -55,7 +55,9 @@ pub(crate) fn mount() -> RouterBuilder {
 				let oldest_path = &object.file_paths[0];
 				object.name = Some(oldest_path.name.clone());
 				object.extension = oldest_path.extension.clone();
-				// a long term fix for this would be to have the indexer give the Object a name and extension, sacrificing its own and only store newly found Path names that differ from the Object name
+				// a long term fix for this would be to have the indexer give the Object
+				// a name and extension, sacrificing its own and only store newly found Path
+				// names that differ from the Object name
 
 				let cas_id = object
 					.file_paths
@@ -63,9 +65,16 @@ pub(crate) fn mount() -> RouterBuilder {
 					.map(|fp| fp.cas_id.as_ref())
 					.find_map(|c| c);
 
-				let has_thumbnail = match cas_id {
-					None => false,
-					Some(cas_id) => library.thumbnail_exists(cas_id).await.unwrap(),
+				let has_thumbnail = if let Some(cas_id) = cas_id {
+					library.thumbnail_exists(cas_id).await.map_err(|e| {
+						rspc::Error::with_cause(
+							ErrorCode::InternalServerError,
+							"Failed to check that thumbnail exists".to_string(),
+							e,
+						)
+					})?
+				} else {
+					false
 				};
 
 				items.push(ExplorerItem::Object {
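Besides the `if let` rewrite, this hunk swaps an `unwrap()` for an error mapped into an `rspc::Error` via `map_err` and `?`. A hedged sketch of the same propagation pattern using only std types (`ApiError` and `thumbnail_exists` below are stand-ins, not the rspc or Spacedrive API):

use std::fmt;

#[derive(Debug)]
struct ApiError {
	message: String,
	cause: String,
}

impl fmt::Display for ApiError {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		write!(f, "{}: {}", self.message, self.cause)
	}
}

// Stand-in for `library.thumbnail_exists(cas_id)`.
fn thumbnail_exists(cas_id: &str) -> Result<bool, std::io::Error> {
	Ok(!cas_id.is_empty())
}

fn has_thumbnail(cas_id: Option<&str>) -> Result<bool, ApiError> {
	// `?` propagates the mapped error instead of panicking via unwrap().
	Ok(if let Some(cas_id) = cas_id {
		thumbnail_exists(cas_id).map_err(|e| ApiError {
			message: "Failed to check that thumbnail exists".to_string(),
			cause: e.to_string(),
		})?
	} else {
		false
	})
}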
@@ -1,4 +1,3 @@
-use crate::prisma::file_path;
 use crate::{
 	invalidate_query,
 	library::LibraryContext,
@@ -15,7 +14,7 @@ use crate::{
 		},
 		validation::hash::file_checksum,
 	},
-	prisma::object,
+	prisma::{file_path, object},
 };
 
 use std::{
@@ -188,38 +187,35 @@ async fn inner_create_file(
 
 	let size_str = fs_metadata.len().to_string();
 
-	let object = match existing_object {
-		Some(object) => {
-			db.object()
-				.update(
-					object::id::equals(object.id),
-					vec![
-						object::size_in_bytes::set(size_str),
-						object::date_indexed::set(
-							Utc::now().with_timezone(&FixedOffset::east_opt(0).unwrap()),
-						),
-					],
-				)
-				.select(object_id::select())
-				.exec()
-				.await?
-		}
-		None => {
-			db.object()
-				.create(
-					Uuid::new_v4().as_bytes().to_vec(),
-					vec![
-						object::date_created::set(
-							DateTime::<Local>::from(fs_metadata.created().unwrap()).into(),
-						),
-						object::kind::set(kind.int_value()),
-						object::size_in_bytes::set(size_str.clone()),
-					],
-				)
-				.select(object_id::select())
-				.exec()
-				.await?
-		}
+	let object = if let Some(object) = existing_object {
+		db.object()
+			.update(
+				object::id::equals(object.id),
+				vec![
+					object::size_in_bytes::set(size_str),
+					object::date_indexed::set(
+						Utc::now().with_timezone(&FixedOffset::east_opt(0).unwrap()),
+					),
+				],
+			)
+			.select(object_id::select())
+			.exec()
+			.await?
+	} else {
+		db.object()
+			.create(
+				Uuid::new_v4().as_bytes().to_vec(),
+				vec![
+					object::date_created::set(
+						DateTime::<Local>::from(fs_metadata.created().unwrap()).into(),
+					),
+					object::kind::set(kind.int_value()),
+					object::size_in_bytes::set(size_str.clone()),
+				],
+			)
+			.select(object_id::select())
+			.exec()
+			.await?
 	};
 
 	db.file_path()
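In the update arm, `date_indexed` is set to `Utc::now()` converted to a zero offset; `FixedOffset::east_opt(0)` returns an `Option`, which is why the `unwrap()` appears. A small sketch of that conversion with the chrono 0.4 API used here:

use chrono::{DateTime, FixedOffset, Utc};

fn now_fixed() -> DateTime<FixedOffset> {
	// east_opt only returns None for offsets outside ±24h, so 0 always unwraps.
	Utc::now().with_timezone(&FixedOffset::east_opt(0).unwrap())
}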
@@ -311,7 +307,7 @@ async fn inner_update_file(
 
 	let FileMetadata {
 		cas_id,
-		kind,
+		kind: _,
 		fs_metadata,
 	} = FileMetadata::new(location_local_path, &file_path.materialized_path).await?;
 
@@ -33,13 +33,14 @@ pub enum FileCopierJobStep {
 
 impl From<FsInfo> for FileCopierJobStep {
	fn from(value: FsInfo) -> Self {
-		match value.path_data.is_dir {
-			true => Self::Directory {
+		if value.path_data.is_dir {
+			Self::Directory {
 				path: value.fs_path,
-			},
-			false => Self::File {
+			}
+		} else {
+			Self::File {
 				path: value.fs_path,
-			},
+			}
 		}
 	}
 }
@@ -79,15 +80,14 @@ impl StatefulJob for FileCopierJob {
 		let target_file_name = state.init.target_file_name_suffix.as_ref().map_or_else(
 			|| Ok::<_, JobError>(file_name.clone()),
 			|suffix| {
-				Ok(match source_fs_info.path_data.is_dir {
-					true => format!("{file_name}{suffix}"),
-					false => {
-						osstr_to_string(source_fs_info.fs_path.file_stem())?
-							+ suffix + &source_fs_info.fs_path.extension().map_or_else(
-								|| Ok(String::new()),
-								|ext| ext.to_str().map(|e| format!(".{e}")).ok_or(JobError::OsStr),
-							)?
-					}
+				Ok(if source_fs_info.path_data.is_dir {
+					format!("{file_name}{suffix}")
+				} else {
+					osstr_to_string(source_fs_info.fs_path.file_stem())?
+						+ suffix + &source_fs_info.fs_path.extension().map_or_else(
+							|| Ok(String::new()),
+							|ext| ext.to_str().map(|e| format!(".{e}")).ok_or(JobError::OsStr),
+						)?
 				})
 			},
 		)?;
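This hunk rebuilds the copy target's name by splicing the suffix between the file stem and the extension, falling back to the bare name for directories. A self-contained sketch of the same splice using `std::path` (`suffixed_name` and the `-copy` suffix are illustrative, not the job's real helper):

use std::path::Path;

// e.g. "photo.jpg" + "-copy" -> "photo-copy.jpg"; directories just get the suffix appended.
fn suffixed_name(path: &Path, is_dir: bool, suffix: &str) -> Option<String> {
	let file_name = path.file_name()?.to_str()?;
	if is_dir {
		Some(format!("{file_name}{suffix}"))
	} else {
		let stem = path.file_stem()?.to_str()?;
		// An absent extension contributes an empty string, not a failure.
		let ext = path
			.extension()
			.map(|e| e.to_str().map(|e| format!(".{e}")))
			.unwrap_or(Some(String::new()))?;
		Some(format!("{stem}{suffix}{ext}"))
	}
}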
@@ -1,8 +1,11 @@
-use super::{context_menu_fs_info, FsInfo};
 use crate::job::{JobError, JobReportUpdate, JobResult, JobState, StatefulJob, WorkerContext};
 
-use std::hash::Hash;
+use std::{collections::VecDeque, hash::Hash};
 
 use serde::{Deserialize, Serialize};
 use specta::Type;
+
+use super::{context_menu_fs_info, FsInfo};
 
 pub struct FileDeleterJob {}
@@ -15,18 +18,13 @@ pub struct FileDeleterJobInit {
 	pub path_id: i32,
 }
 
-#[derive(Serialize, Deserialize, Debug)]
-pub struct FileDeleterJobStep {
-	pub fs_info: FsInfo,
-}
-
 pub const DELETE_JOB_NAME: &str = "file_deleter";
 
 #[async_trait::async_trait]
 impl StatefulJob for FileDeleterJob {
-	type Data = FileDeleterJobState;
 	type Init = FileDeleterJobInit;
-	type Step = FileDeleterJobStep;
+	type Data = FileDeleterJobState;
+	type Step = FsInfo;
 
 	fn name(&self) -> &'static str {
 		DELETE_JOB_NAME
@@ -40,8 +38,7 @@ impl StatefulJob for FileDeleterJob {
 		)
 		.await?;
 
-		state.steps = VecDeque::new();
-		state.steps.push_back(FileDeleterJobStep { fs_info });
+		state.steps = [fs_info].into_iter().collect();
 
 		ctx.progress(vec![JobReportUpdate::TaskCount(state.steps.len())]);
 
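The two-statement `VecDeque::new()` plus `push_back` is collapsed into a single `collect()`; `collect` can build any `FromIterator` target, `VecDeque` included. A tiny sketch:

use std::collections::VecDeque;

fn single_step_queue() {
	// One expression allocates and fills the queue.
	let steps: VecDeque<i32> = [42].into_iter().collect();
	assert_eq!(steps.len(), 1);
}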
@@ -53,15 +50,15 @@ impl StatefulJob for FileDeleterJob {
 		ctx: WorkerContext,
 		state: &mut JobState<Self>,
 	) -> Result<(), JobError> {
-		let step = &state.steps[0];
-		let info = &step.fs_info;
+		let info = &state.steps[0];
 
 		// need to handle stuff such as querying prisma for all paths of a file, and deleting all of those if requested (with a checkbox in the ui)
 		// maybe a files.countOccurances/and or files.getPath(location_id, path_id) to show how many of these files would be deleted (and where?)
 
-		match info.path_data.is_dir {
-			false => tokio::fs::remove_file(info.fs_path.clone()).await,
-			true => tokio::fs::remove_dir_all(info.fs_path.clone()).await,
+		if info.path_data.is_dir {
+			tokio::fs::remove_dir_all(info.fs_path.clone()).await
+		} else {
+			tokio::fs::remove_file(info.fs_path.clone()).await
 		}?;
 
 		ctx.progress(vec![JobReportUpdate::CompletedTaskCount(
@@ -1,5 +1,7 @@
-use super::{context_menu_fs_info, FsInfo};
 use crate::{job::*, library::LibraryContext};
 
+use std::path::PathBuf;
+
 use chrono::FixedOffset;
 use sd_crypto::{
 	crypto::stream::{Algorithm, StreamEncryption},
@@ -11,10 +13,11 @@ use sd_crypto::{
 };
 use serde::{Deserialize, Serialize};
 use specta::Type;
-use std::path::PathBuf;
 use tokio::{fs::File, io::AsyncReadExt};
 use tracing::warn;
 
+use super::{context_menu_fs_info, FsInfo};
+
 pub struct FileEncryptorJob;
 
 #[derive(Serialize, Deserialize, Debug)]
@@ -82,142 +85,139 @@ impl StatefulJob for FileEncryptorJob {
 
 		let LibraryContext { key_manager, .. } = &ctx.library_ctx;
 
-		match info.path_data.is_dir {
-			false => {
-				// handle overwriting checks, and making sure there's enough available space
+		if !info.path_data.is_dir {
+			// handle overwriting checks, and making sure there's enough available space
 
 			let user_key = key_manager.access_keymount(state.init.key_uuid)?.hashed_key;
 
 			let user_key_details = key_manager.access_keystore(state.init.key_uuid)?;
 
 			let output_path = state.init.output_path.clone().map_or_else(
 				|| {
 					let mut path = info.fs_path.clone();
 					let extension = path.extension().map_or_else(
 						|| Ok("sdenc".to_string()),
 						|extension| {
 							Ok::<String, JobError>(
 								extension
 									.to_str()
 									.ok_or(JobError::MissingData {
 										value: String::from(
 											"path contents when converted to string",
 										),
 									})?
 									.to_string() + ".sdenc",
 							)
 						},
 					)?;
 
 					path.set_extension(extension);
 					Ok::<PathBuf, JobError>(path)
 				},
 				Ok,
 			)?;
 
 			let _guard = ctx
 				.library_ctx
 				.location_manager()
 				.temporary_ignore_events_for_path(
 					state.init.location_id,
 					ctx.library_ctx.clone(),
 					&output_path,
 				)
 				.await?;
 
-			let mut reader = File::open(&info.fs_path).await?;
-			let mut writer = File::create(output_path).await?;
-
 			let master_key = generate_master_key();
 
 			let mut header = FileHeader::new(
 				LATEST_FILE_HEADER,
 				state.init.algorithm,
 				vec![
 					Keyslot::new(
 						LATEST_KEYSLOT,
 						state.init.algorithm,
 						user_key_details.hashing_algorithm,
 						user_key_details.content_salt,
 						user_key,
 						master_key.clone(),
 					)
-					.await?;
+					.await?,
 				],
 			);
 
+			let mut reader = File::open(&info.fs_path).await?;
+			let mut writer = File::create(output_path).await?;
+
 			if state.init.metadata || state.init.preview_media {
 				// if any are requested, we can make the query as it'll be used at least once
 				if let Some(object) = info.path_data.object.clone() {
 					if state.init.metadata {
 						let metadata = Metadata {
 							path_id: state.init.path_id,
 							name: info.path_data.materialized_path.clone(),
 							hidden: object.hidden,
 							favourite: object.favorite,
 							important: object.important,
 							note: object.note,
 							date_created: object.date_created,
 							date_modified: object.date_modified,
 						};
 
 						header
 							.add_metadata(
 								LATEST_METADATA,
 								state.init.algorithm,
 								master_key.clone(),
 								&metadata,
 							)
 							.await?;
 					}
 
 					// if state.init.preview_media
 					// && (object.has_thumbnail
 					// || object.has_video_preview || object.has_thumbstrip)
 
 					// may not be the best - pvm isn't guaranteed to be webp
 					let pvm_path = ctx
 						.library_ctx
 						.config()
 						.data_directory()
 						.join("thumbnails")
 						.join(info.path_data.cas_id.as_ref().unwrap())
 						.with_extension("wepb");
 
 					if tokio::fs::metadata(&pvm_path).await.is_ok() {
 						let mut pvm_bytes = Vec::new();
 						let mut pvm_file = File::open(pvm_path).await?;
 						pvm_file.read_to_end(&mut pvm_bytes).await?;
 
 						header
 							.add_preview_media(
 								LATEST_PREVIEW_MEDIA,
 								state.init.algorithm,
 								master_key.clone(),
 								&pvm_bytes,
 							)
 							.await?;
 					}
 				} else {
 					// should use container encryption if it's a directory
-					warn!(
-						"skipping metadata/preview media inclusion, no associated object found"
-					)
+					warn!("skipping metadata/preview media inclusion, no associated object found")
 				}
 			}
 
 			header.write(&mut writer).await?;
 
 			let encryptor = StreamEncryption::new(master_key, &header.nonce, header.algorithm)?;
 
 			encryptor
 				.encrypt_streams(&mut reader, &mut writer, &header.generate_aad())
 				.await?;
-			}
-			_ => warn!(
+		} else {
+			warn!(
 				"encryption is skipping {} as it isn't a file",
 				info.path_data.materialized_path
-			),
-		}
+			)
+		}
 
 		ctx.progress(vec![JobReportUpdate::CompletedTaskCount(
@@ -11,11 +11,6 @@ use super::{context_menu_fs_info, FsInfo};
 
 pub struct FileEraserJob {}
 
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct FileEraserJobState {
-	pub fs_info: FsInfo,
-}
-
 #[derive(Serialize, Deserialize, Hash, Type)]
 pub struct FileEraserJobInit {
 	pub location_id: i32,
@@ -31,13 +26,14 @@ pub enum FileEraserJobStep {
 
 impl From<FsInfo> for FileEraserJobStep {
	fn from(value: FsInfo) -> Self {
-		match value.path_data.is_dir {
-			true => Self::Directory {
+		if value.path_data.is_dir {
+			Self::Directory {
 				path: value.fs_path,
-			},
-			false => Self::File {
+			}
+		} else {
+			Self::File {
 				path: value.fs_path,
-			},
+			}
 		}
 	}
 }
@@ -47,7 +43,7 @@ pub const ERASE_JOB_NAME: &str = "file_eraser";
 
 #[async_trait::async_trait]
 impl StatefulJob for FileEraserJob {
 	type Init = FileEraserJobInit;
-	type Data = FileEraserJobState;
+	type Data = FsInfo;
 	type Step = FileEraserJobStep;
 
 	fn name(&self) -> &'static str {
@@ -62,9 +58,7 @@ impl StatefulJob for FileEraserJob {
 		)
 		.await?;
 
-		state.data = Some(FileEraserJobState {
-			fs_info: fs_info.clone(),
-		});
+		state.data = Some(fs_info.clone());
 
 		state.steps = [fs_info.into()].into_iter().collect();
 
@@ -106,12 +100,11 @@ impl StatefulJob for FileEraserJob {
 				let mut dir = tokio::fs::read_dir(&path).await?;
 
 				while let Some(entry) = dir.next_entry().await? {
-					state
-						.steps
-						.push_back(match entry.metadata().await?.is_dir() {
-							true => FileEraserJobStep::Directory { path: entry.path() },
-							false => FileEraserJobStep::File { path: entry.path() },
-						});
+					state.steps.push_back(if entry.metadata().await?.is_dir() {
+						FileEraserJobStep::Directory { path: entry.path() }
+					} else {
+						FileEraserJobStep::File { path: entry.path() }
+					});
 
 					ctx.progress(vec![JobReportUpdate::TaskCount(state.steps.len())]);
 				}
@@ -126,8 +119,8 @@ impl StatefulJob for FileEraserJob {
 
 	async fn finalize(&self, _ctx: WorkerContext, state: &mut JobState<Self>) -> JobResult {
 		if let Some(ref info) = state.data {
-			if info.fs_info.path_data.is_dir {
-				tokio::fs::remove_dir_all(&info.fs_info.fs_path).await?;
+			if info.path_data.is_dir {
+				tokio::fs::remove_dir_all(&info.fs_path).await?;
 			}
 		} else {
 			warn!("missing job state, unable to fully finalise erase job");
@@ -172,6 +172,7 @@ async fn identifier_job_step(
 		.map(|(id, object)| {
 			file_path_object_connect_ops(
 				id,
+				// SAFETY: This pub_id is generated by the uuid lib, but we have to store bytes in sqlite
 				Uuid::from_slice(&object.pub_id).unwrap(),
 				location,
 				sync,
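The added SAFETY comment records why the `unwrap()` on `Uuid::from_slice` is considered fine: the bytes were produced by the uuid crate before being stored in SQLite, so they are always 16 bytes. A round-trip sketch with the uuid crate:

use uuid::Uuid;

fn round_trip() {
	let original = Uuid::new_v4();
	// Stored as raw bytes (e.g. in a sqlite BLOB column)...
	let stored: Vec<u8> = original.as_bytes().to_vec();
	// ...from_slice only errors when the slice isn't exactly 16 bytes.
	let restored = Uuid::from_slice(&stored).unwrap();
	assert_eq!(original, restored);
}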
@@ -179,16 +179,14 @@ impl Extension {
 			return None
 		};
 
-		let Some(ext) = Extension::from_str(ext_str)else {
+		let Some(ext) = Extension::from_str(ext_str) else {
 			return None
 		};
 
-		let Ok(mut file) = fs::File::open(&path).await else {
+		let Ok(ref mut file) = File::open(&path).await else {
 			return None
 		};
 
-		let file = &mut file;
-
 		match ext {
 			// we don't need to check the magic bytes unless there is conflict
 			// always_check_magic_bytes forces the check for tests
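The final hunk adds the missing space before `else` in a `let ... else` binding and drops the redundant `let file = &mut file;` re-borrow by binding `ref mut` directly. `let ... else` requires the else block to diverge. A minimal sketch (the `parse_port` helper is invented):

fn parse_port(s: &str) -> Option<u16> {
	// The else block must diverge; here it returns early from the function.
	let Ok(port) = s.parse::<u16>() else {
		return None;
	};
	Some(port)
}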