Mirror of https://github.com/spacedriveapp/spacedrive.git, synced 2025-12-11 20:15:30 +01:00
Add batch file and directory creation/deletion scenarios to ephemeral watcher tests
- Implemented methods for batch creation and deletion of files and directories in the test harness.
- Added scenarios to verify the creation and deletion of multiple files and directories, ensuring proper event handling.
- Enhanced logging for better visibility during test execution.
- Updated the macOS event handler to improve directory deduplication and manage recent directory creations effectively.
parent deadc713be
commit 1adabc7999
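On the macOS side, the change replaces the single last_created_dir slot with a time-bounded map of recently created directories. A rough, self-contained sketch of that dedup-cache idea (plain std types and a synchronous API for illustration only; the real handler keeps the map behind tokio's RwLock and expires entries from its tick loop):

use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::{Duration, Instant};

/// Illustrative sketch of a "recently created directories" cache.
struct DirDedupCache {
    recent: HashMap<PathBuf, Instant>,
    ttl: Duration,
}

impl DirDedupCache {
    fn new(ttl: Duration) -> Self {
        Self { recent: HashMap::new(), ttl }
    }

    /// True if this path was already recorded recently, i.e. a duplicate event.
    fn is_duplicate(&self, path: &Path) -> bool {
        self.recent.contains_key(path)
    }

    /// Record a directory creation so later duplicate events can be ignored.
    fn record(&mut self, path: PathBuf) {
        self.recent.insert(path, Instant::now());
    }

    /// Drop entries older than the TTL (the handler does this on every tick).
    fn expire(&mut self) {
        let ttl = self.ttl;
        self.recent.retain(|_, created| created.elapsed() < ttl);
    }
}

Unlike a single "last created" slot, the map still catches a duplicate even when creations of other directories arrive in between, which is what the batch scenarios below exercise.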
@@ -291,6 +291,32 @@ impl TestHarness {
         Ok(())
     }

+    /// Delete a directory recursively
+    async fn delete_dir(&self, name: &str) -> Result<(), Box<dyn std::error::Error>> {
+        let path = self.path(name);
+        tokio::fs::remove_dir_all(&path).await?;
+        println!("Deleted directory recursively: {}", name);
+        Ok(())
+    }
+
+    /// Create multiple files at the top level (batch creation test)
+    async fn create_batch_files(&self, files: &[&str]) -> Result<(), Box<dyn std::error::Error>> {
+        for file in files {
+            let full_path = self.path(file);
+            tokio::fs::write(&full_path, format!("Content of {}", file)).await?;
+            println!("Created file: {}", file);
+        }
+        Ok(())
+    }
+
+    /// Create multiple directories at the top level
+    async fn create_batch_dirs(&self, dirs: &[&str]) -> Result<(), Box<dyn std::error::Error>> {
+        for dir in dirs {
+            self.create_dir(dir).await?;
+        }
+        Ok(())
+    }
+
     /// Verify entry exists in ephemeral index
     async fn verify_entry_exists(&self, name: &str) -> Result<(), Box<dyn std::error::Error>> {
         let path = self.path(name);
@@ -567,6 +593,80 @@ async fn run_test_scenarios(harness: &TestHarness) -> Result<(), Box<dyn std::er
     harness.verify_is_directory("projects").await?;
     harness.verify_entry_count(4).await?; // root + initial.txt, notes-renamed.md, projects/

+    // ========================================================================
+    // Scenario 7: Batch Create Files and Directories
+    // ========================================================================
+    println!("\n--- Scenario 7: Batch Create Files and Directories ---");
+
+    // Create multiple files at once (simulating drag-and-drop or copy operations)
+    let batch_files = [
+        "readme.txt",
+        "config.json",
+        "data.csv",
+        "report.md",
+        "script.sh",
+        "image.png",
+    ];
+    harness.create_batch_files(&batch_files).await?;
+
+    // Create multiple directories at once
+    // Note: Avoid names like "temp", "cache", etc. that may be filtered by indexing rules
+    let batch_dirs = ["workspace", "backups", "archives"];
+    harness.create_batch_dirs(&batch_dirs).await?;
+
+    // Give the watcher time to process all the create events
+    tokio::time::sleep(Duration::from_millis(500)).await;
+
+    // Verify all batch-created files appear
+    for file in &batch_files {
+        harness.verify_entry_exists(file).await?;
+        harness.verify_is_file(file).await?;
+    }
+
+    // Verify all batch-created directories appear
+    for dir in &batch_dirs {
+        harness.verify_entry_exists(dir).await?;
+        harness.verify_is_directory(dir).await?;
+    }
+
+    // Count: root(1) + initial.txt(1) + notes-renamed.md(1) + projects(1) +
+    // 6 files + 3 dirs = 13
+    harness.verify_entry_count(13).await?;
+
+    println!("✓ All batch-created entries verified in index");
+    harness.dump_index_state().await;
+
+    // ========================================================================
+    // Scenario 8: Delete Multiple Files and Directory
+    // ========================================================================
+    println!("\n--- Scenario 8: Delete Multiple Files and Directory ---");
+
+    // Delete multiple files
+    for file in &batch_files {
+        harness.delete_file(file).await?;
+    }
+
+    // Delete directories (they're empty at top level, so just rmdir)
+    for dir in &batch_dirs {
+        harness.delete_dir(dir).await?;
+    }
+
+    // Give the watcher time to process all the delete events
+    tokio::time::sleep(Duration::from_millis(500)).await;
+
+    // Verify all batch-created entries are removed
+    for file in &batch_files {
+        harness.verify_entry_not_exists(file).await?;
+    }
+    for dir in &batch_dirs {
+        harness.verify_entry_not_exists(dir).await?;
+    }
+
+    // Count should be back to: root(1) + initial.txt(1) + notes-renamed.md(1) + projects(1) = 4
+    harness.verify_entry_count(4).await?;
+
+    println!("✓ All batch-deleted entries removed from index");
+
     // ========================================================================
     // Final State Verification
     // ========================================================================
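Both scenarios above settle the watcher with a fixed 500 ms sleep before asserting. Purely as an illustration of an alternative (this helper is not part of the commit, and it assumes verify_entry_count keeps returning Err until the index has caught up), the wait could instead poll for the expected count:

use std::time::Duration;

// Hypothetical helper, not in this commit: poll the ephemeral index until it
// reaches the expected entry count instead of sleeping for a fixed interval.
async fn wait_for_entry_count(
    harness: &TestHarness,
    expected: usize,
    timeout: Duration,
) -> Result<(), Box<dyn std::error::Error>> {
    let deadline = tokio::time::Instant::now() + timeout;
    loop {
        // Treat a failed count assertion as "not settled yet" until the deadline.
        if harness.verify_entry_count(expected).await.is_ok() {
            return Ok(());
        }
        if tokio::time::Instant::now() >= deadline {
            return Err(format!("index never reached {expected} entries").into());
        }
        tokio::time::sleep(Duration::from_millis(50)).await;
    }
}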
@@ -579,6 +679,9 @@ async fn run_test_scenarios(harness: &TestHarness) -> Result<(), Box<dyn std::er
     harness.verify_entry_exists("projects").await?;
     harness.verify_entry_not_exists("document.txt").await?;
     harness.verify_entry_not_exists("notes.md").await?;
+    // All batch-created items should be gone
+    harness.verify_entry_not_exists("workspace").await?;
+    harness.verify_entry_not_exists("readme.txt").await?;

     Ok(())
 }
@@ -615,6 +718,8 @@ async fn test_ephemeral_watcher() -> Result<(), Box<dyn std::error::Error>> {
     println!(" ✓ File renaming (ephemeral index updated)");
     println!(" ✓ File deletion (removed from ephemeral index)");
     println!(" ✓ Directory creation (shallow watch)");
+    println!(" ✓ Batch file/directory creation");
+    println!(" ✓ Batch file/directory deletion");

     harness.cleanup().await?;

@@ -31,6 +31,9 @@ const STABILIZATION_TIMEOUT_MS: u64 = 500;
 /// Longer timeout for files with rapid successive changes
 const REINCIDENT_TIMEOUT_MS: u64 = 10_000;

+/// Timeout for directory dedup cache (how long to remember recent directory creates)
+const DIR_DEDUP_TIMEOUT_MS: u64 = 5_000;
+
 /// macOS event handler with rename detection
 pub struct MacOsHandler {
     /// Files pending potential rename (by inode) - the "old path" side
@@ -49,8 +52,9 @@ pub struct MacOsHandler {
     /// Key: path, Value: first change timestamp
     reincident_updates: RwLock<HashMap<PathBuf, Instant>>,

-    /// Last created directory path - for Finder duplicate event deduplication
-    last_created_dir: RwLock<Option<PathBuf>>,
+    /// Recently created directories - for duplicate event deduplication
+    /// Key: path, Value: timestamp of creation
+    recent_dirs: RwLock<HashMap<PathBuf, Instant>>,
 }

 #[derive(Debug, Clone)]
@@ -76,7 +80,7 @@ impl MacOsHandler {
             pending_creates: RwLock::new(HashMap::new()),
             pending_updates: RwLock::new(HashMap::new()),
             reincident_updates: RwLock::new(HashMap::new()),
-            last_created_dir: RwLock::new(None),
+            recent_dirs: RwLock::new(HashMap::new()),
         }
     }

@@ -116,19 +120,18 @@ impl MacOsHandler {
     async fn process_create(&self, path: PathBuf) -> Result<Vec<FsEvent>> {
         // Check if this is a directory
         if Self::is_directory(&path).await {
-            // Dedupe Finder's duplicate directory creation events
+            // Dedupe duplicate directory creation events using recent_dirs cache
             {
-                let mut last_dir = self.last_created_dir.write().await;
-                if let Some(ref last) = *last_dir {
-                    if *last == path {
-                        trace!(
-                            "Ignoring duplicate directory create event: {}",
-                            path.display()
-                        );
-                        return Ok(vec![]);
-                    }
-                }
-                *last_dir = Some(path.clone());
+                let mut recent = self.recent_dirs.write().await;
+                if recent.contains_key(&path) {
+                    trace!(
+                        "Ignoring duplicate directory create event: {}",
+                        path.display()
+                    );
+                    return Ok(vec![]);
+                }
+                // Track this directory creation
+                recent.insert(path.clone(), Instant::now());
             }

             // Directories emit immediately (no rename detection needed)
@@ -139,6 +142,19 @@ impl MacOsHandler {
             return Ok(vec![FsEvent::create_dir(path)]);
         }

+        // For files, check if we already have this path in recent_dirs
+        // (edge case: directory metadata check failed initially but file is actually a dir)
+        {
+            let recent = self.recent_dirs.read().await;
+            if recent.contains_key(&path) {
+                trace!(
+                    "Ignoring create event for recent directory: {}",
+                    path.display()
+                );
+                return Ok(vec![]);
+            }
+        }
+
         // For files, get inode for rename detection
         let Some(inode) = Self::get_inode(&path).await else {
             // File might have been deleted already
@@ -226,23 +242,69 @@ impl MacOsHandler {
     /// Evict pending creates that have timed out
     async fn evict_creates(&self, timeout: Duration) -> Vec<FsEvent> {
         let mut events = Vec::new();
-        let mut creates = self.pending_creates.write().await;
-        let mut to_remove = Vec::new();
-
-        for (inode, pending) in creates.iter() {
-            if pending.timestamp.elapsed() > timeout {
-                to_remove.push(*inode);
-                // Files only - directories are emitted immediately in process_create
-                events.push(FsEvent::create_file(pending.path.clone()));
-                trace!(
-                    "Evicting create (no matching remove): {}",
-                    pending.path.display()
-                );
-            }
-        }
-
-        for inode in to_remove {
-            creates.remove(&inode);
-        }
+        let mut to_process = Vec::new();
+
+        // Collect timed-out entries
+        {
+            let mut creates = self.pending_creates.write().await;
+            let mut to_remove = Vec::new();
+
+            for (inode, pending) in creates.iter() {
+                if pending.timestamp.elapsed() > timeout {
+                    to_remove.push(*inode);
+                }
+            }
+
+            for inode in to_remove {
+                if let Some(pending) = creates.remove(&inode) {
+                    to_process.push(pending);
+                }
+            }
+        }
+
+        // Process evictions without holding the creates lock
+        for pending in to_process {
+            // Check if this path was already emitted as a directory
+            // (handles race condition where directory got buffered initially)
+            let skip = {
+                let recent = self.recent_dirs.read().await;
+                let found = recent.contains_key(&pending.path);
+                if found {
+                    debug!(
+                        "Skipping eviction for already-emitted directory: {}",
+                        pending.path.display()
+                    );
+                } else {
+                    debug!(
+                        "Path not in recent_dirs, will evict: {} (recent_dirs has {} entries)",
+                        pending.path.display(),
+                        recent.len()
+                    );
+                }
+                found
+            };
+
+            if skip {
+                continue;
+            }
+
+            // Check if the path is actually a directory now
+            let is_dir = Self::is_directory(&pending.path).await;
+            if is_dir {
+                // Add to recent_dirs to prevent future duplicates
+                {
+                    let mut recent = self.recent_dirs.write().await;
+                    recent.insert(pending.path.clone(), Instant::now());
+                }
+                events.push(FsEvent::create_dir(pending.path.clone()));
+                debug!(
+                    "Evicting create as directory (was buffered as file): {}",
+                    pending.path.display()
+                );
+            } else {
+                events.push(FsEvent::create_file(pending.path.clone()));
+                debug!("Evicting create as file: {}", pending.path.display());
+            }
+        }

         events
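The reworked evict_creates splits eviction into two phases: timed-out entries are drained while the pending_creates write lock is held, and the slower per-entry work (the recent_dirs lookup and the filesystem check) runs only after that lock has been released. Stripped of the watcher specifics, the pattern looks roughly like this (a generic sketch with made-up types, not the project's code):

use std::collections::HashMap;
use std::path::PathBuf;
use std::time::{Duration, Instant};

use tokio::sync::RwLock;

// Generic sketch of two-phase eviction: drain expired entries while the
// write lock is held, then do the slow work after releasing it.
async fn drain_and_classify(
    pending: &RwLock<HashMap<u64, (PathBuf, Instant)>>,
    timeout: Duration,
) -> Vec<(PathBuf, bool)> {
    // Phase 1: collect and remove timed-out entries; only the map is touched here.
    let expired: Vec<PathBuf> = {
        let mut guard = pending.write().await;
        let timed_out: Vec<u64> = guard
            .iter()
            .filter(|(_, (_, created))| created.elapsed() > timeout)
            .map(|(inode, _)| *inode)
            .collect();
        timed_out
            .into_iter()
            .filter_map(|inode| guard.remove(&inode).map(|(path, _)| path))
            .collect()
    }; // write guard dropped here

    // Phase 2: per-entry filesystem checks happen without holding the lock.
    let mut classified = Vec::new();
    for path in expired {
        let is_dir = tokio::fs::metadata(&path)
            .await
            .map(|m| m.is_dir())
            .unwrap_or(false);
        classified.push((path, is_dir));
    }
    classified
}

Keeping the locked section down to collect-and-remove means pending_creates is never held across the awaits that touch the filesystem or other locks.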
@@ -308,6 +370,12 @@ impl MacOsHandler {

         events
     }
+
+    /// Clean up old entries from the recent directories cache
+    async fn cleanup_recent_dirs(&self, timeout: Duration) {
+        let mut recent = self.recent_dirs.write().await;
+        recent.retain(|_, timestamp| timestamp.elapsed() < timeout);
+    }
 }

 impl Default for MacOsHandler {
@@ -374,6 +442,7 @@ impl EventHandler for MacOsHandler {
     async fn tick(&self) -> Result<Vec<FsEvent>> {
         let rename_timeout = Duration::from_millis(RENAME_TIMEOUT_MS);
         let stabilization_timeout = Duration::from_millis(STABILIZATION_TIMEOUT_MS);
+        let dir_dedup_timeout = Duration::from_millis(DIR_DEDUP_TIMEOUT_MS);

         let mut events = Vec::new();

@@ -383,6 +452,9 @@ impl EventHandler for MacOsHandler {
         events.extend(self.evict_creates(rename_timeout).await);
         events.extend(self.evict_removes(rename_timeout).await);

+        // Clean up old entries from recent_dirs cache
+        self.cleanup_recent_dirs(dir_dedup_timeout).await;
+
         Ok(events)
     }

@@ -391,7 +463,7 @@
         self.pending_creates.write().await.clear();
         self.pending_updates.write().await.clear();
         self.reincident_updates.write().await.clear();
-        *self.last_created_dir.write().await = None;
+        self.recent_dirs.write().await.clear();
     }
 }

@@ -408,7 +480,7 @@ mod tests {
         assert!(handler.pending_creates.read().await.is_empty());
         assert!(handler.pending_updates.read().await.is_empty());
         assert!(handler.reincident_updates.read().await.is_empty());
-        assert!(handler.last_created_dir.read().await.is_none());
+        assert!(handler.recent_dirs.read().await.is_empty());
     }

     #[tokio::test]
@@ -421,15 +493,15 @@ mod tests {
             updates.insert(PathBuf::from("/test"), Instant::now());
         }
         {
-            let mut last_dir = handler.last_created_dir.write().await;
-            *last_dir = Some(PathBuf::from("/test/dir"));
+            let mut recent = handler.recent_dirs.write().await;
+            recent.insert(PathBuf::from("/test/dir"), Instant::now());
         }

         // Reset should clear everything
         handler.reset().await;

         assert!(handler.pending_updates.read().await.is_empty());
-        assert!(handler.last_created_dir.read().await.is_none());
+        assert!(handler.recent_dirs.read().await.is_empty());
     }

     #[tokio::test]