/**
 * useNormalizedQuery - Normalized cache with real-time updates
 *
 * A typesafe TanStack Query wrapper providing instant cache updates
 * via filtered WebSocket subscriptions. The counterpart to the Identifiable
 * trait in the Rust core, processing ResourceEvents to update the cache.
 *
 * - Runtime type safety with Valibot
 * - Deep merging via the safeMerge helper (arrays replaced, nested objects merged)
 * - Stable event handlers via refs (no re-subscription on every render)
 * - Error handling with tiny-invariant
 *
 * ## Example
 *
 * ```tsx
 * const { data: files } = useNormalizedQuery({
 *   wireMethod: 'query:files.directory_listing',
 *   input: { path: currentPath },
 *   resourceType: 'file',
 *   pathScope: currentPath,
 *   includeDescendants: false, // Exact mode - only direct children
 * });
 * ```
 */
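
// Note: `includeDescendants` only widens the event subscription (and the client-side
// batch filter below) to the whole subtree under `pathScope`; it does not change what
// the initial query returns. An illustrative recursive variant of the example above,
// reusing the same assumed `currentPath` value:
//
//   const { data: tree } = useNormalizedQuery({
//     wireMethod: 'query:files.directory_listing',
//     input: { path: currentPath },
//     resourceType: 'file',
//     pathScope: currentPath,
//     includeDescendants: true, // Recursive mode - events for the entire subtree
//   });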

import { useEffect, useMemo, useState, useRef } from "react";
import { useQuery, useQueryClient, QueryClient } from "@tanstack/react-query";
import { useSpacedriveClient } from "./useClient";
import type { Event } from "../generated/types";
import invariant from "tiny-invariant";
import * as v from "valibot";
import type { Simplify } from "type-fest";

// Types

export type UseNormalizedQueryOptions<I> = Simplify<{
  /** Wire method to call (e.g., "query:files.directory_listing") */
  wireMethod: string;
  /** Input for the query */
  input: I;
  /** Resource type for event filtering (e.g., "file", "location") */
  resourceType: string;
  /** Whether query is enabled (default: true) */
  enabled?: boolean;
  /** Optional path scope for server-side filtering */
  pathScope?: any; // SdPath type
  /** Whether to include descendants (recursive) or only direct children (exact) */
  includeDescendants?: boolean;
  /** Resource ID for single-resource queries */
  resourceId?: string;
}>;
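
// A minimal sketch of the single-resource shape these options allow (the
// "query:files.by_id" wire method follows the naming convention used elsewhere in this
// file, and `selectedFileId` is a hypothetical variable):
//
//   const inspectorOptions: UseNormalizedQueryOptions<{ id: string }> = {
//     wireMethod: "query:files.by_id",
//     input: { id: selectedFileId },
//     resourceType: "file",
//     resourceId: selectedFileId, // lets the hook subscribe without a pathScope
//   };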

// Runtime Validation Schemas (Valibot)

const ResourceChangedSchema = v.object({
  ResourceChanged: v.object({
    resource_type: v.string(),
    resource: v.any(),
    metadata: v.nullish(
      v.object({
        no_merge_fields: v.optional(v.array(v.string())),
        affected_paths: v.optional(v.array(v.any())),
        alternate_ids: v.optional(v.array(v.any())),
      }),
    ),
  }),
});

const ResourceChangedBatchSchema = v.object({
  ResourceChangedBatch: v.object({
    resource_type: v.string(),
    resources: v.array(v.any()),
    metadata: v.nullish(
      v.object({
        no_merge_fields: v.optional(v.array(v.string())),
        affected_paths: v.optional(v.array(v.any())),
        alternate_ids: v.optional(v.array(v.any())),
      }),
    ),
  }),
});

const ResourceDeletedSchema = v.object({
  ResourceDeleted: v.object({
    resource_type: v.string(),
    resource_id: v.string(),
  }),
});
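
// A quick sketch of what these schemas accept, e.g. when hand-crafting events in tests
// (the resource body and metadata values are illustrative):
//
//   const parsed = v.safeParse(ResourceChangedSchema, {
//     ResourceChanged: {
//       resource_type: "file",
//       resource: { id: "abc", name: "photo.jpg" },
//       metadata: { no_merge_fields: ["sidecars"] },
//     },
//   });
//   // parsed.success === true; parsed.output.ResourceChanged.resource_type === "file"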

// Main Hook

/**
 * useNormalizedQuery - Main hook
 */
export function useNormalizedQuery<I, O>(
  options: UseNormalizedQueryOptions<I>,
) {
  const client = useSpacedriveClient();
  const queryClient = useQueryClient();
  const [libraryId, setLibraryId] = useState<string | null>(
    client.getCurrentLibraryId(),
  );

  // Listen for library changes
  useEffect(() => {
    const handleLibraryChange = (newLibraryId: string) => {
      setLibraryId(newLibraryId);
    };

    client.on("library-changed", handleLibraryChange);
    return () => {
      client.off("library-changed", handleLibraryChange);
    };
  }, [client]);

  // Query key
  const queryKey = useMemo(
    () => [options.wireMethod, libraryId, options.input],
    [options.wireMethod, libraryId, JSON.stringify(options.input)],
  );

  // Standard TanStack Query
  const query = useQuery<O>({
    queryKey,
    queryFn: async () => {
      invariant(libraryId, "Library ID must be set before querying");
      return await client.execute<I, O>(options.wireMethod, options.input);
    },
    enabled: (options.enabled ?? true) && !!libraryId,
  });

  // Refs for stable access to latest values without triggering re-subscription
  const optionsRef = useRef(options);
  const queryKeyRef = useRef(queryKey);

  // Update refs on every render
  useEffect(() => {
    optionsRef.current = options;
    queryKeyRef.current = queryKey;
  });

  // Event subscription
  // Only re-subscribe when filter criteria change
  // Using refs for the event handler to avoid re-subscribing on every render
  useEffect(() => {
    if (!libraryId) return;

    // Skip subscription for file queries without pathScope (prevents overly broad subscriptions),
    // unless resourceId is provided (single-file queries like FileInspector don't need pathScope)
    if (options.resourceType === "file" && !options.pathScope && !options.resourceId) {
      return;
    }

    let unsubscribe: (() => void) | undefined;

    const handleEvent = (event: Event) => {
      handleResourceEvent(
        event,
        optionsRef.current,
        queryKeyRef.current,
        queryClient,
      );
    };

    client
      .subscribeFiltered(
        {
          resource_type: options.resourceType,
          path_scope: options.pathScope,
          library_id: libraryId,
          include_descendants: options.includeDescendants ?? false,
        },
        handleEvent,
      )
      .then((unsub) => {
        unsubscribe = unsub;
      });

    return () => {
      unsubscribe?.();
    };
  }, [
    client,
    queryClient,
    options.resourceType,
    options.resourceId,
    options.pathScope,
    options.includeDescendants,
    libraryId,
    // options and queryKey are accessed via refs - they don't need to be in deps
  ]);

  return query;
}
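
// Illustrative consumption in a component (a sketch only - `Spinner`, `ErrorBanner`,
// `FileGrid`, and the response shape `{ files: ... }` are assumptions, not part of this
// module). The hook returns the TanStack Query result unchanged, so the usual `data`,
// `isLoading`, and `error` fields apply:
//
//   function Explorer({ currentPath }: { currentPath: any }) {
//     const { data, isLoading, error } = useNormalizedQuery<any, { files: any[] }>({
//       wireMethod: "query:files.directory_listing",
//       input: { path: currentPath },
//       resourceType: "file",
//       pathScope: currentPath,
//     });
//
//     if (isLoading) return <Spinner />;
//     if (error) return <ErrorBanner error={error} />;
//     return <FileGrid files={data?.files ?? []} />;
//   }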

// Event Handling

/**
 * Event handler dispatcher with runtime validation
 *
 * Routes validated events to appropriate update functions.
 * Exported for testing.
 */
export function handleResourceEvent(
  event: Event,
  options: UseNormalizedQueryOptions<any>,
  queryKey: any[],
  queryClient: QueryClient,
) {
  // Skip string events (like "CoreStarted", "CoreShutdown")
  if (typeof event === "string") {
    return;
  }

  // Refresh event - invalidate all queries
  if ("Refresh" in event) {
    queryClient.invalidateQueries();
    return;
  }

  // Single resource changed - validate and process
  if ("ResourceChanged" in event) {
    const result = v.safeParse(ResourceChangedSchema, event);
    if (!result.success) {
      console.warn(
        "[useNormalizedQuery] Invalid ResourceChanged event:",
        result.issues,
      );
      return;
    }

    const { resource_type, resource, metadata } = result.output.ResourceChanged;
    if (resource_type === options.resourceType) {
      updateSingleResource(resource, metadata, queryKey, queryClient);
    }
  }

  // Batch resource changed - validate and process
  else if ("ResourceChangedBatch" in event) {
    const result = v.safeParse(ResourceChangedBatchSchema, event);
    if (!result.success) {
      console.warn(
        "[useNormalizedQuery] Invalid ResourceChangedBatch event:",
        result.issues,
      );
      return;
    }

    const { resource_type, resources, metadata } =
      result.output.ResourceChangedBatch;

    if (resource_type === options.resourceType && Array.isArray(resources)) {
      updateBatchResources(resources, metadata, options, queryKey, queryClient);
    }
  }

  // Resource deleted - validate and process
  else if ("ResourceDeleted" in event) {
    const result = v.safeParse(ResourceDeletedSchema, event);
    if (!result.success) {
      console.warn(
        "[useNormalizedQuery] Invalid ResourceDeleted event:",
        result.issues,
      );
      return;
    }

    const { resource_type, resource_id } = result.output.ResourceDeleted;
    if (resource_type === options.resourceType) {
      deleteResource(resource_id, queryKey, queryClient);
    }
  }
}
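
// Sketch of a unit test driving the dispatcher with a synthetic event (the query key,
// cached rows, and the `as Event` cast are illustrative assumptions):
//
//   const queryClient = new QueryClient();
//   const queryKey = ["query:files.directory_listing", "lib-1", { path: "/Desktop" }];
//   queryClient.setQueryData(queryKey, [{ id: "a" }, { id: "b" }]);
//
//   handleResourceEvent(
//     { ResourceDeleted: { resource_type: "file", resource_id: "a" } } as Event,
//     { wireMethod: "query:files.directory_listing", input: {}, resourceType: "file" },
//     queryKey,
//     queryClient,
//   );
//   // queryClient.getQueryData(queryKey) -> [{ id: "b" }]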

// Batch Filtering

/**
 * Filter batch resources by pathScope for exact mode
 *
 * ## Why This Exists
 *
 * Server-side filtering reduces events by 90%+, but it can't split atomic batches.
 * If a batch has 100 files and only one belongs to our scope, the entire batch is sent.
 * This client-side filter ensures only relevant resources are cached.
 *
 * ## The Critical Bug This Prevents
 *
 * Scenario: viewing /Desktop while indexing creates a batch with:
 * - /Desktop/file1.txt (direct child)
 * - /Desktop/Subfolder/file2.txt (grandchild)
 *
 * Without filtering: both files appear in the /Desktop view
 * With filtering: only file1.txt appears
 *
 * @param resources - Resources from the batch event
 * @param options - Query options
 * @returns Filtered resources for this query scope
 *
 * Exported for testing
 */
export function filterBatchResources(
  resources: any[],
  options: UseNormalizedQueryOptions<any>,
): any[] {
  let filtered = resources;

  // Filter by resourceId (single-resource queries like the file inspector)
  if (options.resourceId) {
    filtered = filtered.filter((r: any) => r.id === options.resourceId);
  }

  // Filter by pathScope for file resources in exact mode
  if (
    options.pathScope &&
    options.resourceType === "file" &&
    !options.includeDescendants
  ) {
    filtered = filtered.filter((resource: any) => {
      // Get the scope path (must be Physical)
      const scopeStr = (options.pathScope as any).Physical?.path;
      if (!scopeStr) {
        return false; // No Physical scope path
      }

      // Normalize scope: remove trailing slashes for consistent comparison
      const normalizedScope = String(scopeStr).replace(/\/+$/, "");

      // Try to find a Physical path - check alternate_paths first, then sd_path
      const alternatePaths = resource.alternate_paths || [];
      const physicalFromAlternate = alternatePaths.find((p: any) => p.Physical);
      const physicalFromSdPath = resource.sd_path?.Physical;

      const physicalPath = physicalFromAlternate?.Physical || physicalFromSdPath;

      if (!physicalPath?.path) {
        return false; // No physical path found
      }

      const pathStr = String(physicalPath.path);

      // Extract the parent directory from the file path
      const lastSlash = pathStr.lastIndexOf("/");
      if (lastSlash === -1) {
        return false; // File path has no parent directory
      }

      const parentDir = pathStr.substring(0, lastSlash);

      // Only match if the parent equals the scope (normalized)
      return parentDir === normalizedScope;
    });
  }

  return filtered;
}
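
// The doc-comment scenario above, as a sketch (ids and resource fields illustrative):
//
//   const scoped = filterBatchResources(
//     [
//       { id: "1", sd_path: { Physical: { path: "/Desktop/file1.txt" } } },
//       { id: "2", sd_path: { Physical: { path: "/Desktop/Subfolder/file2.txt" } } },
//     ],
//     {
//       wireMethod: "query:files.directory_listing",
//       input: {},
//       resourceType: "file",
//       pathScope: { Physical: { path: "/Desktop" } },
//       includeDescendants: false, // exact mode
//     },
//   );
//   // scoped -> only the resource with id "1"; the grandchild in Subfolder is dropped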

// Cache Update Functions

/**
 * Update a single resource using type-safe deep merge
 *
 * Exported for testing
 */
export function updateSingleResource<O>(
  resource: any,
  metadata: any,
  queryKey: any[],
  queryClient: QueryClient,
) {
  const noMergeFields = metadata?.no_merge_fields || [];

  queryClient.setQueryData<O>(queryKey, (oldData: any) => {
    if (!oldData) return oldData;

    // Handle array responses
    if (Array.isArray(oldData)) {
      return updateArrayCache(oldData, [resource], noMergeFields) as O;
    }

    // Handle wrapped responses { files: [...] }
    if (oldData && typeof oldData === "object") {
      return updateWrappedCache(oldData, [resource], noMergeFields) as O;
    }

    return oldData;
  });
}
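
// Merge behaviour sketch against a cached array (query key and fields illustrative):
//
//   const qc = new QueryClient();
//   const key = ["query:files.directory_listing", "lib-1", { path: "/Desktop" }];
//   qc.setQueryData(key, [{ id: "1", name: "a.txt", size: 10 }]);
//
//   updateSingleResource({ id: "1", size: 42 }, null, key, qc);
//   // Cached item becomes { id: "1", name: "a.txt", size: 42 } - fields missing from
//   // the incoming resource are preserved by safeMerge.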

/**
 * Update batch resources with filtering and deep merge
 *
 * Exported for testing
 */
export function updateBatchResources<O>(
  resources: any[],
  metadata: any,
  options: UseNormalizedQueryOptions<any>,
  queryKey: any[],
  queryClient: QueryClient,
) {
  const noMergeFields = metadata?.no_merge_fields || [];

  // Apply client-side filtering (safety fallback)
  const filteredResources = filterBatchResources(resources, options);

  if (filteredResources.length === 0) {
    return; // No matching resources
  }

  queryClient.setQueryData<O>(queryKey, (oldData: any) => {
    if (!oldData) return oldData;

    // Handle array responses
    if (Array.isArray(oldData)) {
      return updateArrayCache(oldData, filteredResources, noMergeFields) as O;
    }

    // Handle wrapped responses { files: [...] }
    if (oldData && typeof oldData === "object") {
      return updateWrappedCache(oldData, filteredResources, noMergeFields) as O;
    }

    return oldData;
  });
}

/**
 * Delete a resource from cache
 *
 * Exported for testing
 */
export function deleteResource<O>(
  resourceId: string,
  queryKey: any[],
  queryClient: QueryClient,
) {
  queryClient.setQueryData<O>(queryKey, (oldData: any) => {
    if (!oldData) return oldData;

    if (Array.isArray(oldData)) {
      return oldData.filter((item: any) => item.id !== resourceId) as O;
    }

    if (oldData && typeof oldData === "object") {
      const arrayField = Object.keys(oldData).find((key) =>
        Array.isArray((oldData as any)[key]),
      );

      if (arrayField) {
        return {
          ...oldData,
          [arrayField]: (oldData as any)[arrayField].filter(
            (item: any) => item.id !== resourceId,
          ),
        };
      }
    }

    return oldData;
  });
}
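
// Deletion sketch for a wrapped response (field names illustrative):
//
//   const qc = new QueryClient();
//   const key = ["query:files.directory_listing", "lib-1", { path: "/Desktop" }];
//   qc.setQueryData(key, { files: [{ id: "1" }, { id: "2" }], total: 2 });
//
//   deleteResource("1", key, qc);
//   // Cache becomes { files: [{ id: "2" }], total: 2 } - only the first array-valued
//   // field is rewritten; sibling scalar fields are left as-is.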

// Cache Update Helpers

/**
 * Update array cache (direct array response)
 */
function updateArrayCache(
  oldData: any[],
  newResources: any[],
  noMergeFields: string[],
): any[] {
  const newData = [...oldData];
  const seenIds = new Set();

  // Update existing items
  for (let i = 0; i < newData.length; i++) {
    const item: any = newData[i];
    const match = newResources.find((r: any) => r.id === item.id);
    if (match) {
      newData[i] = safeMerge(item, match, noMergeFields);
      seenIds.add(item.id);
    }
  }

  // Append new items
  for (const resource of newResources) {
    if (!seenIds.has(resource.id)) {
      newData.push(resource);
    }
  }

  return newData;
}
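
// Behaviour sketch: existing ids are merged in place, unseen ids are appended - an
// upsert (values illustrative):
//
//   updateArrayCache(
//     [{ id: "1", name: "a.txt" }],
//     [{ id: "1", size: 42 }, { id: "2", name: "b.txt" }],
//     [],
//   );
//   // -> [{ id: "1", name: "a.txt", size: 42 }, { id: "2", name: "b.txt" }]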

/**
 * Update wrapped cache ({ files: [...], locations: [...], etc. })
 */
function updateWrappedCache(
  oldData: any,
  newResources: any[],
  noMergeFields: string[],
): any {
  // First check: if oldData has an id that matches incoming, merge directly
  // This handles single object responses like files.by_id
  const match = newResources.find((r: any) => r.id === oldData.id);
  if (match) {
    return safeMerge(oldData, match, noMergeFields);
  }

  // Second check: wrapped responses like { files: [...] }
  const arrayField = Object.keys(oldData).find((key) =>
    Array.isArray(oldData[key]),
  );

  if (arrayField) {
    const array = [...oldData[arrayField]];
    const seenIds = new Set();

    // Update existing
    for (let i = 0; i < array.length; i++) {
      const item: any = array[i];
      const match = newResources.find((r: any) => r.id === item.id);
      if (match) {
        array[i] = safeMerge(item, match, noMergeFields);
        seenIds.add(item.id);
      }
    }

    // Append new
    for (const resource of newResources) {
      if (!seenIds.has(resource.id)) {
        array.push(resource);
      }
    }

    return { ...oldData, [arrayField]: array };
  }

  return oldData;
}

/**
 * Safe deep merge for resource updates
 *
 * Arrays are REPLACED (not concatenated) because:
 * - sidecars: Server sends complete list, duplicating would corrupt data
 * - alternate_paths: Same - server is authoritative
 * - tags: Same pattern
 *
 * Only nested objects are deep merged (like content_identity).
 *
 * Exported for testing
 */
export function safeMerge(
  existing: any,
  incoming: any,
  noMergeFields: string[] = [],
): any {
  // Handle null/undefined
  if (incoming === null || incoming === undefined) {
    return existing !== null && existing !== undefined ? existing : incoming;
  }

  // Shallow merge with incoming winning, but deep merge nested objects
  const result: any = { ...existing };

  for (const key of Object.keys(incoming)) {
    const incomingVal = incoming[key];
    const existingVal = existing[key];

    // noMergeFields: incoming always wins
    if (noMergeFields.includes(key)) {
      result[key] = incomingVal;
    }
    // Arrays: replace entirely (don't concatenate)
    else if (Array.isArray(incomingVal)) {
      result[key] = incomingVal;
    }
    // Nested objects: deep merge recursively
    else if (
      incomingVal !== null &&
      typeof incomingVal === "object" &&
      existingVal !== null &&
      typeof existingVal === "object" &&
      !Array.isArray(existingVal)
    ) {
      result[key] = safeMerge(existingVal, incomingVal, noMergeFields);
    }
    // Primitives: incoming wins
    else {
      result[key] = incomingVal;
    }
  }

  return result;
}
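
// Behaviour sketch (field values illustrative; `sidecars` and `content_identity` are
// the fields named in the doc comment above):
//
//   safeMerge(
//     { id: "1", sidecars: ["old.xmp"], content_identity: { cas_id: "x", kind: "image" } },
//     { sidecars: ["new.xmp"], content_identity: { cas_id: "y" } },
//   );
//   // -> {
//   //      id: "1",
//   //      sidecars: ["new.xmp"],                            // array replaced wholesale
//   //      content_identity: { cas_id: "y", kind: "image" }, // nested object deep merged
//   //    }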