diff --git a/.github/workflows/regular-tests.yaml b/.github/workflows/regular-tests.yaml
index 7a29eaf2..148f3f0f 100644
--- a/.github/workflows/regular-tests.yaml
+++ b/.github/workflows/regular-tests.yaml
@@ -21,7 +21,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
         with:
-          go-version: '1.22.5'
+          go-version: '1.23.3'
       - uses: golangci/golangci-lint-action@v5
         with:
          version: v1.60
@@ -41,4 +41,11 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions/setup-node@v4
       - working-directory: frontend
-        run: npm i eslint && npm run lint
+        run: npm i && npm run lint
+  test-frontend:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
+      - working-directory: frontend
+        run: npm i && npm run test
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c8ca3190..4b635ec5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,24 @@
 All notable changes to this project will be documented in this file. For commit
 guidelines, please refer to [Standard Version](https://github.com/conventional-changelog/standard-version).
 
+## v0.3.1
+
+ **New Features**
+ - Adds Smart Indexing by default.
+
+ **Notes**:
+ - Optimized API request response times via improved caching and simplified actions.
+ - User information persists more reliably.
+ - Added an [indexing doc](./docs/indexing.md) to explain how indexing works and what to expect from it.
+ - The index should also use less RAM than it did in v0.3.0.
+
+ **Bugfixes**:
+ - Tweaked sorting by name; fixes case-sensitive and numeric sorting. https://github.com/gtsteffaniak/filebrowser/issues/230
+ - Fixed unnecessary authentication status checks on each route change.
+ - Fixed an issue with the create file action.
+ - Fixed some small JavaScript-related issues.
+ - Fixed a significant bug with viewing raw content in v0.3.0 (UTF format message).
+
 ## v0.3.0
 
 This Release focuses on the API and making it more accessible for developers to access functions without the UI.
diff --git a/README.md b/README.md
index 18567213..3d631a9e 100644
--- a/README.md
+++ b/README.md
@@ -10,19 +10,25 @@

 > [!WARNING]
-> Starting with `v0.3.0` API routes have been slightly altered for friendly usage outside of the UI.
+> Starting with `v0.3.0`, API routes have been slightly altered for friendlier usage outside of the UI. The resources API now returns items in separate `files` and `folders` objects.
+
+> [!WARNING]
 > If on Windows, please use Docker. The Windows binary is unstable and may not work.
 
+> [!WARNING]
+> There is no stable version yet. Always check the release notes for bugfixes to functionality that may have changed. If you notice any unexpected behavior, please open an issue so it can be fixed soon.
+
 FileBrowser Quantum is a fork of the file browser open-source project with the following changes:
 
- 1. [x] Efficiently indexed files
+ 1. [x] Indexes files efficiently. See the [indexing readme](./docs/indexing.md).
     - Real-time search results as you type
-    - Search Works with more type filters
-    - Enhanced interactive results page.
-    - file/folder sizes are shown in the response
+    - Search supports file/folder sizes and many file type filters.
+    - Enhanced interactive results page that shows file/folder sizes.
 1. [x] Revamped and simplified GUI navbar and sidebar menu.
     - Additional compact view mode as well as refreshed view mode styles.
+    - Many graphical and user experience improvements.
+    - Right-click context menu.
 1. [x] Revamped and simplified configuration via `filebrowser.yml` config file.
 1. [x] Better listing browsing
     - Switching view modes is instant
@@ -33,6 +39,13 @@ FileBrowser Quantum is a fork of the file browser opensource project with the fo
     - Can create long-lived API Tokens.
     - Helpful Swagger page available at `/swagger` endpoint.
 
+Notable features that this fork *does not* have (removed):
+
+ - Jobs/runners are not supported yet (planned).
+ - Shell commands are completely removed and will not be returning.
+ - Themes and branding are not fully supported yet (planned).
+ - See the feature matrix below for more.
+ ## About FileBrowser Quantum provides a file-managing interface within a specified directory diff --git a/backend/cmd/root.go b/backend/cmd/root.go index 68f33cd8..fda40a7d 100644 --- a/backend/cmd/root.go +++ b/backend/cmd/root.go @@ -114,10 +114,6 @@ func StartFilebrowser() { } } store, dbExists := getStore(configPath) - indexingInterval := fmt.Sprint(settings.Config.Server.IndexingInterval, " minutes") - if !settings.Config.Server.Indexing { - indexingInterval = "disabled" - } database := fmt.Sprintf("Using existing database : %v", settings.Config.Server.Database) if !dbExists { database = fmt.Sprintf("Creating new database : %v", settings.Config.Server.Database) @@ -127,14 +123,13 @@ func StartFilebrowser() { log.Println("Embeded frontend :", os.Getenv("FILEBROWSER_NO_EMBEDED") != "true") log.Println(database) log.Println("Sources :", settings.Config.Server.Root) - log.Println("Indexing interval :", indexingInterval) serverConfig := settings.Config.Server swagInfo := docs.SwaggerInfo swagInfo.BasePath = serverConfig.BaseURL swag.Register(docs.SwaggerInfo.InstanceName(), swagInfo) // initialize indexing and schedule indexing ever n minutes (default 5) - go files.InitializeIndex(serverConfig.IndexingInterval, serverConfig.Indexing) + go files.InitializeIndex(serverConfig.Indexing) if err := rootCMD(store, &serverConfig); err != nil { log.Fatal("Error starting filebrowser:", err) } diff --git a/backend/files/conditions.go b/backend/files/conditions.go index 09d70b43..29860183 100644 --- a/backend/files/conditions.go +++ b/backend/files/conditions.go @@ -14,24 +14,72 @@ var AllFiletypeOptions = []string{ "archive", "video", "doc", - "dir", "text", } + +// Document file extensions var documentTypes = []string{ - ".word", - ".pdf", - ".doc", - ".docx", -} -var textTypes = []string{ - ".text", - ".sh", - ".yaml", - ".yml", - ".json", - ".env", + // Common Document Formats + ".doc", ".docx", // Microsoft Word + ".pdf", // Portable Document Format + ".odt", // OpenDocument Text + ".rtf", // Rich Text Format + + // Presentation Formats + ".ppt", ".pptx", // Microsoft PowerPoint + ".odp", // OpenDocument Presentation + + // Spreadsheet Formats + ".xls", ".xlsx", // Microsoft Excel + ".ods", // OpenDocument Spreadsheet + + // Other Document Formats + ".epub", // Electronic Publication + ".mobi", // Amazon Kindle + ".fb2", // FictionBook } +// Text-based file extensions +var textTypes = []string{ + // Common Text Formats + ".txt", + ".md", // Markdown + + // Scripting and Programming Languages + ".sh", // Bash script + ".py", // Python + ".js", // JavaScript + ".ts", // TypeScript + ".php", // PHP + ".rb", // Ruby + ".go", // Go + ".java", // Java + ".c", ".cpp", // C/C++ + ".cs", // C# + ".swift", // Swift + + // Configuration Files + ".yaml", ".yml", // YAML + ".json", // JSON + ".xml", // XML + ".ini", // INI + ".toml", // TOML + ".cfg", // Configuration file + + // Other Text-Based Formats + ".css", // Cascading Style Sheets + ".html", ".htm", // HyperText Markup Language + ".sql", // SQL + ".csv", // Comma-Separated Values + ".tsv", // Tab-Separated Values + ".log", // Log file + ".bat", // Batch file + ".ps1", // PowerShell script + ".tex", // LaTeX + ".bib", // BibTeX +} + +// Compressed file extensions var compressedFile = []string{ ".7z", ".rar", @@ -39,6 +87,12 @@ var compressedFile = []string{ ".tar", ".gz", ".xz", + ".bz2", + ".tgz", // tar.gz + ".tbz2", // tar.bz2 + ".lzma", + ".lz4", + ".zstd", } type SearchOptions struct { @@ -48,8 +102,8 @@ type SearchOptions struct { Terms 
[]string } -func ParseSearch(value string) *SearchOptions { - opts := &SearchOptions{ +func ParseSearch(value string) SearchOptions { + opts := SearchOptions{ Conditions: map[string]bool{ "exact": strings.Contains(value, "case:exact"), }, diff --git a/backend/files/file.go b/backend/files/file.go index e254221f..8bd4bfda 100644 --- a/backend/files/file.go +++ b/backend/files/file.go @@ -13,6 +13,8 @@ import ( "net/http" "os" "path/filepath" + "sort" + "strconv" "strings" "sync" "time" @@ -22,6 +24,7 @@ import ( "github.com/gtsteffaniak/filebrowser/fileutils" "github.com/gtsteffaniak/filebrowser/settings" "github.com/gtsteffaniak/filebrowser/users" + "github.com/gtsteffaniak/filebrowser/utils" ) var ( @@ -29,34 +32,30 @@ var ( pathMutexesMu sync.Mutex // Mutex to protect the pathMutexes map ) -type ReducedItem struct { - Name string `json:"name"` - Size int64 `json:"size"` - ModTime time.Time `json:"modified"` - Type string `json:"type"` - Mode os.FileMode `json:"-"` - Content string `json:"content,omitempty"` +type ItemInfo struct { + Name string `json:"name"` + Size int64 `json:"size"` + ModTime time.Time `json:"modified"` + Type string `json:"type"` } // FileInfo describes a file. // reduced item is non-recursive reduced "Items", used to pass flat items array type FileInfo struct { - Files []ReducedItem `json:"-"` - Dirs map[string]*FileInfo `json:"-"` - Path string `json:"path"` - Name string `json:"name"` - Items []ReducedItem `json:"items"` - Size int64 `json:"size"` - Extension string `json:"-"` - ModTime time.Time `json:"modified"` - CacheTime time.Time `json:"-"` - Mode os.FileMode `json:"-"` - IsSymlink bool `json:"isSymlink,omitempty"` - Type string `json:"type"` - Subtitles []string `json:"subtitles,omitempty"` - Content string `json:"content,omitempty"` - Checksums map[string]string `json:"checksums,omitempty"` - Token string `json:"token,omitempty"` + ItemInfo + Files []ItemInfo `json:"files"` + Folders []ItemInfo `json:"folders"` + Path string `json:"path"` +} + +// for efficiency, a response will be a pointer to the data +// extra calculated fields can be added here +type ExtendedFileInfo struct { + *FileInfo + Content string `json:"content,omitempty"` + Subtitles []string `json:"subtitles,omitempty"` + Checksums map[string]string `json:"checksums,omitempty"` + Token string `json:"token,omitempty"` } // FileOptions are the options when getting a file info. @@ -66,7 +65,6 @@ type FileOptions struct { Modify bool Expand bool ReadHeader bool - Token string Checker users.Checker Content bool } @@ -75,206 +73,70 @@ func (f FileOptions) Components() (string, string) { return filepath.Dir(f.Path), filepath.Base(f.Path) } -func FileInfoFaster(opts FileOptions) (*FileInfo, error) { +func FileInfoFaster(opts FileOptions) (ExtendedFileInfo, error) { index := GetIndex(rootPath) opts.Path = index.makeIndexPath(opts.Path) - + response := ExtendedFileInfo{} // Lock access for the specific path pathMutex := getMutex(opts.Path) pathMutex.Lock() defer pathMutex.Unlock() if !opts.Checker.Check(opts.Path) { - return nil, os.ErrPermission + return response, os.ErrPermission } + _, isDir, err := GetRealPath(opts.Path) if err != nil { - return nil, err + return response, err } opts.IsDir = isDir + + // TODO : whats the best way to save trips to disk here? 
+	// disabled using cache because it's not clear if this is helping or hurting
 	// check if the file exists in the index
-	info, exists := index.GetReducedMetadata(opts.Path, opts.IsDir)
-	if exists {
-		// Let's not refresh if less than a second has passed
-		if time.Since(info.CacheTime) > time.Second {
-			RefreshFileInfo(opts) //nolint:errcheck
-		}
-		if opts.Content {
-			content := ""
-			content, err = getContent(opts.Path)
-			if err != nil {
-				return info, err
-			}
-			info.Content = content
-		}
-		return info, nil
-	}
-	err = RefreshFileInfo(opts)
+	//info, exists := index.GetReducedMetadata(opts.Path, opts.IsDir)
+	//if exists {
+	//	err := RefreshFileInfo(opts)
+	//	if err != nil {
+	//		return info, err
+	//	}
+	//	if opts.Content {
+	//		content := ""
+	//		content, err = getContent(opts.Path)
+	//		if err != nil {
+	//			return info, err
+	//		}
+	//		info.Content = content
+	//	}
+	//	return info, nil
+	//}
+
+	err = index.RefreshFileInfo(opts)
 	if err != nil {
-		return nil, err
+		return response, err
 	}
-	info, exists = index.GetReducedMetadata(opts.Path, opts.IsDir)
+	info, exists := index.GetReducedMetadata(opts.Path, opts.IsDir)
 	if !exists {
-		return nil, err
+		// err is nil here after a successful refresh, so return an explicit error
+		return response, fmt.Errorf("could not get metadata for %s", opts.Path)
 	}
 	if opts.Content {
 		content, err := getContent(opts.Path)
 		if err != nil {
-			return info, err
+			return response, err
 		}
-		info.Content = content
+		response.Content = content
 	}
-	return info, nil
-}
-
-func RefreshFileInfo(opts FileOptions) error {
-	refreshOptions := FileOptions{
-		Path:  opts.Path,
-		IsDir: opts.IsDir,
-		Token: opts.Token,
-	}
-	index := GetIndex(rootPath)
-
-	if !refreshOptions.IsDir {
-		refreshOptions.Path = index.makeIndexPath(filepath.Dir(refreshOptions.Path))
-		refreshOptions.IsDir = true
-	} else {
-		refreshOptions.Path = index.makeIndexPath(refreshOptions.Path)
-	}
-
-	current, exists := index.GetMetadataInfo(refreshOptions.Path, true)
-
-	file, err := stat(refreshOptions)
-	if err != nil {
-		return fmt.Errorf("file/folder does not exist to refresh data: %s", refreshOptions.Path)
-	}
-
-	//utils.PrintStructFields(*file)
-	result := index.UpdateMetadata(file)
-	if !result {
-		return fmt.Errorf("file/folder does not exist in metadata: %s", refreshOptions.Path)
-	}
-	if !exists {
-		return nil
-	}
-	if current.Size != file.Size {
-		index.recursiveUpdateDirSizes(filepath.Dir(refreshOptions.Path), file, current.Size)
-	}
-	return nil
-}
-
-func stat(opts FileOptions) (*FileInfo, error) {
-	realPath, _, err := GetRealPath(rootPath, opts.Path)
-	if err != nil {
-		return nil, err
-	}
-	info, err := os.Lstat(realPath)
-	if err != nil {
-		return nil, err
-	}
-	file := &FileInfo{
-		Path:      opts.Path,
-		Name:      filepath.Base(opts.Path),
-		ModTime:   info.ModTime(),
-		Mode:      info.Mode(),
-		Size:      info.Size(),
-		Extension: filepath.Ext(info.Name()),
-		Token:     opts.Token,
-	}
-	if info.IsDir() {
-		// Open and read directory contents
-		dir, err := os.Open(realPath)
-		if err != nil {
-			return nil, err
-		}
-		defer dir.Close()
-
-		dirInfo, err := dir.Stat()
-		if err != nil {
-			return nil, err
-		}
-		index := GetIndex(rootPath)
-		// Check cached metadata to decide if refresh is needed
-		cachedParentDir, exists := index.GetMetadataInfo(opts.Path, true)
-		if exists && dirInfo.ModTime().Before(cachedParentDir.CacheTime) {
-			return cachedParentDir, nil
-		}
-
-		// Read directory contents and process
-		files, err := dir.Readdir(-1)
-		if err != nil {
-			return nil, err
-		}
-
-		file.Files = []ReducedItem{}
-		file.Dirs = map[string]*FileInfo{}
-
-		var totalSize int64
-		for _, item := range files {
-			itemPath := filepath.Join(realPath,
item.Name()) - - if item.IsDir() { - itemInfo := &FileInfo{ - Name: item.Name(), - ModTime: item.ModTime(), - Mode: item.Mode(), - } - - if exists { - // if directory size was already cached use that. - cachedDir, ok := cachedParentDir.Dirs[item.Name()] - if ok { - itemInfo.Size = cachedDir.Size - } - } - file.Dirs[item.Name()] = itemInfo - totalSize += itemInfo.Size - } else { - itemInfo := ReducedItem{ - Name: item.Name(), - Size: item.Size(), - ModTime: item.ModTime(), - Mode: item.Mode(), - } - if IsSymlink(item.Mode()) { - itemInfo.Type = "symlink" - info, err := os.Stat(itemPath) - if err == nil { - itemInfo.Name = info.Name() - itemInfo.ModTime = info.ModTime() - itemInfo.Size = info.Size() - itemInfo.Mode = info.Mode() - } else { - file.Type = "invalid_link" - } - } - if file.Type != "invalid_link" { - err := itemInfo.detectType(itemPath, true, opts.Content, opts.ReadHeader) - if err != nil { - fmt.Printf("failed to detect type for %v: %v \n", itemPath, err) - } - file.Files = append(file.Files, itemInfo) - } - totalSize += itemInfo.Size - - } - } - - file.Size = totalSize - } - return file, nil + response.FileInfo = info + return response, nil } // Checksum checksums a given File for a given User, using a specific // algorithm. The checksums data is saved on File object. -func (i *FileInfo) Checksum(algo string) error { - - if i.Checksums == nil { - i.Checksums = map[string]string{} - } - fullpath := filepath.Join(i.Path, i.Name) - reader, err := os.Open(fullpath) +func GetChecksum(fullPath, algo string) (map[string]string, error) { + subs := map[string]string{} + reader, err := os.Open(fullPath) if err != nil { - return err + return subs, err } defer reader.Close() @@ -287,21 +149,21 @@ func (i *FileInfo) Checksum(algo string) error { h, ok := hashFuncs[algo] if !ok { - return errors.ErrInvalidOption + return subs, errors.ErrInvalidOption } _, err = io.Copy(h, reader) if err != nil { - return err + return subs, err } - - i.Checksums[algo] = hex.EncodeToString(h.Sum(nil)) - return nil + subs[algo] = hex.EncodeToString(h.Sum(nil)) + return subs, nil } // RealPath gets the real path for the file, resolving symlinks if supported. func (i *FileInfo) RealPath() string { - realPath, err := filepath.EvalSymlinks(i.Path) + realPath, _, _ := GetRealPath(rootPath, i.Path) + realPath, err := filepath.EvalSymlinks(realPath) if err == nil { return realPath } @@ -314,13 +176,24 @@ func GetRealPath(relativePath ...string) (string, bool, error) { combined = append(combined, strings.TrimPrefix(path, settings.Config.Server.Root)) } joinedPath := filepath.Join(combined...) 
+ + isDir, _ := utils.RealPathCache.Get(joinedPath + ":isdir").(bool) + cached, ok := utils.RealPathCache.Get(joinedPath).(string) + if ok && cached != "" { + return cached, isDir, nil + } // Convert relative path to absolute path absolutePath, err := filepath.Abs(joinedPath) if err != nil { return absolutePath, false, fmt.Errorf("could not get real path: %v, %s", combined, err) } // Resolve symlinks and get the real path - return resolveSymlinks(absolutePath) + realPath, isDir, err := resolveSymlinks(absolutePath) + if err == nil { + utils.RealPathCache.Set(joinedPath, realPath) + utils.RealPathCache.Set(joinedPath+":isdir", isDir) + } + return realPath, isDir, err } func DeleteFiles(absPath string, opts FileOptions) error { @@ -328,7 +201,8 @@ func DeleteFiles(absPath string, opts FileOptions) error { if err != nil { return err } - err = RefreshFileInfo(opts) + index := GetIndex(rootPath) + err = index.RefreshFileInfo(opts) if err != nil { return err } @@ -340,8 +214,9 @@ func MoveResource(realsrc, realdst string, isSrcDir bool) error { if err != nil { return err } + index := GetIndex(rootPath) // refresh info for source and dest - err = RefreshFileInfo(FileOptions{ + err = index.RefreshFileInfo(FileOptions{ Path: realsrc, IsDir: isSrcDir, }) @@ -352,7 +227,7 @@ func MoveResource(realsrc, realdst string, isSrcDir bool) error { if !isSrcDir { refreshConfig.Path = filepath.Dir(realdst) } - err = RefreshFileInfo(refreshConfig) + err = index.RefreshFileInfo(refreshConfig) if err != nil { return errors.ErrEmptyKey } @@ -364,12 +239,12 @@ func CopyResource(realsrc, realdst string, isSrcDir bool) error { if err != nil { return err } - + index := GetIndex(rootPath) refreshConfig := FileOptions{Path: realdst, IsDir: true} if !isSrcDir { refreshConfig.Path = filepath.Dir(realdst) } - err = RefreshFileInfo(refreshConfig) + err = index.RefreshFileInfo(refreshConfig) if err != nil { return errors.ErrEmptyKey } @@ -383,7 +258,8 @@ func WriteDirectory(opts FileOptions) error { if err != nil { return err } - err = RefreshFileInfo(opts) + index := GetIndex(rootPath) + err = index.RefreshFileInfo(opts) if err != nil { return errors.ErrEmptyKey } @@ -391,13 +267,10 @@ func WriteDirectory(opts FileOptions) error { } func WriteFile(opts FileOptions, in io.Reader) error { - dst := opts.Path + dst, _, _ := GetRealPath(rootPath, opts.Path) parentDir := filepath.Dir(dst) - // Split the directory from the destination path - dir := filepath.Dir(dst) - // Create the directory and all necessary parents - err := os.MkdirAll(dir, 0775) + err := os.MkdirAll(parentDir, 0775) if err != nil { return err } @@ -415,35 +288,35 @@ func WriteFile(opts FileOptions, in io.Reader) error { return err } opts.Path = parentDir - err = RefreshFileInfo(opts) - if err != nil { - return errors.ErrEmptyKey - } - return nil + opts.IsDir = true + index := GetIndex(rootPath) + return index.RefreshFileInfo(opts) } // resolveSymlinks resolves symlinks in the given path func resolveSymlinks(path string) (string, bool, error) { for { - // Get the file info + // Get the file info using os.Lstat to handle symlinks info, err := os.Lstat(path) if err != nil { - return path, false, fmt.Errorf("could not stat path: %v, %s", path, err) + return path, false, fmt.Errorf("could not stat path: %s, %v", path, err) } - // Check if it's a symlink + // Check if the path is a symlink if info.Mode()&os.ModeSymlink != 0 { // Read the symlink target target, err := os.Readlink(path) if err != nil { - return path, false, err + return path, false, fmt.Errorf("could 
not read symlink: %s, %v", path, err) } - // Resolve the target relative to the symlink's directory + // Resolve the symlink's target relative to its directory + // This ensures the resolved path is absolute and correctly calculated path = filepath.Join(filepath.Dir(path), target) } else { - // Not a symlink, so return the resolved path and check if it's a directory - return path, info.IsDir(), nil + // Not a symlink, so return the resolved path and whether it's a directory + isDir := info.IsDir() + return path, isDir, nil } } } @@ -461,7 +334,7 @@ func getContent(path string) (string, error) { } stringContent := string(content) if !utf8.ValidString(stringContent) { - return "", fmt.Errorf("file is not utf8 encoded") + return "", nil } if stringContent == "" { return "empty-file-x6OlSil", nil @@ -470,21 +343,9 @@ func getContent(path string) (string, error) { } // detectType detects the file type. -func (i *ReducedItem) detectType(path string, modify, saveContent, readHeader bool) error { +func (i *ItemInfo) detectType(path string, modify, saveContent, readHeader bool) error { name := i.Name var contentErr error - var contentString string - if saveContent { - contentString, contentErr = getContent(path) - if contentErr == nil { - i.Content = contentString - } - } - - if IsNamedPipe(i.Mode) { - i.Type = "blob" - return contentErr - } ext := filepath.Ext(name) var buffer []byte @@ -533,7 +394,7 @@ func (i *ReducedItem) detectType(path string, modify, saveContent, readHeader bo } // readFirstBytes reads the first bytes of the file. -func (i *ReducedItem) readFirstBytes(path string) []byte { +func (i *ItemInfo) readFirstBytes(path string) []byte { file, err := os.Open(path) if err != nil { i.Type = "blob" @@ -551,6 +412,7 @@ func (i *ReducedItem) readFirstBytes(path string) []byte { return buffer[:n] } +// TODO add subtitles back // detectSubtitles detects subtitles for video files. 
//func (i *FileInfo) detectSubtitles(path string) { // if i.Type != "video" { @@ -620,3 +482,26 @@ func Exists(path string) bool { } return false } + +func (info *FileInfo) SortItems() { + sort.Slice(info.Folders, func(i, j int) bool { + // Convert strings to integers for numeric sorting if both are numeric + numI, errI := strconv.Atoi(info.Folders[i].Name) + numJ, errJ := strconv.Atoi(info.Folders[j].Name) + if errI == nil && errJ == nil { + return numI < numJ + } + // Fallback to case-insensitive lexicographical sorting + return strings.ToLower(info.Folders[i].Name) < strings.ToLower(info.Folders[j].Name) + }) + sort.Slice(info.Files, func(i, j int) bool { + // Convert strings to integers for numeric sorting if both are numeric + numI, errI := strconv.Atoi(info.Files[i].Name) + numJ, errJ := strconv.Atoi(info.Files[j].Name) + if errI == nil && errJ == nil { + return numI < numJ + } + // Fallback to case-insensitive lexicographical sorting + return strings.ToLower(info.Files[i].Name) < strings.ToLower(info.Files[j].Name) + }) +} diff --git a/backend/files/indexing.go b/backend/files/indexing.go deleted file mode 100644 index 7c1859e0..00000000 --- a/backend/files/indexing.go +++ /dev/null @@ -1,204 +0,0 @@ -package files - -import ( - "log" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/gtsteffaniak/filebrowser/settings" -) - -type Index struct { - Root string - Directories map[string]*FileInfo - NumDirs int - NumFiles int - inProgress bool - LastIndexed time.Time - mu sync.RWMutex -} - -var ( - rootPath string = "/srv" - indexes []*Index - indexesMutex sync.RWMutex -) - -func InitializeIndex(intervalMinutes uint32, schedule bool) { - if schedule { - go indexingScheduler(intervalMinutes) - } -} - -func indexingScheduler(intervalMinutes uint32) { - if settings.Config.Server.Root != "" { - rootPath = settings.Config.Server.Root - } - si := GetIndex(rootPath) - for { - startTime := time.Now() - // Set the indexing flag to indicate that indexing is in progress - si.resetCount() - // Perform the indexing operation - err := si.indexFiles("/") - // Reset the indexing flag to indicate that indexing has finished - si.inProgress = false - // Update the LastIndexed time - si.LastIndexed = time.Now() - if err != nil { - log.Printf("Error during indexing: %v", err) - } - if si.NumFiles+si.NumDirs > 0 { - timeIndexedInSeconds := int(time.Since(startTime).Seconds()) - log.Println("Successfully indexed files.") - log.Printf("Time spent indexing: %v seconds\n", timeIndexedInSeconds) - log.Printf("Files found: %v\n", si.NumFiles) - log.Printf("Directories found: %v\n", si.NumDirs) - } - // Sleep for the specified interval - time.Sleep(time.Duration(intervalMinutes) * time.Minute) - } -} - -// Define a function to recursively index files and directories -func (si *Index) indexFiles(adjustedPath string) error { - realPath := strings.TrimRight(si.Root, "/") + adjustedPath - - // Open the directory - dir, err := os.Open(realPath) - if err != nil { - si.RemoveDirectory(adjustedPath) // Remove if it can't be opened - return err - } - defer dir.Close() - - dirInfo, err := dir.Stat() - if err != nil { - return err - } - - // Skip directories that haven't been modified since the last index - if dirInfo.ModTime().Before(si.LastIndexed) { - return nil - } - - // Read directory contents - files, err := dir.Readdir(-1) - if err != nil { - return err - } - - var totalSize int64 - var numDirs, numFiles int - fileInfos := []ReducedItem{} - dirInfos := map[string]*FileInfo{} - combinedPath := 
adjustedPath + "/" - if adjustedPath == "/" { - combinedPath = "/" - } - - // Process each file and directory in the current directory - for _, file := range files { - itemInfo := &FileInfo{ - ModTime: file.ModTime(), - } - if file.IsDir() { - itemInfo.Name = file.Name() - itemInfo.Path = combinedPath + file.Name() - // Recursively index the subdirectory - err := si.indexFiles(itemInfo.Path) - if err != nil { - log.Printf("Failed to index directory %s: %v", itemInfo.Path, err) - continue - } - // Fetch the metadata for the subdirectory after indexing - subDirInfo, exists := si.GetMetadataInfo(itemInfo.Path, true) - if exists { - itemInfo.Size = subDirInfo.Size - totalSize += subDirInfo.Size // Add subdirectory size to the total - } - dirInfos[itemInfo.Name] = itemInfo - numDirs++ - } else { - itemInfo := &ReducedItem{ - Name: file.Name(), - ModTime: file.ModTime(), - Size: file.Size(), - Mode: file.Mode(), - } - _ = itemInfo.detectType(combinedPath+file.Name(), true, false, false) - fileInfos = append(fileInfos, *itemInfo) - totalSize += itemInfo.Size - numFiles++ - } - } - - // Create FileInfo for the current directory - dirFileInfo := &FileInfo{ - Path: adjustedPath, - Files: fileInfos, - Dirs: dirInfos, - Size: totalSize, - ModTime: dirInfo.ModTime(), - } - - // Update the current directory metadata in the index - si.UpdateMetadata(dirFileInfo) - si.NumDirs += numDirs - si.NumFiles += numFiles - - return nil -} - -func (si *Index) makeIndexPath(subPath string) string { - if strings.HasPrefix(subPath, "./") { - subPath = strings.TrimPrefix(subPath, ".") - } - if strings.HasPrefix(subPath, ".") || si.Root == subPath { - return "/" - } - // clean path - subPath = strings.TrimSuffix(subPath, "/") - // remove index prefix - adjustedPath := strings.TrimPrefix(subPath, si.Root) - // remove trailing slash - adjustedPath = strings.TrimSuffix(adjustedPath, "/") - if !strings.HasPrefix(adjustedPath, "/") { - adjustedPath = "/" + adjustedPath - } - return adjustedPath -} - -//func getParentPath(path string) string { -// // Trim trailing slash for consistency -// path = strings.TrimSuffix(path, "/") -// if path == "" || path == "/" { -// return "" // Root has no parent -// } -// -// lastSlash := strings.LastIndex(path, "/") -// if lastSlash == -1 { -// return "/" // Parent of a top-level directory -// } -// return path[:lastSlash] -//} - -func (si *Index) recursiveUpdateDirSizes(parentDir string, childInfo *FileInfo, previousSize int64) { - childDirName := filepath.Base(childInfo.Path) - if parentDir == childDirName { - return - } - dir, exists := si.GetMetadataInfo(parentDir, true) - if !exists { - return - } - dir.Dirs[childDirName] = childInfo - newSize := dir.Size - previousSize + childInfo.Size - dir.Size += newSize - si.UpdateMetadata(dir) - dir, _ = si.GetMetadataInfo(parentDir, true) - si.recursiveUpdateDirSizes(filepath.Dir(parentDir), dir, newSize) -} diff --git a/backend/files/indexingFiles.go b/backend/files/indexingFiles.go new file mode 100644 index 00000000..1ce331e5 --- /dev/null +++ b/backend/files/indexingFiles.go @@ -0,0 +1,229 @@ +package files + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/gtsteffaniak/filebrowser/settings" + "github.com/gtsteffaniak/filebrowser/utils" +) + +type Index struct { + Root string + Directories map[string]*FileInfo + NumDirs uint64 + NumFiles uint64 + NumDeleted uint64 + FilesChangedDuringIndexing bool + currentSchedule int + assessment string + indexingTime int + LastIndexed time.Time + 
SmartModifier time.Duration + mu sync.RWMutex + scannerMu sync.Mutex +} + +var ( + rootPath string = "/srv" + indexes []*Index + indexesMutex sync.RWMutex +) + +func InitializeIndex(enabled bool) { + if enabled { + time.Sleep(time.Second) + if settings.Config.Server.Root != "" { + rootPath = settings.Config.Server.Root + } + si := GetIndex(rootPath) + log.Println("Initializing index and assessing file system complexity") + si.RunIndexing("/", false) + go si.setupIndexingScanners() + } +} + +// Define a function to recursively index files and directories +func (si *Index) indexDirectory(adjustedPath string, quick, recursive bool) error { + realPath := strings.TrimRight(si.Root, "/") + adjustedPath + + // Open the directory + dir, err := os.Open(realPath) + if err != nil { + si.RemoveDirectory(adjustedPath) // Remove, must have been deleted + return err + } + defer dir.Close() + + dirInfo, err := dir.Stat() + if err != nil { + return err + } + combinedPath := adjustedPath + "/" + if adjustedPath == "/" { + combinedPath = "/" + } + // get whats currently in cache + si.mu.RLock() + cacheDirItems := []ItemInfo{} + modChange := true // default to true + cachedDir, exists := si.Directories[adjustedPath] + if exists && quick { + modChange = dirInfo.ModTime() != cachedDir.ModTime + cacheDirItems = cachedDir.Folders + } + si.mu.RUnlock() + + // If the directory has not been modified since the last index, skip expensive readdir + // recursively check cached dirs for mod time changes as well + if !modChange && recursive { + for _, item := range cacheDirItems { + err = si.indexDirectory(combinedPath+item.Name, quick, true) + if err != nil { + fmt.Printf("error indexing directory %v : %v", combinedPath+item.Name, err) + } + } + return nil + } + + if quick { + si.mu.Lock() + si.FilesChangedDuringIndexing = true + si.mu.Unlock() + } + + // Read directory contents + files, err := dir.Readdir(-1) + if err != nil { + return err + } + + var totalSize int64 + fileInfos := []ItemInfo{} + dirInfos := []ItemInfo{} + + // Process each file and directory in the current directory + for _, file := range files { + itemInfo := &ItemInfo{ + Name: file.Name(), + ModTime: file.ModTime(), + } + if file.IsDir() { + dirPath := combinedPath + file.Name() + if recursive { + // Recursively index the subdirectory + err = si.indexDirectory(dirPath, quick, recursive) + if err != nil { + log.Printf("Failed to index directory %s: %v", dirPath, err) + continue + } + } + realDirInfo, exists := si.GetMetadataInfo(dirPath, true) + if exists { + itemInfo.Size = realDirInfo.Size + } + totalSize += itemInfo.Size + itemInfo.Type = "directory" + dirInfos = append(dirInfos, *itemInfo) + si.NumDirs++ + } else { + _ = itemInfo.detectType(combinedPath+file.Name(), true, false, false) + itemInfo.Size = file.Size() + fileInfos = append(fileInfos, *itemInfo) + totalSize += itemInfo.Size + si.NumFiles++ + } + } + // Create FileInfo for the current directory + dirFileInfo := &FileInfo{ + Path: adjustedPath, + Files: fileInfos, + Folders: dirInfos, + } + dirFileInfo.ItemInfo = ItemInfo{ + Name: dirInfo.Name(), + Type: "directory", + Size: totalSize, + ModTime: dirInfo.ModTime(), + } + + dirFileInfo.SortItems() + + // Update the current directory metadata in the index + si.UpdateMetadata(dirFileInfo) + + return nil +} + +func (si *Index) makeIndexPath(subPath string) string { + if strings.HasPrefix(subPath, "./") { + subPath = strings.TrimPrefix(subPath, ".") + } + if strings.HasPrefix(subPath, ".") || si.Root == subPath { + return "/" + } + // clean 
path
+	subPath = strings.TrimSuffix(subPath, "/")
+	// remove index prefix
+	adjustedPath := strings.TrimPrefix(subPath, si.Root)
+	// remove trailing slash
+	adjustedPath = strings.TrimSuffix(adjustedPath, "/")
+	if !strings.HasPrefix(adjustedPath, "/") {
+		adjustedPath = "/" + adjustedPath
+	}
+	return adjustedPath
+}
+
+func (si *Index) recursiveUpdateDirSizes(childInfo *FileInfo, previousSize int64) {
+	parentDir := utils.GetParentDirectoryPath(childInfo.Path)
+	parentInfo, exists := si.GetMetadataInfo(parentDir, true)
+	if !exists || parentDir == "" {
+		return
+	}
+	// apply the child's size delta to the parent, then propagate it upward
+	prevParentSize := parentInfo.Size
+	parentInfo.Size = parentInfo.Size - previousSize + childInfo.Size
+	si.UpdateMetadata(parentInfo)
+	si.recursiveUpdateDirSizes(parentInfo, prevParentSize)
+}
+
+func (si *Index) RefreshFileInfo(opts FileOptions) error {
+	refreshOptions := FileOptions{
+		Path:  opts.Path,
+		IsDir: opts.IsDir,
+	}
+
+	if !refreshOptions.IsDir {
+		refreshOptions.Path = si.makeIndexPath(filepath.Dir(refreshOptions.Path))
+		refreshOptions.IsDir = true
+	} else {
+		refreshOptions.Path = si.makeIndexPath(refreshOptions.Path)
+	}
+	// capture the previous metadata before re-indexing so the size delta
+	// can be propagated to parent directories afterwards
+	current, firstExisted := si.GetMetadataInfo(refreshOptions.Path, true)
+	err := si.indexDirectory(refreshOptions.Path, false, false)
+	if err != nil {
+		return fmt.Errorf("file/folder does not exist to refresh data: %s", refreshOptions.Path)
+	}
+	file, exists := si.GetMetadataInfo(refreshOptions.Path, true)
+	if !exists {
+		return fmt.Errorf("file/folder does not exist in metadata: %s", refreshOptions.Path)
+	}
+	if firstExisted && current.Size != file.Size {
+		si.recursiveUpdateDirSizes(file, current.Size)
+	}
+	return nil
+}
diff --git a/backend/files/indexingSchedule.go b/backend/files/indexingSchedule.go
new file mode 100644
index 00000000..26eedbcb
--- /dev/null
+++ b/backend/files/indexingSchedule.go
@@ -0,0 +1,120 @@
+package files
+
+import (
+	"log"
+	"time"
+
+	"github.com/gtsteffaniak/filebrowser/settings"
+)
+
+// scan schedule intervals, from most to least frequent
+var scanSchedule = []time.Duration{
+	5 * time.Minute, // 5 minute quick scan & 25 minutes for a full scan
+	10 * time.Minute,
+	20 * time.Minute,
+	40 * time.Minute, // element [3] is the reset anchor for a full scan (fullScanAnchor)
+	1 * time.Hour,
+	2 * time.Hour,
+	3 * time.Hour,
+	4 * time.Hour, // 4 hours for quick scan & 20 hours for a full scan
+}
+
+func (si *Index) newScanner(origin string) {
+	fullScanAnchor := 3
+	fullScanCounter := 0 // every 5th scan is a full scan
+	for {
+		// Determine sleep time with modifiers
+		fullScanCounter++
+		sleepTime := scanSchedule[si.currentSchedule] + si.SmartModifier
+		if si.assessment == "simple" {
+			sleepTime = scanSchedule[si.currentSchedule] - si.SmartModifier
+		}
+		if settings.Config.Server.IndexingInterval > 0 {
+			sleepTime = time.Duration(settings.Config.Server.IndexingInterval) * time.Minute
+		}
+
+		// Log and sleep before indexing
+		log.Printf("Next scan in %v\n", sleepTime)
+		time.Sleep(sleepTime)
+
+		si.scannerMu.Lock()
+		if fullScanCounter == 5 {
+			si.RunIndexing(origin, false) // Full scan
+			fullScanCounter = 0
+		} else {
+			si.RunIndexing(origin, true) // Quick scan
+		}
+		si.scannerMu.Unlock()
+
+		// Adjust schedule based on file changes
+		if si.FilesChangedDuringIndexing {
+			// Move to at least the full-scan anchor or reduce interval
+			if si.currentSchedule >
fullScanAnchor { + si.currentSchedule = fullScanAnchor + } else if si.currentSchedule > 0 { + si.currentSchedule-- + } + } else { + // Increment toward the longest interval if no changes + if si.currentSchedule < len(scanSchedule)-1 { + si.currentSchedule++ + } + } + if si.assessment == "simple" && si.currentSchedule > 3 { + si.currentSchedule = 3 + } + // Ensure `currentSchedule` stays within bounds + if si.currentSchedule < 0 { + si.currentSchedule = 0 + } else if si.currentSchedule >= len(scanSchedule) { + si.currentSchedule = len(scanSchedule) - 1 + } + } +} + +func (si *Index) RunIndexing(origin string, quick bool) { + prevNumDirs := si.NumDirs + prevNumFiles := si.NumFiles + if quick { + log.Println("Starting quick scan") + } else { + log.Println("Starting full scan") + si.NumDirs = 0 + si.NumFiles = 0 + } + startTime := time.Now() + si.FilesChangedDuringIndexing = false + // Perform the indexing operation + err := si.indexDirectory("/", quick, true) + if err != nil { + log.Printf("Error during indexing: %v", err) + } + // Update the LastIndexed time + si.LastIndexed = time.Now() + si.indexingTime = int(time.Since(startTime).Seconds()) + if !quick { + // update smart indexing + if si.indexingTime < 3 || si.NumDirs < 10000 { + si.assessment = "simple" + si.SmartModifier = 4 * time.Minute + log.Println("Index is small and efficient, adjusting scan interval accordingly.") + } else if si.indexingTime > 120 || si.NumDirs > 500000 { + si.assessment = "complex" + modifier := si.indexingTime / 10 // seconds + si.SmartModifier = time.Duration(modifier) * time.Minute + log.Println("Index is large and complex, adjusting scan interval accordingly.") + } else { + si.assessment = "normal" + log.Println("Index is normal, quick scan set to every 5 minutes.") + } + log.Printf("Index assessment : complexity=%v directories=%v files=%v \n", si.assessment, si.NumDirs, si.NumFiles) + if si.NumDirs != prevNumDirs || si.NumFiles != prevNumFiles { + si.FilesChangedDuringIndexing = true + } + } + log.Printf("Time Spent Indexing : %v seconds\n", si.indexingTime) +} + +func (si *Index) setupIndexingScanners() { + go si.newScanner("/") +} diff --git a/backend/files/indexing_test.go b/backend/files/indexing_test.go index 7500285a..58051f15 100644 --- a/backend/files/indexing_test.go +++ b/backend/files/indexing_test.go @@ -3,7 +3,6 @@ package files import ( "encoding/json" "math/rand" - "path/filepath" "reflect" "testing" "time" @@ -12,7 +11,7 @@ import ( ) func BenchmarkFillIndex(b *testing.B) { - InitializeIndex(5, false) + InitializeIndex(false) si := GetIndex(settings.Config.Server.Root) b.ResetTimer() b.ReportAllocs() @@ -24,11 +23,11 @@ func BenchmarkFillIndex(b *testing.B) { func (si *Index) createMockData(numDirs, numFilesPerDir int) { for i := 0; i < numDirs; i++ { dirPath := generateRandomPath(rand.Intn(3) + 1) - files := []ReducedItem{} // Slice of FileInfo + files := []ItemInfo{} // Slice of FileInfo // Simulating files and directories with FileInfo for j := 0; j < numFilesPerDir; j++ { - newFile := ReducedItem{ + newFile := ItemInfo{ Name: "file-" + getRandomTerm() + getRandomExtension(), Size: rand.Int63n(1000), // Random size ModTime: time.Now().Add(-time.Duration(rand.Intn(100)) * time.Hour), // Random mod time @@ -37,7 +36,6 @@ func (si *Index) createMockData(numDirs, numFilesPerDir int) { files = append(files, newFile) } dirInfo := &FileInfo{ - Name: filepath.Base(dirPath), Path: dirPath, Files: files, } @@ -112,37 +110,3 @@ func TestGetIndex(t *testing.T) { }) } } - -func TestInitializeIndex(t 
*testing.T) { - type args struct { - intervalMinutes uint32 - } - tests := []struct { - name string - args args - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - InitializeIndex(tt.args.intervalMinutes, false) - }) - } -} - -func Test_indexingScheduler(t *testing.T) { - type args struct { - intervalMinutes uint32 - } - tests := []struct { - name string - args args - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - indexingScheduler(tt.args.intervalMinutes) - }) - } -} diff --git a/backend/files/search.go b/backend/files/search.go index e77b2b01..df855fce 100644 --- a/backend/files/search.go +++ b/backend/files/search.go @@ -28,7 +28,14 @@ func (si *Index) Search(search string, scope string, sourceSession string) []sea searchOptions := ParseSearch(search) results := make(map[string]searchResult, 0) count := 0 - directories := si.getDirsInScope(scope) + var directories []string + cachedDirs, ok := utils.SearchResultsCache.Get(si.Root + scope).([]string) + if ok { + directories = cachedDirs + } else { + directories = si.getDirsInScope(scope) + utils.SearchResultsCache.Set(si.Root+scope, directories) + } for _, searchTerm := range searchOptions.Terms { if searchTerm == "" { continue @@ -38,6 +45,7 @@ func (si *Index) Search(search string, scope string, sourceSession string) []sea } si.mu.Lock() for _, dirName := range directories { + scopedPath := strings.TrimPrefix(strings.TrimPrefix(dirName, scope), "/") + "/" si.mu.Unlock() dir, found := si.GetReducedMetadata(dirName, true) si.mu.Lock() @@ -47,25 +55,22 @@ func (si *Index) Search(search string, scope string, sourceSession string) []sea if count > maxSearchResults { break } - reducedDir := ReducedItem{ + reducedDir := ItemInfo{ Name: filepath.Base(dirName), Type: "directory", Size: dir.Size, } - matches := reducedDir.containsSearchTerm(searchTerm, searchOptions) if matches { - scopedPath := strings.TrimPrefix(strings.TrimPrefix(dirName, scope), "/") + "/" results[scopedPath] = searchResult{Path: scopedPath, Type: "directory", Size: dir.Size} count++ } - // search files first - for _, item := range dir.Items { - + for _, item := range dir.Files { fullPath := dirName + "/" + item.Name + scopedPath := strings.TrimPrefix(strings.TrimPrefix(fullPath, scope), "/") if item.Type == "directory" { - fullPath += "/" + scopedPath += "/" } value, found := sessionInProgress.Load(sourceSession) if !found || value != runningHash { @@ -77,7 +82,6 @@ func (si *Index) Search(search string, scope string, sourceSession string) []sea } matches := item.containsSearchTerm(searchTerm, searchOptions) if matches { - scopedPath := strings.TrimPrefix(strings.TrimPrefix(fullPath, scope), "/") results[scopedPath] = searchResult{Path: scopedPath, Type: item.Type, Size: item.Size} count++ } @@ -103,7 +107,7 @@ func (si *Index) Search(search string, scope string, sourceSession string) []sea // returns true if the file name contains the search term // returns file type if the file name contains the search term // returns size of file/dir if the file name contains the search term -func (fi ReducedItem) containsSearchTerm(searchTerm string, options *SearchOptions) bool { +func (fi ItemInfo) containsSearchTerm(searchTerm string, options SearchOptions) bool { fileTypes := map[string]bool{} largerThan := int64(options.LargerThan) * 1024 * 1024 diff --git a/backend/files/search_test.go b/backend/files/search_test.go index 094d8f5e..2ebbcfe2 100644 --- 
a/backend/files/search_test.go +++ b/backend/files/search_test.go @@ -8,7 +8,7 @@ import ( ) func BenchmarkSearchAllIndexes(b *testing.B) { - InitializeIndex(5, false) + InitializeIndex(false) si := GetIndex(rootPath) si.createMockData(50, 3) // 50 dirs, 3 files per dir @@ -29,25 +29,25 @@ func BenchmarkSearchAllIndexes(b *testing.B) { func TestParseSearch(t *testing.T) { tests := []struct { input string - want *SearchOptions + want SearchOptions }{ { input: "my test search", - want: &SearchOptions{ + want: SearchOptions{ Conditions: map[string]bool{"exact": false}, Terms: []string{"my test search"}, }, }, { input: "case:exact my|test|search", - want: &SearchOptions{ + want: SearchOptions{ Conditions: map[string]bool{"exact": true}, Terms: []string{"my", "test", "search"}, }, }, { input: "type:largerThan=100 type:smallerThan=1000 test", - want: &SearchOptions{ + want: SearchOptions{ Conditions: map[string]bool{"exact": false, "larger": true, "smaller": true}, Terms: []string{"test"}, LargerThan: 100, @@ -56,7 +56,7 @@ func TestParseSearch(t *testing.T) { }, { input: "type:audio thisfile", - want: &SearchOptions{ + want: SearchOptions{ Conditions: map[string]bool{"exact": false, "audio": true}, Terms: []string{"thisfile"}, }, @@ -74,7 +74,7 @@ func TestParseSearch(t *testing.T) { } func TestSearchWhileIndexing(t *testing.T) { - InitializeIndex(5, false) + InitializeIndex(false) si := GetIndex(rootPath) searchTerms := generateRandomSearchTerms(10) @@ -89,27 +89,29 @@ func TestSearchWhileIndexing(t *testing.T) { func TestSearchIndexes(t *testing.T) { index := Index{ Directories: map[string]*FileInfo{ - "/test": {Files: []ReducedItem{{Name: "audio1.wav", Type: "audio"}}}, - "/test/path": {Files: []ReducedItem{{Name: "file.txt", Type: "text"}}}, - "/new/test": {Files: []ReducedItem{ + "/test": {Files: []ItemInfo{{Name: "audio1.wav", Type: "audio"}}}, + "/test/path": {Files: []ItemInfo{{Name: "file.txt", Type: "text"}}}, + "/new/test": {Files: []ItemInfo{ {Name: "audio.wav", Type: "audio"}, {Name: "video.mp4", Type: "video"}, {Name: "video.MP4", Type: "video"}, }}, - "/new/test/path": {Files: []ReducedItem{{Name: "archive.zip", Type: "archive"}}}, + "/new/test/path": {Files: []ItemInfo{{Name: "archive.zip", Type: "archive"}}}, "/firstDir": { - Files: []ReducedItem{ + Files: []ItemInfo{ {Name: "archive.zip", Size: 100, Type: "archive"}, }, - Dirs: map[string]*FileInfo{ - "thisIsDir": {Name: "thisIsDir", Size: 2 * 1024 * 1024}, + Folders: []ItemInfo{ + {Name: "thisIsDir", Type: "directory", Size: 2 * 1024 * 1024}, }, }, "/firstDir/thisIsDir": { - Files: []ReducedItem{ + Files: []ItemInfo{ {Name: "hi.txt", Type: "text"}, }, - Size: 2 * 1024 * 1024, + ItemInfo: ItemInfo{ + Size: 2 * 1024 * 1024, + }, }, }, } diff --git a/backend/files/sync.go b/backend/files/sync.go index 7eb35ba1..9205fe1d 100644 --- a/backend/files/sync.go +++ b/backend/files/sync.go @@ -1,10 +1,7 @@ package files import ( - "log" "path/filepath" - "sort" - "time" "github.com/gtsteffaniak/filebrowser/settings" ) @@ -13,15 +10,14 @@ import ( func (si *Index) UpdateMetadata(info *FileInfo) bool { si.mu.Lock() defer si.mu.Unlock() - info.CacheTime = time.Now() si.Directories[info.Path] = info return true } // GetMetadataInfo retrieves the FileInfo from the specified directory in the index. 
func (si *Index) GetReducedMetadata(target string, isDir bool) (*FileInfo, bool) { - si.mu.RLock() - defer si.mu.RUnlock() + si.mu.Lock() + defer si.mu.Unlock() checkDir := si.makeIndexPath(target) if !isDir { checkDir = si.makeIndexPath(filepath.Dir(target)) @@ -30,50 +26,25 @@ func (si *Index) GetReducedMetadata(target string, isDir bool) (*FileInfo, bool) if !exists { return nil, false } - if !isDir { - if checkDir == "/" { - checkDir = "" - } - baseName := filepath.Base(target) - for _, item := range dir.Files { - if item.Name == baseName { - return &FileInfo{ - Name: item.Name, - Size: item.Size, - ModTime: item.ModTime, - Type: item.Type, - Path: checkDir + "/" + item.Name, - }, true - } + if isDir { + return dir, true + } + // handle file + if checkDir == "/" { + checkDir = "" + } + baseName := filepath.Base(target) + for _, item := range dir.Files { + if item.Name == baseName { + return &FileInfo{ + Path: checkDir + "/" + item.Name, + ItemInfo: item, + }, true } - return nil, false } - cleanedItems := []ReducedItem{} - for name, item := range dir.Dirs { - cleanedItems = append(cleanedItems, ReducedItem{ - Name: name, - Size: item.Size, - ModTime: item.ModTime, - Type: "directory", - }) - } - cleanedItems = append(cleanedItems, dir.Files...) - sort.Slice(cleanedItems, func(i, j int) bool { - return cleanedItems[i].Name < cleanedItems[j].Name - }) - dirname := filepath.Base(dir.Path) - if dirname == "." { - dirname = "/" - } - // construct file info - return &FileInfo{ - Name: dirname, - Type: "directory", - Items: cleanedItems, - ModTime: dir.ModTime, - Size: dir.Size, - }, true + return nil, false + } // GetMetadataInfo retrieves the FileInfo from the specified directory in the index. @@ -91,29 +62,10 @@ func (si *Index) GetMetadataInfo(target string, isDir bool) (*FileInfo, bool) { func (si *Index) RemoveDirectory(path string) { si.mu.Lock() defer si.mu.Unlock() + si.NumDeleted++ delete(si.Directories, path) } -func (si *Index) UpdateCount(given string) { - si.mu.Lock() - defer si.mu.Unlock() - if given == "files" { - si.NumFiles++ - } else if given == "dirs" { - si.NumDirs++ - } else { - log.Println("could not update unknown type: ", given) - } -} - -func (si *Index) resetCount() { - si.mu.Lock() - defer si.mu.Unlock() - si.NumDirs = 0 - si.NumFiles = 0 - si.inProgress = true -} - func GetIndex(root string) *Index { for _, index := range indexes { if index.Root == root { @@ -128,7 +80,6 @@ func GetIndex(root string) *Index { Directories: map[string]*FileInfo{}, NumDirs: 0, NumFiles: 0, - inProgress: false, } newIndex.Directories["/"] = &FileInfo{} indexesMutex.Lock() diff --git a/backend/files/sync_test.go b/backend/files/sync_test.go index 36333750..70a4ffad 100644 --- a/backend/files/sync_test.go +++ b/backend/files/sync_test.go @@ -34,7 +34,7 @@ func TestGetFileMetadataSize(t *testing.T) { t.Run(tt.name, func(t *testing.T) { fileInfo, _ := testIndex.GetReducedMetadata(tt.adjustedPath, true) // Iterate over fileInfo.Items to look for expectedName - for _, item := range fileInfo.Items { + for _, item := range fileInfo.Files { // Assert the existence and the name if item.Name == tt.expectedName { assert.Equal(t, tt.expectedSize, item.Size) @@ -89,8 +89,8 @@ func TestGetFileMetadata(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - fileInfo, _ := testIndex.GetReducedMetadata(tt.adjustedPath, tt.isDir) - if fileInfo == nil { + fileInfo, exists := testIndex.GetReducedMetadata(tt.adjustedPath, tt.isDir) + if !exists { found := false assert.Equal(t, 
tt.expectedExists, found) return @@ -98,7 +98,7 @@ func TestGetFileMetadata(t *testing.T) { found := false if tt.isDir { // Iterate over fileInfo.Items to look for expectedName - for _, item := range fileInfo.Items { + for _, item := range fileInfo.Files { // Assert the existence and the name if item.Name == tt.expectedName { found = true @@ -120,9 +120,7 @@ func TestGetFileMetadata(t *testing.T) { func TestUpdateFileMetadata(t *testing.T) { info := &FileInfo{ Path: "/testpath", - Name: "testpath", - Type: "directory", - Files: []ReducedItem{ + Files: []ItemInfo{ {Name: "testfile.txt"}, {Name: "anotherfile.txt"}, }, @@ -165,9 +163,11 @@ func TestSetDirectoryInfo(t *testing.T) { Directories: map[string]*FileInfo{ "/testpath": { Path: "/testpath", - Name: "testpath", - Type: "directory", - Items: []ReducedItem{ + ItemInfo: ItemInfo{ + Name: "testpath", + Type: "directory", + }, + Files: []ItemInfo{ {Name: "testfile.txt"}, {Name: "anotherfile.txt"}, }, @@ -176,15 +176,17 @@ func TestSetDirectoryInfo(t *testing.T) { } dir := &FileInfo{ Path: "/newPath", - Name: "newPath", - Type: "directory", - Items: []ReducedItem{ + ItemInfo: ItemInfo{ + Name: "newPath", + Type: "directory", + }, + Files: []ItemInfo{ {Name: "testfile.txt"}, }, } index.UpdateMetadata(dir) storedDir, exists := index.Directories["/newPath"] - if !exists || storedDir.Items[0].Name != "testfile.txt" { + if !exists || storedDir.Files[0].Name != "testfile.txt" { t.Fatalf("expected SetDirectoryInfo to store directory info correctly") } } @@ -203,56 +205,34 @@ func TestRemoveDirectory(t *testing.T) { } } -// Test for UpdateCount -func TestUpdateCount(t *testing.T) { - index := &Index{} - index.UpdateCount("files") - if index.NumFiles != 1 { - t.Fatalf("expected NumFiles to be 1 after UpdateCount('files')") - } - if index.NumFiles != 1 { - t.Fatalf("expected NumFiles to be 1 after UpdateCount('files')") - } - index.UpdateCount("dirs") - if index.NumDirs != 1 { - t.Fatalf("expected NumDirs to be 1 after UpdateCount('dirs')") - } - index.UpdateCount("unknown") - // Just ensure it does not panic or update any counters - if index.NumFiles != 1 || index.NumDirs != 1 { - t.Fatalf("expected counts to remain unchanged for unknown type") - } - index.resetCount() - if index.NumFiles != 0 || index.NumDirs != 0 || !index.inProgress { - t.Fatalf("expected resetCount to reset counts and set inProgress to true") - } -} - func init() { testIndex = Index{ - Root: "/", - NumFiles: 10, - NumDirs: 5, - inProgress: false, + Root: "/", + NumFiles: 10, + NumDirs: 5, Directories: map[string]*FileInfo{ "/testpath": { Path: "/testpath", - Name: "testpath", - Type: "directory", - Files: []ReducedItem{ + ItemInfo: ItemInfo{ + Name: "testpath", + Type: "directory", + }, + Files: []ItemInfo{ {Name: "testfile.txt", Size: 100}, {Name: "anotherfile.txt", Size: 100}, }, }, "/anotherpath": { Path: "/anotherpath", - Name: "anotherpath", - Type: "directory", - Files: []ReducedItem{ + ItemInfo: ItemInfo{ + Name: "anotherpath", + Type: "directory", + }, + Files: []ItemInfo{ {Name: "afile.txt", Size: 100}, }, - Dirs: map[string]*FileInfo{ - "directory": {Name: "directory", Type: "directory", Size: 100}, + Folders: []ItemInfo{ + {Name: "directory", Type: "directory", Size: 100}, }, }, }, diff --git a/backend/http/auth.go b/backend/http/auth.go index abf34674..1b01d4b7 100644 --- a/backend/http/auth.go +++ b/backend/http/auth.go @@ -2,9 +2,11 @@ package http import ( "encoding/json" + libError "errors" "fmt" "log" "net/http" + "net/url" "os" "strings" "sync" @@ -12,9 +14,11 
@@ import ( "github.com/golang-jwt/jwt/v4" "github.com/golang-jwt/jwt/v4/request" + "golang.org/x/crypto/bcrypt" "github.com/gtsteffaniak/filebrowser/errors" "github.com/gtsteffaniak/filebrowser/settings" + "github.com/gtsteffaniak/filebrowser/share" "github.com/gtsteffaniak/filebrowser/users" "github.com/gtsteffaniak/filebrowser/utils" ) @@ -207,3 +211,29 @@ func makeSignedTokenAPI(user *users.User, name string, duration time.Duration, p } return claim, err } + +func authenticateShareRequest(r *http.Request, l *share.Link) (int, error) { + if l.PasswordHash == "" { + return 200, nil + } + + if r.URL.Query().Get("token") == l.Token { + return 200, nil + } + + password := r.Header.Get("X-SHARE-PASSWORD") + password, err := url.QueryUnescape(password) + if err != nil { + return http.StatusUnauthorized, err + } + if password == "" { + return http.StatusUnauthorized, nil + } + if err := bcrypt.CompareHashAndPassword([]byte(l.PasswordHash), []byte(password)); err != nil { + if libError.Is(err, bcrypt.ErrMismatchedHashAndPassword) { + return http.StatusUnauthorized, nil + } + return 401, err + } + return 200, nil +} diff --git a/backend/http/middleware.go b/backend/http/middleware.go index 2b8054d2..c6967397 100644 --- a/backend/http/middleware.go +++ b/backend/http/middleware.go @@ -26,6 +26,8 @@ type HttpResponse struct { Token string `json:"token,omitempty"` } +var FileInfoFasterFunc = files.FileInfoFaster + // Updated handleFunc to match the new signature type handleFunc func(w http.ResponseWriter, r *http.Request, data *requestContext) (int, error) @@ -39,30 +41,30 @@ func withHashFileHelper(fn handleFunc) handleFunc { // Get the file link by hash link, err := store.Share.GetByHash(hash) if err != nil { - return http.StatusNotFound, err + return http.StatusNotFound, fmt.Errorf("share not found") } // Authenticate the share request if needed var status int if link.Hash != "" { status, err = authenticateShareRequest(r, link) if err != nil || status != http.StatusOK { - return status, err + return status, fmt.Errorf("could not authenticate share request") } } // Retrieve the user (using the public user by default) user := &users.PublicUser // Get file information with options - file, err := files.FileInfoFaster(files.FileOptions{ + file, err := FileInfoFasterFunc(files.FileOptions{ Path: filepath.Join(user.Scope, link.Path+"/"+path), Modify: user.Perm.Modify, Expand: true, ReadHeader: config.Server.TypeDetectionByHeader, Checker: user, // Call your checker function here - Token: link.Token, }) + file.Token = link.Token if err != nil { - return errToStatus(err), err + return errToStatus(err), fmt.Errorf("error fetching share from server") } // Set the file info in the `data` object @@ -89,6 +91,7 @@ func withAdminHelper(fn handleFunc) handleFunc { // Middleware to retrieve and authenticate user func withUserHelper(fn handleFunc) handleFunc { return func(w http.ResponseWriter, r *http.Request, data *requestContext) (int, error) { + keyFunc := func(token *jwt.Token) (interface{}, error) { return config.Auth.Key, nil } @@ -243,6 +246,7 @@ func (w *ResponseWriterWrapper) Write(b []byte) (int, error) { // LoggingMiddleware logs each request and its status code func LoggingMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + start := time.Now() // Wrap the ResponseWriter to capture the status code diff --git a/backend/http/middleware_test.go b/backend/http/middleware_test.go index 31264c98..a14a53fc 100644 --- 
a/backend/http/middleware_test.go +++ b/backend/http/middleware_test.go @@ -9,6 +9,7 @@ import ( "github.com/asdine/storm/v3" "github.com/gtsteffaniak/filebrowser/diskcache" + "github.com/gtsteffaniak/filebrowser/files" "github.com/gtsteffaniak/filebrowser/img" "github.com/gtsteffaniak/filebrowser/settings" "github.com/gtsteffaniak/filebrowser/share" @@ -37,6 +38,27 @@ func setupTestEnv(t *testing.T) { fileCache = diskcache.NewNoOp() // mocked imgSvc = img.New(1) // mocked config = &settings.Config // mocked + mockFileInfoFaster(t) // Mock FileInfoFasterFunc for this test +} + +func mockFileInfoFaster(t *testing.T) { + // Backup the original function + originalFileInfoFaster := FileInfoFasterFunc + // Defer restoration of the original function + t.Cleanup(func() { FileInfoFasterFunc = originalFileInfoFaster }) + + // Mock the function to skip execution + FileInfoFasterFunc = func(opts files.FileOptions) (files.ExtendedFileInfo, error) { + return files.ExtendedFileInfo{ + FileInfo: &files.FileInfo{ + Path: opts.Path, + ItemInfo: files.ItemInfo{ + Name: "mocked_file", + Size: 12345, + }, + }, + }, nil + } } func TestWithAdminHelper(t *testing.T) { @@ -197,10 +219,7 @@ func TestPublicShareHandlerAuthentication(t *testing.T) { req := newTestRequest(t, tc.share.Hash, tc.token, tc.password, tc.extraHeaders) // Serve the request - status, err := handler(recorder, req, &requestContext{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + status, _ := handler(recorder, req, &requestContext{}) // Check if the response matches the expected status code if status != tc.expectedStatusCode { diff --git a/backend/http/preview.go b/backend/http/preview.go index 3b3d050b..ac71a0a5 100644 --- a/backend/http/preview.go +++ b/backend/http/preview.go @@ -49,27 +49,23 @@ func previewHandler(w http.ResponseWriter, r *http.Request, d *requestContext) ( if path == "" { return http.StatusBadRequest, fmt.Errorf("invalid request path") } - file, err := files.FileInfoFaster(files.FileOptions{ + response, err := files.FileInfoFaster(files.FileOptions{ Path: filepath.Join(d.user.Scope, path), Modify: d.user.Perm.Modify, Expand: true, ReadHeader: config.Server.TypeDetectionByHeader, Checker: d.user, }) + fileInfo := response.FileInfo if err != nil { return errToStatus(err), err } - realPath, _, err := files.GetRealPath(file.Path) - if err != nil { - return http.StatusInternalServerError, err - } - file.Path = realPath - if file.Type == "directory" { + if fileInfo.Type == "directory" { return http.StatusBadRequest, fmt.Errorf("can't create preview for directory") } - setContentDisposition(w, r, file) - if file.Type != "image" { - return http.StatusNotImplemented, fmt.Errorf("can't create preview for %s type", file.Type) + setContentDisposition(w, r, fileInfo) + if fileInfo.Type != "image" { + return http.StatusNotImplemented, fmt.Errorf("can't create preview for %s type", fileInfo.Type) } if (previewSize == "large" && !config.Server.ResizePreview) || @@ -77,40 +73,40 @@ func previewHandler(w http.ResponseWriter, r *http.Request, d *requestContext) ( if !d.user.Perm.Download { return http.StatusAccepted, nil } - return rawFileHandler(w, r, file) + return rawFileHandler(w, r, fileInfo) } - format, err := imgSvc.FormatFromExtension(filepath.Ext(file.Name)) + format, err := imgSvc.FormatFromExtension(filepath.Ext(fileInfo.Name)) // Unsupported extensions directly return the raw data if err == img.ErrUnsupportedFormat || format == img.FormatGif { if !d.user.Perm.Download { return http.StatusAccepted, nil } - return 
rawFileHandler(w, r, file) + return rawFileHandler(w, r, fileInfo) } if err != nil { return errToStatus(err), err } - cacheKey := previewCacheKey(file, previewSize) + cacheKey := previewCacheKey(fileInfo, previewSize) resizedImage, ok, err := fileCache.Load(r.Context(), cacheKey) if err != nil { return errToStatus(err), err } if !ok { - resizedImage, err = createPreview(imgSvc, fileCache, file, previewSize) + resizedImage, err = createPreview(imgSvc, fileCache, fileInfo, previewSize) if err != nil { return errToStatus(err), err } } w.Header().Set("Cache-Control", "private") - http.ServeContent(w, r, file.Path, file.ModTime, bytes.NewReader(resizedImage)) + http.ServeContent(w, r, fileInfo.RealPath(), fileInfo.ModTime, bytes.NewReader(resizedImage)) return 0, nil } func createPreview(imgSvc ImgService, fileCache FileCache, file *files.FileInfo, previewSize string) ([]byte, error) { - fd, err := os.Open(file.Path) + fd, err := os.Open(file.RealPath()) if err != nil { return nil, err } diff --git a/backend/http/public.go b/backend/http/public.go index 42f98c9e..63e32cff 100644 --- a/backend/http/public.go +++ b/backend/http/public.go @@ -2,24 +2,19 @@ package http import ( "encoding/json" - "errors" "fmt" "net/http" - "net/url" "strings" - "golang.org/x/crypto/bcrypt" - "github.com/gtsteffaniak/filebrowser/files" "github.com/gtsteffaniak/filebrowser/settings" - "github.com/gtsteffaniak/filebrowser/share" "github.com/gtsteffaniak/filebrowser/users" _ "github.com/gtsteffaniak/filebrowser/swagger/docs" ) func publicShareHandler(w http.ResponseWriter, r *http.Request, d *requestContext) (int, error) { - file, ok := d.raw.(*files.FileInfo) + file, ok := d.raw.(files.ExtendedFileInfo) if !ok { return http.StatusInternalServerError, fmt.Errorf("failed to assert type *files.FileInfo") } @@ -38,8 +33,8 @@ func publicUserGetHandler(w http.ResponseWriter, r *http.Request) { } func publicDlHandler(w http.ResponseWriter, r *http.Request, d *requestContext) (int, error) { - file, _ := d.raw.(*files.FileInfo) - if file == nil { + file, ok := d.raw.(files.ExtendedFileInfo) + if !ok { return http.StatusInternalServerError, fmt.Errorf("failed to assert type files.FileInfo") } if d.user == nil { @@ -47,36 +42,10 @@ func publicDlHandler(w http.ResponseWriter, r *http.Request, d *requestContext) } if file.Type == "directory" { - return rawDirHandler(w, r, d, file) + return rawDirHandler(w, r, d, file.FileInfo) } - return rawFileHandler(w, r, file) -} - -func authenticateShareRequest(r *http.Request, l *share.Link) (int, error) { - if l.PasswordHash == "" { - return 200, nil - } - - if r.URL.Query().Get("token") == l.Token { - return 200, nil - } - - password := r.Header.Get("X-SHARE-PASSWORD") - password, err := url.QueryUnescape(password) - if err != nil { - return http.StatusUnauthorized, err - } - if password == "" { - return http.StatusUnauthorized, nil - } - if err := bcrypt.CompareHashAndPassword([]byte(l.PasswordHash), []byte(password)); err != nil { - if errors.Is(err, bcrypt.ErrMismatchedHashAndPassword) { - return http.StatusUnauthorized, nil - } - return 401, err - } - return 200, nil + return rawFileHandler(w, r, file.FileInfo) } // health godoc diff --git a/backend/http/raw.go b/backend/http/raw.go index 7bac12d1..eeee5a8f 100644 --- a/backend/http/raw.go +++ b/backend/http/raw.go @@ -99,7 +99,7 @@ func rawHandler(w http.ResponseWriter, r *http.Request, d *requestContext) (int, return http.StatusAccepted, nil } path := r.URL.Query().Get("path") - file, err := files.FileInfoFaster(files.FileOptions{ + 
fileInfo, err := files.FileInfoFaster(files.FileOptions{ Path: filepath.Join(d.user.Scope, path), Modify: d.user.Perm.Modify, Expand: false, @@ -109,15 +109,19 @@ func rawHandler(w http.ResponseWriter, r *http.Request, d *requestContext) (int, if err != nil { return errToStatus(err), err } - if files.IsNamedPipe(file.Mode) { - setContentDisposition(w, r, file) - return 0, nil - } - if file.Type == "directory" { - return rawDirHandler(w, r, d, file) + + // TODO, how to handle? we removed mode, is it needed? + // maybe instead of mode we use bool only two conditions are checked + //if files.IsNamedPipe(fileInfo.Mode) { + // setContentDisposition(w, r, file) + // return 0, nil + //} + + if fileInfo.Type == "directory" { + return rawDirHandler(w, r, d, fileInfo.FileInfo) } - return rawFileHandler(w, r, file) + return rawFileHandler(w, r, fileInfo.FileInfo) } func addFile(ar archiver.Writer, d *requestContext, path, commonPath string) error { diff --git a/backend/http/resource.go b/backend/http/resource.go index ba5b5adb..e165fb43 100644 --- a/backend/http/resource.go +++ b/backend/http/resource.go @@ -14,6 +14,7 @@ import ( "github.com/gtsteffaniak/filebrowser/errors" "github.com/gtsteffaniak/filebrowser/files" + "github.com/gtsteffaniak/filebrowser/utils" ) // resourceGetHandler retrieves information about a resource. @@ -31,9 +32,10 @@ import ( // @Failure 500 {object} map[string]string "Internal server error" // @Router /api/resources [get] func resourceGetHandler(w http.ResponseWriter, r *http.Request, d *requestContext) (int, error) { + // TODO source := r.URL.Query().Get("source") path := r.URL.Query().Get("path") - file, err := files.FileInfoFaster(files.FileOptions{ + fileInfo, err := files.FileInfoFaster(files.FileOptions{ Path: filepath.Join(d.user.Scope, path), Modify: d.user.Perm.Modify, Expand: true, @@ -44,18 +46,19 @@ func resourceGetHandler(w http.ResponseWriter, r *http.Request, d *requestContex if err != nil { return errToStatus(err), err } - if file.Type == "directory" { - return renderJSON(w, r, file) + if fileInfo.Type == "directory" { + return renderJSON(w, r, fileInfo) } - if checksum := r.URL.Query().Get("checksum"); checksum != "" { - err := file.Checksum(checksum) + if algo := r.URL.Query().Get("checksum"); algo != "" { + checksums, err := files.GetChecksum(fileInfo.Path, algo) if err == errors.ErrInvalidOption { return http.StatusBadRequest, nil } else if err != nil { return http.StatusInternalServerError, err } + fileInfo.Checksums = checksums } - return renderJSON(w, r, file) + return renderJSON(w, r, fileInfo) } @@ -90,13 +93,13 @@ func resourceDeleteHandler(w http.ResponseWriter, r *http.Request, d *requestCon ReadHeader: config.Server.TypeDetectionByHeader, Checker: d.user, } - file, err := files.FileInfoFaster(fileOpts) + fileInfo, err := files.FileInfoFaster(fileOpts) if err != nil { return errToStatus(err), err } // delete thumbnails - err = delThumbs(r.Context(), fileCache, file) + err = delThumbs(r.Context(), fileCache, fileInfo.FileInfo) if err != nil { return errToStatus(err), err } @@ -131,11 +134,10 @@ func resourcePostHandler(w http.ResponseWriter, r *http.Request, d *requestConte return http.StatusForbidden, nil } fileOpts := files.FileOptions{ - Path: filepath.Join(d.user.Scope, path), - Modify: d.user.Perm.Modify, - Expand: false, - ReadHeader: config.Server.TypeDetectionByHeader, - Checker: d.user, + Path: filepath.Join(d.user.Scope, path), + Modify: d.user.Perm.Modify, + Expand: false, + Checker: d.user, } // Directories creation on POST. 
if strings.HasSuffix(path, "/") { @@ -145,7 +147,7 @@ func resourcePostHandler(w http.ResponseWriter, r *http.Request, d *requestConte } return http.StatusOK, nil } - file, err := files.FileInfoFaster(fileOpts) + fileInfo, err := files.FileInfoFaster(fileOpts) if err == nil { if r.URL.Query().Get("override") != "true" { return http.StatusConflict, nil @@ -156,13 +158,17 @@ func resourcePostHandler(w http.ResponseWriter, r *http.Request, d *requestConte return http.StatusForbidden, nil } - err = delThumbs(r.Context(), fileCache, file) + err = delThumbs(r.Context(), fileCache, fileInfo.FileInfo) if err != nil { return errToStatus(err), err } } err = files.WriteFile(fileOpts, r.Body) - return errToStatus(err), err + if err != nil { + return errToStatus(err), err + + } + return http.StatusOK, nil } // resourcePutHandler updates an existing file resource. @@ -301,7 +307,7 @@ func patchAction(ctx context.Context, action, src, dst string, d *requestContext if !d.user.Perm.Rename { return errors.ErrPermissionDenied } - file, err := files.FileInfoFaster(files.FileOptions{ + fileInfo, err := files.FileInfoFaster(files.FileOptions{ Path: src, IsDir: isSrcDir, Modify: d.user.Perm.Modify, @@ -314,7 +320,7 @@ func patchAction(ctx context.Context, action, src, dst string, d *requestContext } // delete thumbnails - err = delThumbs(ctx, fileCache, file) + err = delThumbs(ctx, fileCache, fileInfo.FileInfo) if err != nil { return err } @@ -345,25 +351,29 @@ func diskUsage(w http.ResponseWriter, r *http.Request, d *requestContext) (int, if source == "" { source = "/" } - file, err := files.FileInfoFaster(files.FileOptions{ - Path: source, - Checker: d.user, - }) + + value, ok := utils.DiskUsageCache.Get(source).(DiskUsageResponse) + if ok { + return renderJSON(w, r, &value) + } + + fPath, isDir, err := files.GetRealPath(d.user.Scope, source) if err != nil { return errToStatus(err), err } - fPath := file.RealPath() - if file.Type != "directory" { - return http.StatusBadRequest, fmt.Errorf("path is not a directory") + if !isDir { + return http.StatusNotFound, fmt.Errorf("not a directory: %s", source) } usage, err := disk.UsageWithContext(r.Context(), fPath) if err != nil { return errToStatus(err), err } - return renderJSON(w, r, &DiskUsageResponse{ + latestUsage := DiskUsageResponse{ Total: usage.Total, Used: usage.Used, - }) + } + utils.DiskUsageCache.Set(source, latestUsage) + return renderJSON(w, r, &latestUsage) } func inspectIndex(w http.ResponseWriter, r *http.Request) { diff --git a/backend/http/router.go b/backend/http/router.go index d7c9492f..66530165 100644 --- a/backend/http/router.go +++ b/backend/http/router.go @@ -122,7 +122,7 @@ func StartHttp(Service ImgService, storage *storage.Storage, cache FileCache) { router.HandleFunc(config.Server.BaseURL, indexHandler) // health - router.HandleFunc(fmt.Sprintf("GET %vhealth/", config.Server.BaseURL), healthHandler) + router.HandleFunc(fmt.Sprintf("GET %vhealth", config.Server.BaseURL), healthHandler) // Swagger router.Handle(fmt.Sprintf("%vswagger/", config.Server.BaseURL), @@ -172,7 +172,7 @@ func StartHttp(Service ImgService, storage *storage.Storage, cache FileCache) { } else { // Set HTTP scheme and the default port for HTTP scheme = "http" - if config.Server.Port != 443 { + if config.Server.Port != 80 { port = fmt.Sprintf(":%d", config.Server.Port) } // Build the full URL with host and port diff --git a/backend/http/share.go b/backend/http/share.go index 96ee4e89..e04d470b 100644 --- a/backend/http/share.go +++ b/backend/http/share.go @@ -69,7 
+69,7 @@ func shareGetsHandler(w http.ResponseWriter, r *http.Request, d *requestContext) return renderJSON(w, r, []*share.Link{}) } if err != nil { - return http.StatusInternalServerError, err + return http.StatusInternalServerError, fmt.Errorf("error getting share info from server") } return renderJSON(w, r, s) } @@ -188,7 +188,7 @@ func getSharePasswordHash(body share.CreateBody) (data []byte, statuscode int, e hash, err := bcrypt.GenerateFromPassword([]byte(body.Password), bcrypt.DefaultCost) if err != nil { - return nil, http.StatusInternalServerError, fmt.Errorf("failed to hash password: %w", err) + return nil, http.StatusInternalServerError, fmt.Errorf("failed to hash password") } return hash, 0, nil diff --git a/backend/myfolder/subfolder/Screenshot 2024-11-18 at 2.16.29 PM.png b/backend/myfolder/subfolder/Screenshot 2024-11-18 at 2.16.29 PM.png deleted file mode 100755 index 4deb950c..00000000 Binary files a/backend/myfolder/subfolder/Screenshot 2024-11-18 at 2.16.29 PM.png and /dev/null differ diff --git a/backend/settings/config.go b/backend/settings/config.go index 7dbd9ad2..bfb4a5a9 100644 --- a/backend/settings/config.go +++ b/backend/settings/config.go @@ -66,7 +66,6 @@ func setDefaults() Settings { EnableThumbnails: true, ResizePreview: false, EnableExec: false, - IndexingInterval: 5, Port: 80, NumImageProcessors: 4, BaseURL: "", diff --git a/backend/swagger/docs/docs.go b/backend/swagger/docs/docs.go index b010ba4b..cee01dd7 100644 --- a/backend/swagger/docs/docs.go +++ b/backend/swagger/docs/docs.go @@ -1155,22 +1155,16 @@ const docTemplate = `{ "files.FileInfo": { "type": "object", "properties": { - "checksums": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "content": { - "type": "string" - }, - "isSymlink": { - "type": "boolean" - }, - "items": { + "files": { "type": "array", "items": { - "$ref": "#/definitions/files.ReducedItem" + "$ref": "#/definitions/files.ItemInfo" + } + }, + "folders": { + "type": "array", + "items": { + "$ref": "#/definitions/files.ItemInfo" } }, "modified": { @@ -1185,26 +1179,14 @@ const docTemplate = `{ "size": { "type": "integer" }, - "subtitles": { - "type": "array", - "items": { - "type": "string" - } - }, - "token": { - "type": "string" - }, "type": { "type": "string" } } }, - "files.ReducedItem": { + "files.ItemInfo": { "type": "object", "properties": { - "content": { - "type": "string" - }, "modified": { "type": "string" }, diff --git a/backend/swagger/docs/swagger.json b/backend/swagger/docs/swagger.json index 6df9e4c5..8b6a23d1 100644 --- a/backend/swagger/docs/swagger.json +++ b/backend/swagger/docs/swagger.json @@ -1144,22 +1144,16 @@ "files.FileInfo": { "type": "object", "properties": { - "checksums": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "content": { - "type": "string" - }, - "isSymlink": { - "type": "boolean" - }, - "items": { + "files": { "type": "array", "items": { - "$ref": "#/definitions/files.ReducedItem" + "$ref": "#/definitions/files.ItemInfo" + } + }, + "folders": { + "type": "array", + "items": { + "$ref": "#/definitions/files.ItemInfo" } }, "modified": { @@ -1174,26 +1168,14 @@ "size": { "type": "integer" }, - "subtitles": { - "type": "array", - "items": { - "type": "string" - } - }, - "token": { - "type": "string" - }, "type": { "type": "string" } } }, - "files.ReducedItem": { + "files.ItemInfo": { "type": "object", "properties": { - "content": { - "type": "string" - }, "modified": { "type": "string" }, diff --git 
a/backend/swagger/docs/swagger.yaml b/backend/swagger/docs/swagger.yaml index bb26c052..8a526441 100644 --- a/backend/swagger/docs/swagger.yaml +++ b/backend/swagger/docs/swagger.yaml @@ -1,17 +1,13 @@ definitions: files.FileInfo: properties: - checksums: - additionalProperties: - type: string - type: object - content: - type: string - isSymlink: - type: boolean - items: + files: items: - $ref: '#/definitions/files.ReducedItem' + $ref: '#/definitions/files.ItemInfo' + type: array + folders: + items: + $ref: '#/definitions/files.ItemInfo' type: array modified: type: string @@ -21,19 +17,11 @@ definitions: type: string size: type: integer - subtitles: - items: - type: string - type: array - token: - type: string type: type: string type: object - files.ReducedItem: + files.ItemInfo: properties: - content: - type: string modified: type: string name: diff --git a/backend/test/atest b/backend/test/atest new file mode 100755 index 00000000..e69de29b diff --git a/backend/test/test b/backend/test/test new file mode 100755 index 00000000..e69de29b diff --git a/backend/test/tests b/backend/test/tests new file mode 100755 index 00000000..e69de29b diff --git a/backend/utils/cache.go b/backend/utils/cache.go new file mode 100644 index 00000000..53618223 --- /dev/null +++ b/backend/utils/cache.go @@ -0,0 +1,80 @@ +package utils + +import ( + "sync" + "time" +) + +var ( + DiskUsageCache = newCache(30*time.Second, 24*time.Hour) + RealPathCache = newCache(48*time.Hour, 72*time.Hour) + SearchResultsCache = newCache(15*time.Second, time.Hour) +) + +func newCache(expires time.Duration, cleanup time.Duration) *KeyCache { + newCache := KeyCache{ + data: make(map[string]cachedValue), + expiresAfter: expires, // default + } + go newCache.cleanupExpiredJob(cleanup) + return &newCache +} + +type KeyCache struct { + data map[string]cachedValue + mu sync.RWMutex + expiresAfter time.Duration +} + +type cachedValue struct { + value interface{} + expiresAt time.Time +} + +func (c *KeyCache) Set(key string, value interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + c.data[key] = cachedValue{ + value: value, + expiresAt: time.Now().Add(c.expiresAfter), + } +} + +func (c *KeyCache) SetWithExp(key string, value interface{}, exp time.Duration) { + c.mu.Lock() + defer c.mu.Unlock() + c.data[key] = cachedValue{ + value: value, + expiresAt: time.Now().Add(exp), + } +} + +func (c *KeyCache) Get(key string) interface{} { + c.mu.RLock() + defer c.mu.RUnlock() + cached, ok := c.data[key] + if !ok || time.Now().After(cached.expiresAt) { + return nil + } + return cached.value +} + +func (c *KeyCache) cleanupExpired() { + c.mu.Lock() + defer c.mu.Unlock() + now := time.Now() + for key, cached := range c.data { + if now.After(cached.expiresAt) { + delete(c.data, key) + } + } +} + +// should automatically run for all cache types as part of init. 
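+// The cleanup job below ticks once every "frequency" and prunes entries whose expiry has passed. +// Get() already treats expired entries as a miss, so this job only reclaims memory; +// callers never need to invoke it directly.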
+func (c *KeyCache) cleanupExpiredJob(frequency time.Duration) { + ticker := time.NewTicker(frequency) + defer ticker.Stop() + for range ticker.C { + c.cleanupExpired() + } +} diff --git a/backend/utils/main.go b/backend/utils/main.go index 600b17ac..48018422 100644 --- a/backend/utils/main.go +++ b/backend/utils/main.go @@ -69,3 +69,18 @@ func PrintStructFields(v interface{}) { fmt.Printf("Field: %s, %s\n", fieldType.Name, fieldValue) } } + +func GetParentDirectoryPath(path string) string { + if path == "/" || path == "" { + return "" + } + path = strings.TrimSuffix(path, "/") // Remove trailing slash if any + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + return "" // No parent directory for a relative path without slashes + } + if lastSlash == 0 { + return "/" // If the last slash is the first character, return root + } + return path[:lastSlash] +} diff --git a/backend/utils/main_test.go b/backend/utils/main_test.go new file mode 100644 index 00000000..07b75b3d --- /dev/null +++ b/backend/utils/main_test.go @@ -0,0 +1,59 @@ +package utils + +import ( + "testing" +) + +func TestGetParentDirectoryPath(t *testing.T) { + tests := []struct { + input string + expectedOutput string + }{ + {input: "/", expectedOutput: ""}, // Root directory + {input: "/subfolder", expectedOutput: "/"}, // Single subfolder + {input: "/sub/sub/", expectedOutput: "/sub"}, // Nested subfolder with trailing slash + {input: "/subfolder/", expectedOutput: "/"}, // Single subfolder with trailing slash + {input: "", expectedOutput: ""}, // Empty string treated as root + {input: "/sub/subfolder", expectedOutput: "/sub"}, // Nested subfolder without trailing slash + {input: "/sub/subfolder/deep/nested/", expectedOutput: "/sub/subfolder/deep"}, // Deeply nested path with trailing slash + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { + actualOutput := GetParentDirectoryPath(test.input) + if actualOutput != test.expectedOutput { + t.Errorf("\n\tinput %q\n\texpected %q\n\tgot %q", + test.input, test.expectedOutput, actualOutput) + } + }) + } +} + +func TestCapitalizeFirst(t *testing.T) { + tests := []struct { + input string + expectedOutput string + }{ + {input: "", expectedOutput: ""}, // Empty string + {input: "a", expectedOutput: "A"}, // Single lowercase letter + {input: "A", expectedOutput: "A"}, // Single uppercase letter + {input: "hello", expectedOutput: "Hello"}, // All lowercase + {input: "Hello", expectedOutput: "Hello"}, // Already capitalized + {input: "123hello", expectedOutput: "123hello"}, // Non-alphabetic first character + {input: "hELLO", expectedOutput: "HELLO"}, // Mixed case + {input: " hello", expectedOutput: " hello"}, // Leading space, no capitalization + {input: "hello world", expectedOutput: "Hello world"}, // Phrase with spaces + {input: " hello world", expectedOutput: " hello world"}, // Phrase with leading space + {input: "123 hello world", expectedOutput: "123 hello world"}, // Numbers before text + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { + actualOutput := CapitalizeFirst(test.input) + if actualOutput != test.expectedOutput { + t.Errorf("\n\tinput %q\n\texpected %q\n\tgot %q", + test.input, test.expectedOutput, actualOutput) + } + }) + } +} diff --git a/docs/configuration.md b/docs/configuration.md index 33560b32..b1601329 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -10,7 +10,6 @@ Here is an expanded config file which includes all possible configurations: server: CreateUserDir: false UserHomeBasePath: "" - indexingInterval: 5
indexing: true numImageProcessors: 4 socket: "" @@ -71,7 +70,6 @@ Here are the defaults if nothing is set: server: enableThumbnails: true enableExec: false - indexingInterval: 5 port: 80 numImageProcessors: 4 baseURL: "" @@ -109,7 +107,7 @@ userDefaults: ### Server configuration settings -- `indexingInterval`: This is the time in minutes the system waits before checking for filesystem changes. Default: `5` +- `indexingInterval`: This optional parameter disables smart indexing and specifies a time in minutes the system waits before checking for filesystem changes. See [indexing readme](indexing.md) for more information. - `indexing`: This enables or disables indexing. (Note: search will not work without indexing) Default: `true` diff --git a/docs/contributing.md b/docs/contributing.md index a320bea6..df0e32fd 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -1,2 +1,13 @@ # Contributing Guide +If you would like to contribute, please open a pull request against main or the latest `dev_` branch that's currently in progress. + +A PR is required to have: + +1. A clear description of why it was opened +2. A short title that best describes the issue. +3. Test evidence for anything that is not self-evident or covered by unit tests. + +Unit tests must be updated and passing before merging, so the best way to handle this is to create a fork, test your changes there, and then merge to this repo. You can also create a draft pull request if it is not fully ready. + +Please don't hesitate to open an issue for any ideas you have but cannot contribute directly, for whatever reason. \ No newline at end of file diff --git a/docs/indexing.md b/docs/indexing.md new file mode 100644 index 00000000..22444a38 --- /dev/null +++ b/docs/indexing.md @@ -0,0 +1,189 @@ +# About Indexing on FileBrowser Quantum + +The most significant feature is the index; this document intends to demystify how it works so you can ensure your index closely matches the current state of your filesystem. + +## How does the index work? + +The approach used by this repo includes filesystem watchers that periodically scan the directory tree for changes. By default, this uses a smart scan strategy, but you can also configure a set interval in your config file. + +The `scan interval` is the break time between scans and does not include the time a scan takes. The time a scan takes can vary dramatically, but here are some expectations for SSD-based disks: + +| # folders | # files | time to index | memory usage (RAM) | +|---|---|---|---| +| 10,000 | 10,000 | ~ 0-5 seconds | 15 MB | +| 2,000 | 250,000 | ~ 0-5 seconds | 300 MB | +| 50,000 | 50,000 | ~ 5-30 seconds | 150 MB | +| 250,000 | 10,000 | ~ 2-5 minutes | 300 MB | +| 500,000 | 500,000 | ~ 5+ minutes | 500+ MB | + +### Smart Scanning + +1. There is a floating `smart scan interval` that ranges from **1 minute to 4 hours**, depending on the complexity of your filesystem. +2. The smart interval changes based on how often it discovers changed files: + - ``` + // Schedule in minutes + var scanSchedule = []time.Duration{ + 5 * time.Minute, // 5 minute quick scan & 25 minutes for a full scan + 10 * time.Minute, + 20 * time.Minute, // [3] element is 20 minutes, reset anchor for full scan + 40 * time.Minute, + 1 * time.Hour, + 2 * time.Hour, + 3 * time.Hour, + 4 * time.Hour, // 4 hours for quick scan & 20 hours for a full scan + } + ``` +3. The `smart scan interval` performs a `quick scan` 4 times in a row, followed by a 5th `full scan` which completely rebuilds the index.
+ - A `quick scan` is limited to detecting directory changes, but is 10x faster than a full scan. Here is what a quick scan can see: + 1. New files or folders created. + 2. Files or folders deleted. + 3. Renaming of files or folders. + - A quick scan **cannot** detect when a file has been updated, for example when you save a file and the size increases. + - A `full scan` is a complete re-indexing. This is always more disk- and CPU-intensive but will capture individual file changes. +4. The `smart scan interval` also depends on the assessed complexity of your filesystem. A `simple` complexity enables scans as often as every 1 minute when changes happen frequently, with no more than 100 minutes between full scans. A `high` complexity indicates a minimum scanning interval of 10 minutes. + - **under 10,000 folders** or **under 3 seconds** to index is always considered `simple` complexity. + - **more than 500,000 folders** or **over 2 minutes** to index is always considered `high` complexity. + +### Manual Scanning Interval + +If you don't like the behavior of smart scanning, you can configure a set interval instead by setting `indexingInterval` to a number greater than 0. This will make FileBrowser Quantum always scan at the given interval in minutes. + +The scan behavior is still 4 quick scans at the given interval, followed by a 5th full scan. + +### System requirements + +You can expect FileBrowser Quantum to use 100 MB of RAM for a typical installation. If you have many files and folders, the requirement could climb to multiple gigabytes. Please monitor your system on the first run to know your specific requirements. + +### Why does FileBrowser Quantum index the way it does? + +The smart indexing method uses filesystem scanners because it allows a low-footprint design that can cater to individual filesystem complexity. There are a few options for monitoring a filesystem for changes: + +1. **Option 1**: Recursive Traversal with ReadDir + - This is quite computationally intensive but creates an accurate record of the filesystem. + - Requires periodic scanning to remain updated. + - Low memory overhead and a straightforward implementation. +2. **Option 2**: Use File System Monitoring (Real-Time or Periodic Check) such as `fsnotify` + - This allows for event-based reactions to filesystem changes. + - Requires extra overhead. + - Relies on OS-level features, and behavior differs between operating systems. + - Requires OS-level configuration of ulimits in order to properly watch a large filesystem. +3. **Option 3**: Directory Metadata Heuristics. + - Using ModTime to determine when directory structures change. + - Has minimal insight into actual file changes. + - Much faster to scan for changes than recursive traversal. + +Ultimately, FileBrowser Quantum uses a combination of options 1 and 3 to perform index updates. Using something like fsnotify is a non-starter for large filesystems, where it would require manual host OS tuning to work at all. Besides, I can essentially offer the same behavior by creating "watchers" for top-level folders (a feature to come in the future). However, right now there is a single root-level watcher that works over the entire index. + +The main disadvantage of this approach is the delay introduced by the scanning interval. + +### How to manually refresh the index? + +There is currently no way to manually trigger a new full indexing. This will come in a future release when the "jobs" functionality is added back.
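+
+Until then, note that a directory's index entry is refreshed whenever that directory is requested, so a single GET to the resources API re-reads one path on demand. Below is a minimal sketch in Go; the `/api/resources` route and `path` query parameter come from this project's API, while the host, the token value, and the `Authorization` header are illustrative assumptions -- adjust them to match how your deployment authenticates:
+
+```go
+package main
+
+import (
+  "fmt"
+  "net/http"
+  "net/url"
+)
+
+// refreshDir requests a directory through the resources API, which causes
+// the index entry for that path to be re-read from disk.
+func refreshDir(baseURL, token, dir string) error {
+  endpoint := fmt.Sprintf("%s/api/resources?path=%s", baseURL, url.QueryEscape(dir))
+  req, err := http.NewRequest(http.MethodGet, endpoint, nil)
+  if err != nil {
+    return err
+  }
+  // Hypothetical auth header; substitute your deployment's mechanism.
+  req.Header.Set("Authorization", "Bearer "+token)
+  resp, err := http.DefaultClient.Do(req)
+  if err != nil {
+    return err
+  }
+  defer resp.Body.Close()
+  fmt.Println("refresh status:", resp.Status)
+  return nil
+}
+
+func main() {
+  // Illustrative values only.
+  if err := refreshDir("http://localhost:80", "your-api-token", "/myfolder"); err != nil {
+    fmt.Println("refresh failed:", err)
+  }
+}
+```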
+ +So, if you want to force-refresh a certain directory, simply **view it** in the UI or request it via the resources API, as sketched above. + +This also means the resources API is always up to date with the current state of the filesystem. When you "look" at a specific folder, you are causing the index to be refreshed at that location. + +### What information does the index have? + +You can see what the index looks like by using the resources API via the GET method, which returns individual directory information -- all of this information is stored in the index. + +Here is an example: + +``` +{ + "name": "filebrowser", + "size": 274467914, + "modified": "2024-11-23T19:18:57.68013727-06:00", + "type": "directory", + "files": [ + { + "name": ".dockerignore", + "size": 73, + "modified": "2024-11-20T18:14:44.91135413-06:00", + "type": "blob" + }, + { + "name": ".DS_Store", + "size": 6148, + "modified": "2024-11-22T14:45:15.901211088-06:00", + "type": "blob" + }, + { + "name": ".gitignore", + "size": 455, + "modified": "2024-11-23T19:18:57.616132373-06:00", + "type": "blob" + }, + { + "name": "CHANGELOG.md", + "size": 9325, + "modified": "2024-11-23T19:18:57.616646332-06:00", + "type": "text" + }, + { + "name": "Dockerfile", + "size": 769, + "modified": "2024-11-23T19:18:57.616941333-06:00", + "type": "blob" + }, + { + "name": "Dockerfile.playwright", + "size": 542, + "modified": "2024-11-23T19:18:57.617151875-06:00", + "type": "blob" + }, + { + "name": "makefile", + "size": 1311, + "modified": "2024-11-23T19:18:57.68017352-06:00", + "type": "blob" + }, + { + "name": "README.md", + "size": 10625, + "modified": "2024-11-23T19:18:57.617464334-06:00", + "type": "text" + } + ], + "folders": [ + { + "name": ".git", + "size": 60075460, + "modified": "2024-11-24T14:44:42.52180215-06:00", + "type": "directory" + }, + { + "name": ".github", + "size": 11584, + "modified": "2024-11-20T18:14:44.911805335-06:00", + "type": "directory" + }, + { + "name": "backend", + "size": 29247172, + "modified": "2024-11-23T19:18:57.667109624-06:00", + "type": "directory" + }, + { + "name": "docs", + "size": 14272, + "modified": "2024-11-24T13:46:12.082024018-06:00", + "type": "directory" + }, + { + "name": "frontend", + "size": 185090178, + "modified": "2024-11-24T14:44:39.880678934-06:00", + "type": "directory" + } + ], + "path": "/filebrowser" +} +``` + +### Can I disable the index and still use FileBrowser Quantum? + +You can disable the index by setting `indexing: false` in your config file. You will still be able to browse your files, but search will not work and you may run into issues, as the program is not intended to be used without indexing. + +I'm not sure why you would run it like this; if you have a good reason, please open an issue describing how you would like it to work -- and why you would run it without the index. diff --git a/docs/roadmap.md b/docs/roadmap.md index baf81a51..f18e136d 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -1,18 +1,17 @@ # Planned Roadmap -upcoming 0.3.x releases: +upcoming 0.3.x releases, ordered by priority: +- More filetype icons and refreshed icons. +- more filetype previews - e.g. office, photoshop, vector, 3d files. +- Enable mobile search with same features as desktop - Theme configuration from settings -- File synchronization improvements -- more filetype previews - introduce jobs as replacement to runners. - Add Job status to the sidebar - index status.
- Job status from users - upload status - opentelemetry metrics -- simple search/filter for current listings. -- Enable mobile search with same features as desktop Unplanned Future releases: - multiple sources https://github.com/filebrowser/filebrowser/issues/2514 diff --git a/frontend/package.json b/frontend/package.json index 220c6ac3..92c2128a 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -13,10 +13,11 @@ "build-docker": "vite build", "watch": "vite build --watch", "typecheck": "vue-tsc -p ./tsconfig.json --noEmit", - "lint": "npm run typecheck && eslint src/", + "lint": "eslint --ext .js,.vue,ts src", "lint:fix": "eslint --fix src/", "format": "prettier --write .", - "test": "npx playwright test" + "integration-test": "npx playwright test", + "test": "vitest run " }, "dependencies": { "ace-builds": "^1.24.2", @@ -32,15 +33,17 @@ "vue-router": "^4.3.0" }, "devDependencies": { - "@playwright/test": "^1.42.1", "@intlify/unplugin-vue-i18n": "^4.0.0", + "@playwright/test": "^1.42.1", "@vitejs/plugin-vue": "^5.0.4", "@vue/eslint-config-typescript": "^13.0.0", "eslint": "^8.57.0", - "eslint-plugin-prettier": "^5.1.3", + "eslint-config-prettier": "^9.1.0", "eslint-plugin-vue": "^9.24.0", + "jsdom": "^25.0.1", "vite": "^5.2.7", "vite-plugin-compression2": "^1.0.0", + "vitest": "^2.1.5", "vue-tsc": "^2.0.7" } } diff --git a/frontend/src/api/files.js b/frontend/src/api/files.js index ea447204..fcdea8d5 100644 --- a/frontend/src/api/files.js +++ b/frontend/src/api/files.js @@ -1,5 +1,4 @@ -import { createURL, fetchURL, adjustedData} from "./utils"; -import { baseURL } from "@/utils/constants"; +import { createURL, fetchURL, adjustedData } from "./utils"; import { removePrefix, getApiPath } from "@/utils/url.js"; import { state } from "@/store"; import { notify } from "@/notify"; @@ -7,11 +6,12 @@ import { notify } from "@/notify"; // Notify if errors occur export async function fetchFiles(url, content = false) { try { - url = removePrefix(url,"files"); - const apiPath = getApiPath("api/resources",{path: url, content: content}); + let path = removePrefix(url, "files"); + const apiPath = getApiPath("api/resources",{path: path, content: content}); const res = await fetchURL(apiPath); const data = await res.json(); - return adjustedData(data,url); + const adjusted = adjustedData(data, url); + return adjusted; } catch (err) { notify.showError(err.message || "Error fetching data"); throw err; @@ -64,7 +64,7 @@ export function download(format, ...files) { fileargs = fileargs.substring(0, fileargs.length - 1); } const apiPath = getApiPath("api/raw",{path: path, files: fileargs, algo: format}); - let url = `${baseURL}${apiPath}`; + const url = createURL(`${apiPath}`); window.open(url); } catch (err) { notify.showError(err.message || "Error downloading files"); @@ -155,10 +155,11 @@ export async function checksum(url, algo) { export function getDownloadURL(path, inline) { try { const params = { - path: path, + path: removePrefix(path,"files"), ...(inline && { inline: "true" }), }; - return createURL("api/raw", params); + const apiPath = getApiPath("api/raw", params); + return createURL(apiPath); } catch (err) { notify.showError(err.message || "Error getting download URL"); throw err; @@ -173,8 +174,8 @@ export function getPreviewURL(path, size, modified) { key: Date.parse(modified), inline: "true", }; - - return createURL("api/preview", params); + const apiPath = getApiPath("api/preview", params); + return createURL(apiPath); } catch (err) { notify.showError(err.message || "Error getting 
preview URL"); throw err; @@ -183,13 +184,14 @@ export function getPreviewURL(path, size, modified) { export function getSubtitlesURL(file) { try { - const params = { - inline: "true", - }; - const subtitles = []; for (const sub of file.subtitles) { - subtitles.push(createURL("api/raw" + sub, params)); + const params = { + inline: "true", + path: sub + }; + const apiPath = getApiPath("api/raw", params); + return createURL(apiPath); } return subtitles; diff --git a/frontend/src/api/public.js b/frontend/src/api/public.js index 388fde8d..2b4250bb 100644 --- a/frontend/src/api/public.js +++ b/frontend/src/api/public.js @@ -1,53 +1,48 @@ import { createURL, adjustedData } from "./utils"; -import { getApiPath } from "@/utils/url.js"; +import { getApiPath, removePrefix } from "@/utils/url.js"; import { notify } from "@/notify"; // Fetch public share data export async function fetchPub(path, hash, password = "") { - try { - const params = { path, hash } - const apiPath = getApiPath("api/public/share", params); - const response = await fetch(apiPath, { - headers: { - "X-SHARE-PASSWORD": password ? encodeURIComponent(password) : "", - }, - }); + const params = { path, hash } + const apiPath = getApiPath("api/public/share", params); + const response = await fetch(apiPath, { + headers: { + "X-SHARE-PASSWORD": password ? encodeURIComponent(password) : "", + }, + }); - if (!response.ok) { - const error = new Error("Failed to connect to the server."); - error.status = response.status; - throw error; - } - let data = await response.json() - return adjustedData(data, `${hash}${path}`); - } catch (err) { - notify.showError(err.message || "Error fetching public share data"); - throw err; + if (!response.ok) { + const error = new Error("Failed to connect to the server."); + error.status = response.status; + throw error; } + let data = await response.json() + const adjusted = adjustedData(data, getApiPath(`share/${hash}${path}`)); + return adjusted } // Download files with given parameters -export function download(path, hash, token, format, ...files) { +export function download(share, ...files) { try { let fileInfo = files[0] if (files.length > 1) { fileInfo = files.map(encodeURIComponent).join(","); } const params = { - path, - hash, - ...(format && { format}), - ...(token && { token }), - fileInfo + "path": removePrefix(share.path, "share"), + "hash": share.hash, + "token": share.token, + "inline": share.inline, + "files": fileInfo, }; - const url = createURL(`api/public/dl`, params, false); + const apiPath = getApiPath("api/public/dl", params); + const url = createURL(apiPath); window.open(url); } catch (err) { notify.showError(err.message || "Error downloading files"); throw err; } - - } // Get the public user data @@ -64,11 +59,7 @@ export async function getPublicUser() { // Generate a download URL export function getDownloadURL(share) { - const params = { - "path": share.path, - "hash": share.hash, - "token": share.token, - ...(share.inline && { inline: "true" }), - }; - return createURL(`api/public/dl`, params, false); + const apiPath = getApiPath("api/public/dl", share); + const url = createURL(apiPath) + return url } diff --git a/frontend/src/api/utils.js b/frontend/src/api/utils.js index a824eec7..c397622d 100644 --- a/frontend/src/api/utils.js +++ b/frontend/src/api/utils.js @@ -60,36 +60,47 @@ export async function fetchJSON(url, opts) { } } -export function createURL(endpoint, params = {}) { +export function createURL(endpoint) { let prefix = baseURL; + + // Ensure prefix ends with a single slash 
if (!prefix.endsWith("/")) { - prefix = prefix + "/"; + prefix += "/"; } - const url = new URL(prefix + endpoint, origin); - const searchParams = { - ...params, - }; - - for (const key in searchParams) { - url.searchParams.set(key, searchParams[key]); + // Remove leading slash from endpoint to avoid duplicate slashes + if (endpoint.startsWith("/")) { + endpoint = endpoint.substring(1); } + const url = new URL(prefix + endpoint, window.location.origin); + return url.toString(); } export function adjustedData(data, url) { data.url = url; - if (data.type == "directory") { + + if (data.type === "directory") { if (!data.url.endsWith("/")) data.url += "/"; + + // Combine folders and files into items + data.items = [...(data.folders || []), ...(data.files || [])]; + data.items = data.items.map((item, index) => { item.index = index; item.url = `${data.url}${item.name}`; - if (item.type == "directory") { + if (item.type === "directory") { item.url += "/"; } return item; }); } - return data -} \ No newline at end of file + if (data.files) { + data.files = [] + } + if (data.folders) { + data.folders = [] + } + return data; +} diff --git a/frontend/src/api/utils.test.js b/frontend/src/api/utils.test.js new file mode 100644 index 00000000..8464c90e --- /dev/null +++ b/frontend/src/api/utils.test.js @@ -0,0 +1,114 @@ +import { describe, it, expect, vi } from 'vitest'; +import { adjustedData, createURL } from './utils.js'; + +describe('adjustedData', () => { + it('should append the URL and process directory data correctly', () => { + const input = { + type: "directory", + folders: [ + { name: "folder1", type: "directory" }, + { name: "folder2", type: "directory" }, + ], + files: [ + { name: "file1.txt", type: "file" }, + { name: "file2.txt", type: "file" }, + ], + }; + + const url = "http://example.com/unit-testing/files/path/to/directory"; + + const expected = { + type: "directory", + url: "http://example.com/unit-testing/files/path/to/directory/", + folders: [], + files: [], + items: [ + { name: "folder1", type: "directory", index: 0, url: "http://example.com/unit-testing/files/path/to/directory/folder1/" }, + { name: "folder2", type: "directory", index: 1, url: "http://example.com/unit-testing/files/path/to/directory/folder2/" }, + { name: "file1.txt", type: "file", index: 2, url: "http://example.com/unit-testing/files/path/to/directory/file1.txt" }, + { name: "file2.txt", type: "file", index: 3, url: "http://example.com/unit-testing/files/path/to/directory/file2.txt" }, + ], + }; + + expect(adjustedData(input, url)).toEqual(expected); + }); + + it('should add a trailing slash to the URL if missing for a directory', () => { + const input = { type: "directory", folders: [], files: [] }; + const url = "http://example.com/base"; + + const expected = { + type: "directory", + url: "http://example.com/base/", + folders: [], + files: [], + items: [], + }; + + expect(adjustedData(input, url)).toEqual(expected); + }); + + it('should handle non-directory types without modification to items', () => { + const input = { type: "file", name: "file1.txt" }; + const url = "http://example.com/base"; + + const expected = { + type: "file", + name: "file1.txt", + url: "http://example.com/base", + }; + + expect(adjustedData(input, url)).toEqual(expected); + }); + + it('should handle missing folders and files gracefully', () => { + const input = { type: "directory" }; + const url = "http://example.com/base"; + + const expected = { + type: "directory", + url: "http://example.com/base/", + items: [], + }; + + 
expect(adjustedData(input, url)).toEqual(expected); + }); + + it('should handle empty input object correctly', () => { + const input = {}; + const url = "http://example.com/base"; + + const expected = { + url: "http://example.com/base", + }; + + expect(adjustedData(input, url)).toEqual(expected); + }); + +}); + + +describe('createURL', () => { + it('createURL', () => { + const url = "base"; + const expected = "http://localhost:3000/unit-testing/base" + expect(createURL(url)).toEqual(expected); + }); + it('createURL with slash', () => { + const url = "/base"; + const expected = "http://localhost:3000/unit-testing/base" + expect(createURL(url)).toEqual(expected); + }); +}) + +vi.mock('@/utils/constants', () => { + return { + baseURL: "unit-testing", + }; +}); + diff --git a/frontend/src/components/Breadcrumbs.vue b/frontend/src/components/Breadcrumbs.vue index d27804e8..15405ef7 100644 --- a/frontend/src/components/Breadcrumbs.vue +++ b/frontend/src/components/Breadcrumbs.vue @@ -21,7 +21,6 @@ type="range" id="gallery-size" name="gallery-size" - :value="gallerySize" min="0" max="10" @input="updateGallerySize" @@ -62,6 +61,9 @@ export default { if (parts[0] === "") { parts.shift(); } + if (getters.currentView() == "share") { + parts.shift(); + } if (parts[parts.length - 1] === "") { parts.pop(); diff --git a/frontend/src/components/files/ListingItem.vue b/frontend/src/components/files/ListingItem.vue index 90167963..32301607 100644 --- a/frontend/src/components/files/ListingItem.vue +++ b/frontend/src/components/files/ListingItem.vue @@ -76,6 +76,7 @@ import { filesApi } from "@/api"; import * as upload from "@/utils/upload"; import { state, getters, mutations } from "@/store"; // Import your custom store import { baseURL } from "@/utils/constants"; +import { router } from "@/router"; export default { name: "item", @@ -323,7 +324,7 @@ export default { mutations.addSelected(this.index); }, open() { - this.$router.push({ path: this.url }); + router.push({ path: this.url }); }, }, }; diff --git a/frontend/src/components/prompts/Delete.vue b/frontend/src/components/prompts/Delete.vue index de838584..508ed7b3 100644 --- a/frontend/src/components/prompts/Delete.vue +++ b/frontend/src/components/prompts/Delete.vue @@ -59,7 +59,7 @@ export default { if (!this.isListing) { await filesApi.remove(state.route.path); buttons.success("delete"); - showSuccess("Deleted item successfully"); + notify.showSuccess("Deleted item successfully"); this.currentPrompt?.confirm(); this.closeHovers(); @@ -79,7 +79,7 @@ export default { await Promise.all(promises); buttons.success("delete"); - showSuccess("Deleted item successfully"); + notify.showSuccess("Deleted item successfully"); mutations.setReload(true); // Handle reload as needed } catch (e) { buttons.done("delete"); diff --git a/frontend/src/components/prompts/Share.vue b/frontend/src/components/prompts/Share.vue index 5376075b..20996275 100644 --- a/frontend/src/components/prompts/Share.vue +++ b/frontend/src/components/prompts/Share.vue @@ -8,50 +8,52 @@