diff --git a/Collection.go b/Collection.go index c7bdb89..45ea631 100644 --- a/Collection.go +++ b/Collection.go @@ -5,7 +5,6 @@ import ( "os" "path/filepath" "reflect" - "strings" "sync" ) @@ -105,7 +104,7 @@ func (c *collection[T]) Clear() { // keyFile returns the file path for the given key. func (c *collection[T]) keyFile(key string) string { - return filepath.Join(c.directory, key+".json") + return filepath.Join(c.directory, key) } // loadFromDisk loads the collection data from the disk. @@ -118,8 +117,8 @@ func (c *collection[T]) loadFromDisk() error { files, err := file.Readdirnames(0) - for _, name := range files { - fileError := c.loadFileFromDisk(name) + for _, key := range files { + fileError := c.loadFileFromDisk(key) if fileError != nil { return fileError @@ -134,8 +133,8 @@ func (c *collection[T]) loadFromDisk() error { } // loadFileFromDisk loads a single file from the disk. -func (c *collection[T]) loadFileFromDisk(name string) error { - file, err := os.Open(filepath.Join(c.directory, name)) +func (c *collection[T]) loadFileFromDisk(key string) error { + file, err := os.Open(filepath.Join(c.directory, key)) if err != nil { return err @@ -146,10 +145,10 @@ func (c *collection[T]) loadFileFromDisk(name string) error { err = decoder.Decode(value) if err != nil { + file.Close() return err } - key := strings.TrimSuffix(name, filepath.Ext(name)) c.data.Store(key, value) return file.Close() } @@ -167,6 +166,7 @@ func (c *collection[T]) writeFileToDisk(key string, value *T) error { err = encoder.Encode(value) if err != nil { + file.Close() return err } diff --git a/README.md b/README.md index 986c40e..457557e 100644 --- a/README.md +++ b/README.md @@ -44,6 +44,8 @@ You can add as many directory hierarchies as you need but I recommend using a si ## Limitations -This storage mechanism is suitable for small to medium data volume. +* Keys cannot be empty and they cannot contain a directory separator like `/`. 
-It is not suited for big data, however the package is pretty lightweight so you can combine it with a big data store. +* This storage mechanism is only suitable for small to medium data volume. + +Ocean isn't meant to be used for big data; however, the package is very lightweight, so you can combine it with a big data store.