author    n-peugnet <n.peugnet@free.fr>    2021-09-20 14:00:08 +0200
committer n-peugnet <n.peugnet@free.fr>    2021-09-20 14:00:08 +0200
commit    540426db79b6e49b79d58aef99ffee5005dc987d (patch)
tree      8d31ed9ab733e1681978ce0ccbffc50bec7fd176
parent    cfb36188efc8f53e8ec0a939ddcd09a0704fb988 (diff)
download  dna-backup-540426db79b6e49b79d58aef99ffee5005dc987d.tar.gz
          dna-backup-540426db79b6e49b79d58aef99ffee5005dc987d.zip
remove errored files from fileList
and add tests with a non-existing link
-rw-r--r--  TODO.md        2
-rw-r--r--  repo.go       31
-rw-r--r--  repo_test.go  24
3 files changed, 39 insertions, 18 deletions
diff --git a/TODO.md b/TODO.md
index 60f7ba2..acab4c2 100644
--- a/TODO.md
+++ b/TODO.md
@@ -14,7 +14,7 @@ priority 1
- [ ] store compressed chunks into tracks of `trackSize` (1024 bytes)
- [x] add chunk cache to uniquely store chunks in RAM
- [x] better tests for `(*Repo).Commit`
-- [ ] remove errored files from `fileList`
+- [x] remove errored files from `fileList`
- [ ] add superblock logic
- [ ] add version blocks or journal logic
diff --git a/repo.go b/repo.go
index 108ef5f..36adb81 100644
--- a/repo.go
+++ b/repo.go
@@ -48,6 +48,13 @@ import (
"github.com/n-peugnet/dna-backup/utils"
)
+func init() {
+ // register chunk structs for encoding/decoding using gob
+ gob.Register(&StoredChunk{})
+ gob.Register(&TempChunk{})
+ gob.Register(&DeltaChunk{})
+}
+
type FingerprintMap map[uint64]*ChunkId
type SketchMap map[uint64][]*ChunkId
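Moving the gob.Register calls into init() deduplicates what storeRecipe and loadRecipe each did on every invocation (see their hunks further down): encoding/gob can only marshal values held in an interface, Chunk here, if their concrete types were registered beforehand. Below is a minimal, self-contained sketch of that pattern; the Shape/Square pair is hypothetical and merely stands in for Chunk and its three implementations.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

type Shape interface{ Area() float64 }

type Square struct{ Side float64 }

func (s *Square) Area() float64 { return s.Side * s.Side }

func init() {
	// Register the concrete type once, so it can travel behind the interface.
	gob.Register(&Square{})
}

func main() {
	var buf bytes.Buffer
	var in Shape = &Square{Side: 2}
	// Interface values are encoded through a pointer to the interface;
	// without the init() registration this step fails at runtime.
	if err := gob.NewEncoder(&buf).Encode(&in); err != nil {
		panic(err)
	}
	var out Shape
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Area()) // prints 4
}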
@@ -141,7 +148,7 @@ func (r *Repo) Commit(source string) {
reader, writer := io.Pipe()
files := listFiles(source)
r.loadHashes(versions)
- go concatFiles(files, writer)
+ go concatFiles(&files, writer)
recipe := r.matchStream(reader, newVersion)
storeFileList(newFilesPath, unprefixFiles(files, source))
storeRecipe(newRecipePath, recipe)
@@ -222,22 +229,30 @@ func unprefixFiles(files []File, prefix string) (ret []File) {
return
}
-func concatFiles(files []File, stream io.WriteCloser) {
- for _, f := range files {
+// concatFiles reads the content of all the listed files into one continuous stream.
+// If an error is encountered while opening a file, that file is removed from
+// the list.
+// If a read is incomplete, the actual number of bytes read is recorded as the size.
+func concatFiles(files *[]File, stream io.WriteCloser) {
+ actual := make([]File, 0, len(*files))
+ for _, f := range *files {
file, err := os.Open(f.Path)
if err != nil {
- logger.Error(err)
+ logger.Warning(err)
continue
}
+ af := f
if n, err := io.Copy(stream, file); err != nil {
logger.Error(n, err)
- continue
+ af.Size = n
}
+ actual = append(actual, af)
if err = file.Close(); err != nil {
logger.Panic(err)
}
}
stream.Close()
+ *files = actual
}
func storeBasicStruct(dest string, wrapper utils.WriteWrapper, obj interface{}) {
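Changing the parameter from []File to *[]File is what makes this pruning visible to the caller: in Commit, matchStream drains the pipe before storeFileList runs, so the concatFiles goroutine has already rewritten the slice by the time the file list is stored, keeping it consistent with the bytes that actually entered the stream. A minimal sketch of the same by-pointer pruning, using a stripped-down File type in place of the repository's:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

type File struct {
	Path string
	Size int64
}

// pruneAndConcat mirrors concatFiles' error handling: unreadable files are
// dropped from the caller's slice, and incomplete reads shrink the recorded size.
func pruneAndConcat(files *[]File, stream io.Writer) {
	kept := make([]File, 0, len(*files))
	for _, f := range *files { // f is a copy, safe to mutate
		fd, err := os.Open(f.Path)
		if err != nil {
			continue // opening failed: leave f out of the kept list
		}
		if n, err := io.Copy(stream, fd); err != nil {
			f.Size = n // incomplete read: record what was actually copied
		}
		fd.Close()
		kept = append(kept, f)
	}
	*files = kept // rewrite the caller's slice in place
}

func main() {
	var buf bytes.Buffer
	files := []File{{Path: "/no/such/file", Size: 42}}
	pruneAndConcat(&files, &buf)
	fmt.Println(len(files), buf.Len()) // prints: 0 0
}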
@@ -636,9 +651,6 @@ func (r *Repo) restoreStream(stream io.WriteCloser, recipe []Chunk) {
}
func storeRecipe(dest string, recipe []Chunk) {
- gob.Register(&StoredChunk{})
- gob.Register(&TempChunk{})
- gob.Register(&DeltaChunk{})
file, err := os.Create(dest)
if err == nil {
encoder := gob.NewEncoder(file)
@@ -658,9 +670,6 @@ func storeRecipe(dest string, recipe []Chunk) {
func loadRecipe(path string) []Chunk {
var recipe []Chunk
- gob.Register(&StoredChunk{})
- gob.Register(&TempChunk{})
- gob.Register(&DeltaChunk{})
file, err := os.Open(path)
if err == nil {
decoder := gob.NewDecoder(file)
diff --git a/repo_test.go b/repo_test.go
index e1a80a3..00c94df 100644
--- a/repo_test.go
+++ b/repo_test.go
@@ -6,6 +6,7 @@ import (
"io"
"io/ioutil"
"os"
+ "path"
"path/filepath"
"reflect"
"testing"
@@ -18,7 +19,7 @@ func chunkCompare(t *testing.T, dataDir string, repo *Repo, testFiles []string,
reader, writer := io.Pipe()
chunks := make(chan []byte)
files := listFiles(dataDir)
- go concatFiles(files, writer)
+ go concatFiles(&files, writer)
go repo.chunkStream(reader, chunks)
offset := 0
@@ -127,6 +128,17 @@ func TestReadFiles3(t *testing.T) {
chunkCompare(t, dataDir, repo, files, chunkCount)
}
+func TestNoSuchFile(t *testing.T) {
+ tmpDir := t.TempDir()
+ os.Symlink("./notexisting", path.Join(tmpDir, "linknotexisting"))
+ var buff bytes.Buffer
+ files := listFiles(tmpDir)
+ assertLen(t, 1, files, "Files")
+ concatFiles(&files, utils.NopCloser(&buff))
+ assertLen(t, 0, files, "Files")
+ assertLen(t, 0, buff.Bytes(), "Buffer")
+}
+
func TestLoadChunks(t *testing.T) {
resultDir := t.TempDir()
dataDir := filepath.Join("testdata", "logs")
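The new TestNoSuchFile above exercises exactly this pruning path: the dangling symlink makes listFiles report one entry that concatFiles cannot open, so afterwards both the file list and the output buffer are expected to be empty (utils.NopCloser is presumably the repository's helper for wrapping the bytes.Buffer into the io.WriteCloser that concatFiles expects).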
@@ -142,8 +154,8 @@ func TestLoadChunks(t *testing.T) {
chunks2 := make(chan []byte, 16)
chunks3 := make(chan IdentifiedChunk, 16)
files := listFiles(dataDir)
- go concatFiles(files, writer1)
- go concatFiles(files, writer2)
+ go concatFiles(&files, writer1)
+ go concatFiles(&files, writer2)
go repo.chunkStream(reader1, chunks1)
go repo.chunkStream(reader2, chunks2)
storeChunks(resultChunks, chunks1)
@@ -183,7 +195,7 @@ func TestStoreLoadFiles(t *testing.T) {
}
}
-func prepareChunks(dataDir string, repo *Repo, streamFunc func([]File, io.WriteCloser)) {
+func prepareChunks(dataDir string, repo *Repo, streamFunc func(*[]File, io.WriteCloser)) {
resultVersion := filepath.Join(repo.path, "00000")
resultChunks := filepath.Join(resultVersion, chunksName)
os.MkdirAll(resultChunks, 0775)
@@ -193,10 +205,10 @@ func prepareChunks(dataDir string, repo *Repo, streamFunc func([]File, io.WriteC
storeChunks(resultChunks, chunks)
}
-func getDataStream(dataDir string, streamFunc func([]File, io.WriteCloser)) io.Reader {
+func getDataStream(dataDir string, streamFunc func(*[]File, io.WriteCloser)) io.Reader {
reader, writer := io.Pipe()
files := listFiles(dataDir)
- go streamFunc(files, writer)
+ go streamFunc(&files, writer)
return reader
}