Diffstat (limited to 'repo')
-rw-r--r--  repo/export_dir.go  | 88
-rw-r--r--  repo/repo.go        | 57
-rw-r--r--  repo/repo_test.go   | 19
3 files changed, 127 insertions(+), 37 deletions(-)
diff --git a/repo/export_dir.go b/repo/export_dir.go
new file mode 100644
index 0000000..8c63fdb
--- /dev/null
+++ b/repo/export_dir.go
@@ -0,0 +1,88 @@
+/* Copyright (C) 2021 Nicolas Peugnet <n.peugnet@free.fr>
+
+ This file is part of dna-backup.
+
+ dna-backup is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ dna-backup is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with dna-backup. If not, see <https://www.gnu.org/licenses/>. */
+
+package repo
+
+import (
+ "bytes"
+ "compress/zlib"
+ "encoding/binary"
+ "io"
+
+ "github.com/n-peugnet/dna-backup/dna"
+ "github.com/n-peugnet/dna-backup/logger"
+ "github.com/n-peugnet/dna-backup/utils"
+)
+
+type Version struct {
+ Chunks uint64
+ Recipe uint64
+ Files uint64
+}
+
+func (r *Repo) ExportDir(dest string, trackSize int) {
+ r.Init()
+ versions := make([]Version, len(r.versions))
+ chunks := r.loadChunks(r.versions)
+ for i := range versions {
+ var count int64
+ var content bytes.Buffer // TODO: replace with a reader capable of switching files
+ var recipe, fileList []byte
+ var err error
+ tracker := dna.NewWriter(&content, trackSize)
+ counter := utils.NewWriteCounter(tracker)
+ compressor := zlib.NewWriter(counter)
+ for _, c := range chunks[i] {
+ n, err := io.Copy(compressor, c.Reader())
+ if err != nil {
+ logger.Error(err)
+ }
+ count += n
+ }
+ compressor.Close()
+ tracker.Close()
+ readDelta(r.versions[i], recipeName, utils.NopReadWrapper, func(rc io.ReadCloser) {
+ recipe, err = io.ReadAll(rc)
+ if err != nil {
+ logger.Error("load recipe ", err)
+ }
+ })
+ readDelta(r.versions[i], filesName, utils.NopReadWrapper, func(rc io.ReadCloser) {
+ fileList, err = io.ReadAll(rc)
+ if err != nil {
+ logger.Error("load files ", err)
+ }
+ })
+ versions[i] = Version{
+ uint64(counter.Count()),
+ uint64(len(recipe)),
+ uint64(len(fileList)),
+ }
+ header := versions[i].createHeader()
+ logger.Info(header)
+ }
+}
+
+func (v Version) createHeader() []byte {
+ buf := make([]byte, binary.MaxVarintLen64*3)
+ i := 0
+ for _, x := range []uint64{v.Chunks, v.Recipe, v.Files} {
+ n := binary.PutUvarint(buf[i:], x)
+ i += n
+ }
+ return buf[:i]
+}
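
In short, ExportDir compresses and concatenates each version's chunks into DNA tracks of trackSize bytes, then encodes the sizes of the chunk stream, the recipe and the file list as three uvarints. For symmetry, here is a minimal sketch of how such a header could be decoded on the read side; readHeader is hypothetical and not part of this diff:

package repo

import (
    "encoding/binary"
    "errors"
)

// readHeader is a hypothetical counterpart to createHeader: it decodes the
// three uvarint fields back into a Version and reports how many bytes of
// buf the header occupied.
func readHeader(buf []byte) (v Version, n int, err error) {
    for _, f := range []*uint64{&v.Chunks, &v.Recipe, &v.Files} {
        x, m := binary.Uvarint(buf[n:])
        if m <= 0 {
            return v, n, errors.New("version header: invalid uvarint")
        }
        *f = x
        n += m
    }
    return v, n, nil
}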
diff --git a/repo/repo.go b/repo/repo.go
index d3a5904..d12c72d 100644
--- a/repo/repo.go
+++ b/repo/repo.go
@@ -380,31 +380,36 @@ func storeDelta(prevRaw []byte, curr interface{}, dest string, differ delta.Diff
}
}
+func readDelta(version string, name string, wrapper utils.ReadWrapper, callback func(io.ReadCloser)) {
+ path := filepath.Join(version, name)
+ file, err := os.Open(path)
+ if err != nil {
+ logger.Panic(err)
+ }
+ in, err := wrapper(file)
+ if err != nil {
+ logger.Panic(err)
+ }
+ callback(in)
+ if err = in.Close(); err != nil {
+ logger.Panic(err)
+ }
+ if err = file.Close(); err != nil {
+ logger.Panic(err)
+ }
+}
+
func loadDeltas(target interface{}, versions []string, patcher delta.Patcher, wrapper utils.ReadWrapper, name string) (ret []byte) {
var prev bytes.Buffer
var err error
-
for _, v := range versions {
- var curr bytes.Buffer
- path := filepath.Join(v, name)
- file, err := os.Open(path)
- if err != nil {
- logger.Panic(err)
- }
- in, err := wrapper(file)
- if err != nil {
- logger.Panic(err)
- }
- if err = patcher.Patch(&prev, &curr, in); err != nil {
- logger.Panic(err)
- }
- prev = curr
- if err = in.Close(); err != nil {
- logger.Panic(err)
- }
- if err = file.Close(); err != nil {
- logger.Panic(err)
- }
+ readDelta(v, name, wrapper, func(in io.ReadCloser) {
+ var curr bytes.Buffer
+ if err = patcher.Patch(&prev, &curr, in); err != nil {
+ logger.Panic(err)
+ }
+ prev = curr
+ })
}
ret = prev.Bytes()
if len(ret) == 0 {
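
The extracted readDelta helper now owns the open/wrap/close boilerplate, so loadDeltas only has to supply the patching callback. As another illustration of the pattern, a hedged sketch of a standalone caller; loadRawRecipe is hypothetical and not part of this diff:

// loadRawRecipe reads one version's recipe delta into memory without
// applying any patch. versionPath is the full path of a version directory,
// as stored in Repo.versions.
func (r *Repo) loadRawRecipe(versionPath string) []byte {
    var recipe []byte
    readDelta(versionPath, recipeName, utils.NopReadWrapper, func(rc io.ReadCloser) {
        var err error
        if recipe, err = io.ReadAll(rc); err != nil {
            logger.Error("load recipe ", err)
        }
    })
    return recipe
}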
@@ -505,8 +510,9 @@ func (r *Repo) LoadChunkContent(id *ChunkId) *bytes.Reader {
}
// TODO: use atoi for chunkid?
-func (r *Repo) LoadChunks(chunks chan<- IdentifiedChunk) {
- for i, v := range r.versions {
+func (r *Repo) loadChunks(versions []string) (chunks [][]IdentifiedChunk) {
+ for i, v := range versions {
+ vc := make([]IdentifiedChunk, 0)
p := filepath.Join(v, chunksName)
entries, err := os.ReadDir(p)
if err != nil {
@@ -518,10 +524,11 @@ func (r *Repo) LoadChunks(chunks chan<- IdentifiedChunk) {
}
id := &ChunkId{Ver: i, Idx: uint64(j)}
c := NewStoredChunk(r, id)
- chunks <- c
+ vc = append(vc, c)
}
+ chunks = append(chunks, vc)
}
- close(chunks)
+ return chunks
}
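
With LoadChunks turned into the unexported loadChunks, callers receive each version's chunks eagerly, grouped per version and ordered by index, instead of draining a shared channel. A sketch of a hypothetical consumer; hashAllChunks is not part of this diff:

// hashAllChunks fingerprints every chunk of every known version using the
// new slice-of-slices return value: chunks[i] holds the chunks of
// r.versions[i], in index order.
func (r *Repo) hashAllChunks() {
    for _, version := range r.loadChunks(r.versions) {
        for _, c := range version {
            r.hashChunk(c.GetId(), c.Reader())
        }
    }
}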
// loadHashes loads and aggregates the hashes stored for each given version and
diff --git a/repo/repo_test.go b/repo/repo_test.go
index 38219e3..6338425 100644
--- a/repo/repo_test.go
+++ b/repo/repo_test.go
@@ -135,8 +135,8 @@ func storeChunks(dest string, chunks <-chan []byte) {
// For each chunk, both a fingerprint (hash over the full content) and a sketch
// (resemblance hash based on maximal values of regions) are calculated and
stored in a hashmap.
-func (r *Repo) hashChunks(chunks <-chan IdentifiedChunk) {
- for c := range chunks {
+func (r *Repo) hashChunks(chunks []IdentifiedChunk) {
+ for _, c := range chunks {
r.hashChunk(c.GetId(), c.Reader())
}
}
@@ -260,7 +260,6 @@ func TestLoadChunks(t *testing.T) {
reader2, writer2 := io.Pipe()
chunks1 := make(chan []byte, 16)
chunks2 := make(chan []byte, 16)
- chunks3 := make(chan IdentifiedChunk, 16)
files := listFiles(dataDir)
go concatFiles(&files, writer1)
go concatFiles(&files, writer2)
@@ -268,11 +267,11 @@ func TestLoadChunks(t *testing.T) {
go repo.chunkStream(reader2, chunks2)
storeChunks(resultChunks, chunks1)
repo.versions = []string{resultVersion}
- go repo.LoadChunks(chunks3)
+ chunks3 := repo.loadChunks(repo.versions)
i := 0
for c2 := range chunks2 {
- c3 := <-chunks3
+ c3 := chunks3[0][i]
buff, err := io.ReadAll(c3.Reader())
if err != nil {
t.Errorf("Error reading from chunk %d: %s\n", c3, err)
@@ -285,7 +284,6 @@ func TestLoadChunks(t *testing.T) {
i++
}
}
-
func prepareChunks(dataDir string, repo *Repo, streamFunc func(*[]File, io.WriteCloser)) {
resultVersion := filepath.Join(repo.path, "00000")
resultChunks := filepath.Join(resultVersion, chunksName)
@@ -327,10 +325,8 @@ func TestBsdiff(t *testing.T) {
repo.chunkWriteWrapper = utils.NopWriteWrapper
// Load previously stored chunks
- oldChunks := make(chan IdentifiedChunk, 16)
repo.loadVersions()
- go repo.LoadChunks(oldChunks)
- repo.hashChunks(oldChunks)
+ repo.hashChunks(repo.loadChunks(repo.versions)[0])
// Read new data
newVersion := len(repo.versions)
@@ -405,7 +401,6 @@ func TestHashes(t *testing.T) {
dest := t.TempDir()
source := filepath.Join("testdata", "repo_8k_zlib")
- chunks := make(chan IdentifiedChunk, 16)
storeQueue := make(chan chunkData, 16)
storeEnd := make(chan bool)
@@ -413,8 +408,8 @@ func TestHashes(t *testing.T) {
repo1.chunkReadWrapper = utils.ZlibReader
repo1.chunkWriteWrapper = utils.ZlibWriter
repo1.versions = []string{filepath.Join(source, "00000")}
- go repo1.LoadChunks(chunks)
- for c := range chunks {
+ chunks := repo1.loadChunks(repo1.versions)
+ for _, c := range chunks[0] {
fp, sk := repo1.hashChunk(c.GetId(), c.Reader())
content, err := io.ReadAll(c.Reader())
if err != nil {
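
Taken together, the test changes follow a single migration pattern, from a channel drained with range to an indexed nested slice. A compact before/after sketch, as it would sit inside a test body (setup and assertions elided):

// Before: a buffered channel fed by a goroutine and closed by LoadChunks.
//    chunks := make(chan IdentifiedChunk, 16)
//    go repo1.LoadChunks(chunks)
//    for c := range chunks { ... }

// After: an eager nested slice; chunks[0] holds the first version's chunks.
chunks := repo1.loadChunks(repo1.versions)
for _, c := range chunks[0] {
    fp, sk := repo1.hashChunk(c.GetId(), c.Reader())
    _, _ = fp, sk // fingerprint and sketch checks elided here
}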