author     n-peugnet <n.peugnet@free.fr>  2021-09-09 13:56:06 +0200
committer  n-peugnet <n.peugnet@free.fr>  2021-09-09 13:56:06 +0200
commit     27cf33b15ee5c028f4816607c034df68adf7df4f (patch)
tree       0c66c9a68295379cfc3f600c908f094c590b09ac
parent     8a03c46bf24b5a1fa1d2080ac4f763532db01bbe (diff)
move unused functions from repo to repo_test
-rw-r--r--  TODO.md       |  2
-rw-r--r--  repo.go       | 40
-rw-r--r--  repo_test.go  | 39
3 files changed, 41 insertions, 40 deletions
diff --git a/TODO.md b/TODO.md
index b35bff4..e194839 100644
--- a/TODO.md
+++ b/TODO.md
@@ -27,7 +27,7 @@ priority 1
priority 2
----------
-- [ ] use more the `Reader` API (which is analoguous to the `IOStream` in Java)
+- [x] use more the `Reader` API (which is analoguous to the `IOStream` in Java)
- [ ] refactor matchStream as right now it is quite complex
- [ ] better test for `Repo.matchStream`
- [ ] tail packing of PartialChunks (this Struct does not exist yet as it is in fact just `TempChunks` for now)
diff --git a/repo.go b/repo.go
index 908429d..1c1267c 100644
--- a/repo.go
+++ b/repo.go
@@ -171,32 +171,6 @@ func (r *Repo) chunkMinLen() int {
return sketch.SuperFeatureSize(r.chunkSize, r.sketchSfCount, r.sketchFCount)
}
-func (r *Repo) chunkStream(stream io.Reader, chunks chan<- []byte) {
- var buff []byte
- var prev, read = r.chunkSize, 0
- var err error
-
- for err != io.EOF {
- if prev == r.chunkSize {
- buff = make([]byte, r.chunkSize)
- prev, err = stream.Read(buff)
- } else {
- read, err = stream.Read(buff[prev:])
- prev += read
- }
- if err != nil && err != io.EOF {
- log.Println(err)
- }
- if prev == r.chunkSize {
- chunks <- buff
- }
- }
- if prev != r.chunkSize {
- chunks <- buff[:prev]
- }
- close(chunks)
-}
-
func storeFileList(dest string, files []File) {
file, err := os.Create(dest)
if err == nil {
@@ -245,7 +219,7 @@ func (r *Repo) StoreChunkContent(id *ChunkId, reader io.Reader) error {
// LoadChunkContent loads a chunk from the repo.
// If the chunk is in cache, get it from cache, else read it from drive.
-func (r *Repo) LoadChunkContent(id *ChunkId) io.ReadSeeker {
+func (r *Repo) LoadChunkContent(id *ChunkId) *bytes.Reader {
value, exists := r.chunkCache.Get(id)
if !exists {
path := id.Path(r.path)
@@ -262,18 +236,6 @@ func (r *Repo) LoadChunkContent(id *ChunkId) io.ReadSeeker {
return bytes.NewReader(value)
}
-func storeChunks(dest string, chunks <-chan []byte) {
- i := 0
- for c := range chunks {
- path := path.Join(dest, fmt.Sprintf(chunkIdFmt, i))
- err := os.WriteFile(path, c, 0664)
- if err != nil {
- log.Println(err)
- }
- i++
- }
-}
-
// TODO: use atoi for chunkid
func (r *Repo) loadChunks(versions []string, chunks chan<- IdentifiedChunk) {
for i, v := range versions {
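Besides the moved functions, the repo.go hunk above narrows LoadChunkContent's return type from io.ReadSeeker to the concrete *bytes.Reader. A minimal sketch of what that buys a caller, assuming the Repo and ChunkId types from this repository (readChunk is a hypothetical helper, not part of the commit): the concrete type exposes Len, so a buffer can be sized exactly without the Seek round-trip an io.ReadSeeker would need.

import "io"

// readChunk reads a whole chunk into memory. Sketch only: LoadChunkContent
// now returns *bytes.Reader, whose Len method reports the number of unread
// bytes, so the buffer can be allocated at the exact chunk size up front.
func readChunk(r *Repo, id *ChunkId) ([]byte, error) {
	reader := r.LoadChunkContent(id)
	buf := make([]byte, reader.Len())
	_, err := io.ReadFull(reader, buf)
	return buf, err
}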
diff --git a/repo_test.go b/repo_test.go
index 7d54ef5..07af682 100644
--- a/repo_test.go
+++ b/repo_test.go
@@ -2,6 +2,7 @@ package main
import (
"bytes"
+ "fmt"
"io"
"io/ioutil"
"log"
@@ -54,6 +55,44 @@ func chunkCompare(t *testing.T, dataDir string, repo *Repo, testFiles []string,
}
}
+func (r *Repo) chunkStream(stream io.Reader, chunks chan<- []byte) {
+ var buff []byte
+ var prev, read = r.chunkSize, 0
+ var err error
+
+ for err != io.EOF {
+ if prev == r.chunkSize {
+ buff = make([]byte, r.chunkSize)
+ prev, err = stream.Read(buff)
+ } else {
+ read, err = stream.Read(buff[prev:])
+ prev += read
+ }
+ if err != nil && err != io.EOF {
+ log.Println(err)
+ }
+ if prev == r.chunkSize {
+ chunks <- buff
+ }
+ }
+ if prev != r.chunkSize {
+ chunks <- buff[:prev]
+ }
+ close(chunks)
+}
+
+func storeChunks(dest string, chunks <-chan []byte) {
+ i := 0
+ for c := range chunks {
+ path := path.Join(dest, fmt.Sprintf(chunkIdFmt, i))
+ err := os.WriteFile(path, c, 0664)
+ if err != nil {
+ log.Println(err)
+ }
+ i++
+ }
+}
+
func TestReadFiles1(t *testing.T) {
repo := NewRepo("")
chunkCount := 590/repo.chunkSize + 1
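To make the moved code easier to follow, here is a minimal sketch of how the two relocated functions are meant to be combined from the tests. writeChunks is a hypothetical helper, not part of this commit, and it assumes the Repo type and the numbered chunk-file layout used above.

import "io"

// writeChunks splits source into fixed-size chunks and writes each one to
// dest as a numbered file. chunkStream closes the channel once the stream is
// exhausted, which lets storeChunks drain it with a plain range loop.
func writeChunks(repo *Repo, source io.Reader, dest string) {
	chunks := make(chan []byte, 16)
	go repo.chunkStream(source, chunks) // producer: emits chunks, closes the channel at EOF
	storeChunks(dest, chunks)           // consumer: writes each chunk until the channel closes
}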