aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorn-peugnet <n.peugnet@free.fr>2021-09-14 15:37:50 +0200
committern-peugnet <n.peugnet@free.fr>2021-09-14 15:41:44 +0200
commit33967236e18c33d0d83b34b76f8238c3d588b21d (patch)
tree9bd3334b6eabb7cd3b07d7efb9c4c90aa8726a6d
parentd85847ec99d44e1010a95c71579a754ac9f7c646 (diff)
downloaddna-backup-33967236e18c33d0d83b34b76f8238c3d588b21d.tar.gz
dna-backup-33967236e18c33d0d83b34b76f8238c3d588b21d.zip
logger add panic + colors & remove ln variants & use it
-rw-r--r--TODO.md1
-rw-r--r--logger/logger.go96
-rw-r--r--main.go8
-rw-r--r--main_test.go4
-rw-r--r--repo.go76
-rw-r--r--repo_test.go8
-rw-r--r--sketch/sketch.go8
-rw-r--r--tar.go17
8 files changed, 101 insertions, 117 deletions
diff --git a/TODO.md b/TODO.md
index 64f4269..5b98575 100644
--- a/TODO.md
+++ b/TODO.md
@@ -33,6 +33,7 @@ priority 2
- [x] remove `LoadedChunk` and only use `StoredChunk` instead now that the cache
is implemented
- [ ] store file list compressed
+- [ ] TODO: add tests for logger
reunion 7/09
------------
diff --git a/logger/logger.go b/logger/logger.go
index c47d268..bb76657 100644
--- a/logger/logger.go
+++ b/logger/logger.go
@@ -26,14 +26,14 @@ const (
// Severity tags.
const (
- tagInfo = "[INFO] "
- tagWarning = "[WARN] "
- tagError = "[ERROR] "
- tagFatal = "[FATAL] "
+ tagInfo = "\033[0m[INFO] "
+ tagWarning = "\033[1;33m[WARN] "
+ tagError = "\033[1;31m[ERROR] "
+ tagFatal = "\033[1;31m[FATAL] "
)
const (
- flags = log.Lmsgprefix | log.LstdFlags | log.Lshortfile
+ flags = log.Lmsgprefix | log.Ltime
)
var (
@@ -100,7 +100,7 @@ func (l *Logger) output(s severity, depth int, txt string) {
if int(s) >= len(l.loggers) {
panic(fmt.Sprintln("unrecognized severity:", s))
}
- l.loggers[s].Output(3+depth, txt)
+ l.loggers[s].Output(3+depth, txt+"\033[0m")
}
// SetFlags sets the output flags for the logger.
@@ -122,12 +122,6 @@ func (l *Logger) InfoDepth(depth int, v ...interface{}) {
l.output(sInfo, depth, fmt.Sprint(v...))
}
-// Infoln logs with the Info severity.
-// Arguments are handled in the manner of fmt.Println.
-func (l *Logger) Infoln(v ...interface{}) {
- l.output(sInfo, 0, fmt.Sprintln(v...))
-}
-
// Infof logs with the Info severity.
// Arguments are handled in the manner of fmt.Printf.
func (l *Logger) Infof(format string, v ...interface{}) {
@@ -146,12 +140,6 @@ func (l *Logger) WarningDepth(depth int, v ...interface{}) {
l.output(sWarning, depth, fmt.Sprint(v...))
}
-// Warningln logs with the Warning severity.
-// Arguments are handled in the manner of fmt.Println.
-func (l *Logger) Warningln(v ...interface{}) {
- l.output(sWarning, 0, fmt.Sprintln(v...))
-}
-
// Warningf logs with the Warning severity.
// Arguments are handled in the manner of fmt.Printf.
func (l *Logger) Warningf(format string, v ...interface{}) {
@@ -170,18 +158,28 @@ func (l *Logger) ErrorDepth(depth int, v ...interface{}) {
l.output(sError, depth, fmt.Sprint(v...))
}
-// Errorln logs with the ERROR severity.
-// Arguments are handled in the manner of fmt.Println.
-func (l *Logger) Errorln(v ...interface{}) {
- l.output(sError, 0, fmt.Sprintln(v...))
-}
-
// Errorf logs with the Error severity.
// Arguments are handled in the manner of fmt.Printf.
func (l *Logger) Errorf(format string, v ...interface{}) {
l.output(sError, 0, fmt.Sprintf(format, v...))
}
+// Panic logs with the Error severity, then panics.
+// Arguments are handled in the manner of fmt.Print.
+func (l *Logger) Panic(v ...interface{}) {
+ s := fmt.Sprint(v...)
+ l.output(sError, 0, s)
+ panic(s)
+}
+
+// Panicf logs with the Error severity, then panics.
+// Arguments are handled in the manner of fmt.Printf.
+func (l *Logger) Panicf(format string, v ...interface{}) {
+ s := fmt.Sprintf(format, v...)
+ l.output(sError, 0, s)
+ panic(s)
+}
+
// Fatal logs with the Fatal severity, and ends with os.Exit(1).
// Arguments are handled in the manner of fmt.Print.
func (l *Logger) Fatal(v ...interface{}) {
@@ -189,13 +187,6 @@ func (l *Logger) Fatal(v ...interface{}) {
os.Exit(1)
}
-// Fatalln logs with the Fatal severity, and ends with os.Exit(1).
-// Arguments are handled in the manner of fmt.Println.
-func (l *Logger) Fatalln(v ...interface{}) {
- l.output(sFatal, 0, fmt.Sprintln(v...))
- os.Exit(1)
-}
-
// Fatalf logs with the Fatal severity, and ends with os.Exit(1).
// Arguments are handled in the manner of fmt.Printf.
func (l *Logger) Fatalf(format string, v ...interface{}) {
@@ -214,12 +205,6 @@ func Info(v ...interface{}) {
defaultLogger.output(sInfo, 0, fmt.Sprint(v...))
}
-// Infoln uses the default logger and logs with the Info severity.
-// Arguments are handled in the manner of fmt.Println.
-func Infoln(v ...interface{}) {
- defaultLogger.output(sInfo, 0, fmt.Sprintln(v...))
-}
-
// Infof uses the default logger and logs with the Info severity.
// Arguments are handled in the manner of fmt.Printf.
func Infof(format string, v ...interface{}) {
@@ -232,12 +217,6 @@ func Warning(v ...interface{}) {
defaultLogger.output(sWarning, 0, fmt.Sprint(v...))
}
-// Warningln uses the default logger and logs with the Warning severity.
-// Arguments are handled in the manner of fmt.Println.
-func Warningln(v ...interface{}) {
- defaultLogger.output(sWarning, 0, fmt.Sprintln(v...))
-}
-
// Warningf uses the default logger and logs with the Warning severity.
// Arguments are handled in the manner of fmt.Printf.
func Warningf(format string, v ...interface{}) {
@@ -250,18 +229,28 @@ func Error(v ...interface{}) {
defaultLogger.output(sError, 0, fmt.Sprint(v...))
}
-// Errorln uses the default logger and logs with the Error severity.
-// Arguments are handled in the manner of fmt.Println.
-func Errorln(v ...interface{}) {
- defaultLogger.output(sError, 0, fmt.Sprintln(v...))
-}
-
// Errorf uses the default logger and logs with the Error severity.
// Arguments are handled in the manner of fmt.Printf.
func Errorf(format string, v ...interface{}) {
defaultLogger.output(sError, 0, fmt.Sprintf(format, v...))
}
+// Panic uses the default logger, logs with the Error severity, then panics.
+// Arguments are handled in the manner of fmt.Print.
+func Panic(v ...interface{}) {
+ s := fmt.Sprint(v...)
+ defaultLogger.output(sError, 0, s)
+ panic(s)
+}
+
+// Panicf uses the default logger, logs with the Error severity, then panics.
+// Arguments are handled in the manner of fmt.Printf.
+func Panicf(format string, v ...interface{}) {
+ s := fmt.Sprintf(format, v...)
+ defaultLogger.output(sError, 0, s)
+ panic(s)
+}
+
// Fatal uses the default logger, logs with the Fatal severity,
// and ends with os.Exit(1).
// Arguments are handled in the manner of fmt.Print.
@@ -271,15 +260,6 @@ func Fatal(v ...interface{}) {
os.Exit(1)
}
-// Fatalln uses the default logger, logs with the Fatal severity,
-// and ends with os.Exit(1).
-// Arguments are handled in the manner of fmt.Println.
-func Fatalln(v ...interface{}) {
- defaultLogger.output(sFatal, 0, fmt.Sprintln(v...))
- debug.PrintStack()
- os.Exit(1)
-}
-
// Fatalf uses the default logger, logs with the Fatal severity,
// and ends with os.Exit(1).
// Arguments are handled in the manner of fmt.Printf.
diff --git a/main.go b/main.go
index b813954..dea57ea 100644
--- a/main.go
+++ b/main.go
@@ -17,7 +17,7 @@ var (
)
func init() {
- flag.IntVar(&logLevel, "v", 1, "log verbosity level (0-3)")
+ flag.IntVar(&logLevel, "v", 2, "log verbosity level (0-3)")
}
func main() {
@@ -31,9 +31,9 @@ func main() {
flag.Usage()
os.Exit(1)
}
-
- source := os.Args[0]
- dest := os.Args[1]
+ args := flag.Args()
+ source := args[0]
+ dest := args[1]
repo := NewRepo(dest)
repo.Commit(source)
}
diff --git a/main_test.go b/main_test.go
index 57db053..27a3d65 100644
--- a/main_test.go
+++ b/main_test.go
@@ -4,6 +4,8 @@ import (
"log"
"os"
"testing"
+
+ "github.com/n-peugnet/dna-backup/logger"
)
func TestMain(m *testing.M) {
@@ -14,7 +16,7 @@ func TestMain(m *testing.M) {
}
func setup() {
- log.SetFlags(log.Lshortfile)
+ logger.SetFlags(log.Lshortfile)
}
func shutdown() {}
diff --git a/repo.go b/repo.go
index f7aa5e9..cd07759 100644
--- a/repo.go
+++ b/repo.go
@@ -32,7 +32,6 @@ import (
"hash"
"io"
"io/fs"
- "log"
"os"
"path/filepath"
"reflect"
@@ -40,6 +39,7 @@ import (
"github.com/chmduquesne/rollinghash/rabinkarp64"
"github.com/n-peugnet/dna-backup/cache"
+ "github.com/n-peugnet/dna-backup/logger"
"github.com/n-peugnet/dna-backup/sketch"
"github.com/n-peugnet/dna-backup/utils"
)
@@ -71,12 +71,12 @@ type File struct {
func NewRepo(path string) *Repo {
err := os.MkdirAll(path, 0775)
if err != nil {
- log.Panicln(err)
+ logger.Panic(err)
}
var seed int64 = 1
p, err := rabinkarp64.RandomPolynomial(seed)
if err != nil {
- log.Panicln(err)
+ logger.Panic(err)
}
return &Repo{
path: path,
@@ -122,7 +122,7 @@ func (r *Repo) Commit(source string) {
recipe := r.matchStream(reader, newVersion)
storeRecipe(newRecipePath, recipe)
storeFileList(newFilesPath, unprefixFiles(files, source))
- fmt.Println(files)
+ logger.Info(files)
}
func (r *Repo) Restore(destination string) {
@@ -142,10 +142,10 @@ func (r *Repo) Restore(destination string) {
f, _ := os.Create(filePath) // TODO: handle errors
n, err := io.CopyN(f, bufReader, file.Size)
if err != nil {
- log.Printf("Error storing file content for '%s', written %d/%d bytes: %s\n", filePath, n, file.Size, err)
+ logger.Errorf("storing file content for '%s', written %d/%d bytes: %s", filePath, n, file.Size, err)
}
if err := f.Close(); err != nil {
- log.Printf("Error closing restored file '%s': %s\n", filePath, err)
+ logger.Errorf("closing restored file '%s': %s", filePath, err)
}
}
}
@@ -154,7 +154,7 @@ func (r *Repo) loadVersions() []string {
var versions []string
files, err := os.ReadDir(r.path)
if err != nil {
- log.Fatalln(err)
+ logger.Fatal(err)
}
for _, f := range files {
if !f.IsDir() {
@@ -170,7 +170,7 @@ func listFiles(path string) []File {
err := filepath.Walk(path,
func(p string, i fs.FileInfo, err error) error {
if err != nil {
- log.Println(err)
+ logger.Error(err)
return err
}
if i.IsDir() {
@@ -180,7 +180,7 @@ func listFiles(path string) []File {
return nil
})
if err != nil {
- log.Println(err)
+ logger.Error(err)
}
return files
}
@@ -190,7 +190,7 @@ func unprefixFiles(files []File, prefix string) (ret []File) {
preSize := len(prefix)
for i, f := range files {
if !strings.HasPrefix(f.Path, prefix) {
- log.Println("Warning", f.Path, "is not prefixed by", prefix)
+ logger.Warning(f.Path, "is not prefixed by", prefix)
} else {
f.Path = f.Path[preSize:]
}
@@ -203,7 +203,7 @@ func concatFiles(files []File, stream io.WriteCloser) {
for _, f := range files {
file, err := os.Open(f.Path)
if err != nil {
- log.Printf("Error reading file '%s': %s\n", f.Path, err)
+ logger.Errorf("reading file '%s': %s", f.Path, err)
continue
}
io.Copy(stream, file)
@@ -218,10 +218,10 @@ func storeFileList(dest string, files []File) {
err = encoder.Encode(files)
}
if err != nil {
- log.Panicln(err)
+ logger.Panic(err)
}
if err = file.Close(); err != nil {
- log.Panicln(err)
+ logger.Panic(err)
}
}
@@ -233,10 +233,10 @@ func loadFileList(path string) []File {
err = decoder.Decode(&files)
}
if err != nil {
- log.Panicln(err)
+ logger.Panic(err)
}
if err = file.Close(); err != nil {
- log.Panicln(err)
+ logger.Panic(err)
}
return files
}
@@ -245,18 +245,18 @@ func (r *Repo) StoreChunkContent(id *ChunkId, reader io.Reader) error {
path := id.Path(r.path)
file, err := os.Create(path)
if err != nil {
- return fmt.Errorf("Error creating chunk for '%s'; %s\n", path, err)
+ return fmt.Errorf("creating chunk for '%s'; %s\n", path, err)
}
wrapper := r.chunkWriteWrapper(file)
n, err := io.Copy(wrapper, reader)
if err != nil {
- return fmt.Errorf("Error writing chunk content for '%s', written %d bytes: %s\n", path, n, err)
+ return fmt.Errorf("writing chunk content for '%s', written %d bytes: %s\n", path, n, err)
}
if err := wrapper.Close(); err != nil {
- return fmt.Errorf("Error closing write wrapper for '%s': %s\n", path, err)
+ return fmt.Errorf("closing write wrapper for '%s': %s\n", path, err)
}
if err := file.Close(); err != nil {
- return fmt.Errorf("Error closing chunk for '%s': %s\n", path, err)
+ return fmt.Errorf("closing chunk for '%s': %s\n", path, err)
}
return nil
}
@@ -269,18 +269,18 @@ func (r *Repo) LoadChunkContent(id *ChunkId) *bytes.Reader {
path := id.Path(r.path)
f, err := os.Open(path)
if err != nil {
- log.Printf("Cannot open chunk '%s': %s\n", path, err)
+ logger.Errorf("cannot open chunk '%s': %s", path, err)
}
wrapper, err := r.chunkReadWrapper(f)
if err != nil {
- log.Printf("Cannot create read wrapper for chunk '%s': %s\n", path, err)
+ logger.Errorf("cannot create read wrapper for chunk '%s': %s", path, err)
}
value, err = io.ReadAll(wrapper)
if err != nil {
- log.Panicf("Could not read from chunk '%s': %s\n", path, err)
+ logger.Panicf("could not read from chunk '%s': %s", path, err)
}
if err = f.Close(); err != nil {
- log.Printf("Could not close chunk '%s': %s\n", path, err)
+ logger.Warningf("could not close chunk '%s': %s", path, err)
}
r.chunkCache.Set(id, value)
}
@@ -293,7 +293,7 @@ func (r *Repo) loadChunks(versions []string, chunks chan<- IdentifiedChunk) {
p := filepath.Join(v, chunksName)
entries, err := os.ReadDir(p)
if err != nil {
- log.Printf("Error reading version '%05d' in '%s' chunks: %s", i, v, err)
+ logger.Errorf("reading version '%05d' in '%s' chunks: %s", i, v, err)
}
for j, e := range entries {
if e.IsDir() {
@@ -366,7 +366,7 @@ func (r *Repo) findSimilarChunk(chunk Chunk) (*ChunkId, bool) {
for _, id := range chunkIds {
count := similarChunks[*id]
count += 1
- log.Printf("Found %d %d time(s)", id, count)
+ logger.Infof("found %d %d time(s)", id, count)
if count > max {
similarChunk = id
}
@@ -381,7 +381,7 @@ func (r *Repo) tryDeltaEncodeChunk(temp BufferedChunk) (Chunk, bool) {
if found {
var buff bytes.Buffer
if err := r.differ.Diff(r.LoadChunkContent(id), temp.Reader(), &buff); err != nil {
- log.Println("Error trying delta encode chunk:", temp, "with source:", id, ":", err)
+ logger.Error("trying delta encode chunk:", temp, "with source:", id, ":", err)
} else {
return &DeltaChunk{
repo: r,
@@ -399,7 +399,7 @@ func (r *Repo) tryDeltaEncodeChunk(temp BufferedChunk) (Chunk, bool) {
func (r *Repo) encodeTempChunk(temp BufferedChunk, version int, last *uint64) (chunk Chunk, isDelta bool) {
chunk, isDelta = r.tryDeltaEncodeChunk(temp)
if isDelta {
- log.Println("Add new delta chunk")
+ logger.Info("add new delta chunk")
return
}
if chunk.Len() == r.chunkSize {
@@ -409,12 +409,12 @@ func (r *Repo) encodeTempChunk(temp BufferedChunk, version int, last *uint64) (c
r.hashAndStoreChunk(id, temp.Reader(), hasher)
err := r.StoreChunkContent(id, temp.Reader())
if err != nil {
- log.Println(err)
+ logger.Error(err)
}
- log.Println("Add new chunk", id)
+ logger.Info("add new chunk", id)
return NewStoredChunk(r, id), false
}
- log.Println("Add new partial chunk of size:", chunk.Len())
+ logger.Info("add new partial chunk of size:", chunk.Len())
return
}
@@ -453,7 +453,7 @@ func (r *Repo) matchStream(stream io.Reader, version int) []Chunk {
chunks = append(chunks, NewTempChunk(buff[:n]))
return chunks
} else {
- log.Panicf("Error Read only %d bytes with error '%s'\n", n, err)
+ logger.Panicf("matching stream, read only %d bytes with error '%s'", n, err)
}
}
hasher := rabinkarp64.NewFromPol(r.pol)
@@ -472,7 +472,7 @@ func (r *Repo) matchStream(stream io.Reader, version int) []Chunk {
chunks = append(chunks, c)
prev = nil
}
- log.Printf("Add existing chunk: %d\n", chunkId)
+ logger.Infof("add existing chunk: %d", chunkId)
chunks = append(chunks, NewStoredChunk(r, chunkId))
buff = make([]byte, 0, r.chunkSize*2)
for i := 0; i < r.chunkSize && err == nil; i++ {
@@ -523,7 +523,7 @@ func (r *Repo) restoreStream(stream io.WriteCloser, recipe []Chunk) {
rc.SetRepo(r)
}
if n, err := io.Copy(stream, c.Reader()); err != nil {
- log.Printf("Error copying to stream, read %d bytes from chunk: %s\n", n, err)
+ logger.Errorf("copying to stream, read %d bytes from chunk: %s", n, err)
}
}
stream.Close()
@@ -538,15 +538,15 @@ func storeRecipe(dest string, recipe []Chunk) {
encoder := gob.NewEncoder(file)
for _, c := range recipe {
if err = encoder.Encode(&c); err != nil {
- log.Panicln(err)
+ logger.Panic(err)
}
}
}
if err != nil {
- log.Panicln(err)
+ logger.Panic(err)
}
if err = file.Close(); err != nil {
- log.Panicln(err)
+ logger.Panic(err)
}
}
@@ -566,10 +566,10 @@ func loadRecipe(path string) []Chunk {
}
}
if err != nil && err != io.EOF {
- log.Panicln(err)
+ logger.Panic(err)
}
if err = file.Close(); err != nil {
- log.Panicln(err)
+ logger.Panic(err)
}
return recipe
}
diff --git a/repo_test.go b/repo_test.go
index 8748d88..b16ddc6 100644
--- a/repo_test.go
+++ b/repo_test.go
@@ -5,12 +5,12 @@ import (
"fmt"
"io"
"io/ioutil"
- "log"
"os"
"path/filepath"
"reflect"
"testing"
+ "github.com/n-peugnet/dna-backup/logger"
"github.com/n-peugnet/dna-backup/utils"
)
@@ -71,7 +71,7 @@ func (r *Repo) chunkStream(stream io.Reader, chunks chan<- []byte) {
prev += read
}
if err != nil && err != io.EOF {
- log.Println(err)
+ logger.Error(err)
}
if prev == r.chunkSize {
chunks <- buff
@@ -89,7 +89,7 @@ func storeChunks(dest string, chunks <-chan []byte) {
path := filepath.Join(dest, fmt.Sprintf(chunkIdFmt, i))
err := os.WriteFile(path, c, 0664)
if err != nil {
- log.Println(err)
+ logger.Error(err)
}
i++
}
@@ -240,7 +240,7 @@ func TestBsdiff(t *testing.T) {
newChunks := extractDeltaChunks(recipe)
assertLen(t, 2, newChunks, "New delta chunks:")
for _, c := range newChunks {
- log.Println("Patch size:", len(c.Patch))
+ logger.Info("Patch size:", len(c.Patch))
if len(c.Patch) >= repo.chunkSize/10 {
t.Errorf("Bsdiff of chunk is too large: %d", len(c.Patch))
}
diff --git a/sketch/sketch.go b/sketch/sketch.go
index 12e62fa..ca8c238 100644
--- a/sketch/sketch.go
+++ b/sketch/sketch.go
@@ -4,10 +4,10 @@ import (
"bytes"
"encoding/binary"
"io"
- "log"
"sync"
"github.com/chmduquesne/rollinghash/rabinkarp64"
+ "github.com/n-peugnet/dna-backup/logger"
)
type Sketch []uint64
@@ -31,13 +31,13 @@ func SketchChunk(r io.Reader, pol rabinkarp64.Pol, chunkSize int, wSize int, sfC
sfBuff := make([]byte, fBytes*fCount)
chunkLen, err := chunk.ReadFrom(r)
if err != nil {
- log.Panicln(chunkLen, err)
+ logger.Panic(chunkLen, err)
}
for f := 0; f < int(chunkLen)/fSize; f++ {
var fBuff bytes.Buffer
n, err := io.CopyN(&fBuff, &chunk, int64(fSize))
if err != nil {
- log.Println(n, err)
+ logger.Error(n, err)
continue
}
features = append(features, 0)
@@ -62,7 +62,7 @@ func calcFeature(wg *sync.WaitGroup, p rabinkarp64.Pol, r ReadByteReader, wSize
hasher := rabinkarp64.NewFromPol(p)
n, err := io.CopyN(hasher, r, int64(wSize))
if err != nil {
- log.Println(n, err)
+ logger.Error(n, err)
}
max := hasher.Sum64()
for w := 0; w < fSize-wSize; w++ {
diff --git a/tar.go b/tar.go
index e2703c8..ba7f3a5 100644
--- a/tar.go
+++ b/tar.go
@@ -3,8 +3,9 @@ package main
import (
"archive/tar"
"io"
- "log"
"os"
+
+ "github.com/n-peugnet/dna-backup/logger"
)
func streamFilesTar(files []File, stream io.WriteCloser) {
@@ -12,30 +13,30 @@ func streamFilesTar(files []File, stream io.WriteCloser) {
for _, f := range files {
file, err := os.Open(f.Path)
if err != nil {
- log.Printf("Error reading file '%s': %s\n", f.Path, err)
+ logger.Errorf("reading file '%s': %s", f.Path, err)
continue
}
stat, err := file.Stat()
if err != nil {
- log.Printf("Error getting stat of file '%s': %s\n", f.Path, err)
+ logger.Errorf("getting stat of file '%s': %s", f.Path, err)
continue
}
hdr, err := tar.FileInfoHeader(stat, "")
if err != nil {
- log.Printf("Error creating tar header for file '%s': %s\n", f.Path, err)
+ logger.Errorf("creating tar header for file '%s': %s", f.Path, err)
continue
}
if err := tarStream.WriteHeader(hdr); err != nil {
- log.Panicf("Error writing tar header to stream for file '%s': %s\n", f.Path, err)
+ logger.Panicf("writing tar header to stream for file '%s': %s", f.Path, err)
}
if _, err := io.Copy(tarStream, file); err != nil {
- log.Panicf("Error writing file to stream '%s': %s\n", f.Path, err)
+ logger.Panicf("writing file to stream '%s': %s", f.Path, err)
}
}
if err := tarStream.Close(); err != nil {
- log.Panic("Error closing tar stream:", err)
+ logger.Panic("closing tar stream:", err)
}
if err := stream.Close(); err != nil {
- log.Panic("Error closing stream:", err)
+ logger.Panic("closing stream:", err)
}
}