@verb{|apt install golang|}
@end table
-@verbatim
-$ [fetch|wget] http://www.nncpgo.org/download/nncp-5.1.2.tar.xz
-$ [fetch|wget] http://www.nncpgo.org/download/nncp-5.1.2.tar.xz.sig
-$ gpg --verify nncp-5.1.2.tar.xz.sig nncp-5.1.2.tar.xz
-$ xz --decompress --stdout nncp-5.1.2.tar.xz | tar xf -
-$ make -C nncp-5.1.2 all
-@end verbatim
+@example
+$ [fetch|wget] http://www.nncpgo.org/download/nncp-@value{VERSION}.tar.xz
+$ [fetch|wget] http://www.nncpgo.org/download/nncp-@value{VERSION}.tar.xz.sig
+$ gpg --verify nncp-@value{VERSION}.tar.xz.sig nncp-@value{VERSION}.tar.xz
+$ xz --decompress --stdout nncp-@value{VERSION}.tar.xz | tar xf -
+$ make -C nncp-@value{VERSION} all
+@end example
There is @command{install} make-target respecting @env{DESTDIR}. It will
install binaries and info-documentation:
-@verbatim
-# make -C nncp-5.1.2 install PREFIX=/usr/local
-@end verbatim
+@example
+# make -C nncp-@value{VERSION} install PREFIX=/usr/local
+@end example
spool: /var/spool/nncp
log: /var/spool/nncp/log
umask: "022"
+ noprogress: true
notify: {
file: {
override their umask to specified octal mask. Useful for using with
@ref{Shared spool, shared spool directories}.
+Enabled @strong{noprogress} option disables progress showing for many
+commands by default. You can always force its showing with the
+@option{-progress} command line option anyway.
+
@anchor{CfgNotify}
@strong{notify} section contains notification settings for successfully
tossed file, freq and exec packets. Corresponding @strong{from} and
Print only errors, omit simple informational messages. In any case
those messages are logged, so you can reread them using
@ref{nncp-log} command.
+@item -progress, -noprogress
+ Either force progress showing, or disable it.
@item -version
Print version information.
@item -warranty
@item @code{github.com/dustin/go-humanize} @tab MIT
@item @code{github.com/flynn/noise} @tab BSD 3-Clause
@item @code{github.com/gorhill/cronexpr} @tab GNU GPLv3
+@item @code{github.com/gosuri/uilive} @tab MIT
@item @code{github.com/hjson/hjson-go} @tab MIT
@item @code{github.com/klauspost/compress} @tab BSD 3-Clause
@item @code{go.cypherpunks.ru/balloon} @tab GNU LGPLv3
@end itemize
Then you could verify tarballs signature:
-@verbatim
-$ gpg --verify nncp-5.1.2.tar.xz.sig nncp-5.1.2.tar.xz
-@end verbatim
+
+@example
+$ gpg --verify nncp-@value{VERSION}.tar.xz.sig nncp-@value{VERSION}.tar.xz
+@end example
@node Новости
@section Новости
+@node Релиз 5.2.0
+@subsection Релиз 5.2.0
+@itemize
+
+@item
+Большинство команд по умолчанию показывают однострочный прогресс
+выполнения операции. Появились @option{-progress}, @option{-noprogress}
+опции командной строки, @option{noprogress} опция конфигурационного
+файла.
+
+@item
+Исправлен некорректный код возврата @command{nncp-check} команды,
+который возвращал ошибку, когда всё хорошо.
+
+@end itemize
+
@node Релиз 5.1.2
@subsection Релиз 5.1.2
@itemize
See also this page @ref{Новости, on russian}.
+@node Release 5.2.0
+@section Release 5.2.0
+@itemize
+
+@item
+Most commands by default show one-line operation progress.
+@option{-progress}, @option{-noprogress} command line options and the
+@option{noprogress} configuration file option appeared.
+
+@item
+Fixed incorrect @command{nncp-check} command return code: it returned an
+error code even when everything was good.
+
+@end itemize
+
@node Release 5.1.2
@section Release 5.1.2
@itemize
# $FreeBSD: head/net/nncp/Makefile 517819 2019-11-17 11:51:56Z dmgk $
PORTNAME= nncp
-DISTVERSION= 5.1.2
+DISTVERSION= 5.2.0
CATEGORIES= net
MASTER_SITES= http://www.nncpgo.org/download/
import (
"net"
- "strconv"
"github.com/gorhill/cronexpr"
)
state.Wait()
ctx.LogI("call-finish", SDS{
"node": state.Node.Id,
- "duration": strconv.FormatInt(int64(state.Duration.Seconds()), 10),
- "rxbytes": strconv.FormatInt(state.RxBytes, 10),
- "txbytes": strconv.FormatInt(state.TxBytes, 10),
- "rxspeed": strconv.FormatInt(state.RxSpeed, 10),
- "txspeed": strconv.FormatInt(state.TxSpeed, 10),
+ "duration": int64(state.Duration.Seconds()),
+ "rxbytes": state.RxBytes,
+ "txbytes": state.TxBytes,
+ "rxspeed": state.RxSpeed,
+ "txspeed": state.TxSpeed,
}, "")
isGood = true
conn.Close()
break
} else {
- ctx.LogE("call-start", SdsAdd(sds, SDS{"err": err}), "")
+ ctx.LogE("call-start", sds, err, "")
conn.Close()
}
}
Log string `json:"log"`
Umask string `json:"umask",omitempty`
+ OmitPrgrs bool `json:"noprogress",omitempty`
+
Notify *NotifyJSON `json:"notify,omitempty"`
Self *NodeOurJSON `json:"self"`
rInt := int(r)
umaskForce = &rInt
}
+ showPrgrs := true
+ if cfgJSON.OmitPrgrs {
+ showPrgrs = false
+ }
ctx := Ctx{
Spool: spoolPath,
LogPath: logPath,
UmaskForce: umaskForce,
+ ShowPrgrs: showPrgrs,
Self: self,
Neigh: make(map[NodeId]*Node, len(cfgJSON.Neigh)),
Alias: make(map[string]*NodeId),
import (
"bufio"
"bytes"
+ "errors"
"io"
"log"
"golang.org/x/crypto/blake2b"
)
-func Check(src io.Reader, checksum []byte) (bool, error) {
+func Check(src io.Reader, checksum []byte, sds SDS, showPrgrs bool) (bool, error) {
hsh, err := blake2b.New256(nil)
if err != nil {
log.Fatalln(err)
}
- if _, err = io.Copy(hsh, bufio.NewReader(src)); err != nil {
+ if _, err = CopyProgressed(hsh, bufio.NewReader(src), sds, showPrgrs); err != nil {
return false, err
}
return bytes.Compare(hsh.Sum(nil), checksum) == 0, nil
isBad := false
for job := range ctx.Jobs(nodeId, xx) {
sds := SDS{
- "xx": string(xx),
- "node": nodeId,
- "pkt": ToBase32(job.HshValue[:]),
+ "xx": string(xx),
+ "node": nodeId,
+ "pkt": ToBase32(job.HshValue[:]),
+ "fullsize": job.Size,
}
- ctx.LogP("check", sds, "")
- gut, err := Check(job.Fd, job.HshValue[:])
+ gut, err := Check(job.Fd, job.HshValue[:], sds, ctx.ShowPrgrs)
job.Fd.Close()
if err != nil {
- ctx.LogE("check", SdsAdd(sds, SDS{"err": err}), "")
+ ctx.LogE("check", sds, err, "")
return true
}
if !gut {
isBad = true
- ctx.LogE("check", sds, "bad")
+ ctx.LogE("check", sds, errors.New("bad"), "")
}
}
return isBad
"archive/tar"
"bufio"
"bytes"
+ "errors"
"flag"
"fmt"
"io"
"log"
"os"
"path/filepath"
- "strconv"
"strings"
xdr "github.com/davecgh/go-xdr/xdr2"
spoolPath = flag.String("spool", "", "Override path to spool")
logPath = flag.String("log", "", "Override path to logfile")
quiet = flag.Bool("quiet", false, "Print only errors")
+ showPrgrs = flag.Bool("progress", false, "Force progress showing")
+ omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
warranty = flag.Bool("warranty", false, "Print warranty information")
log.Fatalln("At least one of -rx and -tx must be specified")
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+ ctx, err := nncp.CtxFromCmdline(
+ *cfgPath,
+ *spoolPath,
+ *logPath,
+ *quiet,
+ *showPrgrs,
+ *omitPrgrs,
+ *debug,
+ )
if err != nil {
log.Fatalln("Error during initialization:", err)
}
}); err != nil {
log.Fatalln("Error writing tar header:", err)
}
- if _, err = io.Copy(tarWr, job.Fd); err != nil {
+ if _, err = nncp.CopyProgressed(
+ tarWr, job.Fd,
+ nncp.SdsAdd(sds, nncp.SDS{
+ "pkt": nncp.ToBase32(job.HshValue[:]),
+ "fullsize": job.Size,
+ }),
+ ctx.ShowPrgrs,
+ ); err != nil {
log.Fatalln("Error during copying to tar:", err)
}
job.Fd.Close()
log.Fatalln("Error during deletion:", err)
}
}
- ctx.LogI("nncp-bundle", nncp.SdsAdd(sds, nncp.SDS{
- "size": strconv.FormatInt(job.Size, 10),
- }), "")
+ ctx.LogI("nncp-bundle", nncp.SdsAdd(sds, nncp.SDS{"size": job.Size}), "")
}
}
if err = tarWr.Close(); err != nil {
}
pktName = filepath.Base(entry.Name)
if _, err = nncp.FromBase32(pktName); err != nil {
- ctx.LogD("nncp-bundle", sds, "Bad packet name")
+ ctx.LogD("nncp-bundle", nncp.SdsAdd(sds, nncp.SDS{"err": "bad packet name"}), "")
continue
}
if _, err = io.ReadFull(tarR, pktEncBuf); err != nil {
if _, err = hsh.Write(pktEncBuf); err != nil {
log.Fatalln("Error during writing:", err)
}
- if _, err = io.Copy(hsh, tarR); err != nil {
+ if _, err = nncp.CopyProgressed(
+ hsh, tarR,
+ nncp.SdsAdd(sds, nncp.SDS{"fullsize": entry.Size}),
+ ctx.ShowPrgrs,
+ ); err != nil {
log.Fatalln("Error during copying:", err)
}
if nncp.ToBase32(hsh.Sum(nil)) == pktName {
os.Remove(dstPath)
}
} else {
- ctx.LogE("nncp-bundle", sds, "bad checksum")
+ ctx.LogE("nncp-bundle", sds, errors.New("bad checksum"), "")
}
continue
}
}
sds["node"] = nncp.ToBase32(pktEnc.Recipient[:])
sds["pkt"] = pktName
+ sds["fullsize"] = entry.Size
selfPath = filepath.Join(ctx.Spool, ctx.SelfId.String(), string(nncp.TRx))
dstPath = filepath.Join(selfPath, pktName)
if _, err = os.Stat(dstPath); err == nil || !os.IsNotExist(err) {
if _, err = hsh.Write(pktEncBuf); err != nil {
log.Fatalln("Error during writing:", err)
}
- if _, err = io.Copy(hsh, tarR); err != nil {
+ if _, err = nncp.CopyProgressed(hsh, tarR, sds, ctx.ShowPrgrs); err != nil {
log.Fatalln("Error during copying:", err)
}
if nncp.ToBase32(hsh.Sum(nil)) != pktName {
- ctx.LogE("nncp-bundle", sds, "bad checksum")
+ ctx.LogE("nncp-bundle", sds, errors.New("bad checksum"), "")
continue
}
} else {
if _, err = tmp.W.Write(pktEncBuf); err != nil {
log.Fatalln("Error during writing:", err)
}
- if _, err = io.Copy(tmp.W, tarR); err != nil {
+ if _, err = nncp.CopyProgressed(tmp.W, tarR, sds, ctx.ShowPrgrs); err != nil {
log.Fatalln("Error during copying:", err)
}
if err = tmp.W.Flush(); err != nil {
log.Fatalln("Error during commiting:", err)
}
} else {
- ctx.LogE("nncp-bundle", sds, "bad checksum")
+ ctx.LogE("nncp-bundle", sds, errors.New("bad checksum"), "")
tmp.Cancel()
continue
}
}
} else {
if *dryRun {
- if _, err = io.Copy(ioutil.Discard, tarR); err != nil {
+ if _, err = nncp.CopyProgressed(ioutil.Discard, tarR, sds, ctx.ShowPrgrs); err != nil {
log.Fatalln("Error during copying:", err)
}
} else {
if _, err = bufTmp.Write(pktEncBuf); err != nil {
log.Fatalln("Error during writing:", err)
}
- if _, err = io.Copy(bufTmp, tarR); err != nil {
+ if _, err = nncp.CopyProgressed(bufTmp, tarR, sds, ctx.ShowPrgrs); err != nil {
log.Fatalln("Error during copying:", err)
}
if err = bufTmp.Flush(); err != nil {
}
}
ctx.LogI("nncp-bundle", nncp.SdsAdd(sds, nncp.SDS{
- "size": strconv.FormatInt(entry.Size, 10),
+ "size": sds["fullsize"],
}), "")
}
}
spoolPath = flag.String("spool", "", "Override path to spool")
logPath = flag.String("log", "", "Override path to logfile")
quiet = flag.Bool("quiet", false, "Print only errors")
+ showPrgrs = flag.Bool("progress", false, "Force progress showing")
+ omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
warranty = flag.Bool("warranty", false, "Print warranty information")
log.Fatalln("-rx and -tx can not be set simultaneously")
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+ ctx, err := nncp.CtxFromCmdline(
+ *cfgPath,
+ *spoolPath,
+ *logPath,
+ *quiet,
+ *showPrgrs,
+ *omitPrgrs,
+ *debug,
+ )
if err != nil {
log.Fatalln("Error during initialization:", err)
}
package main
import (
+ "errors"
"flag"
"fmt"
"log"
"os"
- "strconv"
"sync"
"time"
spoolPath = flag.String("spool", "", "Override path to spool")
logPath = flag.String("log", "", "Override path to logfile")
quiet = flag.Bool("quiet", false, "Print only errors")
+ showPrgrs = flag.Bool("progress", false, "Force progress showing")
+ omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
warranty = flag.Bool("warranty", false, "Print warranty information")
return
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+ ctx, err := nncp.CtxFromCmdline(
+ *cfgPath,
+ *spoolPath,
+ *logPath,
+ *quiet,
+ *showPrgrs,
+ *omitPrgrs,
+ *debug,
+ )
if err != nil {
log.Fatalln("Error during initialization:", err)
}
} else {
addrs = append(addrs, *call.Addr)
}
- sds := nncp.SDS{"node": node.Id, "callindex": strconv.Itoa(i)}
+ sds := nncp.SDS{"node": node.Id, "callindex": i}
for {
n := time.Now()
t := call.Cron.Next(n)
ctx.LogD("caller", sds, t.String())
if t.IsZero() {
- ctx.LogE("caller", sds, "got zero time")
+ ctx.LogE("caller", sds, errors.New("got zero time"), "")
return
}
time.Sleep(t.Sub(n))
return
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, "", "", false, false)
+ ctx, err := nncp.CtxFromCmdline(*cfgPath, "", "", false, false, false, false)
if err != nil {
log.Fatalln("Error during initialization:", err)
}
log: %s
# Enforce specified umask usage
# umask: "022"
+ # Omit progress showing by default
+ # noprogress: true
# Enable notification email sending
# notify: {
spoolPath = flag.String("spool", "", "Override path to spool")
logPath = flag.String("log", "", "Override path to logfile")
quiet = flag.Bool("quiet", false, "Print only errors")
+ showPrgrs = flag.Bool("progress", false, "Force progress showing")
+ omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
warranty = flag.Bool("warranty", false, "Print warranty information")
return
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+ ctx, err := nncp.CtxFromCmdline(
+ *cfgPath,
+ *spoolPath,
+ *logPath,
+ *quiet,
+ *showPrgrs,
+ *omitPrgrs,
+ *debug,
+ )
if err != nil {
log.Fatalln("Error during initialization:", err)
}
"log"
"net"
"os"
- "strconv"
"time"
"go.cypherpunks.ru/nncp/v5"
state.Wait()
ctx.LogI("call-finish", nncp.SDS{
"node": state.Node.Id,
- "duration": strconv.FormatInt(int64(state.Duration.Seconds()), 10),
- "rxbytes": strconv.FormatInt(state.RxBytes, 10),
- "txbytes": strconv.FormatInt(state.TxBytes, 10),
- "rxspeed": strconv.FormatInt(state.RxSpeed, 10),
- "txspeed": strconv.FormatInt(state.TxSpeed, 10),
+ "duration": state.Duration.Seconds(),
+ "rxbytes": state.RxBytes,
+ "txbytes": state.TxBytes,
+ "rxspeed": state.RxSpeed,
+ "txspeed": state.TxSpeed,
}, "")
} else {
nodeId := "unknown"
if state.Node != nil {
nodeId = state.Node.Id.String()
}
- ctx.LogE("call-start", nncp.SDS{"node": nodeId, "err": err}, "")
+ ctx.LogE("call-start", nncp.SDS{"node": nodeId}, err, "")
}
}
spoolPath = flag.String("spool", "", "Override path to spool")
logPath = flag.String("log", "", "Override path to logfile")
quiet = flag.Bool("quiet", false, "Print only errors")
+ showPrgrs = flag.Bool("progress", false, "Force progress showing")
+ omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
warranty = flag.Bool("warranty", false, "Print warranty information")
log.Fatalln(err)
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+ ctx, err := nncp.CtxFromCmdline(
+ *cfgPath,
+ *spoolPath,
+ *logPath,
+ *quiet,
+ *showPrgrs,
+ *omitPrgrs,
+ *debug,
+ )
if err != nil {
log.Fatalln("Error during initialization:", err)
}
spoolPath = flag.String("spool", "", "Override path to spool")
logPath = flag.String("log", "", "Override path to logfile")
quiet = flag.Bool("quiet", false, "Print only errors")
+ showPrgrs = flag.Bool("progress", false, "Force progress showing")
+ omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
warranty = flag.Bool("warranty", false, "Print warranty information")
log.Fatalln(err)
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+ ctx, err := nncp.CtxFromCmdline(
+ *cfgPath,
+ *spoolPath,
+ *logPath,
+ *quiet,
+ *showPrgrs,
+ *omitPrgrs,
+ *debug,
+ )
if err != nil {
log.Fatalln("Error during initialization:", err)
}
spoolPath = flag.String("spool", "", "Override path to spool")
logPath = flag.String("log", "", "Override path to logfile")
quiet = flag.Bool("quiet", false, "Print only errors")
+ showPrgrs = flag.Bool("progress", false, "Force progress showing")
+ omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
warranty = flag.Bool("warranty", false, "Print warranty information")
log.Fatalln(err)
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+ ctx, err := nncp.CtxFromCmdline(
+ *cfgPath,
+ *spoolPath,
+ *logPath,
+ *quiet,
+ *showPrgrs,
+ *omitPrgrs,
+ *debug,
+ )
if err != nil {
log.Fatalln("Error during initialization:", err)
}
spoolPath = flag.String("spool", "", "Override path to spool")
logPath = flag.String("log", "", "Override path to logfile")
quiet = flag.Bool("quiet", false, "Print only errors")
+ showPrgrs = flag.Bool("progress", false, "Force progress showing")
+ omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
warranty = flag.Bool("warranty", false, "Print warranty information")
log.Fatalln(err)
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+ ctx, err := nncp.CtxFromCmdline(
+ *cfgPath,
+ *spoolPath,
+ *logPath,
+ *quiet,
+ *showPrgrs,
+ *omitPrgrs,
+ *debug,
+ )
if err != nil {
log.Fatalln("Error during initialization:", err)
}
return
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, "", *logPath, false, *debug)
+ ctx, err := nncp.CtxFromCmdline(*cfgPath, "", *logPath, false, false, false, *debug)
if err != nil {
log.Fatalln("Error during initialization:", err)
}
_, err = xdr.Unmarshal(bytes.NewReader(beginning), &pktEnc)
if err == nil && pktEnc.Magic == nncp.MagicNNCPEv4 {
if *dump {
- ctx, err := nncp.CtxFromCmdline(*cfgPath, "", "", false, false)
+ ctx, err := nncp.CtxFromCmdline(*cfgPath, "", "", false, false, false, false)
if err != nil {
log.Fatalln("Error during initialization:", err)
}
"bufio"
"bytes"
"encoding/hex"
+ "errors"
"flag"
"fmt"
"hash"
}
var metaPkt nncp.ChunkedMeta
if _, err = xdr.Unmarshal(fd, &metaPkt); err != nil {
- ctx.LogE("nncp-reass", nncp.SDS{"path": path, "err": err}, "bad meta file")
+ ctx.LogE("nncp-reass", nncp.SDS{"path": path}, err, "bad meta file")
return false
}
fd.Close()
if metaPkt.Magic != nncp.MagicNNCPMv1 {
- ctx.LogE("nncp-reass", nncp.SDS{"path": path, "err": nncp.BadMagic}, "")
+ ctx.LogE("nncp-reass", nncp.SDS{"path": path}, nncp.BadMagic, "")
return false
}
metaName := filepath.Base(path)
if !strings.HasSuffix(metaName, nncp.ChunkedSuffixMeta) {
- ctx.LogE("nncp-reass", nncp.SDS{
- "path": path,
- "err": "invalid filename suffix",
- }, "")
+ ctx.LogE("nncp-reass", nncp.SDS{"path": path}, errors.New("invalid filename suffix"), "")
return false
}
mainName := strings.TrimSuffix(metaName, nncp.ChunkedSuffixMeta)
for chunkNum, chunkPath := range chunksPaths {
fi, err := os.Stat(chunkPath)
if err != nil && os.IsNotExist(err) {
- ctx.LogI("nncp-reass", nncp.SDS{
- "path": path,
- "chunk": strconv.Itoa(chunkNum),
- }, "missing")
+ ctx.LogI("nncp-reass", nncp.SDS{"path": path, "chunk": chunkNum}, "missing")
allChunksExist = false
continue
}
badSize = uint64(fi.Size()) != metaPkt.ChunkSize
}
if badSize {
- ctx.LogE("nncp-reass", nncp.SDS{
- "path": path,
- "chunk": strconv.Itoa(chunkNum),
- }, "invalid size")
+ ctx.LogE(
+ "nncp-reass",
+ nncp.SDS{"path": path, "chunk": chunkNum},
+ errors.New("invalid size"), "",
+ )
allChunksExist = false
}
}
if err != nil {
log.Fatalln("Can not open file:", err)
}
+ fi, err := fd.Stat()
+ if err != nil {
+ log.Fatalln("Can not stat file:", err)
+ }
hsh, err = blake2b.New256(nil)
if err != nil {
log.Fatalln(err)
}
- if _, err = io.Copy(hsh, bufio.NewReader(fd)); err != nil {
+ if _, err = nncp.CopyProgressed(hsh, bufio.NewReader(fd), nncp.SDS{
+ "pkt": chunkPath,
+ "fullsize": fi.Size(),
+ }, ctx.ShowPrgrs); err != nil {
log.Fatalln(err)
}
fd.Close()
if bytes.Compare(hsh.Sum(nil), metaPkt.Checksums[chunkNum][:]) != 0 {
- ctx.LogE("nncp-reass", nncp.SDS{
- "path": path,
- "chunk": strconv.Itoa(chunkNum),
- }, "checksum is bad")
+ ctx.LogE(
+ "nncp-reass",
+ nncp.SDS{"path": path, "chunk": chunkNum},
+ errors.New("checksum is bad"), "",
+ )
allChecksumsGood = false
}
}
if err != nil {
log.Fatalln("Can not open file:", err)
}
- if _, err = io.Copy(dstW, bufio.NewReader(fd)); err != nil {
+ fi, err := fd.Stat()
+ if err != nil {
+ log.Fatalln("Can not stat file:", err)
+ }
+ if _, err = nncp.CopyProgressed(dstW, bufio.NewReader(fd), nncp.SDS{
+ "pkt": chunkPath,
+ "fullsize": fi.Size(),
+ }, ctx.ShowPrgrs); err != nil {
log.Fatalln(err)
}
fd.Close()
if !keep {
if err = os.Remove(chunkPath); err != nil {
- ctx.LogE("nncp-reass", nncp.SdsAdd(sds, nncp.SDS{
- "chunk": strconv.Itoa(chunkNum),
- "err": err,
- }), "")
+ ctx.LogE("nncp-reass", nncp.SdsAdd(sds, nncp.SDS{"chunk": chunkNum}), err, "")
hasErrors = true
}
}
ctx.LogD("nncp-reass", sds, "written")
if !keep {
if err = os.Remove(path); err != nil {
- ctx.LogE("nncp-reass", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "")
+ ctx.LogE("nncp-reass", sds, err, "")
hasErrors = true
}
}
dir, err := os.Open(dirPath)
defer dir.Close()
if err != nil {
- ctx.LogE("nncp-reass", nncp.SDS{"path": dirPath, "err": err}, "")
+ ctx.LogE("nncp-reass", nncp.SDS{"path": dirPath}, err, "")
return nil
}
fis, err := dir.Readdir(0)
dir.Close()
if err != nil {
- ctx.LogE("nncp-reass", nncp.SDS{"path": dirPath, "err": err}, "")
+ ctx.LogE("nncp-reass", nncp.SDS{"path": dirPath}, err, "")
return nil
}
metaPaths := make([]string, 0)
spoolPath = flag.String("spool", "", "Override path to spool")
logPath = flag.String("log", "", "Override path to logfile")
quiet = flag.Bool("quiet", false, "Print only errors")
+ showPrgrs = flag.Bool("progress", false, "Force progress showing")
+ omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
warranty = flag.Bool("warranty", false, "Print warranty information")
return
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+ ctx, err := nncp.CtxFromCmdline(
+ *cfgPath,
+ *spoolPath,
+ *logPath,
+ *quiet,
+ *showPrgrs,
+ *omitPrgrs,
+ *debug,
+ )
if err != nil {
log.Fatalln("Error during initialization:", err)
}
return
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, "", *quiet, *debug)
+ ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, "", *quiet, false, false, *debug)
if err != nil {
log.Fatalln("Error during initialization:", err)
}
ctx.Umask()
if *doTmp {
- err = filepath.Walk(filepath.Join(ctx.Spool, "tmp"), func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if info.IsDir() {
- return nil
- }
- ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
- return os.Remove(path)
- })
+ err = filepath.Walk(
+ filepath.Join(ctx.Spool, "tmp"),
+ func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ return nil
+ }
+ ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
+ return os.Remove(path)
+ })
if err != nil {
log.Fatalln("Error during walking:", err)
}
log.Fatalln("Invalid -node specified:", err)
}
remove := func(xx nncp.TRxTx) error {
- return filepath.Walk(filepath.Join(ctx.Spool, node.Id.String(), string(xx)), func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if info.IsDir() {
+ return filepath.Walk(
+ filepath.Join(ctx.Spool, node.Id.String(), string(xx)),
+ func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ return nil
+ }
+ if *doSeen && strings.HasSuffix(info.Name(), nncp.SeenSuffix) {
+ ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
+ return os.Remove(path)
+ }
+ if *doPart && strings.HasSuffix(info.Name(), nncp.PartSuffix) {
+ ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
+ return os.Remove(path)
+ }
+ if *pktRaw != "" && filepath.Base(info.Name()) == *pktRaw {
+ ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
+ return os.Remove(path)
+ }
+ if !*doSeen &&
+ !*doPart &&
+ (*doRx || *doTx) &&
+ ((*doRx && xx == nncp.TRx) || (*doTx && xx == nncp.TTx)) {
+ ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
+ return os.Remove(path)
+ }
return nil
- }
- if *doSeen && strings.HasSuffix(info.Name(), nncp.SeenSuffix) {
- ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
- return os.Remove(path)
- }
- if *doPart && strings.HasSuffix(info.Name(), nncp.PartSuffix) {
- ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
- return os.Remove(path)
- }
- if *pktRaw != "" && filepath.Base(info.Name()) == *pktRaw {
- ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
- return os.Remove(path)
- }
- if !*doSeen &&
- !*doPart &&
- (*doRx || *doTx) &&
- ((*doRx && xx == nncp.TRx) || (*doTx && xx == nncp.TTx)) {
- ctx.LogI("nncp-rm", nncp.SDS{"file": path}, "")
- return os.Remove(path)
- }
- return nil
- })
+ })
}
if *pktRaw != "" || *doRx || *doSeen || *doPart {
if err = remove(nncp.TRx); err != nil {
return
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, "", false, *debug)
+ ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, "", false, false, false, *debug)
if err != nil {
log.Fatalln("Error during initialization:", err)
}
spoolPath = flag.String("spool", "", "Override path to spool")
logPath = flag.String("log", "", "Override path to logfile")
quiet = flag.Bool("quiet", false, "Print only errors")
+ showPrgrs = flag.Bool("progress", false, "Force progress showing")
+ omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
warranty = flag.Bool("warranty", false, "Print warranty information")
log.Fatalln(err)
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+ ctx, err := nncp.CtxFromCmdline(
+ *cfgPath,
+ *spoolPath,
+ *logPath,
+ *quiet,
+ *showPrgrs,
+ *omitPrgrs,
+ *debug,
+ )
if err != nil {
log.Fatalln("Error during initialization:", err)
}
import (
"bufio"
+ "errors"
"flag"
"fmt"
"io"
"log"
"os"
"path/filepath"
- "strconv"
xdr "github.com/davecgh/go-xdr/xdr2"
"go.cypherpunks.ru/nncp/v5"
spoolPath = flag.String("spool", "", "Override path to spool")
logPath = flag.String("log", "", "Override path to logfile")
quiet = flag.Bool("quiet", false, "Print only errors")
+ showPrgrs = flag.Bool("progress", false, "Force progress showing")
+ omitPrgrs = flag.Bool("noprogress", false, "Omit progress showing")
debug = flag.Bool("debug", false, "Print debug messages")
version = flag.Bool("version", false, "Print version information")
warranty = flag.Bool("warranty", false, "Print warranty information")
log.Fatalln("-rx and -tx can not be set simultaneously")
}
- ctx, err := nncp.CtxFromCmdline(*cfgPath, *spoolPath, *logPath, *quiet, *debug)
+ ctx, err := nncp.CtxFromCmdline(
+ *cfgPath,
+ *spoolPath,
+ *logPath,
+ *quiet,
+ *showPrgrs,
+ *omitPrgrs,
+ *debug,
+ )
if err != nil {
log.Fatalln("Error during initialization:", err)
}
ctx.LogD("nncp-xfer", sds, "no dir")
goto Tx
}
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "stat")
+ ctx.LogE("nncp-xfer", sds, err, "stat")
isBad = true
goto Tx
}
dir, err = os.Open(selfPath)
if err != nil {
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "open")
+ ctx.LogE("nncp-xfer", sds, err, "open")
isBad = true
goto Tx
}
fis, err = dir.Readdir(0)
dir.Close()
if err != nil {
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "read")
+ ctx.LogE("nncp-xfer", sds, err, "read")
isBad = true
goto Tx
}
}
dir, err = os.Open(filepath.Join(selfPath, fi.Name()))
if err != nil {
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "open")
+ ctx.LogE("nncp-xfer", sds, err, "open")
isBad = true
continue
}
fisInt, err := dir.Readdir(0)
dir.Close()
if err != nil {
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "read")
+ ctx.LogE("nncp-xfer", sds, err, "read")
isBad = true
continue
}
if !fi.IsDir() {
continue
}
+ // Check that it is valid Base32 encoding
+ if _, err = nncp.NodeIdFromString(fiInt.Name()); err != nil {
+ continue
+ }
filename := filepath.Join(dir.Name(), fiInt.Name())
sds["file"] = filename
delete(sds, "size")
fd, err := os.Open(filename)
if err != nil {
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "open")
+ ctx.LogE("nncp-xfer", sds, err, "open")
isBad = true
continue
}
fd.Close()
continue
}
- sds["size"] = strconv.FormatInt(fiInt.Size(), 10)
+ sds["size"] = fiInt.Size()
if !ctx.IsEnoughSpace(fiInt.Size()) {
- ctx.LogE("nncp-xfer", sds, "is not enough space")
+ ctx.LogE("nncp-xfer", sds, errors.New("is not enough space"), "")
fd.Close()
continue
}
if err != nil {
log.Fatalln(err)
}
- if _, err = io.CopyN(tmp.W, bufio.NewReader(fd), fiInt.Size()); err != nil {
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "copy")
+ r, w := io.Pipe()
+ go func() {
+ _, err := io.CopyN(w, bufio.NewReader(fd), fiInt.Size())
+ if err == nil {
+ w.Close()
+ return
+ }
+ ctx.LogE("nncp-xfer", sds, err, "copy")
+ w.CloseWithError(err)
+ }()
+ if _, err = nncp.CopyProgressed(tmp.W, r, nncp.SdsAdd(sds, nncp.SDS{
+ "pkt": filename,
+ "fullsize": sds["size"],
+ }), ctx.ShowPrgrs); err != nil {
+ ctx.LogE("nncp-xfer", sds, err, "copy")
isBad = true
- fd.Close()
+ }
+ fd.Close()
+ if isBad {
tmp.Cancel()
continue
}
- fd.Close()
if err = tmp.Commit(filepath.Join(
ctx.Spool,
nodeId.String(),
ctx.LogI("nncp-xfer", sds, "")
if !*keep {
if err = os.Remove(filename); err != nil {
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "remove")
+ ctx.LogE("nncp-xfer", sds, err, "remove")
isBad = true
}
}
}
if err = os.Mkdir(nodePath, os.FileMode(0777)); err != nil {
ctx.UnlockDir(dirLock)
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "mkdir")
+ ctx.LogE("nncp-xfer", sds, err, "mkdir")
isBad = true
continue
}
} else {
ctx.UnlockDir(dirLock)
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "stat")
+ ctx.LogE("nncp-xfer", sds, err, "stat")
isBad = true
continue
}
if os.IsNotExist(err) {
if err = os.Mkdir(dstPath, os.FileMode(0777)); err != nil {
ctx.UnlockDir(dirLock)
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "mkdir")
+ ctx.LogE("nncp-xfer", sds, err, "mkdir")
isBad = true
continue
}
} else {
ctx.UnlockDir(dirLock)
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "stat")
+ ctx.LogE("nncp-xfer", sds, err, "stat")
isBad = true
continue
}
}
tmp, err := nncp.TempFile(dstPath, "xfer")
if err != nil {
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "mktemp")
+ ctx.LogE("nncp-xfer", sds, err, "mktemp")
job.Fd.Close()
isBad = true
break
sds["tmp"] = tmp.Name()
ctx.LogD("nncp-xfer", sds, "created")
bufW := bufio.NewWriter(tmp)
- copied, err := io.Copy(bufW, bufio.NewReader(job.Fd))
+ copied, err := nncp.CopyProgressed(
+ bufW,
+ bufio.NewReader(job.Fd),
+ nncp.SdsAdd(sds, nncp.SDS{"fullsize": job.Size}),
+ ctx.ShowPrgrs,
+ )
job.Fd.Close()
if err != nil {
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "copy")
+ ctx.LogE("nncp-xfer", sds, err, "copy")
tmp.Close()
isBad = true
continue
}
if err = bufW.Flush(); err != nil {
tmp.Close()
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "flush")
+ ctx.LogE("nncp-xfer", sds, err, "flush")
isBad = true
continue
}
if err = tmp.Sync(); err != nil {
tmp.Close()
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "sync")
+ ctx.LogE("nncp-xfer", sds, err, "sync")
isBad = true
continue
}
tmp.Close()
if err = os.Rename(tmp.Name(), filepath.Join(dstPath, pktName)); err != nil {
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "rename")
+ ctx.LogE("nncp-xfer", sds, err, "rename")
isBad = true
continue
}
if err = nncp.DirSync(dstPath); err != nil {
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "sync")
+ ctx.LogE("nncp-xfer", sds, err, "sync")
isBad = true
continue
}
os.Remove(filepath.Join(dstPath, pktName+".part"))
delete(sds, "tmp")
- ctx.LogI("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{
- "size": strconv.FormatInt(copied, 10),
- }), "")
+ ctx.LogI("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"size": copied}), "")
if !*keep {
if err = os.Remove(job.Fd.Name()); err != nil {
- ctx.LogE("nncp-xfer", nncp.SdsAdd(sds, nncp.SDS{"err": err}), "remove")
+ ctx.LogE("nncp-xfer", sds, err, "remove")
isBad = true
}
}
LogPath string
UmaskForce *int
Quiet bool
+ ShowPrgrs bool
Debug bool
NotifyFile *FromToJSON
NotifyFreq *FromToJSON
func (ctx *Ctx) ensureRxDir(nodeId *NodeId) error {
dirPath := filepath.Join(ctx.Spool, nodeId.String(), string(TRx))
if err := os.MkdirAll(dirPath, os.FileMode(0777)); err != nil {
- ctx.LogE("dir-ensure", SDS{"dir": dirPath, "err": err}, "")
+ ctx.LogE("dir-ensure", SDS{"dir": dirPath}, err, "")
return err
}
fd, err := os.Open(dirPath)
if err != nil {
- ctx.LogE("dir-ensure", SDS{"dir": dirPath, "err": err}, "")
+ ctx.LogE("dir-ensure", SDS{"dir": dirPath}, err, "")
return err
}
fd.Close()
return nil
}
-func CtxFromCmdline(cfgPath, spoolPath, logPath string, quiet, debug bool) (*Ctx, error) {
+func CtxFromCmdline(
+ cfgPath,
+ spoolPath,
+ logPath string,
+ quiet, showPrgrs, omitPrgrs, debug bool,
+) (*Ctx, error) {
env := os.Getenv(CfgPathEnv)
if env != "" {
cfgPath = env
}
+ if showPrgrs && omitPrgrs {
+ return nil, errors.New("simultaneous -progress and -noprogress")
+ }
cfgRaw, err := ioutil.ReadFile(cfgPath)
if err != nil {
return nil, err
} else {
ctx.LogPath = logPath
}
+ if showPrgrs {
+ ctx.ShowPrgrs = true
+ }
+ if quiet || omitPrgrs {
+ ctx.ShowPrgrs = false
+ }
ctx.Quiet = quiet
ctx.Debug = debug
return ctx, nil
}
msg = fmt.Sprintf(
"Packet %s (%s) (nice %s)",
- sds["hash"],
+ sds["pkt"],
size,
NicenessFmt(nice),
)
}
msg += fmt.Sprintf("%s packets, %s", sds["pkts"], size)
case "sp-process":
- msg = fmt.Sprintf("%s has %s (%s): %s", nodeS, sds["hash"], size, rem)
+ msg = fmt.Sprintf("%s has %s (%s): %s", nodeS, sds["pkt"], size, rem)
case "sp-file":
switch sds["xx"] {
case "rx":
}
msg += fmt.Sprintf(
"%s %d%% (%s / %s)",
- sds["hash"],
+ sds["pkt"],
100*sizeParsed/fullsize,
humanize.IBytes(uint64(sizeParsed)),
humanize.IBytes(uint64(fullsize)),
case "sp-done":
switch sds["xx"] {
case "rx":
- msg = fmt.Sprintf("Packet %s is retreived (%s)", sds["hash"], size)
+ msg = fmt.Sprintf("Packet %s is retreived (%s)", sds["pkt"], size)
case "tx":
- msg = fmt.Sprintf("Packet %s is sent", sds["hash"])
+ msg = fmt.Sprintf("Packet %s is sent", sds["pkt"])
default:
return s
}
"io"
"os"
"path/filepath"
- "strconv"
xdr "github.com/davecgh/go-xdr/xdr2"
)
"xx": string(xx),
"node": pktEnc.Sender,
"name": fi.Name(),
- "nice": strconv.Itoa(int(pktEnc.Nice)),
- "size": strconv.FormatInt(fi.Size(), 10),
+ "nice": int(pktEnc.Nice),
+ "size": fi.Size(),
}, "taken")
job := Job{
PktEnc: &pktEnc,
os.FileMode(0666),
)
if err != nil {
- ctx.LogE("lockdir", SDS{"path": lockPath, "err": err}, "")
+ ctx.LogE("lockdir", SDS{"path": lockPath}, err, "")
return nil, err
}
err = unix.Flock(int(dirLock.Fd()), unix.LOCK_EX|unix.LOCK_NB)
if err != nil {
- ctx.LogE("lockdir", SDS{"path": lockPath, "err": err}, "")
+ ctx.LogE("lockdir", SDS{"path": lockPath}, err, "")
dirLock.Close()
return nil, err
}
result := make([]string, 0, 1+len(keys))
result = append(result, "["+who)
for _, k := range keys {
- result = append(result, fmt.Sprintf(`%s="%s"`, k, sds[k]))
+ var value string
+ switch v := sds[k].(type) {
+ case int, int8, uint8, int64, uint64:
+ value = fmt.Sprintf("%d", v)
+ default:
+ value = fmt.Sprintf("%s", v)
+ }
+ result = append(result, fmt.Sprintf(`%s="%s"`, k, value))
}
return strings.Join(result, " ") + "]"
}
ctx.Log(msg)
}
-func (ctx *Ctx) LogP(who string, sds SDS, msg string) {
- if !ctx.Quiet {
- fmt.Fprintln(os.Stderr, ctx.Humanize(msgFmt(LogLevel("P"), who, sds, msg)))
- }
-}
-
-func (ctx *Ctx) LogE(who string, sds SDS, msg string) {
+func (ctx *Ctx) LogE(who string, sds SDS, err error, msg string) {
+ sds["err"] = err.Error()
msg = msgFmt(LogLevel("E"), who, sds, msg)
if len(msg) > 2048 {
msg = msg[:2048]
--- /dev/null
+/*
+NNCP -- Node to Node copy, utilities for store-and-forward data exchange
+Copyright (C) 2016-2019 Sergey Matveev <stargrave@stargrave.org>
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, version 3 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package nncp
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "go.cypherpunks.ru/nncp/v5/uilive"
+)
+
+// init directs all uilive progress output to os.Stderr,
+// presumably so that stdout stays clean for command output — the
+// surrounding commands write their results there.
+func init() {
+	uilive.Out = os.Stderr
+}
+
+var progressBars = make(map[string]*ProgressBar)
+var progressBarsLock sync.RWMutex
+
+// ProgressBar couples a live-updating terminal writer with the
+// transfer state needed to render progress for a single packet.
+type ProgressBar struct {
+	w       *uilive.Writer // live terminal line writer (started in ProgressBarNew)
+	hash    string         // never assigned in this file — presumably the packet hash; TODO confirm
+	started time.Time      // creation time, basis for the speed estimate in Render
+	initial int64          // bytes already transferred when the bar was created
+	full    int64          // expected total size in bytes (0 means unknown)
+}
+
+// ProgressBarNew creates a ProgressBar for a transfer that already has
+// initial bytes of an expected full total, and starts its background
+// uilive writer. The caller must eventually call Kill to stop it.
+func ProgressBarNew(initial, full int64) *ProgressBar {
+	pb := ProgressBar{
+		w:       uilive.New(),
+		started: time.Now(),
+		initial: initial,
+		full:    full,
+	}
+	pb.w.Start()
+	return &pb
+}
+
+// Render writes one progress line to the live writer: UTC timestamp,
+// label, transferred/total sizes, percentage, and the average speed
+// since the bar was created.
+func (pb ProgressBar) Render(what string, size int64) {
+	now := time.Now().UTC()
+	timeDiff := now.Sub(pb.started).Seconds()
+	if timeDiff == 0 {
+		// Guard against division by zero on an immediate first render.
+		timeDiff = 1
+	}
+	percentage := int64(100)
+	if pb.full > 0 {
+		percentage = 100 * size / pb.full
+	}
+	fmt.Fprintf(
+		pb.w, "%s %s %s/%s %d%% (%s/sec)\n",
+		now.Format(time.RFC3339), what,
+		humanize.IBytes(uint64(size)),
+		humanize.IBytes(uint64(pb.full)),
+		percentage,
+		// Speed counts only bytes moved by this session (size-initial).
+		humanize.IBytes(uint64(float64(size-pb.initial)/timeDiff)),
+	)
+}
+
+// Kill stops the underlying uilive writer, flushing its final output.
+func (pb ProgressBar) Kill() {
+	pb.w.Stop()
+}
+
+// CopyProgressed copies src to dst in EncBlkSize chunks with io.Copy
+// semantics (io.EOF is not reported as an error; a short write yields
+// io.ErrShortWrite). After every successfully written chunk it stores
+// the running byte total in sds["size"] and, when showPrgrs is set,
+// renders it via Progress — so sds must also carry the "pkt" and
+// "fullsize" keys that Progress reads.
+func CopyProgressed(
+	dst io.Writer,
+	src io.Reader,
+	sds SDS,
+	showPrgrs bool,
+) (written int64, err error) {
+	buf := make([]byte, EncBlkSize)
+	var nr, nw int
+	var er, ew error
+	for {
+		nr, er = src.Read(buf)
+		if nr > 0 {
+			nw, ew = dst.Write(buf[:nr])
+			if nw > 0 {
+				written += int64(nw)
+				if showPrgrs {
+					sds["size"] = written
+					Progress(sds)
+				}
+			}
+			if ew != nil {
+				err = ew
+				break
+			}
+			if nr != nw {
+				err = io.ErrShortWrite
+				break
+			}
+		}
+		if er != nil {
+			// io.EOF is the normal termination condition, not an error.
+			if er != io.EOF {
+				err = er
+			}
+			break
+		}
+	}
+	return
+}
+
+// Progress renders the progress bar associated with sds["pkt"],
+// creating (and registering) it on first use. sds must hold "pkt"
+// (string) and "fullsize" (int64); "size" (int64) defaults to 0 when
+// absent, and an optional "xx" tag is prepended to the label. Once
+// size reaches fullsize the bar is stopped and deregistered.
+func Progress(sds SDS) {
+	pkt := sds["pkt"].(string)
+	var size int64
+	if sizeI, exists := sds["size"]; exists {
+		size = sizeI.(int64)
+	}
+	fullsize := sds["fullsize"].(int64)
+	progressBarsLock.RLock()
+	pb, exists := progressBars[pkt]
+	progressBarsLock.RUnlock()
+	if !exists {
+		progressBarsLock.Lock()
+		// Re-check under the write lock: another goroutine may have
+		// registered the bar between RUnlock and Lock. Without this
+		// re-check, concurrent callers each start a uilive writer and
+		// every one except the last leaks its background goroutine.
+		if pb, exists = progressBars[pkt]; !exists {
+			pb = ProgressBarNew(size, fullsize)
+			progressBars[pkt] = pb
+		}
+		progressBarsLock.Unlock()
+	}
+	what := pkt
+	if len(what) >= 52 { // Base32 encoded hash: shorten for display
+		what = what[:16] + ".." + what[len(what)-16:]
+	}
+	if xx, exists := sds["xx"]; exists {
+		what = strings.Title(xx.(string)) + " " + what
+	}
+	pb.Render(what, size)
+	if size >= fullsize {
+		pb.Kill()
+		progressBarsLock.Lock()
+		delete(progressBars, pkt)
+		progressBarsLock.Unlock()
+	}
+}
"os"
"path/filepath"
"sort"
- "strconv"
"sync"
"time"
ctx.LogD("sp-info-our", SDS{
"node": nodeId,
"name": ToBase32(info.Hash[:]),
- "size": strconv.FormatInt(int64(info.Size), 10),
+ "size": info.Size,
}, "")
}
if totalSize > 0 {
ctx.LogI("sp-infos", SDS{
"xx": string(TTx),
"node": nodeId,
- "pkts": strconv.Itoa(len(payloads)),
- "size": strconv.FormatInt(totalSize, 10),
+ "pkts": len(payloads),
+ "size": totalSize,
}, "")
}
return payloadsSplit(payloads)
state.dirUnlock()
return err
}
- sds := SDS{"node": nodeId, "nice": strconv.Itoa(int(state.Nice))}
+ sds := SDS{"node": nodeId, "nice": int(state.Nice)}
state.Ctx.LogD("sp-start", sds, "sending first message")
conn.SetWriteDeadline(time.Now().Add(DefaultDeadline * time.Second))
if err = state.WriteSP(conn, buf); err != nil {
- state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-start", sds, err, "")
state.dirUnlock()
return err
}
state.Ctx.LogD("sp-start", sds, "waiting for first message")
conn.SetReadDeadline(time.Now().Add(DefaultDeadline * time.Second))
if buf, err = state.ReadSP(conn); err != nil {
- state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-start", sds, err, "")
state.dirUnlock()
return err
}
payload, state.csOur, state.csTheir, err = state.hs.ReadMessage(nil, buf)
if err != nil {
- state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-start", sds, err, "")
state.dirUnlock()
return err
}
state.Ctx.LogD("sp-start", sds, "starting workers")
err = state.StartWorkers(conn, infosPayloads, payload)
if err != nil {
- state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-start", sds, err, "")
state.dirUnlock()
return err
}
state.xxOnly = xxOnly
var buf []byte
var payload []byte
- state.Ctx.LogD(
- "sp-start",
- SDS{"nice": strconv.Itoa(int(state.Nice))},
- "waiting for first message",
- )
+ state.Ctx.LogD("sp-start", SDS{"nice": int(state.Nice)}, "waiting for first message")
conn.SetReadDeadline(time.Now().Add(DefaultDeadline * time.Second))
if buf, err = state.ReadSP(conn); err != nil {
- state.Ctx.LogE("sp-start", SDS{"err": err}, "")
+ state.Ctx.LogE("sp-start", SDS{}, err, "")
return err
}
if payload, _, _, err = state.hs.ReadMessage(nil, buf); err != nil {
- state.Ctx.LogE("sp-start", SDS{"err": err}, "")
+ state.Ctx.LogE("sp-start", SDS{}, err, "")
return err
}
}
if node == nil {
peerId := ToBase32(state.hs.PeerStatic())
- state.Ctx.LogE("sp-start", SDS{"peer": peerId}, "unknown")
+ state.Ctx.LogE("sp-start", SDS{"peer": peerId}, errors.New("unknown"), "")
return errors.New("Unknown peer: " + peerId)
}
state.Node = node
state.txRate = node.TxRate
state.onlineDeadline = node.OnlineDeadline
state.maxOnlineTime = node.MaxOnlineTime
- sds := SDS{"node": node.Id, "nice": strconv.Itoa(int(state.Nice))}
+ sds := SDS{"node": node.Id, "nice": int(state.Nice)}
if state.Ctx.ensureRxDir(node.Id); err != nil {
return err
}
conn.SetWriteDeadline(time.Now().Add(DefaultDeadline * time.Second))
if err = state.WriteSP(conn, buf); err != nil {
- state.Ctx.LogE("sp-start", SdsAdd(sds, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-start", sds, err, "")
state.dirUnlock()
return err
}
conn ConnDeadlined,
infosPayloads [][]byte,
payload []byte) error {
- sds := SDS{"node": state.Node.Id, "nice": strconv.Itoa(int(state.Nice))}
+ sds := SDS{"node": state.Node.Id, "nice": int(state.Nice)}
if len(infosPayloads) > 1 {
go func() {
for _, payload := range infosPayloads[1:] {
state.Ctx.LogD(
"sp-work",
- SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+ SdsAdd(sds, SDS{"size": len(payload)}),
"queuing remaining payload",
)
state.payloads <- payload
}
state.Ctx.LogD(
"sp-work",
- SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+ SdsAdd(sds, SDS{"size": len(payload)}),
"processing first payload",
)
replies, err := state.ProcessSP(payload)
if err != nil {
- state.Ctx.LogE("sp-work", SdsAdd(sds, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-work", sds, err, "")
return err
}
for _, reply := range replies {
state.Ctx.LogD(
"sp-work",
- SdsAdd(sds, SDS{"size": strconv.Itoa(len(reply))}),
+ SdsAdd(sds, SDS{"size": len(reply)}),
"queuing reply",
)
state.payloads <- reply
) {
state.Ctx.LogD(
"sp-work",
- SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+ SdsAdd(sds, SDS{"size": len(payload)}),
"queuing new info",
)
state.payloads <- payload
case payload = <-state.payloads:
state.Ctx.LogD(
"sp-xmit",
- SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+ SdsAdd(sds, SDS{"size": len(payload)}),
"got payload",
)
default:
sdsp := SdsAdd(sds, SDS{
"xx": string(TTx),
- "hash": ToBase32(freq.Hash[:]),
- "size": strconv.FormatInt(int64(freq.Offset), 10),
+ "pkt": ToBase32(freq.Hash[:]),
+ "size": int64(freq.Offset),
})
state.Ctx.LogD("sp-file", sdsp, "queueing")
fd, err := os.Open(filepath.Join(
ToBase32(freq.Hash[:]),
))
if err != nil {
- state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-file", sdsp, err, "")
break
}
fi, err := fd.Stat()
if err != nil {
- state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-file", sdsp, err, "")
break
}
- fullSize := uint64(fi.Size())
+ fullSize := fi.Size()
var buf []byte
- if freq.Offset < fullSize {
+ if freq.Offset < uint64(fullSize) {
state.Ctx.LogD("sp-file", sdsp, "seeking")
if _, err = fd.Seek(int64(freq.Offset), io.SeekStart); err != nil {
- state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-file", sdsp, err, "")
break
}
buf = make([]byte, MaxSPSize-SPHeadOverhead-SPFileOverhead)
n, err := fd.Read(buf)
if err != nil {
- state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-file", sdsp, err, "")
break
}
buf = buf[:n]
state.Ctx.LogD(
"sp-file",
- SdsAdd(sdsp, SDS{"size": strconv.Itoa(n)}),
+ SdsAdd(sdsp, SDS{"size": n}),
"read",
)
}
Payload: buf,
})
ourSize := freq.Offset + uint64(len(buf))
- sdsp["size"] = strconv.FormatInt(int64(ourSize), 10)
- sdsp["fullsize"] = strconv.FormatInt(int64(fullSize), 10)
- state.Ctx.LogP("sp-file", sdsp, "")
+ sdsp["size"] = int64(ourSize)
+ sdsp["fullsize"] = fullSize
+ if state.Ctx.ShowPrgrs {
+ Progress(sdsp)
+ }
state.Lock()
if len(state.queueTheir) > 0 && *state.queueTheir[0].freq.Hash == *freq.Hash {
- if ourSize == fullSize {
+ if ourSize == uint64(fullSize) {
state.Ctx.LogD("sp-file", sdsp, "finished")
if len(state.queueTheir) > 1 {
state.queueTheir = state.queueTheir[1:]
}
state.Ctx.LogD(
"sp-xmit",
- SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+ SdsAdd(sds, SDS{"size": len(payload)}),
"sending",
)
conn.SetWriteDeadline(time.Now().Add(DefaultDeadline * time.Second))
if err := state.WriteSP(conn, state.csOur.Encrypt(nil, nil, payload)); err != nil {
- state.Ctx.LogE("sp-xmit", SdsAdd(sds, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-xmit", sds, err, "")
break
}
}
if unmarshalErr.ErrorCode == xdr.ErrIO {
break
}
- state.Ctx.LogE("sp-recv", SdsAdd(sds, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-recv", sds, err, "")
break
}
state.Ctx.LogD(
"sp-recv",
- SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+ SdsAdd(sds, SDS{"size": len(payload)}),
"got payload",
)
payload, err = state.csTheir.Decrypt(nil, nil, payload)
if err != nil {
- state.Ctx.LogE("sp-recv", SdsAdd(sds, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-recv", sds, err, "")
break
}
state.Ctx.LogD(
"sp-recv",
- SdsAdd(sds, SDS{"size": strconv.Itoa(len(payload))}),
+ SdsAdd(sds, SDS{"size": len(payload)}),
"processing",
)
replies, err := state.ProcessSP(payload)
if err != nil {
- state.Ctx.LogE("sp-recv", SdsAdd(sds, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-recv", sds, err, "")
break
}
go func() {
for _, reply := range replies {
state.Ctx.LogD(
"sp-recv",
- SdsAdd(sds, SDS{"size": strconv.Itoa(len(reply))}),
+ SdsAdd(sds, SDS{"size": len(reply)}),
"queuing reply",
)
state.payloads <- reply
}
func (state *SPState) ProcessSP(payload []byte) ([][]byte, error) {
- sds := SDS{"node": state.Node.Id, "nice": strconv.Itoa(int(state.Nice))}
+ sds := SDS{"node": state.Node.Id, "nice": int(state.Nice)}
r := bytes.NewReader(payload)
var err error
var replies [][]byte
state.Ctx.LogD("sp-process", sds, "unmarshaling header")
var head SPHead
if _, err = xdr.Unmarshal(r, &head); err != nil {
- state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-process", sds, err, "")
return nil, err
}
switch head.Type {
state.Ctx.LogD("sp-process", sdsp, "unmarshaling packet")
var info SPInfo
if _, err = xdr.Unmarshal(r, &info); err != nil {
- state.Ctx.LogE("sp-process", SdsAdd(sdsp, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-process", sdsp, err, "")
return nil, err
}
sdsp = SdsAdd(sds, SDS{
- "hash": ToBase32(info.Hash[:]),
- "size": strconv.FormatInt(int64(info.Size), 10),
- "nice": strconv.Itoa(int(info.Nice)),
+ "pkt": ToBase32(info.Hash[:]),
+ "size": int64(info.Size),
+ "nice": int(info.Nice),
})
if !state.listOnly && info.Nice > state.Nice {
state.Ctx.LogD("sp-process", sdsp, "too nice")
}
state.Ctx.LogI(
"sp-info",
- SdsAdd(sdsp, SDS{"offset": strconv.FormatInt(offset, 10)}),
+ SdsAdd(sdsp, SDS{"offset": offset}),
"",
)
if !state.listOnly && (state.onlyPkts == nil || state.onlyPkts[*info.Hash]) {
state.Ctx.LogD("sp-process", sdsp, "unmarshaling packet")
var file SPFile
if _, err = xdr.Unmarshal(r, &file); err != nil {
- state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{
- "err": err,
- "type": "file",
- }), "")
+ state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{"type": "file"}), err, "")
return nil, err
}
sdsp["xx"] = string(TRx)
- sdsp["hash"] = ToBase32(file.Hash[:])
- sdsp["size"] = strconv.Itoa(len(file.Payload))
+ sdsp["pkt"] = ToBase32(file.Hash[:])
+ sdsp["size"] = len(file.Payload)
dirToSync := filepath.Join(
state.Ctx.Spool,
state.Node.Id.String(),
os.FileMode(0666),
)
if err != nil {
- state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-file", sdsp, err, "")
return nil, err
}
state.Ctx.LogD(
"sp-file",
- SdsAdd(sdsp, SDS{"offset": strconv.FormatInt(int64(file.Offset), 10)}),
+ SdsAdd(sdsp, SDS{"offset": file.Offset}),
"seeking",
)
if _, err = fd.Seek(int64(file.Offset), io.SeekStart); err != nil {
- state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-file", sdsp, err, "")
fd.Close()
return nil, err
}
state.Ctx.LogD("sp-file", sdsp, "writing")
_, err = fd.Write(file.Payload)
if err != nil {
- state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-file", sdsp, err, "")
fd.Close()
return nil, err
}
- ourSize := uint64(file.Offset) + uint64(len(file.Payload))
+ ourSize := file.Offset + uint64(len(file.Payload))
state.RLock()
- sdsp["fullsize"] = strconv.FormatInt(int64(state.infosTheir[*file.Hash].Size), 10)
- sdsp["size"] = strconv.FormatInt(int64(ourSize), 10)
- state.Ctx.LogP("sp-file", sdsp, "")
+ sdsp["size"] = int64(ourSize)
+ sdsp["fullsize"] = int64(state.infosTheir[*file.Hash].Size)
+ if state.Ctx.ShowPrgrs {
+ Progress(sdsp)
+ }
if state.infosTheir[*file.Hash].Size != ourSize {
state.RUnlock()
fd.Close()
spWorkersGroup.Add(1)
go func() {
if err := fd.Sync(); err != nil {
- state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "sync")
+ state.Ctx.LogE("sp-file", sdsp, err, "sync")
fd.Close()
return
}
defer state.wg.Done()
fd.Seek(0, io.SeekStart)
state.Ctx.LogD("sp-file", sdsp, "checking")
- gut, err := Check(fd, file.Hash[:])
+ gut, err := Check(fd, file.Hash[:], sdsp, state.Ctx.ShowPrgrs)
fd.Close()
if err != nil || !gut {
- state.Ctx.LogE("sp-file", sdsp, "checksum mismatch")
+ state.Ctx.LogE("sp-file", sdsp, errors.New("checksum mismatch"), "")
return
}
state.Ctx.LogI("sp-done", SdsAdd(sdsp, SDS{"xx": string(TRx)}), "")
if err = os.Rename(filePath+PartSuffix, filePath); err != nil {
- state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "rename")
+ state.Ctx.LogE("sp-file", sdsp, err, "rename")
return
}
if err = DirSync(dirToSync); err != nil {
- state.Ctx.LogE("sp-file", SdsAdd(sdsp, SDS{"err": err}), "sync")
+ state.Ctx.LogE("sp-file", sdsp, err, "sync")
return
}
state.Lock()
state.Ctx.LogD("sp-process", sdsp, "unmarshaling packet")
var done SPDone
if _, err = xdr.Unmarshal(r, &done); err != nil {
- state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{
- "type": "done",
- "err": err,
- }), "")
+ state.Ctx.LogE("sp-process", SdsAdd(sds, SDS{"type": "done"}), err, "")
return nil, err
}
- sdsp["hash"] = ToBase32(done.Hash[:])
+ sdsp["pkt"] = ToBase32(done.Hash[:])
state.Ctx.LogD("sp-done", sdsp, "removing")
err := os.Remove(filepath.Join(
state.Ctx.Spool,
if err == nil {
state.Ctx.LogI("sp-done", sdsp, "")
} else {
- state.Ctx.LogE("sp-done", sdsp, "")
+ state.Ctx.LogE("sp-done", sdsp, err, "")
}
case SPTypeFreq:
sdsp := SdsAdd(sds, SDS{"type": "freq"})
state.Ctx.LogD("sp-process", sdsp, "unmarshaling packet")
var freq SPFreq
if _, err = xdr.Unmarshal(r, &freq); err != nil {
- state.Ctx.LogE("sp-process", SdsAdd(sdsp, SDS{"err": err}), "")
+ state.Ctx.LogE("sp-process", sdsp, err, "")
return nil, err
}
- sdsp["hash"] = ToBase32(freq.Hash[:])
- sdsp["offset"] = strconv.FormatInt(int64(freq.Offset), 10)
+ sdsp["pkt"] = ToBase32(freq.Hash[:])
+ sdsp["offset"] = freq.Offset
state.Ctx.LogD("sp-process", sdsp, "queueing")
nice, exists := state.infosOurSeen[*freq.Hash]
if exists {
state.Ctx.LogE(
"sp-process",
SdsAdd(sds, SDS{"type": head.Type}),
- "unknown",
+ errors.New("unknown type"),
+ "",
)
return nil, BadPktType
}
state.Ctx.LogI("sp-infos", SDS{
"xx": string(TRx),
"node": state.Node.Id,
- "pkts": strconv.Itoa(pkts),
- "size": strconv.FormatInt(int64(size), 10),
+ "pkts": pkts,
+ "size": int64(size),
}, "")
}
return payloadsSplit(replies), nil
"bufio"
"bytes"
"encoding/base64"
+ "errors"
"fmt"
"io"
"io/ioutil"
pktName := filepath.Base(job.Fd.Name())
sds := SDS{"node": job.PktEnc.Sender, "pkt": pktName}
if job.PktEnc.Nice > nice {
- ctx.LogD("rx", SdsAdd(sds, SDS{
- "nice": strconv.Itoa(int(job.PktEnc.Nice)),
- }), "too nice")
+ ctx.LogD("rx", SdsAdd(sds, SDS{"nice": int(job.PktEnc.Nice)}), "too nice")
continue
}
pipeR, pipeW := io.Pipe()
pipeW.Close()
job.Fd.Close()
if err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "decryption")
+ ctx.LogE("rx", sds, err, "decryption")
}
}(job)
var pkt Pkt
var pktSize int64
var pktSizeBlocks int64
if _, err = xdr.Unmarshal(pipeR, &pkt); err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "unmarshal")
+ ctx.LogE("rx", sds, err, "unmarshal")
isBad = true
goto Closing
}
pktSize -= poly1305.TagSize
}
pktSize -= pktSizeBlocks * poly1305.TagSize
- sds["size"] = strconv.FormatInt(pktSize, 10)
+ sds["size"] = pktSize
ctx.LogD("rx", sds, "taken")
switch pkt.Type {
case PktTypeExec:
sender := ctx.Neigh[*job.PktEnc.Sender]
cmdline, exists := sender.Exec[handle]
if !exists || len(cmdline) == 0 {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": "No handle found"}), "")
+ ctx.LogE("rx", sds, errors.New("No handle found"), "")
isBad = true
goto Closing
}
cmd.Stdin = decompressor
output, err := cmd.Output()
if err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "handle")
+ ctx.LogE("rx", sds, err, "handle")
isBad = true
goto Closing
}
}
}
if err = os.Remove(job.Fd.Name()); err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "remove")
+ ctx.LogE("rx", sds, err, "remove")
isBad = true
}
}
dst := string(pkt.Path[:int(pkt.PathLen)])
sds := SdsAdd(sds, SDS{"type": "file", "dst": dst})
if filepath.IsAbs(dst) {
- ctx.LogE("rx", sds, "non-relative destination path")
+ ctx.LogE("rx", sds, errors.New("non-relative destination path"), "")
isBad = true
goto Closing
}
incoming := ctx.Neigh[*job.PktEnc.Sender].Incoming
if incoming == nil {
- ctx.LogE("rx", sds, "incoming is not allowed")
+ ctx.LogE("rx", sds, errors.New("incoming is not allowed"), "")
isBad = true
goto Closing
}
dir := filepath.Join(*incoming, path.Dir(dst))
if err = os.MkdirAll(dir, os.FileMode(0777)); err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "mkdir")
+ ctx.LogE("rx", sds, err, "mkdir")
isBad = true
goto Closing
}
if !dryRun {
tmp, err := TempFile(dir, "file")
if err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "mktemp")
+ ctx.LogE("rx", sds, err, "mktemp")
isBad = true
goto Closing
}
sds["tmp"] = tmp.Name()
ctx.LogD("rx", sds, "created")
bufW := bufio.NewWriter(tmp)
- if _, err = io.Copy(bufW, pipeR); err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "copy")
+ if _, err = CopyProgressed(
+ bufW,
+ pipeR,
+ SdsAdd(sds, SDS{"fullsize": sds["size"]}),
+ ctx.ShowPrgrs,
+ ); err != nil {
+ ctx.LogE("rx", sds, err, "copy")
isBad = true
goto Closing
}
if err = bufW.Flush(); err != nil {
tmp.Close()
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "copy")
+ ctx.LogE("rx", sds, err, "copy")
isBad = true
goto Closing
}
if err = tmp.Sync(); err != nil {
tmp.Close()
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "copy")
+ ctx.LogE("rx", sds, err, "copy")
isBad = true
goto Closing
}
if os.IsNotExist(err) {
break
}
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "stat")
+ ctx.LogE("rx", sds, err, "stat")
isBad = true
goto Closing
}
dstPathCtr++
}
if err = os.Rename(tmp.Name(), dstPath); err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "rename")
+ ctx.LogE("rx", sds, err, "rename")
isBad = true
}
if err = DirSync(*incoming); err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "sync")
+ ctx.LogE("rx", sds, err, "sync")
isBad = true
}
delete(sds, "tmp")
}
}
if err = os.Remove(job.Fd.Name()); err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "remove")
+ ctx.LogE("rx", sds, err, "remove")
isBad = true
}
if len(sendmail) > 0 && ctx.NotifyFile != nil {
}
src := string(pkt.Path[:int(pkt.PathLen)])
if filepath.IsAbs(src) {
- ctx.LogE("rx", sds, "non-relative source path")
+ ctx.LogE("rx", sds, errors.New("non-relative source path"), "")
isBad = true
goto Closing
}
sds := SdsAdd(sds, SDS{"type": "freq", "src": src})
dstRaw, err := ioutil.ReadAll(pipeR)
if err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "read")
+ ctx.LogE("rx", sds, err, "read")
isBad = true
goto Closing
}
sender := ctx.Neigh[*job.PktEnc.Sender]
freqPath := sender.FreqPath
if freqPath == nil {
- ctx.LogE("rx", sds, "freqing is not allowed")
+ ctx.LogE("rx", sds, errors.New("freqing is not allowed"), "")
isBad = true
goto Closing
}
sender.FreqMaxSize,
)
if err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "tx file")
+ ctx.LogE("rx", sds, err, "tx file")
isBad = true
goto Closing
}
}
}
if err = os.Remove(job.Fd.Name()); err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "remove")
+ ctx.LogE("rx", sds, err, "remove")
isBad = true
}
if len(sendmail) > 0 && ctx.NotifyFreq != nil {
node, known := ctx.Neigh[nodeId]
sds := SdsAdd(sds, SDS{"type": "trns", "dst": nodeId})
if !known {
- ctx.LogE("rx", sds, "unknown node")
+ ctx.LogE("rx", sds, errors.New("unknown node"), "")
isBad = true
goto Closing
}
ctx.LogD("rx", sds, "taken")
if !dryRun {
if err = ctx.TxTrns(node, job.PktEnc.Nice, pktSize, pipeR); err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "tx trns")
+ ctx.LogE("rx", sds, err, "tx trns")
isBad = true
goto Closing
}
}
}
if err = os.Remove(job.Fd.Name()); err != nil {
- ctx.LogE("rx", SdsAdd(sds, SDS{"err": err}), "remove")
+ ctx.LogE("rx", sds, err, "remove")
isBad = true
}
}
default:
- ctx.LogE("rx", sds, "unknown type")
+ ctx.LogE("rx", sds, errors.New("unknown type"), "")
isBad = true
}
Closing:
nice uint8,
size, minSize int64,
src io.Reader,
+ pktName string,
) (*Node, error) {
hops := make([]*Node, 0, 1+len(node.Via))
hops = append(hops, node)
go func(size int64, src io.Reader, dst io.WriteCloser) {
ctx.LogD("tx", SDS{
"node": hops[0].Id,
- "nice": strconv.Itoa(int(nice)),
- "size": strconv.FormatInt(size, 10),
+ "nice": int(nice),
+ "size": size,
}, "wrote")
errs <- PktEncWrite(ctx.Self, hops[0], pkt, nice, size, padSize, src, dst)
dst.Close()
go func(node *Node, pkt *Pkt, size int64, src io.Reader, dst io.WriteCloser) {
ctx.LogD("tx", SDS{
"node": node.Id,
- "nice": strconv.Itoa(int(nice)),
- "size": strconv.FormatInt(size, 10),
+ "nice": int(nice),
+ "size": size,
}, "trns wrote")
errs <- PktEncWrite(ctx.Self, node, pkt, nice, size, 0, src, dst)
dst.Close()
curSize = PktEncOverhead + PktSizeOverhead + sizeWithTags(PktOverhead+curSize)
}
go func() {
- _, err := io.Copy(tmp.W, pipeR)
+ _, err := CopyProgressed(
+ tmp.W, pipeR,
+ SDS{"xx": string(TTx), "pkt": pktName, "fullsize": curSize},
+ ctx.ShowPrgrs,
+ )
errs <- err
}()
for i := 0; i <= len(hops); i++ {
if err != nil {
return err
}
- _, err = ctx.Tx(node, pkt, nice, fileSize, minSize, reader)
+ _, err = ctx.Tx(node, pkt, nice, fileSize, minSize, reader, dstPath)
sds := SDS{
"type": "file",
"node": node.Id,
- "nice": strconv.Itoa(int(nice)),
+ "nice": int(nice),
"src": srcPath,
"dst": dstPath,
- "size": strconv.FormatInt(fileSize, 10),
+ "size": fileSize,
}
if err == nil {
ctx.LogI("tx", sds, "sent")
} else {
- ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent")
+ ctx.LogE("tx", sds, err, "sent")
}
return err
}
sizeToSend,
minSize,
io.TeeReader(reader, hsh),
+ path,
)
sds := SDS{
"type": "file",
"node": node.Id,
- "nice": strconv.Itoa(int(nice)),
+ "nice": int(nice),
"src": srcPath,
"dst": path,
- "size": strconv.FormatInt(sizeToSend, 10),
+ "size": sizeToSend,
}
if err == nil {
ctx.LogI("tx", sds, "sent")
} else {
- ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent")
+ ctx.LogE("tx", sds, err, "sent")
return err
}
hsh.Sum(metaPkt.Checksums[chunkNum][:0])
return err
}
metaPktSize := int64(metaBuf.Len())
- _, err = ctx.Tx(node, pkt, nice, metaPktSize, minSize, &metaBuf)
+ _, err = ctx.Tx(node, pkt, nice, metaPktSize, minSize, &metaBuf, path)
sds := SDS{
"type": "file",
"node": node.Id,
- "nice": strconv.Itoa(int(nice)),
+ "nice": int(nice),
"src": srcPath,
"dst": path,
- "size": strconv.FormatInt(metaPktSize, 10),
+ "size": metaPktSize,
}
if err == nil {
ctx.LogI("tx", sds, "sent")
} else {
- ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent")
+ ctx.LogE("tx", sds, err, "sent")
}
return err
}
}
src := strings.NewReader(dstPath)
size := int64(src.Len())
- _, err = ctx.Tx(node, pkt, nice, size, minSize, src)
+ _, err = ctx.Tx(node, pkt, nice, size, minSize, src, srcPath)
sds := SDS{
"type": "freq",
"node": node.Id,
- "nice": strconv.Itoa(int(nice)),
- "replynice": strconv.Itoa(int(replyNice)),
+ "nice": int(nice),
+ "replynice": int(replyNice),
"src": srcPath,
"dst": dstPath,
}
if err == nil {
ctx.LogI("tx", sds, "sent")
} else {
- ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent")
+ ctx.LogE("tx", sds, err, "sent")
}
return err
}
return err
}
size := int64(compressed.Len())
- _, err = ctx.Tx(node, pkt, nice, size, minSize, &compressed)
+ _, err = ctx.Tx(node, pkt, nice, size, minSize, &compressed, handle)
sds := SDS{
"type": "exec",
"node": node.Id,
- "nice": strconv.Itoa(int(nice)),
- "replynice": strconv.Itoa(int(replyNice)),
+ "nice": int(nice),
+ "replynice": int(replyNice),
"dst": strings.Join(append([]string{handle}, args...), " "),
- "size": strconv.FormatInt(size, 10),
+ "size": size,
}
if err == nil {
ctx.LogI("tx", sds, "sent")
} else {
- ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), "sent")
+ ctx.LogE("tx", sds, err, "sent")
}
return err
}
sds := SDS{
"type": "trns",
"node": node.Id,
- "nice": strconv.Itoa(int(nice)),
- "size": strconv.FormatInt(size, 10),
+ "nice": int(nice),
+ "size": size,
}
ctx.LogD("tx", sds, "taken")
if !ctx.IsEnoughSpace(size) {
err := errors.New("is not enough space")
- ctx.LogE("tx", SdsAdd(sds, SDS{"err": err}), err.Error())
+ ctx.LogE("tx", sds, err, err.Error())
return err
}
tmp, err := ctx.NewTmpFileWHash()
if err != nil {
return err
}
- if _, err = io.Copy(tmp.W, src); err != nil {
+ if _, err = CopyProgressed(tmp.W, src, SDS{
+ "xx": string(TTx),
+ "pkt": node.Id.String(),
+ "fullsize": size,
+ }, ctx.ShowPrgrs); err != nil {
return err
}
nodePath := filepath.Join(ctx.Spool, node.Id.String())
int64(src.Len()),
int64(padSize),
src,
+ "pktName",
)
if err != nil {
return false
--- /dev/null
+MIT License
+===========
+
+Copyright (c) 2015, Greg Osuri
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--- /dev/null
+# uilive [](https://godoc.org/github.com/gosuri/uilive) [](https://travis-ci.org/gosuri/uilive)
+
+uilive is a Go library for updating terminal output in real time. It provides a buffered [io.Writer](https://golang.org/pkg/io/#Writer) that is flushed at a timed interval. uilive powers [uiprogress](https://github.com/gosuri/uiprogress).
+
+## Usage Example
+
+Calling `uilive.New()` will create a new writer. To start rendering, simply call `writer.Start()` and update the ui by writing to the `writer`. Full source for the below example is in [example/main.go](example/main.go).
+
+```go
+writer := uilive.New()
+// start listening for updates and render
+writer.Start()
+
+for i := 0; i <= 100; i++ {
+ fmt.Fprintf(writer, "Downloading.. (%d/%d) GB\n", i, 100)
+ time.Sleep(time.Millisecond * 5)
+}
+
+fmt.Fprintln(writer, "Finished: Downloaded 100GB")
+writer.Stop() // flush and stop rendering
+```
+
+The above will render
+
+
+
+## Installation
+
+```sh
+$ go get -v github.com/gosuri/uilive
+```
--- /dev/null
+// Package uilive provides a writer that live updates the terminal. It provides a buffered io.Writer that is flushed at a timed interval.
+package uilive
--- /dev/null
+// +build !windows
+
+package uilive
+
+import (
+ "os"
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+// windowSize mirrors the kernel's winsize layout filled in by the
+// TIOCGWINSZ ioctl: rows and columns of the controlling terminal.
+type windowSize struct {
+	rows uint16
+	cols uint16
+}
+
+// getTermSize returns the terminal's (columns, rows), or (0, 0) if the
+// controlling terminal cannot be opened. OpenBSD requires the tty to be
+// opened read-write for TIOCGWINSZ; other systems accept write-only.
+func getTermSize() (int, int) {
+	mode := os.O_WRONLY
+	if runtime.GOOS == "openbsd" {
+		mode = os.O_RDWR
+	}
+	out, err := os.OpenFile("/dev/tty", mode, 0)
+	if err != nil {
+		return 0, 0
+	}
+	var sz windowSize
+	_, _, _ = syscall.Syscall(syscall.SYS_IOCTL,
+		out.Fd(), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&sz)))
+	// Close the tty: the original leaked this descriptor on every call.
+	out.Close()
+	return int(sz.cols), int(sz.rows)
+}
--- /dev/null
+// This is a fork of github.com/gosuri/uilive for NNCP project
+// * It does not buffer all the writes, but resets the buffer
+// just only for single latest line. Some terminals have
+// huge CPU usage if so much data (as copied files progress)
+// is printed
+// * By default it uses stderr
+// * By default it uses 10ms refresh period
+// * defer-s are removed for less CPU usage
+// * Removed newline/bypass related code. No Windows support
+
+package uilive
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+ "time"
+)
+
+// ESC is the ASCII code for the escape character.
+const ESC = 27
+
+// RefreshInterval is the default refresh interval to update the ui.
+var RefreshInterval = 10 * time.Millisecond
+
+// overFlowHandled reports whether the terminal width was detected
+// (set once in New).
+var overFlowHandled bool
+
+// termWidth is the detected terminal width in columns (0 if unknown).
+var termWidth int
+
+// Out is the default output writer for the Writer. Per this fork's
+// stated behavior (see the header comment), the default is stderr,
+// keeping stdout free for payload data.
+var Out = os.Stderr
+
+// FdWriter is a writer with a file descriptor.
+type FdWriter interface {
+	io.Writer
+	Fd() uintptr
+}
+
+// Writer is a buffered writer that updates the terminal. The contents
+// of the writer will be flushed on a timed interval or when Flush is
+// called.
+type Writer struct {
+	// Out is the writer to write to
+	Out io.Writer
+
+	// RefreshInterval is the time the UI should refresh
+	RefreshInterval time.Duration
+
+	// ticker drives the periodic flushes; tdone tells Listen to stop.
+	ticker *time.Ticker
+	tdone chan struct{}
+
+	// buf holds only the most recent Write (this fork resets it on
+	// every Write call); mtx guards buf and ticker.
+	buf bytes.Buffer
+	mtx *sync.Mutex
+}
+
+// New returns a new Writer configured with the package defaults
+// (Out, RefreshInterval). It also probes the terminal width once,
+// recording in overFlowHandled whether the probe succeeded.
+func New() *Writer {
+	termWidth, _ = getTermSize()
+	overFlowHandled = termWidth != 0
+	w := &Writer{
+		Out:             Out,
+		RefreshInterval: RefreshInterval,
+		mtx:             &sync.Mutex{},
+	}
+	return w
+}
+
+// clear is the ANSI sequence that moves the cursor up one line
+// (ESC[1A) and then erases that whole line (ESC[2K).
+var clear = fmt.Sprintf("%c[1A%c[2K", ESC, ESC)
+
+// clearLines erases the previously rendered line so the next write
+// replaces it in place.
+func (w *Writer) clearLines() {
+	io.WriteString(w.Out, clear)
+}
+
+// Flush erases the previously rendered line and writes the buffered
+// contents to Out. It is a no-op when the buffer is empty (so the
+// screen is not cleared before anything was ever written). An error
+// is returned if the contents of the buffer cannot be written to the
+// underlying output stream.
+//
+// NOTE(review): the upstream per-byte scan into a local currentLine
+// buffer was removed — its result was never read (write-only local),
+// so it only burned CPU on every flush, against this fork's goal.
+func (w *Writer) Flush() (err error) {
+	w.mtx.Lock()
+	// do nothing if buffer is empty
+	if w.buf.Len() == 0 {
+		w.mtx.Unlock()
+		return
+	}
+	w.clearLines()
+	_, err = w.Out.Write(w.buf.Bytes())
+	w.mtx.Unlock()
+	return
+}
+
+// Start starts the flushing listener in a non-blocking manner. The
+// initial newline gives clearLines a line to erase on the first
+// Flush, so existing terminal output is not clobbered.
+func (w *Writer) Start() {
+	w.ticker = time.NewTicker(w.RefreshInterval)
+	w.tdone = make(chan struct{})
+	w.Out.Write([]byte("\n"))
+	go w.Listen()
+}
+
+// Stop renders any pending output one last time and then terminates
+// the background Listen goroutine by closing tdone. The Flush error
+// is deliberately ignored: there is nothing useful to do with it at
+// shutdown.
+func (w *Writer) Stop() {
+	w.Flush()
+	close(w.tdone)
+}
+
+// Listen flushes the writer's buffer to Out on every ticker tick
+// until Stop closes tdone. It blocks and is meant to run in its own
+// goroutine (see Start).
+func (w *Writer) Listen() {
+	for {
+		select {
+		case <-w.ticker.C:
+			// No nil check needed here: if w.ticker were nil the
+			// receive above would already have panicked.
+			w.Flush()
+		case <-w.tdone:
+			w.mtx.Lock()
+			w.ticker.Stop()
+			w.mtx.Unlock()
+			return
+		}
+	}
+}
+
+// Write saves the contents of buf to the writer's internal buffer,
+// discarding anything previously buffered: this fork keeps only the
+// single most recent write (one progress line) to avoid the high
+// terminal CPU usage of rendering every intermediate update. The only
+// errors returned are ones encountered while writing to the
+// underlying buffer.
+func (w *Writer) Write(buf []byte) (n int, err error) {
+	w.mtx.Lock()
+	w.buf.Reset()
+	n, err = w.buf.Write(buf)
+	w.mtx.Unlock()
+	return
+}