const (
Ed25519BLAKE2b = ed25519blake2b.Ed25519BLAKE2b
Ed25519PhBLAKE2b = ed25519blake2b.Ed25519PhBLAKE2b
+ Ed25519PhBLAKE2bMerkle = ed25519blake2b.Ed25519PhBLAKE2bMerkle
GOST3410256A = gost.GOST3410256A
+ GOST3410256AMerkle = gost.GOST3410256AMerkle
GOST3410512C = gost.GOST3410512C
+ GOST3410512CMerkle = gost.GOST3410512CMerkle
SNTRUP4591761X25519 = sntrup4591761x25519.SNTRUP4591761X25519
SNTRUP4591761X25519HKDFBLAKE2b = sntrup4591761x25519.SNTRUP4591761X25519HKDFBLAKE2b
BalloonBLAKE2bHKDF = "balloon-blake2b-hkdf"
var valid bool
switch pub.A {
case Ed25519BLAKE2b:
- if algo != Ed25519PhBLAKE2b {
+ switch algo {
+ case Ed25519PhBLAKE2b:
+ case Ed25519PhBLAKE2bMerkle:
+ default:
return ErrBadSigAlgo
}
valid, err = ed25519blake2b.VerifyPrehash(pub.V, prehash, signature)
err = ErrSigInvalid
}
case GOST3410256A, GOST3410512C:
- if algo != pub.A {
+ switch algo {
+ case GOST3410256A:
+ case GOST3410256AMerkle:
+ case GOST3410512C:
+ case GOST3410512CMerkle:
+ default:
return ErrBadSigAlgo
}
valid, err = gost.VerifyPrehash(pub.A, pub.V, prehash, signature)
--- /dev/null
+package main
+
+import (
+ "crypto/sha512"
+ "encoding/hex"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "runtime"
+
+ "go.cypherpunks.su/gogost/v6/gost34112012256"
+ "go.cypherpunks.su/gogost/v6/gost34112012512"
+ ed25519blake2b "go.cypherpunks.su/keks/pki/ed25519-blake2b"
+ "go.cypherpunks.su/keks/pki/gost"
+ pkihash "go.cypherpunks.su/keks/pki/hash"
+ "go.cypherpunks.su/keks/pki/hash/merkle"
+)
+
+func main() {
+ workers := flag.Int("p", runtime.NumCPU(), "Parallel workers")
+ chunkLenK := flag.Int("c", merkle.DefaultChunkLen/1024, "Chunk size, KiB")
+ algo := flag.String("a", pkihash.BLAKE2bMerkle, "Algorithm to use")
+ list := flag.Bool("list", false, "List available algorithms")
+ mmap := flag.String("mmap", "", "Use that mmap-ed file instead of stdin")
+ flag.Parse()
+ if *list {
+ fmt.Println(pkihash.BLAKE2bMerkle)
+ fmt.Println(pkihash.SHA2512 + "-merkle")
+ fmt.Println(pkihash.SHAKE128Merkle)
+ fmt.Println(pkihash.SHAKE256Merkle)
+ fmt.Println(pkihash.Streebog256Merkle)
+ fmt.Println(pkihash.Streebog512Merkle)
+ return
+ }
+ chunkLen := 1024 * *chunkLenK
+ var hasher *merkle.Hasher
+ switch *algo {
+ case pkihash.BLAKE2bMerkle:
+ hasher = ed25519blake2b.NewMerkleHasher(chunkLen, *workers).(*merkle.Hasher)
+ case pkihash.SHA2512 + "-merkle":
+ hasher = merkle.NewHasherPrefixed(sha512.New, chunkLen, *workers)
+ case pkihash.SHAKE128Merkle:
+ hasher = pkihash.NewSHAKE128MerkleHasher(chunkLen, *workers).(*merkle.Hasher)
+ case pkihash.SHAKE256Merkle:
+ hasher = pkihash.NewSHAKE256MerkleHasher(chunkLen, *workers).(*merkle.Hasher)
+ case pkihash.Streebog256Merkle:
+ hasher = gost.NewMerkleHasher(gost34112012256.New, chunkLen, *workers).(*merkle.Hasher)
+ case pkihash.Streebog512Merkle:
+ hasher = gost.NewMerkleHasher(gost34112012512.New, chunkLen, *workers).(*merkle.Hasher)
+ default:
+ log.Fatal("unknown -a")
+ }
+ var err error
+ if *mmap == "" {
+ _, err = hasher.DoReadFrom(os.Stdin)
+ } else {
+ err = hasher.Mmap(*mmap)
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(hex.EncodeToString(hasher.Sum(nil)))
+}
"Set/check encrypted-binding, UUID")
detached := flag.Bool("detached", false, "Detached data mode")
noWhen := flag.Bool("no-when", false, `Do not include "when"`)
+ doMerkle := flag.Bool("merkle", false, "Use Merkle-tree based hasher")
flag.Parse()
log.SetFlags(log.Lshortfile)
if err != nil {
log.Fatal(err)
}
- if err = signer.SetMode(sign.ModePrehash); err != nil {
+ if *doMerkle {
+ err = signer.SetMode(sign.ModeMerkle)
+ } else {
+ err = signer.SetMode(sign.ModePrehash)
+ }
+ if err != nil {
log.Fatal(err)
}
package ed25519blake2b
const (
- Ed25519BLAKE2b = "ed25519-blake2b"
- Ed25519PhBLAKE2b = "ed25519ph-blake2b"
+ Ed25519BLAKE2b = "ed25519-blake2b"
+ Ed25519PhBLAKE2b = "ed25519ph-blake2b"
+ Ed25519PhBLAKE2bMerkle = "ed25519ph-blake2b-merkle"
)
--- /dev/null
+// GoKEKS/PKI -- PKI-related capabilities based on KEKS encoded formats
+// Copyright (C) 2024-2025 Sergey Matveev <stargrave@stargrave.org>
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, version 3 of the License.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package ed25519blake2b
+
+import (
+ "hash"
+
+ "go.cypherpunks.su/keks/pki/hash/merkle"
+ "golang.org/x/crypto/blake2b"
+)
+
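+// NewMerkleHasher creates a Merkle-tree hasher that uses BLAKE2b-512 keyed
+// with "LEAF"/"NODE" for leaf/node domain separation.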
+func NewMerkleHasher(chunkLen, workers int) hash.Hash {
+ return merkle.NewHasher(
+ func() hash.Hash {
+ h, e := blake2b.New512([]byte(merkle.Leaf))
+ if e != nil {
+ panic(e)
+ }
+ return h
+ },
+ func() hash.Hash {
+ h, e := blake2b.New512([]byte(merkle.Node))
+ if e != nil {
+ panic(e)
+ }
+ return h
+ },
+ func(h hash.Hash) hash.Hash {
+ h.Reset()
+ return h
+ },
+ func(h hash.Hash) hash.Hash {
+ h.Reset()
+ return h
+ },
+ chunkLen,
+ workers,
+ )
+}
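
A minimal usage sketch of the hasher added above (illustrative only; the
input data is arbitrary):

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"runtime"

	ed25519blake2b "go.cypherpunks.su/keks/pki/ed25519-blake2b"
	"go.cypherpunks.su/keks/pki/hash/merkle"
)

func main() {
	// BLAKE2b-512 Merkle hasher with the default 128 KiB chunk size.
	h := ed25519blake2b.NewMerkleHasher(
		merkle.DefaultChunkLen, runtime.NumCPU()).(*merkle.Hasher)
	// Feed the data at most once; io.Writer or Mmap() can be used instead.
	if _, err := h.DoReadFrom(bytes.NewReader([]byte("example data"))); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}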
"errors"
"hash"
"io"
+ "runtime"
"go.cypherpunks.su/keks/pki/ed25519-blake2b/ed25519"
+ "go.cypherpunks.su/keks/pki/hash/merkle"
"golang.org/x/crypto/blake2b"
"go.cypherpunks.su/keks/pki/sign"
}
s.prehasher = &h
return nil
+ case sign.ModeMerkle:
+ s.mode = m
+ h := NewMerkleHasher(merkle.DefaultChunkLen, runtime.NumCPU())
+ s.prehasher = &h
+ return nil
default:
return errors.New("unsupported mode")
}
return Ed25519BLAKE2b
case sign.ModePrehash:
return Ed25519PhBLAKE2b
+ case sign.ModeMerkle:
+ return Ed25519PhBLAKE2bMerkle
}
return ""
}
switch s.mode {
case sign.ModePure:
return s.Prv.Sign(rand, msg, opts)
- case sign.ModePrehash:
+ case sign.ModePrehash, sign.ModeMerkle:
return s.Prv.Sign(rand, msg, &ed25519.Options{Hash: crypto.BLAKE2b_512})
default:
panic("unsupported mode")
--- /dev/null
+// GoKEKS/PKI -- PKI-related capabilities based on KEKS encoded formats
+// Copyright (C) 2024-2025 Sergey Matveev <stargrave@stargrave.org>
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, version 3 of the License.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package gost
+
+import (
+ "hash"
+
+ "go.cypherpunks.su/keks/pki/hash/merkle"
+)
+
+const (
+ GOST3410256AMerkle = "gost3410-256A-merkle"
+ GOST3410512CMerkle = "gost3410-512C-merkle"
+)
+
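+// NewMerkleHasher creates a Merkle-tree hasher over the given hash
+// constructor, prefixing leaf/node inputs with 0x00/0x01 as in RFC 9162.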
+func NewMerkleHasher(h func() hash.Hash, chunkLen, workers int) hash.Hash {
+ return merkle.NewHasherPrefixed(h, chunkLen, workers)
+}
"errors"
"hash"
"io"
+ "runtime"
"go.cypherpunks.su/gogost/v6/gost3410"
"go.cypherpunks.su/gogost/v6/gost34112012256"
"go.cypherpunks.su/gogost/v6/gost34112012512"
+ "go.cypherpunks.su/keks/pki/hash/merkle"
"go.cypherpunks.su/keks/pki/sign"
)
type Signer struct {
- mode sign.Mode
- Prv *gost3410.PrivateKey
NewHasher func() hash.Hash
+ Prv *gost3410.PrivateKey
prehasher *hash.Hash
+ mode sign.Mode
}
func (s *Signer) SetMode(m sign.Mode) error {
p := s.NewHasher()
s.prehasher = &p
return nil
+ case sign.ModeMerkle:
+ s.mode = m
+ p := NewMerkleHasher(s.NewHasher, merkle.DefaultChunkLen, runtime.NumCPU())
+ s.prehasher = &p
+ return nil
default:
return errors.New("unsupported mode")
}
func (s *Signer) Algo() string {
switch s.Prv.C.PointSize() {
case 32:
+ if s.mode == sign.ModeMerkle {
+ return GOST3410256AMerkle
+ }
return GOST3410256A
case 64:
+ if s.mode == sign.ModeMerkle {
+ return GOST3410512CMerkle
+ }
return GOST3410512C
+ default:
+ return ""
}
- return ""
}
func (s *Signer) Public() crypto.PublicKey {
h := s.NewHasher()
h.Write(msg)
hsh = h.Sum(nil)
- case sign.ModePrehash:
+ case sign.ModePrehash, sign.ModeMerkle:
hsh = msg
default:
panic("unsupported mode")
import (
"hash"
+ "runtime"
"go.cypherpunks.su/gogost/v6/gost34112012256"
"go.cypherpunks.su/gogost/v6/gost34112012512"
ed25519blake2b "go.cypherpunks.su/keks/pki/ed25519-blake2b"
"go.cypherpunks.su/keks/pki/gost"
+ "go.cypherpunks.su/keks/pki/hash/merkle"
)
const (
- Streebog256 = "streebog256"
- Streebog512 = "streebog512"
BLAKE2b = "blake2b"
BLAKE2b256 = "blake2b256"
+ SHA2512 = "sha2-512"
+ SHAKE128 = "shake128"
+ SHAKE256 = "shake256"
+ Streebog256 = "streebog256"
+ Streebog512 = "streebog512"
+
+ BLAKE2bMerkle = "blake2b-merkle"
+ SHAKE128Merkle = "shake128-merkle"
+ SHAKE256Merkle = "shake256-merkle"
+ Streebog256Merkle = "streebog256-merkle"
+ Streebog512Merkle = "streebog512-merkle"
)
func ByName(name string) hash.Hash {
switch name {
case Streebog256, gost.GOST3410256A:
return gost34112012256.New()
+ case Streebog256Merkle, gost.GOST3410256AMerkle:
+ return gost.NewMerkleHasher(gost34112012256.New,
+ merkle.DefaultChunkLen, runtime.NumCPU())
case Streebog512, gost.GOST3410512C:
return gost34112012512.New()
+ case Streebog512Merkle, gost.GOST3410512CMerkle:
+ return gost.NewMerkleHasher(gost34112012512.New,
+ merkle.DefaultChunkLen, runtime.NumCPU())
case BLAKE2b, ed25519blake2b.Ed25519BLAKE2b, ed25519blake2b.Ed25519PhBLAKE2b:
h, err := blake2b.New512(nil)
if err != nil {
panic(err)
}
return h
+ case BLAKE2bMerkle, ed25519blake2b.Ed25519PhBLAKE2bMerkle:
+ return ed25519blake2b.NewMerkleHasher(
+ merkle.DefaultChunkLen, runtime.NumCPU())
case BLAKE2b256:
h, err := blake2b.New256(nil)
if err != nil {
panic(err)
}
return h
+ case SHAKE128Merkle:
+ return NewSHAKE128MerkleHasher(
+ merkle.DefaultChunkLen, runtime.NumCPU())
+ case SHAKE256Merkle:
+ return NewSHAKE256MerkleHasher(
+ merkle.DefaultChunkLen, runtime.NumCPU())
}
return nil
}
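
A sketch (illustrative only) of resolving one of the new Merkle identifiers
through ByName and using it via the plain hash.Hash interface:

package main

import (
	"encoding/hex"
	"fmt"

	pkihash "go.cypherpunks.su/keks/pki/hash"
)

func main() {
	// ByName returns nil for unknown algorithm identifiers.
	h := pkihash.ByName(pkihash.Streebog256Merkle)
	if h == nil {
		panic("unknown algorithm")
	}
	h.Write([]byte("example data"))
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}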
+++ /dev/null
-package main
-
-import (
- "crypto/sha512"
- "encoding/hex"
- "flag"
- "fmt"
- "hash"
- "io"
- "log"
- "os"
- "runtime"
-
- "go.cypherpunks.su/keks/pki/hash/merkle"
- "golang.org/x/crypto/blake2b"
-)
-
-func main() {
- workers := flag.Int("p", runtime.NumCPU(), "Parallel workers")
- chunkLen := flag.Int("c", 8, "Chunk size, KiB")
- algo := flag.String("a", "blake2b", "TODO")
- flag.Parse()
- var hasher hash.Hash
- switch *algo {
- case "sha512":
- hasher = merkle.NewHasherPrefixed(sha512.New, *chunkLen*1024, *workers)
- case "blake2b":
- hasher = merkle.NewHasher(
- func() hash.Hash {
- h, e := blake2b.New512([]byte("LEAF"))
- if e != nil {
- panic(e)
- }
- return h
- },
- func() hash.Hash {
- h, e := blake2b.New512([]byte("NODE"))
- if e != nil {
- panic(e)
- }
- return h
- },
- func(h hash.Hash) { h.Reset() },
- func(h hash.Hash) { h.Reset() },
- *chunkLen*1024,
- *workers,
- )
- default:
- log.Fatal("unknown -a")
- }
- if _, err := io.CopyBuffer(hasher, os.Stdin, make([]byte, 128*1024)); err != nil {
- log.Fatal(err)
- }
- fmt.Println(hex.EncodeToString(hasher.Sum(nil)))
-}
package merkle
import (
+ "errors"
"hash"
"io"
- "sync"
+ "os"
+
+ "golang.org/x/sys/unix"
)
-const MaxDepth = 64
+const (
+ maxDepth = 64
+ Leaf = "LEAF"
+ Node = "NODE"
+ DefaultChunkLen = 128 * 1024
+)
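+// job carries a single chunk buffer and its resulting leaf hash between the
+// data feeder (DoReadFrom/Mmap), a worker and the aggregator; bufReady and
+// hshReady signal when each of them is filled.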
type job struct {
- reply chan []byte
- chunk []byte
+ bufReady chan struct{}
+ hshReady chan struct{}
+ buf []byte
+ hsh []byte
}
+// Hasher is a Merkle-tree based parallelised hasher. Feed the data either
+// through the io.Writer interface, by calling the DoReadFrom() method
+// (at most once), or by calling the Mmap() method.
type Hasher struct {
- nodeHash hash.Hash
- leafNew func() hash.Hash
- leafReset func(hash.Hash)
- nodeReset func(hash.Hash)
- pr *io.PipeReader
- pw *io.PipeWriter
- hashes [2 * MaxDepth][]byte
- frees [2 * MaxDepth]bool
- count int
- workersLen int
- chunkLen int
-
- freeChunks chan []byte
- freeHshes chan []byte
- freeReplies chan chan []byte
- jobs chan job
- replies chan chan []byte
- finished chan struct{}
- workers sync.WaitGroup
+ ready chan *job
+ dones chan *job
+ finished chan struct{}
+ leafNew func() hash.Hash
+ leafReset func(hash.Hash) hash.Hash
+ nodeReset func(hash.Hash) hash.Hash
+ nodeHash hash.Hash
+ pr *io.PipeReader
+ pw *io.PipeWriter
+ hashes [2 * maxDepth][]byte
+ frees [2 * maxDepth]bool
+ count int
+ workers int
+ chunkLen int
+ wasCalled bool
}
func (h *Hasher) Size() int {
return h.nodeHash.BlockSize()
}
+// NewHasher creates a new Merkle-tree based hasher. leafNew/nodeNew are
+// functions that create the leaf/node hashers respectively.
+// leafReset/nodeReset reset them, avoiding possibly expensive hash
+// creation calls.
func NewHasher(
leafNew, nodeNew func() hash.Hash,
- leafReset, nodeReset func(hash.Hash),
+ leafReset, nodeReset func(hash.Hash) hash.Hash,
chunkLen, workers int,
) *Hasher {
h := Hasher{
- leafNew: leafNew,
- nodeHash: nodeNew(),
- leafReset: leafReset,
- nodeReset: nodeReset,
- freeChunks: make(chan []byte, workers),
- freeHshes: make(chan []byte, workers),
- freeReplies: make(chan chan []byte, workers),
- workersLen: workers,
- chunkLen: chunkLen,
+ leafNew: leafNew,
+ nodeHash: nodeNew(),
+ leafReset: leafReset,
+ nodeReset: nodeReset,
+ workers: workers,
+ chunkLen: chunkLen,
}
hashSize := h.Size()
- for i := 0; i < 2*MaxDepth; i++ {
+ for i := 0; i < 2*maxDepth; i++ {
h.hashes[i] = make([]byte, hashSize)
h.frees[i] = true
}
- for range workers {
- h.freeChunks <- make([]byte, chunkLen)
- h.freeHshes <- make([]byte, hashSize)
- h.freeReplies <- make(chan []byte)
- }
h.prepare()
return &h
}
func (h *Hasher) prepare() {
- h.jobs = make(chan job, h.workersLen)
- h.replies = make(chan chan []byte, h.workersLen)
h.finished = make(chan struct{})
- for range h.workersLen {
+ h.ready = make(chan *job, h.workers)
+ h.dones = make(chan *job, h.workers)
+ for range h.workers {
go h.worker()
}
- h.workers.Add(h.workersLen)
go h.aggregator()
}
func (h *Hasher) Reset() {
- for i := 0; i < 2*MaxDepth; i++ {
- h.frees[i] = true
- }
h.pw.Close()
<-h.finished
+ for i := 0; i < 2*maxDepth; i++ {
+ h.frees[i] = true
+ }
h.prepare()
}
func (h *Hasher) get(l int) []byte {
- if l >= MaxDepth {
+ if l >= maxDepth {
panic("too deep")
}
i := l * 2
func (h *Hasher) fold() {
var err error
- for l := 0; l < MaxDepth; l++ {
+ for l := 0; l < maxDepth; l++ {
if h.frees[l*2+0] || h.frees[l*2+1] {
continue
}
- h.nodeReset(h.nodeHash)
+ h.nodeHash = h.nodeReset(h.nodeHash)
if _, err = h.nodeHash.Write(h.hashes[l*2+0]); err != nil {
panic(err)
}
func (h *Hasher) makePipe() {
h.pr, h.pw = io.Pipe()
go func() {
- h.ReadFrom(h.pr)
+ h.DoReadFrom(h.pr)
h.pr.Close()
}()
}
return h.pw.Write(p)
}
-func (h *Hasher) ReadFrom(r io.Reader) (total int64, err error) {
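+// DoReadFrom reads r until EOF, handing out chunkLen-sized chunks (the last
+// one possibly shorter) to the worker goroutines. It must be called at most
+// once per Hasher.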
+func (h *Hasher) DoReadFrom(r io.Reader) (total int64, err error) {
+ if h.wasCalled {
+ panic("must be called only once")
+ }
+ h.wasCalled = true
+ defer close(h.dones)
+ var j *job
var n int
var eof bool
- var chunk []byte
- var reply chan []byte
for !eof {
- chunk = <-h.freeChunks
- n, err = io.ReadFull(r, chunk)
+ j = <-h.ready
+ n, err = io.ReadFull(r, j.buf)
total += int64(n)
if err != nil {
if err != io.ErrUnexpectedEOF {
}
err = nil
eof = true
+ j.buf = j.buf[:n]
}
if n == 0 {
continue
}
- reply = <-h.freeReplies
- h.jobs <- job{reply: reply, chunk: chunk[:n]}
- h.replies <- reply
+ j.bufReady <- struct{}{}
+ h.dones <- j
}
- close(h.jobs)
- h.workers.Wait()
- close(h.replies)
return
}
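+// worker owns a single job: it announces it on the ready channel, then
+// repeatedly waits for bufReady, hashes the buffer into hsh and signals
+// hshReady to the aggregator.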
func (h *Hasher) worker() {
+ j := job{
+ buf: make([]byte, h.chunkLen),
+ hsh: make([]byte, h.Size()),
+ bufReady: make(chan struct{}),
+ hshReady: make(chan struct{}),
+ }
+ h.ready <- &j
hasher := h.leafNew()
var err error
- var hsh []byte
- for j := range h.jobs {
- h.leafReset(hasher)
- if _, err = hasher.Write(j.chunk); err != nil {
+ for {
+ <-j.bufReady
+ hasher = h.leafReset(hasher)
+ if _, err = hasher.Write(j.buf); err != nil {
panic(err)
}
- h.freeChunks <- j.chunk
- hsh = <-h.freeHshes
- hasher.Sum(hsh[:0])
- j.reply <- hsh
+ hasher.Sum(j.hsh[:0])
+ j.hshReady <- struct{}{}
}
- h.workers.Done()
}
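+// aggregator receives jobs in submission order, copies their leaf hashes
+// into the tree, folds completed levels and recycles each job back to the
+// ready channel.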
func (h *Hasher) aggregator() {
- var hsh []byte
- for reply := range h.replies {
- hsh = <-reply
- h.freeReplies <- reply
- copy(h.get(0), hsh)
+ for j := range h.dones {
+ <-j.hshReady
+ copy(h.get(0), j.hsh)
+ h.ready <- j
h.count++
h.fold()
- h.freeHshes <- hsh
}
close(h.finished)
}
func (h *Hasher) Sum(b []byte) []byte {
+ if !h.wasCalled {
+ return append(b, h.leafNew().Sum(nil)...)
+ }
if h.pw != nil {
h.pw.Close()
}
<-h.finished
- if h.count == 0 {
- return append(b, h.leafNew().Sum(nil)...)
- }
- for l := 0; l < MaxDepth; l++ {
+ for l := 0; l < maxDepth; l++ {
if h.count == 1 {
- for ; l < MaxDepth; l++ {
+ for ; l < maxDepth; l++ {
if !h.frees[l*2+0] {
return append(b, h.hashes[l*2+0]...)
}
}
panic("did not reach the end")
}
+
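+// Mmap memory-maps the given file and feeds it to the workers chunk by
+// chunk, as an alternative to DoReadFrom. It must be called at most once.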
+func (h *Hasher) Mmap(fn string) error {
+ fd, err := os.Open(fn)
+ if err != nil {
+ return err
+ }
+ defer fd.Close()
+ var size int64
+ {
+ var fi os.FileInfo
+ fi, err = fd.Stat()
+ if err != nil {
+ return err
+ }
+ size = fi.Size()
+ }
+ if size == 0 {
+ return nil
+ }
+ if size < 0 {
+ return errors.New("negative size")
+ }
+ if size != int64(int(size)) {
+ return errors.New("file is too large")
+ }
+ data, err := unix.Mmap(int(fd.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_SHARED)
+ if err != nil {
+ return err
+ }
+
+ h.wasCalled = true
+ var j *job
+ for i := 0; i < len(data); i += h.chunkLen {
+ j = <-h.ready
+ j.buf = data[i:min(i+h.chunkLen, len(data))]
+ if len(j.buf) == 0 {
+ break
+ }
+ j.bufReady <- struct{}{}
+ h.dones <- j
+ }
+ close(h.dones)
+ <-h.finished
+ unix.Munmap(data)
+ return nil
+}
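
A brief sketch (illustrative only) of the two explicit feeding modes of the
reworked Hasher, using the RFC 9162 prefixed construction over SHA2-512; the
input data and the command-line file path are arbitrary:

package main

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"
	"os"
	"runtime"
	"strings"

	"go.cypherpunks.su/keks/pki/hash/merkle"
)

func main() {
	// Stream mode: DoReadFrom() splits the reader into chunks for the workers.
	h := merkle.NewHasherPrefixed(sha512.New, merkle.DefaultChunkLen, runtime.NumCPU())
	if _, err := h.DoReadFrom(strings.NewReader("example data")); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(h.Sum(nil)))

	// Mmap mode: the whole file is memory-mapped and sliced into chunks.
	if len(os.Args) > 1 {
		h = merkle.NewHasherPrefixed(sha512.New, merkle.DefaultChunkLen, runtime.NumCPU())
		if err := h.Mmap(os.Args[1]); err != nil {
			panic(err)
		}
		fmt.Println(hex.EncodeToString(h.Sum(nil)))
	}
}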
import "hash"
+// NewHasherPrefixed creates a Merkle-tree hasher as suggested in RFC 9162,
+// using 0x00 and 0x01 prefixes for leaf/node separation.
func NewHasherPrefixed(h func() hash.Hash, chunkLen, workers int) *Hasher {
return NewHasher(
h,
h,
- func(h hash.Hash) {
+ func(h hash.Hash) hash.Hash {
h.Reset()
if _, err := h.Write([]byte{0x00}); err != nil {
panic(err)
}
+ return h
},
- func(h hash.Hash) {
+ func(h hash.Hash) hash.Hash {
h.Reset()
if _, err := h.Write([]byte{0x01}); err != nil {
panic(err)
}
+ return h
},
chunkLen,
workers,
--- /dev/null
+// GoKEKS/PKI -- PKI-related capabilities based on KEKS encoded formats
+// Copyright (C) 2024-2025 Sergey Matveev <stargrave@stargrave.org>
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, version 3 of the License.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package hash
+
+import (
+ "hash"
+
+ "go.cypherpunks.su/keks/pki/hash/merkle"
+ "golang.org/x/crypto/sha3"
+)
+
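+// NewSHAKE128MerkleHasher creates a Merkle-tree hasher with cSHAKE128
+// personalised with "LEAF"/"NODE"; resetting is done by cloning the
+// pre-personalised states.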
+func NewSHAKE128MerkleHasher(chunkLen, workers int) hash.Hash {
+ leafHash := sha3.NewCShake128(nil, []byte(merkle.Leaf))
+ nodeHash := sha3.NewCShake128(nil, []byte(merkle.Node))
+ return merkle.NewHasher(
+ func() hash.Hash { return leafHash.Clone() },
+ func() hash.Hash { return nodeHash.Clone() },
+ func(hash.Hash) hash.Hash { return leafHash.Clone() },
+ func(hash.Hash) hash.Hash { return nodeHash.Clone() },
+ chunkLen,
+ workers,
+ )
+}
+
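+// NewSHAKE256MerkleHasher is the cSHAKE256 analogue of NewSHAKE128MerkleHasher.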
+func NewSHAKE256MerkleHasher(chunkLen, workers int) hash.Hash {
+ leafHash := sha3.NewCShake256(nil, []byte(merkle.Leaf))
+ nodeHash := sha3.NewCShake256(nil, []byte(merkle.Node))
+ return merkle.NewHasher(
+ func() hash.Hash { return leafHash.Clone() },
+ func() hash.Hash { return nodeHash.Clone() },
+ func(hash.Hash) hash.Hash { return leafHash.Clone() },
+ func(hash.Hash) hash.Hash { return nodeHash.Clone() },
+ chunkLen,
+ workers,
+ )
+}
const (
ModePure Mode = 0
ModePrehash = iota
+ ModeMerkle = iota
PrehashT = "prehash"
)
const SignedMagic = keks.Magic("pki/signed")
type SignedPrehash struct {
- T string `keks:"t"`
Sigs map[string]*struct{} `keks:"sigs"`
+ T string `keks:"t"`
}
type SignedLoad struct {
}
type SigTBS struct {
- CID *uuid.UUID `keks:"cid,omitempty"`
- Exp *[]time.Time `keks:"exp,omitempty"`
- When *time.Time `keks:"when,omitempty"`
- SID uuid.UUID `keks:"sid"`
-
- EncryptedBinding *uuid.UUID `keks:"encrypted-binding,omitempty"`
+ CID *uuid.UUID `keks:"cid,omitempty"`
+ Exp *[]time.Time `keks:"exp,omitempty"`
+ When *time.Time `keks:"when,omitempty"`
+ EncryptedBinding *uuid.UUID `keks:"encrypted-binding,omitempty"`
+ SID uuid.UUID `keks:"sid"`
}
type Sig struct {
@code{/hash} contains the hash values for all corresponding @code{/a}
algorithms.
+@node Merkle hashing
+@cindex Merkle tree
+@cindex Merkle hashing
+@section Merkle-tree based hashing
+
+ Merkle trees are a very convenient way to parallelise data hashing.
+ @url{https://datatracker.ietf.org/doc/html/rfc9162, RFC 9162} is used as
+ a base for all Merkle-tree based hashers. By default, 128 KiB chunks
+ are used.
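+
+ For instance, hashing data split into two chunks gives the root:
+
+@example
+root = Hnode(Hleaf(chunk0) || Hleaf(chunk1))
+@end example
+
+ Here @code{Hleaf} and @code{Hnode} denote the per-algorithm leaf and
+ node hash variants described in the following sections.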
+
@node pki-hashed-blake2b
@subsection pki-hashed with BLAKE2b
@node pki-hashed-blake2b-merkle
@subsection pki-hashed with BLAKE2b in Merkle-tree mode
- BLAKE2b-512 is used in Merkle tree hashing mode, as described in
- @url{https://datatracker.ietf.org/doc/html/rfc9162, RFC 9162},
- except that no @code{0x00}/@code{0x01} constants are appended to
- the hashed data, but BLAKE2b is initialised in keyed mode with
- either "LEAF" or "NODE" keys. Although BLAKE2 has ability to set
- tree-hashing parameters on its own, many implementations do not
- provide necessary API for that.
+ BLAKE2b-512 is initialised in keyed mode with either the "LEAF" or
+ "NODE" key, instead of prepending @code{0x00}/@code{0x01} constants
+ to the data. Although BLAKE2 has the ability to set tree-hashing
+ parameters on its own, many implementations do not provide the
+ necessary API for that.
@code{blake2b-merkle} algorithm identifier is used.
@code{shake128}, @code{shake256} algorithm identifiers are used.
+@node pki-hashed-shake-merkle
+@subsection pki-hashed with SHAKE in Merkle-tree mode
+
+ cSHAKE with either the "LEAF" or "NODE" personalisation string is
+ used instead of prepending @code{0x00}/@code{0x01} constants to the
+ hashed data.
+
+ @code{shake128-merkle}, @code{shake256-merkle} algorithm identifiers
+ are used.
+
@node pki-hashed-skein512
@subsection pki-hashed with Skein-512
@node pki-hashed-gost3411-merkle
@subsection pki-hashed with GOST R 34.11-2012 in Merkle tree mode
- Streebog-512 is used in Merkle tree hashing mode, as described in
- @url{https://datatracker.ietf.org/doc/html/rfc9162, RFC 9162}.
-
@code{streebog256-merkle}, @code{streebog512-merkle} algorithm
identifiers are used.
@code{@ref{cer-ed25519-blake2b}},
@code{@ref{pki-hashed-blake2b}},
@code{@ref{pki-signed-ed25519-blake2b}}
+@item blake2b-merkle
+ @code{@ref{pki-hashed-blake2b-merkle}},
+ @code{@ref{pki-signed-ed25519-blake2b-merkle}}
@item blake3
@code{@ref{pki-hashed-blake3}}
@item sha2-256, sha2-512
@code{@ref{pki-hashed-sha2}}
@item shake128, shake256
@code{@ref{pki-hashed-shake}}
+@item shake128-merkle, shake256-merkle
+ @code{@ref{pki-hashed-shake-merkle}}
@item skein512
@code{@ref{pki-hashed-skein512}}
@item streebog256, streebog512
@code{@ref{pki-hashed-gost3411}}
+@item streebog256-merkle, streebog512-merkle
+ @code{@ref{pki-hashed-gost3411-merkle}}
@item xxh3-128
@code{@ref{pki-hashed-xxh3-128}}
@end table
@code{@ref{private-key-ed25519-blake2b}}
@code{@ref{pki-signed-ed25519-blake2b}},
@code{@ref{cer-ed25519-blake2b}}
+@item ed25519-blake2b-merkle
+ @code{@ref{pki-signed-ed25519-blake2b-merkle}}
@item ed448
@item gost3410-256A, gost3410-512C
@code{@ref{cer-gost3410}},
@code{@ref{private-key-gost3410}},
@code{@ref{pki-signed-gost3410}}
+@item gost3410-256A-merkle, gost3410-512C-merkle
+ @code{@ref{pki-signed-gost3410-merkle}}
@end table
@node AI Content types
@node pki-signed-gost3410
@subsection pki-signed with GOST R 34.10-2012
-GOST R 34.10-2012 must be used with Streebog (GOST R 34.11-2012) hash
-function. Its digest must be big-endian serialised. Signature is in
-@code{BE(R)||BE(S)} format.
+ GOST R 34.10-2012 must be used with Streebog (GOST R 34.11-2012)
+ hash function. Its digest must be big-endian serialised. Signature
+ is in @code{BE(R)||BE(S)} format.
+
+ Algorithm identifiers for the signature: @code{gost3410-256A},
+ @code{gost3410-512C}.
+
+@node pki-signed-gost3410-merkle
+@subsection pki-signed with GOST R 34.10-2012 with Merkle-tree hashing
+
+ @ref{pki-hashed-gost3411-merkle} Merkle-tree hashing is used.
+
+ Algorithm identifiers for the signature: @code{gost3410-256A-merkle},
+ @code{gost3410-512C-merkle}.
-Algorithm identifiers for the signature: @code{gost3410-256A},
-@code{gost3410-512C}.
@node pki-signed-ed25519-blake2b
@subsection pki-signed with Ed25519-BLAKE2b
-@url{https://datatracker.ietf.org/doc/html/rfc8032, EdDSA} with
-Edwards25519 is used similarly as in RFC 8032.
-But BLAKE2b is used instead of SHA2-512 hash.
+ @url{https://datatracker.ietf.org/doc/html/rfc8032, EdDSA} with
+ Edwards25519 is used as in RFC 8032, but with BLAKE2b instead of the
+ SHA2-512 hash.
+
+ Strict @url{https://zips.z.cash/zip-0215, ZIP-0215} validation rules
+ should be used while verifying the signature.
+
+ PureEdDSA @strong{must} be used when no detached data exists and
+ @code{ed25519-blake2b} algorithm identifier is used for signature.
-Strict @url{https://zips.z.cash/zip-0215, ZIP-0215} validation rules
-should be used while verifying the signature.
+ HashEdDSA @strong{must} be used otherwise, with BLAKE2b-512 as the
+ hash and the @code{ed25519ph-blake2b} algorithm identifier for the
+ signature.
-PureEdDSA @strong{must} be used when no detached data exists and
-@code{ed25519-blake2b} algorithm identifier is used for signature.
+@node pki-signed-ed25519-blake2b-merkle
+@subsection pki-signed with Ed25519-BLAKE2b with Merkle-tree hashing
-HashEdDSA @strong{must} be used otherwise, using BLAKE2b-512 as a hash,
-using @code{ed25519ph-blake2b} algorithm identifier for signature.
+ @ref{pki-hashed-blake2b-merkle} Merkle-tree hashing is used.
+ HashEdDSA mode is used with @code{ed25519ph-blake2b-merkle}
+ algorithm identifier for signature.