From 3492aa43de727559b194156789b87a31c3366697 Mon Sep 17 00:00:00 2001
From: Al Cutter
Date: Thu, 28 Oct 2021 10:09:22 -0400
Subject: [PATCH] Initial import

Change-Id: I66c0d70ceaa8fa3f392fce8624df56c7e36696a2
---
 compact/nodes.go            |  63 +++++++++
 compact/nodes_test.go       |  74 ++++++++++
 compact/range.go            | 265 ++++++++++++++++++++++++++++++++++++
 go.mod                      |   5 +
 go.sum                      |   2 +
 hashers/tree_hasher.go      |  28 ++++
 logverifier/hash_chainer.go |  62 +++++++++
 logverifier/log_verifier.go | 197 +++++++++++++++++++++++++++
 rfc6962/rfc6962.go          |  68 +++++++++
 rfc6962/rfc6962_test.go     | 109 +++++++++++++++
 10 files changed, 873 insertions(+)
 create mode 100644 compact/nodes.go
 create mode 100644 compact/nodes_test.go
 create mode 100644 compact/range.go
 create mode 100644 go.mod
 create mode 100644 go.sum
 create mode 100644 hashers/tree_hasher.go
 create mode 100644 logverifier/hash_chainer.go
 create mode 100644 logverifier/log_verifier.go
 create mode 100644 rfc6962/rfc6962.go
 create mode 100644 rfc6962/rfc6962_test.go

diff --git a/compact/nodes.go b/compact/nodes.go
new file mode 100644
index 0000000..77c00ba
--- /dev/null
+++ b/compact/nodes.go
@@ -0,0 +1,63 @@
+// Copyright 2019 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compact
+
+import "math/bits"
+
+// NodeID identifies a node of a Merkle tree.
+//
+// The level is the longest distance from the node down to the leaves, and
+// index is its horizontal position in this level ordered from left to right.
+// Consider an example below where nodes are labeled as [<level> <index>].
+//
+//          [2 0]
+//         /     \
+//     [1 0]      \
+//     /   \       \
+//  [0 0] [0 1]  [0 2]
+type NodeID struct {
+	Level uint
+	Index uint64
+}
+
+// NewNodeID returns a NodeID with the passed in node coordinates.
+func NewNodeID(level uint, index uint64) NodeID {
+	return NodeID{Level: level, Index: index}
+}
+
+// RangeNodes returns node IDs that comprise the [begin, end) compact range.
+func RangeNodes(begin, end uint64) []NodeID {
+	left, right := Decompose(begin, end)
+	ids := make([]NodeID, 0, bits.OnesCount64(left)+bits.OnesCount64(right))
+
+	pos := begin
+	// Iterate over perfect subtrees along the left border of the range, ordered
+	// from lower to upper levels.
+	for bit := uint64(0); left != 0; pos, left = pos+bit, left^bit {
+		level := uint(bits.TrailingZeros64(left))
+		bit = uint64(1) << level
+		ids = append(ids, NewNodeID(level, pos>>level))
+	}
+
+	// Iterate over perfect subtrees along the right border of the range, ordered
+	// from upper to lower levels.
+	for bit := uint64(0); right != 0; pos, right = pos+bit, right^bit {
+		level := uint(bits.Len64(right)) - 1
+		bit = uint64(1) << level
+		ids = append(ids, NewNodeID(level, pos>>level))
+	}
+
+	return ids
+}
diff --git a/compact/nodes_test.go b/compact/nodes_test.go
new file mode 100644
index 0000000..c0a3c69
--- /dev/null
+++ b/compact/nodes_test.go
@@ -0,0 +1,74 @@
+// Copyright 2019 Google LLC. All Rights Reserved.
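As a quick illustration of how RangeNodes and Decompose carve up a leaf range (a standalone sketch for illustration; the compact import path is assumed from the module path that logverifier/log_verifier.go imports further below):

package main

import (
	"fmt"

	"team.git.corp.google.com/security-transparency-team/merkle.git/compact"
)

func main() {
	// [6, 29) splits into perfect subtrees of sizes 2, 8 along the left border
	// and 8, 4, 1 along the right border, so Decompose(6, 29) returns the
	// bitmasks (0b1010, 0b1101).
	left, right := compact.Decompose(6, 29)
	fmt.Printf("left=%b right=%b\n", left, right) // left=1010 right=1101

	// The same decomposition as node IDs, ordered left to right:
	// [{1 3} {3 1} {3 2} {2 6} {0 28}]
	fmt.Println(compact.RangeNodes(6, 29))
}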
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compact + +import ( + "fmt" + "reflect" + "testing" +) + +func TestRangeNodes(t *testing.T) { + n := func(level uint, index uint64) NodeID { + return NewNodeID(level, index) + } + for _, tc := range []struct { + begin uint64 + end uint64 + want []NodeID + }{ + // Empty ranges. + {end: 0, want: []NodeID{}}, + {begin: 10, end: 10, want: []NodeID{}}, + {begin: 1024, end: 1024, want: []NodeID{}}, + // One entry. + {begin: 10, end: 11, want: []NodeID{n(0, 10)}}, + {begin: 1024, end: 1025, want: []NodeID{n(0, 1024)}}, + {begin: 1025, end: 1026, want: []NodeID{n(0, 1025)}}, + // Two entries. + {begin: 10, end: 12, want: []NodeID{n(1, 5)}}, + {begin: 1024, end: 1026, want: []NodeID{n(1, 512)}}, + {begin: 1025, end: 1027, want: []NodeID{n(0, 1025), n(0, 1026)}}, + // Only right border. + {end: 1, want: []NodeID{n(0, 0)}}, + {end: 2, want: []NodeID{n(1, 0)}}, + {end: 3, want: []NodeID{n(1, 0), n(0, 2)}}, + {end: 4, want: []NodeID{n(2, 0)}}, + {end: 5, want: []NodeID{n(2, 0), n(0, 4)}}, + {end: 15, want: []NodeID{n(3, 0), n(2, 2), n(1, 6), n(0, 14)}}, + {end: 100, want: []NodeID{n(6, 0), n(5, 2), n(2, 24)}}, + {end: 513, want: []NodeID{n(9, 0), n(0, 512)}}, + {end: uint64(1) << 63, want: []NodeID{n(63, 0)}}, + {end: (uint64(1) << 63) + (uint64(1) << 57), want: []NodeID{n(63, 0), n(57, 64)}}, + // Only left border. + {begin: 0, end: 16, want: []NodeID{n(4, 0)}}, + {begin: 1, end: 16, want: []NodeID{n(0, 1), n(1, 1), n(2, 1), n(3, 1)}}, + {begin: 2, end: 16, want: []NodeID{n(1, 1), n(2, 1), n(3, 1)}}, + {begin: 3, end: 16, want: []NodeID{n(0, 3), n(2, 1), n(3, 1)}}, + {begin: 4, end: 16, want: []NodeID{n(2, 1), n(3, 1)}}, + {begin: 6, end: 16, want: []NodeID{n(1, 3), n(3, 1)}}, + {begin: 8, end: 16, want: []NodeID{n(3, 1)}}, + {begin: 11, end: 16, want: []NodeID{n(0, 11), n(2, 3)}}, + // Two-sided. + {begin: 1, end: 31, want: []NodeID{n(0, 1), n(1, 1), n(2, 1), n(3, 1), n(3, 2), n(2, 6), n(1, 14), n(0, 30)}}, + {begin: 1, end: 17, want: []NodeID{n(0, 1), n(1, 1), n(2, 1), n(3, 1), n(0, 16)}}, + } { + t.Run(fmt.Sprintf("range:%d:%d", tc.begin, tc.end), func(t *testing.T) { + if got, want := RangeNodes(tc.begin, tc.end), tc.want; !reflect.DeepEqual(got, tc.want) { + t.Fatalf("RangeNodes: got %v, want %v", got, want) + } + }) + } +} diff --git a/compact/range.go b/compact/range.go new file mode 100644 index 0000000..c260303 --- /dev/null +++ b/compact/range.go @@ -0,0 +1,265 @@ +// Copyright 2019 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compact provides compact Merkle tree data structures. +package compact + +import ( + "bytes" + "errors" + "fmt" + "math/bits" +) + +// HashFn computes an internal node's hash using the hashes of its child nodes. +type HashFn func(left, right []byte) []byte + +// VisitFn visits the node with the specified ID and hash. +type VisitFn func(id NodeID, hash []byte) + +// RangeFactory allows creating compact ranges with the specified hash +// function, which must not be nil, and must not be changed. +type RangeFactory struct { + Hash HashFn +} + +// NewRange creates a Range for [begin, end) with the given set of hashes. The +// hashes correspond to the roots of the minimal set of perfect sub-trees +// covering the [begin, end) leaves range, ordered left to right. +func (f *RangeFactory) NewRange(begin, end uint64, hashes [][]byte) (*Range, error) { + if end < begin { + return nil, fmt.Errorf("invalid range: end=%d, want >= %d", end, begin) + } + left, right := Decompose(begin, end) + ones := bits.OnesCount64(left) + bits.OnesCount64(right) + if ln := len(hashes); ln != ones { + return nil, fmt.Errorf("invalid hashes: got %d values, want %d", ln, ones) + } + return &Range{f: f, begin: begin, end: end, hashes: hashes}, nil +} + +// NewEmptyRange returns a new Range for an empty [begin, begin) range. The +// value of begin defines where the range will start growing from when entries +// are appended to it. +func (f *RangeFactory) NewEmptyRange(begin uint64) *Range { + return &Range{f: f, begin: begin, end: begin} +} + +// Range represents a compact Merkle tree range for leaf indices [begin, end). +// +// It contains the minimal set of perfect subtrees whose leaves comprise this +// range. The structure is efficiently mergeable with other compact ranges that +// share one of the endpoints with it. +// +// TODO(pavelkalinnikov): Add document with more details on how it works, and +// what it can be used for. +type Range struct { + f *RangeFactory + begin uint64 + end uint64 + hashes [][]byte +} + +// Begin returns the first index covered by the range (inclusive). +func (r *Range) Begin() uint64 { + return r.begin +} + +// End returns the last index covered by the range (exclusive). +func (r *Range) End() uint64 { + return r.end +} + +// Hashes returns sub-tree hashes corresponding to the minimal set of perfect +// sub-trees covering the [begin, end) range, ordered left to right. +func (r *Range) Hashes() [][]byte { + return r.hashes +} + +// Append extends the compact range by appending the passed in hash to it. It +// uses the tree hasher to calculate hashes of newly created nodes, and reports +// them through the visitor function (if non-nil). +func (r *Range) Append(hash []byte, visitor VisitFn) error { + // TODO(pphaneuf): Consider calling `visitor` for this hash, for consistency. + return r.appendImpl(r.end+1, hash, nil, visitor) +} + +// AppendRange extends the compact range by merging in the other compact range +// from the right. It uses the tree hasher to calculate hashes of newly created +// nodes, and reports them through the visitor function (if non-nil). 
+func (r *Range) AppendRange(other *Range, visitor VisitFn) error { + if other.f != r.f { + return errors.New("incompatible ranges") + } + if got, want := other.begin, r.end; got != want { + return fmt.Errorf("ranges are disjoint: other.begin=%d, want %d", got, want) + } + if len(other.hashes) == 0 { // The other range is empty, merging is trivial. + return nil + } + return r.appendImpl(other.end, other.hashes[0], other.hashes[1:], visitor) +} + +// GetRootHash returns the root hash of the Merkle tree represented by this +// compact range. Requires the range to start at index 0. If the range is +// empty, returns nil. +// +// If visitor is not nil, it is called with all "ephemeral" nodes (i.e. the +// ones rooting imperfect subtrees) along the right border of the tree. +func (r *Range) GetRootHash(visitor VisitFn) ([]byte, error) { + if r.begin != 0 { + return nil, fmt.Errorf("begin=%d, want 0", r.begin) + } + ln := len(r.hashes) + if ln == 0 { + return nil, nil + } + hash := r.hashes[ln-1] + // All non-perfect subtree hashes along the right border of the tree + // correspond to the parents of all perfect subtree nodes except the lowest + // one (therefore the loop skips it). + for i, size := ln-2, r.end; i >= 0; i-- { + hash = r.f.Hash(r.hashes[i], hash) + if visitor != nil { + size &= size - 1 // Delete the previous node. + level := uint(bits.TrailingZeros64(size)) + 1 // Compute the parent level. + index := size >> level // And its horizontal index. + visitor(NewNodeID(level, index), hash) + } + } + return hash, nil +} + +// Equal compares two Ranges for equality. +func (r *Range) Equal(other *Range) bool { + if r.f != other.f || r.begin != other.begin || r.end != other.end { + return false + } + if len(r.hashes) != len(other.hashes) { + return false + } + for i := range r.hashes { + if !bytes.Equal(r.hashes[i], other.hashes[i]) { + return false + } + } + return true +} + +// appendImpl extends the compact range by merging the [r.end, end) compact +// range into it. The other compact range is decomposed into a seed hash and +// all the other hashes (possibly none). The method uses the tree hasher to +// calculate hashes of newly created nodes, and reports them through the +// visitor function (if non-nil). +func (r *Range) appendImpl(end uint64, seed []byte, hashes [][]byte, visitor VisitFn) error { + // Bits [low, high) of r.end encode the merge path, i.e. the sequence of node + // merges that transforms the two compact ranges into one. + low, high := getMergePath(r.begin, r.end, end) + if high < low { + high = low + } + index := r.end >> low + // Now bits [0, high-low) of index encode the merge path. + + // The number of one bits in index is the number of nodes from the left range + // that will be merged, and zero bits correspond to the nodes in the right + // range. Below we make sure that both ranges have enough hashes, which can + // be false only in case the data is corrupted in some way. + ones := bits.OnesCount64(index & (1<<(high-low) - 1)) + if ln := len(r.hashes); ln < ones { + return fmt.Errorf("corrupted lhs range: got %d hashes, want >= %d", ln, ones) + } + if ln, zeros := len(hashes), int(high-low)-ones; ln < zeros { + return fmt.Errorf("corrupted rhs range: got %d hashes, want >= %d", ln+1, zeros+1) + } + + // Some of the trailing nodes of the left compact range, and some of the + // leading nodes of the right range, are sequentially merged with the seed, + // according to the mask. All new nodes are reported through the visitor. 
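+	// For example, when appending entry #6 to the range [0, 6), which stores
+	// hashes for subtrees of sizes 4 and 2, getMergePath(0, 6, 7) returns the
+	// empty path [1, 1): nothing is merged, and the seed is simply appended,
+	// leaving hashes for sizes 4, 2, 1. Appending entry #7 to [0, 7) instead
+	// yields the path [0, 3) with index bits 0b111, so the seed is merged with
+	// all three stored hashes of the left range, producing the root of the
+	// perfect [0, 8) subtree.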
+	idx1, idx2 := len(r.hashes), 0
+	for h := low; h < high; h++ {
+		if index&1 == 0 {
+			seed = r.f.Hash(seed, hashes[idx2])
+			idx2++
+		} else {
+			idx1--
+			seed = r.f.Hash(r.hashes[idx1], seed)
+		}
+		index >>= 1
+		if visitor != nil {
+			visitor(NewNodeID(h+1, index), seed)
+		}
+	}
+
+	// All nodes from both ranges that have not been merged are bundled together
+	// with the "merged" seed node.
+	r.hashes = append(append(r.hashes[:idx1], seed), hashes[idx2:]...)
+	r.end = end
+	return nil
+}
+
+// getMergePath returns the merging path between the compact ranges [begin, mid)
+// and [mid, end). The path is represented as a range of bits within mid, with
+// bit indices [low, high). A bit value of 1 on level i of mid means that the
+// node on this level merges with the corresponding node in the left compact
+// range, whereas 0 represents merging with the right compact range. If the
+// path is empty then high <= low.
+//
+// The output is not specified if begin <= mid <= end doesn't hold, but the
+// function never panics.
+func getMergePath(begin, mid, end uint64) (uint, uint) {
+	low := bits.TrailingZeros64(mid)
+	high := 64
+	if begin != 0 {
+		high = bits.Len64(mid ^ (begin - 1))
+	}
+	if high2 := bits.Len64((mid - 1) ^ end); high2 < high {
+		high = high2
+	}
+	return uint(low), uint(high - 1)
+}
+
+// Decompose splits the [begin, end) range into a minimal number of sub-ranges,
+// each of which is of the form [m * 2^k, (m+1) * 2^k), i.e. of length 2^k, for
+// some integers m, k >= 0.
+//
+// The sequence of sizes is returned encoded as bitmasks left and right, where:
+// - a 1 bit in a bitmask denotes a sub-range of the corresponding size 2^k
+// - left mask bits in LSB-to-MSB order encode the left part of the sequence
+// - right mask bits in MSB-to-LSB order encode the right part
+//
+// The corresponding values of m are not returned (they can be calculated from
+// begin and the sub-range sizes).
+//
+// For example, (begin, end) values of (0b110, 0b11101) would indicate a
+// sequence of tree sizes: 2,8; 8,4,1.
+//
+// The output is not specified if begin > end, but the function never panics.
+func Decompose(begin, end uint64) (uint64, uint64) {
+	// Special case, as the code below works only if begin != 0, or end < 2^63.
+	if begin == 0 {
+		return 0, end
+	}
+	xbegin := begin - 1
+	// Find where paths to leaves #begin-1 and #end diverge, and mask the upper
+	// bits away, as only the nodes strictly below this point are in the range.
+	d := bits.Len64(xbegin^end) - 1
+	mask := uint64(1)<<uint(d) - 1
+	// The left part of the range consists of the perfect subtrees strictly
+	// below and to the right of the path to leaf #begin-1, and the right part
+	// of those below and to the left of the path to leaf #end.
+	return ^xbegin & mask, end & mask
+}
diff --git a/logverifier/hash_chainer.go b/logverifier/hash_chainer.go
new file mode 100644
--- /dev/null
+++ b/logverifier/hash_chainer.go
+package logverifier
+
+import (
+	"team.git.corp.google.com/security-transparency-team/merkle.git/hashers"
+)
+
+// hashChainer provides convenience methods for hashing subranges of Merkle
+// tree proofs to obtain (sub-)tree hashes.
+type hashChainer struct {
+	hasher hashers.LogHasher
+}
+
+// chainInner computes a subtree hash for a node on or below the tree's right
+// border. Assumes |proof| hashes are ordered from lower levels to upper, and
+// |seed| is the initial subtree/leaf hash on the path located at the specified
+// |index| on its level.
+func (c hashChainer) chainInner(seed []byte, proof [][]byte, index int64) []byte {
+	for i, h := range proof {
+		if (index>>uint(i))&1 == 0 {
+			seed = c.hasher.HashChildren(seed, h)
+		} else {
+			seed = c.hasher.HashChildren(h, seed)
+		}
+	}
+	return seed
+}
+
+// chainInnerRight computes a subtree hash like chainInner, but only takes
+// hashes to the left from the path into consideration, which effectively means
+// the result is a hash of the corresponding earlier version of this subtree.
+func (c hashChainer) chainInnerRight(seed []byte, proof [][]byte, index int64) []byte {
+	for i, h := range proof {
+		if (index>>uint(i))&1 == 1 {
+			seed = c.hasher.HashChildren(h, seed)
+		}
+	}
+	return seed
+}
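Stepping back to compact/range.go above, a minimal usage sketch of the Range API (illustrative only; the import paths assume the packages live under the module path imported by logverifier/log_verifier.go, and rfc6962.DefaultHasher supplies both the leaf hashing and the HashFn):

package main

import (
	"fmt"

	"team.git.corp.google.com/security-transparency-team/merkle.git/compact"
	"team.git.corp.google.com/security-transparency-team/merkle.git/rfc6962"
)

func main() {
	hasher := rfc6962.DefaultHasher
	fact := compact.RangeFactory{Hash: hasher.HashChildren}

	// Grow an initially empty [0, 0) range by appending leaf hashes.
	cr := fact.NewEmptyRange(0)
	for i := 0; i < 5; i++ {
		leaf := hasher.HashLeaf([]byte(fmt.Sprintf("leaf %d", i)))
		if err := cr.Append(leaf, nil); err != nil {
			panic(err)
		}
	}

	// [0, 5) is covered by perfect subtrees of sizes 4 and 1, so the range
	// stores two hashes; GetRootHash folds them into the tree root.
	root, err := cr.GetRootHash(nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("leaves=%d hashes=%d root=%x\n", cr.End(), len(cr.Hashes()), root)
}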
+// chainBorderRight chains proof hashes along tree borders. This differs from
+// inner chaining because |proof| contains only left-side subtree hashes.
+func (c hashChainer) chainBorderRight(seed []byte, proof [][]byte) []byte {
+	for _, h := range proof {
+		seed = c.hasher.HashChildren(h, seed)
+	}
+	return seed
+}
diff --git a/logverifier/log_verifier.go b/logverifier/log_verifier.go
new file mode 100644
index 0000000..47f503c
--- /dev/null
+++ b/logverifier/log_verifier.go
@@ -0,0 +1,197 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logverifier
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math/bits"
+
+	"team.git.corp.google.com/security-transparency-team/merkle.git/hashers"
+)
+
+// RootMismatchError occurs when an inclusion proof fails.
+type RootMismatchError struct {
+	ExpectedRoot   []byte
+	CalculatedRoot []byte
+}
+
+func (e RootMismatchError) Error() string {
+	return fmt.Sprintf("calculated root:\n%v\n does not match expected root:\n%v", e.CalculatedRoot, e.ExpectedRoot)
+}
+
+// LogVerifier verifies inclusion and consistency proofs for append-only logs.
+type LogVerifier struct {
+	hasher hashers.LogHasher
+}
+
+// New returns a new LogVerifier for a tree.
+func New(hasher hashers.LogHasher) LogVerifier {
+	return LogVerifier{hasher}
+}
+
+// VerifyInclusionProof verifies the correctness of the proof given the passed
+// in information about the tree and leaf.
+func (v LogVerifier) VerifyInclusionProof(leafIndex, treeSize int64, proof [][]byte, root []byte, leafHash []byte) error {
+	calcRoot, err := v.RootFromInclusionProof(leafIndex, treeSize, proof, leafHash)
+	if err != nil {
+		return err
+	}
+	if !bytes.Equal(calcRoot, root) {
+		return RootMismatchError{
+			CalculatedRoot: calcRoot,
+			ExpectedRoot:   root,
+		}
+	}
+	return nil
+}
+
+// RootFromInclusionProof calculates the expected tree root given the proof and leaf.
+// leafIndex starts at 0. treeSize is the number of leaves in the tree.
+// proof is an array of neighbor nodes from the bottom to the root.
+func (v LogVerifier) RootFromInclusionProof(leafIndex, treeSize int64, proof [][]byte, leafHash []byte) ([]byte, error) {
+	switch {
+	case leafIndex < 0:
+		return nil, fmt.Errorf("leafIndex %d < 0", leafIndex)
+	case treeSize < 0:
+		return nil, fmt.Errorf("treeSize %d < 0", treeSize)
+	case leafIndex >= treeSize:
+		return nil, fmt.Errorf("leafIndex is beyond treeSize: %d >= %d", leafIndex, treeSize)
+	}
+	if got, want := len(leafHash), v.hasher.Size(); got != want {
+		return nil, fmt.Errorf("leafHash has unexpected size %d, want %d", got, want)
+	}
+
+	inner, border := decompInclProof(leafIndex, treeSize)
+	if got, want := len(proof), inner+border; got != want {
+		return nil, fmt.Errorf("wrong proof size %d, want %d", got, want)
+	}
+
+	ch := hashChainer(v)
+	res := ch.chainInner(leafHash, proof[:inner], leafIndex)
+	res = ch.chainBorderRight(res, proof[inner:])
+	return res, nil
+}
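For instance, verifying an inclusion proof in a two-leaf tree (a minimal sketch; it assumes rfc6962.Hasher satisfies hashers.LogHasher, which is its intended use, and the same module import paths as above):

package main

import (
	"fmt"

	"team.git.corp.google.com/security-transparency-team/merkle.git/logverifier"
	"team.git.corp.google.com/security-transparency-team/merkle.git/rfc6962"
)

func main() {
	h := rfc6962.DefaultHasher
	v := logverifier.New(h)

	// A two-leaf tree built by hand: the root is HashChildren of the two
	// RFC 6962 leaf hashes.
	leafA, leafB := h.HashLeaf([]byte("a")), h.HashLeaf([]byte("b"))
	root := h.HashChildren(leafA, leafB)

	// The inclusion proof for leaf 0 in a tree of size 2 is just the sibling
	// leaf hash.
	proof := [][]byte{leafB}
	if err := v.VerifyInclusionProof(0, 2, proof, root, leafA); err != nil {
		panic(err)
	}
	fmt.Println("inclusion proof verified")
}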
+// VerifyConsistencyProof checks that the passed in consistency proof is valid
+// between the passed in tree snapshots. Snapshots are the respective tree
+// sizes. Accepts snapshot2 >= snapshot1 >= 0.
+func (v LogVerifier) VerifyConsistencyProof(snapshot1, snapshot2 int64, root1, root2 []byte, proof [][]byte) error {
+	switch {
+	case snapshot1 < 0:
+		return fmt.Errorf("snapshot1 (%d) < 0 ", snapshot1)
+	case snapshot2 < snapshot1:
+		return fmt.Errorf("snapshot2 (%d) < snapshot1 (%d)", snapshot2, snapshot1)
+	case snapshot1 == snapshot2:
+		if !bytes.Equal(root1, root2) {
+			return RootMismatchError{
+				CalculatedRoot: root1,
+				ExpectedRoot:   root2,
+			}
+		} else if len(proof) > 0 {
+			return errors.New("root1 and root2 match, but proof is non-empty")
+		}
+		return nil // Proof OK.
+	case snapshot1 == 0:
+		// Any snapshot greater than 0 is consistent with snapshot 0.
+		if len(proof) > 0 {
+			return fmt.Errorf("expected empty proof, but got %d components", len(proof))
+		}
+		return nil // Proof OK.
+	case len(proof) == 0:
+		return errors.New("empty proof")
+	}
+
+	inner, border := decompInclProof(snapshot1-1, snapshot2)
+	shift := bits.TrailingZeros64(uint64(snapshot1))
+	inner -= shift // Note: shift < inner if snapshot1 < snapshot2.
+
+	// The proof includes the root hash for the sub-tree of size 2^shift, unless
+	// snapshot1 is exactly that size, in which case root1 is used as the seed.
+	seed, start := proof[0], 1
+	if snapshot1 == 1<<uint(shift) {
+		seed, start = root1, 0
+	}
+	if got, want := len(proof), start+inner+border; got != want {
+		return fmt.Errorf("wrong proof size %d, want %d", got, want)
+	}
+	proof = proof[start:]
+	// Now len(proof) == inner+border, and the proof is effectively a suffix of
+	// the inclusion proof for entry |snapshot1-1| in a tree of size |snapshot2|.
+
+	// Verify the first root.
+	ch := hashChainer(v)
+	mask := (snapshot1 - 1) >> uint(shift) // Start chaining from level |shift|.
+	hash1 := ch.chainInnerRight(seed, proof[:inner], mask)
+	hash1 = ch.chainBorderRight(hash1, proof[inner:])
+	if !bytes.Equal(hash1, root1) {
+		return RootMismatchError{
+			CalculatedRoot: hash1,
+			ExpectedRoot:   root1,
+		}
+	}
+
+	// Verify the second root.
+	hash2 := ch.chainInner(seed, proof[:inner], mask)
+	hash2 = ch.chainBorderRight(hash2, proof[inner:])
+	if !bytes.Equal(hash2, root2) {
+		return RootMismatchError{
+			CalculatedRoot: hash2,
+			ExpectedRoot:   root2,
+		}
+	}
+
+	return nil // Proof OK.
+}
+
+// VerifiedPrefixHashFromInclusionProof calculates a root hash over leaves
+// [0..subSize), based on the inclusion |proof| and |leafHash| for a leaf at
+// index |subSize-1| in a tree of the specified |size| with the passed in
+// |root| hash.
+// Returns an error if the |proof| verification fails. The resulting smaller
+// tree's root hash is trusted iff the bigger tree's |root| hash is trusted.
+func (v LogVerifier) VerifiedPrefixHashFromInclusionProof(
+	subSize, size int64,
+	proof [][]byte, root []byte, leafHash []byte,
+) ([]byte, error) {
+	if subSize <= 0 {
+		return nil, fmt.Errorf("subtree size is %d, want > 0", subSize)
+	}
+	leaf := subSize - 1
+	if err := v.VerifyInclusionProof(leaf, size, proof, root, leafHash); err != nil {
+		return nil, err
+	}
+
+	inner := innerProofSize(leaf, size)
+	ch := hashChainer(v)
+	res := ch.chainInnerRight(leafHash, proof[:inner], leaf)
+	res = ch.chainBorderRight(res, proof[inner:])
+	return res, nil
+}
+
+// decompInclProof breaks down inclusion proof for a leaf at the specified
+// |index| in a tree of the specified |size| into 2 components. The splitting
+// point between them is where paths to leaves |index| and |size-1| diverge.
+// Returns lengths of the bottom and upper proof parts correspondingly. The sum
+// of the two determines the correct length of the inclusion proof.
+func decompInclProof(index, size int64) (int, int) {
+	inner := innerProofSize(index, size)
+	border := bits.OnesCount64(uint64(index) >> uint(inner))
+	return inner, border
+}
+
+func innerProofSize(index, size int64) int {
+	return bits.Len64(uint64(index ^ (size - 1)))
+}
diff --git a/rfc6962/rfc6962.go b/rfc6962/rfc6962.go
new file mode 100644
index 0000000..b04f952
--- /dev/null
+++ b/rfc6962/rfc6962.go
@@ -0,0 +1,68 @@
+// Copyright 2016 Google LLC.
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package rfc6962 provides hashing functionality according to RFC6962. +package rfc6962 + +import ( + "crypto" + _ "crypto/sha256" // SHA256 is the default algorithm. +) + +// Domain separation prefixes +const ( + RFC6962LeafHashPrefix = 0 + RFC6962NodeHashPrefix = 1 +) + +// DefaultHasher is a SHA256 based LogHasher. +var DefaultHasher = New(crypto.SHA256) + +// Hasher implements the RFC6962 tree hashing algorithm. +type Hasher struct { + crypto.Hash +} + +// New creates a new Hashers.LogHasher on the passed in hash function. +func New(h crypto.Hash) *Hasher { + return &Hasher{Hash: h} +} + +// EmptyRoot returns a special case for an empty tree. +func (t *Hasher) EmptyRoot() []byte { + return t.New().Sum(nil) +} + +// HashLeaf returns the Merkle tree leaf hash of the data passed in through leaf. +// The data in leaf is prefixed by the LeafHashPrefix. +func (t *Hasher) HashLeaf(leaf []byte) []byte { + h := t.New() + h.Write([]byte{RFC6962LeafHashPrefix}) + h.Write(leaf) + return h.Sum(nil) +} + +// HashChildren returns the inner Merkle tree node hash of the two child nodes l and r. +// The hashed structure is NodeHashPrefix||l||r. +func (t *Hasher) HashChildren(l, r []byte) []byte { + h := t.New() + b := append(append(append( + make([]byte, 0, 1+len(l)+len(r)), + RFC6962NodeHashPrefix), + l...), + r...) + + h.Write(b) + return h.Sum(nil) +} diff --git a/rfc6962/rfc6962_test.go b/rfc6962/rfc6962_test.go new file mode 100644 index 0000000..99e5eae --- /dev/null +++ b/rfc6962/rfc6962_test.go @@ -0,0 +1,109 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rfc6962 + +import ( + "bytes" + "encoding/hex" + "testing" + + _ "github.com/golang/glog" +) + +func TestRFC6962Hasher(t *testing.T) { + hasher := DefaultHasher + + leafHash := hasher.HashLeaf([]byte("L123456")) + emptyLeafHash := hasher.HashLeaf([]byte{}) + + for _, tc := range []struct { + desc string + got []byte + want string + }{ + // echo -n | sha256sum + { + desc: "RFC6962 Empty", + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + got: hasher.EmptyRoot(), + }, + // Check that the empty hash is not the same as the hash of an empty leaf. 
+ // echo -n 00 | xxd -r -p | sha256sum + { + desc: "RFC6962 Empty Leaf", + want: "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d", + got: emptyLeafHash, + }, + // echo -n 004C313233343536 | xxd -r -p | sha256sum + { + desc: "RFC6962 Leaf", + want: "395aa064aa4c29f7010acfe3f25db9485bbd4b91897b6ad7ad547639252b4d56", + got: leafHash, + }, + // echo -n 014E3132334E343536 | xxd -r -p | sha256sum + { + desc: "RFC6962 Node", + want: "aa217fe888e47007fa15edab33c2b492a722cb106c64667fc2b044444de66bbb", + got: hasher.HashChildren([]byte("N123"), []byte("N456")), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + wantBytes, err := hex.DecodeString(tc.want) + if err != nil { + t.Fatalf("hex.DecodeString(%x): %v", tc.want, err) + } + if got, want := tc.got, wantBytes; !bytes.Equal(got, want) { + t.Errorf("got %x, want %x", got, want) + } + }) + } +} + +// TODO(pavelkalinnikov): Apply this test to all LogHasher implementations. +func TestRFC6962HasherCollisions(t *testing.T) { + hasher := DefaultHasher + + // Check that different leaves have different hashes. + leaf1, leaf2 := []byte("Hello"), []byte("World") + hash1 := hasher.HashLeaf(leaf1) + hash2 := hasher.HashLeaf(leaf2) + if bytes.Equal(hash1, hash2) { + t.Errorf("Leaf hashes should differ, but both are %x", hash1) + } + + // Compute an intermediate subtree hash. + subHash1 := hasher.HashChildren(hash1, hash2) + // Check that this is not the same as a leaf hash of their concatenation. + preimage := append(hash1, hash2...) + forgedHash := hasher.HashLeaf(preimage) + if bytes.Equal(subHash1, forgedHash) { + t.Errorf("Hasher is not second-preimage resistant") + } + + // Swap the order of nodes and check that the hash is different. + subHash2 := hasher.HashChildren(hash2, hash1) + if bytes.Equal(subHash1, subHash2) { + t.Errorf("Subtree hash does not depend on the order of leaves") + } +} + +func BenchmarkHashChildren(b *testing.B) { + h := DefaultHasher + l := h.HashLeaf([]byte("one")) + r := h.HashLeaf([]byte("or other")) + for i := 0; i < b.N; i++ { + _ = h.HashChildren(l, r) + } +}
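Finally, a consistency-proof sketch (same assumed import paths as the earlier examples): when a log grows from one leaf to two, the RFC 6962 consistency proof is the single hash of the appended leaf.

package main

import (
	"fmt"

	"team.git.corp.google.com/security-transparency-team/merkle.git/logverifier"
	"team.git.corp.google.com/security-transparency-team/merkle.git/rfc6962"
)

func main() {
	h := rfc6962.DefaultHasher
	v := logverifier.New(h)

	leafA, leafB := h.HashLeaf([]byte("a")), h.HashLeaf([]byte("b"))
	root1 := leafA                        // Root of the size-1 tree is its only leaf hash.
	root2 := h.HashChildren(leafA, leafB) // Root after appending the second leaf.

	// The consistency proof from size 1 to size 2 is the single hash of the
	// appended leaf.
	proof := [][]byte{leafB}
	if err := v.VerifyConsistencyProof(1, 2, root1, root2, proof); err != nil {
		panic(err)
	}
	fmt.Println("consistency proof verified")
}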