consensus/ethash: improve cache/dataset handling (ethereum#15864)
* consensus/ethash: add maxEpoch constant

* consensus/ethash: improve cache/dataset handling

There are two fixes in this commit:

Unmap the memory through a finalizer, like the libethash wrapper did. The
previous release logic was incorrect and freed the memory while it was
still in use, leading to crashes such as ethereum#14495 or ethereum#14943.

Track caches and datasets using simplelru instead of reinventing LRU
logic. This should make it easier to verify that the logic is correct
(a minimal sketch of the combined pattern follows the change summary below).

* consensus/ethash: restore 'future item' logic in lru

* consensus/ethash: use mmap even in test mode

This makes it possible to shorten the time taken for TestCacheFileEvict.

* consensus/ethash: shuffle func calc*Size comments around

* consensus/ethash: ensure future cache/dataset is in the lru cache

* consensus/ethash: add issue link to the new test

* consensus/ethash: fix vet

* consensus/ethash: fix test

* consensus: tiny issue + nitpick fixes
fjl authored and karalabe committed Jan 23, 2018
1 parent 5d42679 commit 924065e
Showing 8 changed files with 208 additions and 213 deletions.
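The two fixes reinforce each other, and the pattern is easiest to see in miniature. The following is a hypothetical, trimmed-down sketch — names such as cache and newCache are illustrative, not the ethash package's own — of a finalizer-released cache tracked with hashicorp's simplelru:

// Sketch: each cache owns memory released by a runtime finalizer, and the
// set of live caches is tracked with simplelru instead of hand-rolled
// LRU bookkeeping. Names here are illustrative, not ethash's exact ones.
package main

import (
	"fmt"
	"runtime"

	"github.com/hashicorp/golang-lru/simplelru"
)

// cache holds the verification cache for one epoch. In go-ethereum the
// backing memory is an mmap'ed file; a plain slice stands in for it here.
type cache struct {
	epoch uint64
	data  []uint32 // stand-in for the mmap'ed region
}

func newCache(epoch uint64) *cache {
	c := &cache{epoch: epoch, data: make([]uint32, 1024)}
	// Release the memory only once the GC proves the cache unreachable,
	// instead of freeing it eagerly while a verifier may still read it.
	runtime.SetFinalizer(c, func(c *cache) {
		// In the real code: unmap the backing file here.
		fmt.Printf("finalizer: releasing cache for epoch %d\n", c.epoch)
	})
	return c
}

func main() {
	// Keep at most 3 epochs' caches; evicted entries are simply dropped
	// from the LRU and their memory is reclaimed later by the finalizer.
	lru, _ := simplelru.NewLRU(3, nil)

	for epoch := uint64(0); epoch < 5; epoch++ {
		if _, ok := lru.Get(epoch); !ok {
			lru.Add(epoch, newCache(epoch))
		}
	}
	fmt.Println("cached epochs:", lru.Keys())
}

The "future item" logic restored later in the series generates the next epoch's cache ahead of time; the commit ensures this future cache/dataset also lives in the LRU so it is tracked like any other entry, a detail the sketch omits.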
6 changes: 4 additions & 2 deletions consensus/ethash/algorithm.go
@@ -355,9 +355,11 @@ func hashimotoFull(dataset []uint32, hash []byte, nonce uint64) ([]byte, []byte)
 	return hashimoto(hash, nonce, uint64(len(dataset))*4, lookup)
 }
 
+const maxEpoch = 2048
+
 // datasetSizes is a lookup table for the ethash dataset size for the first 2048
 // epochs (i.e. 61440000 blocks).
-var datasetSizes = []uint64{
+var datasetSizes = [maxEpoch]uint64{
 	1073739904, 1082130304, 1090514816, 1098906752, 1107293056,
 	1115684224, 1124070016, 1132461952, 1140849536, 1149232768,
 	1157627776, 1166013824, 1174404736, 1182786944, 1191180416,
@@ -771,7 +773,7 @@ var datasetSizes = []uint64{
 
 // cacheSizes is a lookup table for the ethash verification cache size for the
 // first 2048 epochs (i.e. 61440000 blocks).
-var cacheSizes = []uint64{
+var cacheSizes = [maxEpoch]uint64{
 	16776896, 16907456, 17039296, 17170112, 17301056, 17432512, 17563072,
 	17693888, 17824192, 17955904, 18087488, 18218176, 18349504, 18481088,
 	18611392, 18742336, 18874304, 19004224, 19135936, 19267264, 19398208,
4 changes: 2 additions & 2 deletions consensus/ethash/algorithm_go1.7.go
@@ -25,7 +25,7 @@ package ethash
 func cacheSize(block uint64) uint64 {
 	// If we have a pre-generated value, use that
 	epoch := int(block / epochLength)
-	if epoch < len(cacheSizes) {
+	if epoch < maxEpoch {
 		return cacheSizes[epoch]
 	}
 	// We don't have a way to verify primes fast before Go 1.8
@@ -39,7 +39,7 @@ func cacheSize(block uint64) uint64 {
 func datasetSize(block uint64) uint64 {
 	// If we have a pre-generated value, use that
 	epoch := int(block / epochLength)
-	if epoch < len(datasetSizes) {
+	if epoch < maxEpoch {
 		return datasetSizes[epoch]
 	}
 	// We don't have a way to verify primes fast before Go 1.8
34 changes: 20 additions & 14 deletions consensus/ethash/algorithm_go1.8.go
@@ -20,35 +20,41 @@ package ethash
 
 import "math/big"
 
-// cacheSize calculates and returns the size of the ethash verification cache that
-// belongs to a certain block number. The cache size grows linearly, however, we
-// always take the highest prime below the linearly growing threshold in order to
-// reduce the risk of accidental regularities leading to cyclic behavior.
+// cacheSize returns the size of the ethash verification cache that belongs to a certain
+// block number.
 func cacheSize(block uint64) uint64 {
-	// If we have a pre-generated value, use that
 	epoch := int(block / epochLength)
-	if epoch < len(cacheSizes) {
+	if epoch < maxEpoch {
 		return cacheSizes[epoch]
 	}
-	// No known cache size, calculate manually (sanity branch only)
+	return calcCacheSize(epoch)
+}
+
+// calcCacheSize calculates the cache size for epoch. The cache size grows linearly,
+// however, we always take the highest prime below the linearly growing threshold in order
+// to reduce the risk of accidental regularities leading to cyclic behavior.
+func calcCacheSize(epoch int) uint64 {
 	size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
 	for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
 		size -= 2 * hashBytes
 	}
 	return size
 }
 
-// datasetSize calculates and returns the size of the ethash mining dataset that
-// belongs to a certain block number. The dataset size grows linearly, however, we
-// always take the highest prime below the linearly growing threshold in order to
-// reduce the risk of accidental regularities leading to cyclic behavior.
+// datasetSize returns the size of the ethash mining dataset that belongs to a certain
+// block number.
 func datasetSize(block uint64) uint64 {
-	// If we have a pre-generated value, use that
 	epoch := int(block / epochLength)
-	if epoch < len(datasetSizes) {
+	if epoch < maxEpoch {
 		return datasetSizes[epoch]
 	}
-	// No known dataset size, calculate manually (sanity branch only)
+	return calcDatasetSize(epoch)
+}
+
+// calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly,
+// however, we always take the highest prime below the linearly growing threshold in order
+// to reduce the risk of accidental regularities leading to cyclic behavior.
+func calcDatasetSize(epoch int) uint64 {
 	size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
 	for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
 		size -= 2 * mixBytes
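To make the prime walk concrete, here is a self-contained sketch reproducing calcCacheSize for epoch 0, with the standard ethash constants inlined (cacheInitBytes = 2^24, cacheGrowthBytes = 2^17, hashBytes = 64). Starting from 16777216 - 64 = 16777152 bytes, the candidates size/hashBytes = 262143 and 262141 are composite, so the loop settles on the prime 262139, i.e. 262139 * 64 = 16776896 bytes — exactly the first entry of the cacheSizes table above.

// Self-contained sketch of the calcCacheSize prime walk for epoch 0,
// using the standard ethash constants.
package main

import (
	"fmt"
	"math/big"
)

const (
	cacheInitBytes   uint64 = 1 << 24 // bytes in cache at genesis
	cacheGrowthBytes uint64 = 1 << 17 // cache growth per epoch
	hashBytes        uint64 = 64      // length of a hash, in bytes
)

func calcCacheSize(epoch int) uint64 {
	size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
	// Step down in 2*hashBytes increments until size/hashBytes is prime.
	for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) {
		size -= 2 * hashBytes
	}
	return size
}

func main() {
	// 262143 and 262141 are composite; 262139 is prime, so the result is
	// 262139*64 = 16776896 — the first cacheSizes entry.
	fmt.Println(calcCacheSize(0)) // 16776896
}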
23 changes: 7 additions & 16 deletions consensus/ethash/algorithm_go1.8_test.go
@@ -23,24 +23,15 @@ import "testing"
 
 // Tests whether the dataset size calculator works correctly by cross checking the
 // hard coded lookup table with the value generated by it.
 func TestSizeCalculations(t *testing.T) {
-	var tests []uint64
-
-	// Verify all the cache sizes from the lookup table
-	defer func(sizes []uint64) { cacheSizes = sizes }(cacheSizes)
-	tests, cacheSizes = cacheSizes, []uint64{}
-
-	for i, test := range tests {
-		if size := cacheSize(uint64(i*epochLength) + 1); size != test {
-			t.Errorf("cache %d: cache size mismatch: have %d, want %d", i, size, test)
+	// Verify all the cache and dataset sizes from the lookup table.
+	for epoch, want := range cacheSizes {
+		if size := calcCacheSize(epoch); size != want {
+			t.Errorf("cache %d: cache size mismatch: have %d, want %d", epoch, size, want)
 		}
 	}
-	// Verify all the dataset sizes from the lookup table
-	defer func(sizes []uint64) { datasetSizes = sizes }(datasetSizes)
-	tests, datasetSizes = datasetSizes, []uint64{}
-
-	for i, test := range tests {
-		if size := datasetSize(uint64(i*epochLength) + 1); size != test {
-			t.Errorf("dataset %d: dataset size mismatch: have %d, want %d", i, size, test)
+	for epoch, want := range datasetSizes {
+		if size := calcDatasetSize(epoch); size != want {
+			t.Errorf("dataset %d: dataset size mismatch: have %d, want %d", epoch, size, want)
 		}
 	}
 }
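Because the rewritten test calls calcCacheSize/calcDatasetSize directly over the full 2048-entry tables, it no longer needs to swap out and restore the package-level slices. It can be run in isolation with, for example:

go test -run TestSizeCalculations ./consensus/ethash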
10 changes: 7 additions & 3 deletions consensus/ethash/consensus.go
@@ -476,22 +476,26 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header
 	}
 	// Sanity check that the block number is below the lookup table size (60M blocks)
 	number := header.Number.Uint64()
-	if number/epochLength >= uint64(len(cacheSizes)) {
+	if number/epochLength >= maxEpoch {
 		// Go < 1.7 cannot calculate new cache/dataset sizes (no fast prime check)
 		return errNonceOutOfRange
 	}
 	// Ensure that we have a valid difficulty for the block
 	if header.Difficulty.Sign() <= 0 {
 		return errInvalidDifficulty
 	}
+
 	// Recompute the digest and PoW value and verify against the header
 	cache := ethash.cache(number)
 
 	size := datasetSize(number)
 	if ethash.config.PowMode == ModeTest {
 		size = 32 * 1024
 	}
-	digest, result := hashimotoLight(size, cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
+	digest, result := hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
+	// Caches are unmapped in a finalizer. Ensure that the cache stays live
+	// until after the call to hashimotoLight so it's not unmapped while being used.
+	runtime.KeepAlive(cache)
+
 	if !bytes.Equal(header.MixDigest[:], digest) {
 		return errInvalidMixDigest
 	}
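The runtime.KeepAlive call above is load-bearing: Go's GC may run an object's finalizer as soon as its last reference is gone, and holding a slice taken from the object does not keep the object itself alive. With an mmap-backed slice the data lives outside the Go heap, so an early finalizer would unmap memory still being read. A minimal sketch of the hazard, with hypothetical names:

// Minimal sketch of the liveness hazard runtime.KeepAlive guards against.
// Names are illustrative; the real ethash cache type is not shown in this diff.
package main

import (
	"fmt"
	"runtime"
)

type mmapCache struct {
	data []uint32 // in the real code this slice points into an mmap'ed file
}

// verify stands in for hashimotoLight: a long computation over data.
func verify(data []uint32) uint32 {
	var acc uint32
	for _, v := range data {
		acc ^= v
	}
	return acc
}

func main() {
	c := &mmapCache{data: make([]uint32, 1<<10)}
	runtime.SetFinalizer(c, func(c *mmapCache) {
		fmt.Println("finalizer: would unmap c.data here")
	})

	data := c.data
	// Holding data keeps the backing array alive, but NOT c itself: after
	// this point c is unreachable, so the GC may run its finalizer. With a
	// heap slice that is harmless; with an mmap'ed region the finalizer
	// would unmap memory verify is still reading. KeepAlive pins c until
	// the computation is done.
	sum := verify(data)
	runtime.KeepAlive(c)
	fmt.Println("checksum:", sum)
}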
(Diffs for the remaining three changed files are not shown here.)
