diff --git a/cmd/config.go b/cmd/config.go index 77ce94606..7a5278900 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -94,7 +94,8 @@ type Config struct { func GetStringSliceWorkaround(flagName string) []string { value := viper.GetString(flagName) if value == "" || value == " " { - return []string{} + values := viper.GetStringSlice(flagName) + return values } return strings.Split(value, ",") } @@ -147,7 +148,6 @@ func LoadConfig() *Config { // Peers config.ConnectIPs = GetStringSliceWorkaround("connect-ips") - glog.V(2).Infof("Connect IPs read in: %v", config.ConnectIPs) config.AddIPs = GetStringSliceWorkaround("add-ips") config.AddSeeds = GetStringSliceWorkaround("add-seeds") config.TargetOutboundPeers = viper.GetUint32("target-outbound-peers") diff --git a/go.mod b/go.mod index 0b5355f47..801cfd02d 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,6 @@ require ( github.com/bxcodec/faker v2.0.1+incompatible github.com/cloudflare/circl v1.5.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/decred/dcrd/container/lru v1.0.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/deso-protocol/go-deadlock v1.0.1 github.com/deso-protocol/go-merkle-tree v1.0.0 @@ -38,11 +37,11 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.18.2 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/tyler-smith/go-bip39 v1.1.0 github.com/unrolled/secure v1.16.0 - golang.org/x/crypto v0.28.0 - golang.org/x/sync v0.8.0 + golang.org/x/crypto v0.29.0 + golang.org/x/sync v0.9.0 gopkg.in/DataDog/dd-trace-go.v1 v1.69.0 ) @@ -101,7 +100,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.17.1 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/kyokomi/emoji/v2 v2.2.13 // indirect 
github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -145,10 +144,10 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/term v0.26.0 // indirect + golang.org/x/text v0.20.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect diff --git a/go.sum b/go.sum index a21ccd144..dd7d70b0c 100644 --- a/go.sum +++ b/go.sum @@ -100,8 +100,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/container/lru v1.0.0 h1:7foQymtbu18aQWYiY9RnNIeE+kvpiN+fiBQ3+viyJjI= -github.com/decred/dcrd/container/lru v1.0.0/go.mod h1:vlPwj0l+IzAHhQSsbgQnJgO5Cte78+yI065V+Mc5PRQ= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= @@ -250,8 +248,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod 
h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g= -github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -384,8 +382,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -440,8 +438,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= @@ -476,8 +474,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -488,8 +486,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -520,15 +518,15 @@ golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod 
h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -536,8 +534,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/lib/block_producer.go b/lib/block_producer.go index 46c6c85b1..1ad40c2ee 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -367,8 +367,8 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) blockRet.Header.TransactionMerkleRoot = merkleRoot // Compute the next difficulty target given the current tip. 
- diffTarget, err := CalcNextDifficultyTarget( - lastNode, CurrentHeaderVersion, desoBlockProducer.params) + diffTarget, err := desoBlockProducer.chain.CalcNextDifficultyTarget( + lastNode, CurrentHeaderVersion) if err != nil { return nil, nil, nil, errors.Wrapf(err, "DeSoBlockProducer._getBlockTemplate: Problem computing next difficulty: ") } diff --git a/lib/block_view.go b/lib/block_view.go index 5a148b0ea..3a8eacb12 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -5114,6 +5114,7 @@ func (bav *UtxoView) GetSpendableDeSoBalanceNanosForPublicKey(pkBytes []byte, // but we do have the header. As a result, this condition always evaluates to false and thus // we only process the block reward for the previous block instead of all immature block rewards // as defined by the params. + // NOTE: we are not using .GetParent here as it changes the meaning of this code. if blockNode.Parent != nil { nextBlockHash = blockNode.Parent.Hash } else { diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index e788e3e03..d542f3eef 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -2353,7 +2353,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { // Process the first block err = testMeta.miner.BlockProducer.SignBlock(blk1) require.NoError(t, err) - _, _, _, err = testMeta.chain.ProcessBlock(blk1, false) + _, _, _, err = testMeta.chain.ProcessBlock(blk1, nil, false) require.NoError(t, err) // Validate state update @@ -2417,7 +2417,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { // Process the second block err = testMeta.miner.BlockProducer.SignBlock(blk2) require.NoError(t, err) - _, _, _, err = testMeta.chain.ProcessBlock(blk2, false) + _, _, _, err = testMeta.chain.ProcessBlock(blk2, nil, false) require.NoError(t, err) // Validate state update @@ -2462,7 +2462,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.NoError(t, utxoView.FlushToDb(blk2.Header.Height)) // Update 
the tip - testMeta.chain.bestChain = testMeta.chain.bestChain[:len(testMeta.chain.bestChain)-1] + testMeta.chain.blockIndex.tip = testMeta.chain.blockIndex.tip.Parent // Validate the state update utxoView = NewUtxoView( @@ -2517,7 +2517,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.NoError(t, utxoView.FlushToDb(blk1.Header.Height)) // Update the tip - testMeta.chain.bestChain = testMeta.chain.bestChain[:len(testMeta.chain.bestChain)-1] + testMeta.chain.blockIndex.setTip(testMeta.chain.blockIndex.tip.Parent) // Verify we return back to the initial state utxoView = NewUtxoView( diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 7e9f275d2..852404510 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" + lru "github.com/hashicorp/golang-lru/v2" "math" _ "net/http/pprof" "reflect" @@ -13,7 +14,6 @@ import ( "github.com/deso-protocol/core/bls" "github.com/btcsuite/btcd/btcec/v2" - "github.com/decred/dcrd/container/lru" "github.com/dgraph-io/badger/v3" embeddedpostgres "github.com/fergusstrange/embedded-postgres" "github.com/golang/glog" @@ -702,8 +702,8 @@ func (tes *transactionTestSuite) testDisconnectBlock(tm *transactionTestMeta, te require.NoError(err) // sanity-check that the last block hash is the same as the last header hash. require.Equal(true, bytes.Equal( - tm.chain.bestChain[len(tm.chain.bestChain)-1].Hash.ToBytes(), - tm.chain.bestHeaderChain[len(tm.chain.bestHeaderChain)-1].Hash.ToBytes())) + tm.chain.blockIndex.GetTip().Hash.ToBytes(), + tm.chain.blockIndex.GetHeaderTip().Hash.ToBytes())) // Last block shouldn't be nil, and the number of expectedTxns should be the same as in the testVectorBlock + 1, // because of the additional block reward. require.NotNil(lastBlock) @@ -791,15 +791,14 @@ func (tes *transactionTestSuite) testDisconnectBlock(tm *transactionTestMeta, te // TODO: if ever needed we can call tm.chain.eventManager.blockDisconnected() here. 
// Update the block and header metadata chains. - tm.chain.bestChain = tm.chain.bestChain[:len(tm.chain.bestChain)-1] - tm.chain.bestHeaderChain = tm.chain.bestHeaderChain[:len(tm.chain.bestHeaderChain)-1] - delete(tm.chain.bestChainMap, *lastBlockHash) - delete(tm.chain.bestHeaderChainMap, *lastBlockHash) + tm.chain.blockIndex.setTip(tm.chain.BlockTip().GetParent(tm.chain.blockIndex)) + tm.chain.blockIndex.setHeaderTip(tm.chain.HeaderTip().GetParent(tm.chain.blockIndex)) // We don't pass the chain's snapshot above to prevent certain concurrency issues. As a // result, we need to reset the snapshot's db cache to get rid of stale data. if tm.chain.snapshot != nil { - tm.chain.snapshot.DatabaseCache = *lru.NewMap[string, []byte](DatabaseCacheSize) + tm.chain.snapshot.DatabaseCache, err = lru.New[string, []byte](int(DatabaseCacheSize)) + require.NoError(err) } // Note that unlike connecting test vectors, when disconnecting, we don't need to verify db entries. diff --git a/lib/blockchain.go b/lib/blockchain.go index 83d3f1b5a..fe9fd6eda 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -17,7 +17,7 @@ import ( "sync" "time" - "github.com/decred/dcrd/container/lru" + "github.com/hashicorp/golang-lru/v2" "github.com/deso-protocol/core/collections" @@ -59,7 +59,9 @@ const ( // have room for multiple forks each an entire history's length with this value). If // each node takes up 100 bytes of space this amounts to around 500MB, which also seems // like a reasonable size. - MaxBlockIndexNodes = 5000000 + // UPDATE: now that we don't keep everything in memory, we reduced this value from + // 5000000 to 1000000 + MaxBlockIndexNodes = 1000000 // TODO: trim this down somehow... ) type BlockStatus uint32 @@ -123,7 +125,12 @@ func (nn *BlockNode) IsValidateFailed() bool { // IsCommitted returns true if a BlockNode has passed all validations, and it has been committed to // the Blockchain according to the Fast HotStuff commit rule. 
func (nn *BlockNode) IsCommitted() bool { - return nn.Status&StatusBlockCommitted != 0 || !blockNodeProofOfStakeCutoverMigrationTriggered(nn.Height) + //return nn.Status&StatusBlockCommitted != 0 || !blockNodeProofOfStakeCutoverMigrationTriggered(nn.Height) + return nn.Status&StatusBlockCommitted != 0 +} + +func (nn *BlockNode) ClearCommittedStatus() { + nn.Status &= BlockStatus(^uint32(StatusBlockCommitted)) } // IsFullyProcessed determines if the BlockStatus corresponds to a fully processed and stored block. @@ -236,6 +243,20 @@ func (nn *BlockNode) GetEncoderType() EncoderType { return EncoderTypeBlockNode } +func (nn *BlockNode) GetParent(blockIndex *BlockIndex) *BlockNode { + if nn.Parent != nil { + return nn.Parent + } + // If we don't have a parent, try to get it from the block index. + parentNode, exists := blockIndex.GetBlockNodeByHashAndHeight(nn.Header.PrevBlockHash, uint64(nn.Height-1)) + if !exists { + return nil + } + + nn.Parent = parentNode + return parentNode +} + // Append DeSo Encoder Metadata bytes to BlockNode bytes. func AddEncoderMetadataToBlockNodeBytes(blockNodeBytes []byte, blockHeight uint64) []byte { var blockData []byte @@ -371,6 +392,8 @@ func (nn *BlockNode) String() string { var parentHash *BlockHash if nn.Parent != nil { parentHash = nn.Parent.Hash + } else { + parentHash = nn.Header.PrevBlockHash } tstamp := uint32(0) if nn.Header != nil { @@ -404,14 +427,25 @@ func NewBlockNode( } } -func (nn *BlockNode) Ancestor(height uint32) *BlockNode { +func (nn *BlockNode) Ancestor(height uint32, blockIndex *BlockIndex) *BlockNode { if height > nn.Height { return nil } node := nn + // NOTE: using .Parent here is okay b/c it explicitly set it + // if we don't already have it when we fetch the parent from + // the block index. for ; node != nil && node.Height != height; node = node.Parent { // Keep iterating node until the condition no longer holds. 
+ if node.Parent == nil { + var exists bool + node.Parent, exists = blockIndex.GetBlockNodeByHashAndHeight( + node.Header.PrevBlockHash, uint64(node.Height-1)) + if !exists { + return nil + } + } } return node @@ -422,24 +456,24 @@ func (nn *BlockNode) Ancestor(height uint32) *BlockNode { // height minus provided distance. // // This function is safe for concurrent access. -func (nn *BlockNode) RelativeAncestor(distance uint32) *BlockNode { - return nn.Ancestor(nn.Height - distance) -} +//func (nn *BlockNode) RelativeAncestor(distance uint32, blockIndex *BlockIndex) *BlockNode { +// return nn.Ancestor(nn.Height-distance, blockIndex) +//} // CalcNextDifficultyTarget computes the difficulty target expected of the // next block. -func CalcNextDifficultyTarget( - lastNode *BlockNode, version uint32, params *DeSoParams) (*BlockHash, error) { +func (bc *Blockchain) CalcNextDifficultyTarget( + lastNode *BlockNode, version uint32) (*BlockHash, error) { // Compute the blocks in each difficulty cycle. - blocksPerRetarget := uint32(params.TimeBetweenDifficultyRetargets / params.TimeBetweenBlocks) + blocksPerRetarget := uint32(bc.params.TimeBetweenDifficultyRetargets / bc.params.TimeBetweenBlocks) // We effectively skip the first difficulty retarget by returning the default // difficulty value for the first cycle. Not doing this (or something like it) // would cause the genesis block's timestamp, which could be off by several days // to significantly skew the first cycle in a way that is mostly annoying for // testing but also suboptimal for the mainnet. - minDiffBytes, err := hex.DecodeString(params.MinDifficultyTargetHex) + minDiffBytes, err := hex.DecodeString(bc.params.MinDifficultyTargetHex) if err != nil { return nil, errors.Wrapf(err, "CalcNextDifficultyTarget: Problem computing min difficulty") } @@ -460,19 +494,27 @@ func CalcNextDifficultyTarget( } // If we get here it means we reached a difficulty retarget point. 
- targetSecs := int64(params.TimeBetweenDifficultyRetargets / time.Second) - minRetargetTimeSecs := targetSecs / params.MaxDifficultyRetargetFactor - maxRetargetTimeSecs := targetSecs * params.MaxDifficultyRetargetFactor + targetSecs := int64(bc.params.TimeBetweenDifficultyRetargets / time.Second) + minRetargetTimeSecs := targetSecs / bc.params.MaxDifficultyRetargetFactor + maxRetargetTimeSecs := targetSecs * bc.params.MaxDifficultyRetargetFactor firstNodeHeight := lastNode.Height - blocksPerRetarget - firstNode := lastNode.Ancestor(firstNodeHeight) - if firstNode == nil { + // TODO: we need to write the migration to only have committed blocks from PoW. + // This code is dead for PoS. + // TODO: do we need to do something if we need to get this from the header chain? + firstNode, exists, err := bc.GetBlockFromBestChainByHeight(uint64(firstNodeHeight), true) + if err != nil { + return nil, errors.Wrapf(err, "CalcNextDifficultyTarget: Problem getting block at "+ + "beginning of retarget interval at height %d during retarget from height %d", + firstNodeHeight, lastNode.Height) + } + if firstNode == nil || !exists { return nil, fmt.Errorf("CalcNextDifficultyTarget: Problem getting block at "+ "beginning of retarget interval at height %d during retarget from height %d", firstNodeHeight, lastNode.Height) } - actualTimeDiffSecs := int64(lastNode.Header.GetTstampSecs() - firstNode.Header.GetTstampSecs()) + actualTimeDiffSecs := lastNode.Header.GetTstampSecs() - firstNode.Header.GetTstampSecs() clippedTimeDiffSecs := actualTimeDiffSecs if actualTimeDiffSecs < minRetargetTimeSecs { clippedTimeDiffSecs = minRetargetTimeSecs @@ -527,6 +569,146 @@ type CheckpointBlockInfoAndError struct { Error error } +type BlockIndex struct { + db *badger.DB + snapshot *Snapshot + blockIndexByHash *lru.Cache[BlockHash, *BlockNode] + blockIndexByHeight *lru.Cache[uint64, []*BlockNode] + tip *BlockNode + headerTip *BlockNode +} + +func NewBlockIndex(db *badger.DB, snapshot *Snapshot, tipNode 
*BlockNode) *BlockIndex { + blockIndexByHash, _ := lru.New[BlockHash, *BlockNode](MaxBlockIndexNodes) // TODO: parameterize this? + blockIndexByHeight, _ := lru.New[uint64, []*BlockNode](MaxBlockIndexNodes) // TODO: parameterize this? + return &BlockIndex{ + db: db, + snapshot: snapshot, + blockIndexByHash: blockIndexByHash, + blockIndexByHeight: blockIndexByHeight, + tip: tipNode, + } +} + +func (bi *BlockIndex) setBlockIndexFromMap(input map[BlockHash]*BlockNode) { + newHashToBlockNodeMap, _ := lru.New[BlockHash, *BlockNode](MaxBlockIndexNodes) + newHeightToBlockNodeMap, _ := lru.New[uint64, []*BlockNode](MaxBlockIndexNodes) + bi.blockIndexByHash = newHashToBlockNodeMap + bi.blockIndexByHeight = newHeightToBlockNodeMap + for _, val := range input { + bi.addNewBlockNodeToBlockIndex(val) + // This function is always used for tests. + // We assume that the tip is just the highest block in the block index. + if bi.tip == nil { + bi.tip = val + } else if val.Height > bi.tip.Height { + bi.tip = val + } + } +} + +func (bi *BlockIndex) setHeaderTip(tip *BlockNode) { + // Just to be safe, we also add it to the block index. + bi.addNewBlockNodeToBlockIndex(tip) + bi.headerTip = tip +} + +func (bi *BlockIndex) setTip(tip *BlockNode) { + // Just to be safe, we also add it to the block index. + bi.addNewBlockNodeToBlockIndex(tip) + bi.tip = tip +} + +func (bi *BlockIndex) addNewBlockNodeToBlockIndex(blockNode *BlockNode) { + bi.blockIndexByHash.Add(*blockNode.Hash, blockNode) + blocksAtHeight, exists := bi.blockIndexByHeight.Get(uint64(blockNode.Height)) + if !exists { + blocksAtHeight = []*BlockNode{} + } else { + // Make sure we don't add the same block node twice. + // TODO: we *could* make this more efficient by using a map, + // but generally we won't have many blocks at the same height. 
+ for ii, blockAtHeight := range blocksAtHeight { + if blockAtHeight.Hash.IsEqual(blockNode.Hash) { + blocksAtHeight[ii] = blockNode + break + } + } + } + bi.blockIndexByHeight.Add(uint64(blockNode.Height), append(blocksAtHeight, blockNode)) +} + +func (bi *BlockIndex) GetBlockNodeByHashOnly(blockHash *BlockHash) (*BlockNode, bool, error) { + val, exists := bi.blockIndexByHash.Get(*blockHash) + if exists { + return val, true, nil + } + height, err := GetHeightForHash(bi.db, bi.snapshot, blockHash) + if err != nil { + if errors.Is(err, badger.ErrKeyNotFound) { + return nil, false, nil + } + return nil, false, errors.Wrapf(err, "GetBlockNodeByHashOnly: Problem getting height for hash") + } + blockNode := GetHeightHashToNodeInfo(bi.db, bi.snapshot, uint32(height), blockHash, false) + if blockNode == nil { + return nil, false, nil + } + bi.addNewBlockNodeToBlockIndex(blockNode) + return blockNode, true, nil +} + +func (bi *BlockIndex) GetBlockNodeByHashAndHeight(blockHash *BlockHash, height uint64) (*BlockNode, bool) { + val, exists := bi.blockIndexByHash.Get(*blockHash) + if exists { + return val, true + } + if height > math.MaxUint32 { + glog.Fatalf("GetBlockNodeByHashAndHeight: Height %d is greater than math.MaxUint32", height) + } + bn := GetHeightHashToNodeInfo(bi.db, bi.snapshot, uint32(height), blockHash, false) + if bn == nil { + return nil, false + } + bi.addNewBlockNodeToBlockIndex(bn) + return bn, true +} + +func (bi *BlockIndex) GetBlockNodesByHeight(height uint64) []*BlockNode { + if height > math.MaxUint32 { + glog.Fatalf("GetBlockNodesByHeight: Height %d is greater than math.MaxUint32", height) + } + //if height > bi.maxHeightSeen { + // return []*BlockNode{} + //} + blockNodesAtHeight, exists := bi.blockIndexByHeight.Get(height) + if exists { + return blockNodesAtHeight + } + // TODO: cache current height to exit early? 
+ prefixKey := _heightHashToNodePrefixByHeight(uint32(height), false) + _, valsFound := EnumerateKeysForPrefix(bi.db, prefixKey, false) + blockNodes := []*BlockNode{} + for _, val := range valsFound { + blockNode, err := DeserializeBlockNode(val) + if err != nil { + glog.Errorf("GetBlockNodesByHeight: Problem deserializing block node: %v", err) + continue + } + bi.addNewBlockNodeToBlockIndex(blockNode) + blockNodes = append(blockNodes, blockNode) + } + return blockNodes +} + +func (bi *BlockIndex) GetTip() *BlockNode { + return bi.tip +} + +func (bi *BlockIndex) GetHeaderTip() *BlockNode { + return bi.headerTip +} + type Blockchain struct { db *badger.DB postgres *Postgres @@ -553,20 +735,8 @@ type Blockchain struct { ChainLock deadlock.RWMutex // These should only be accessed after acquiring the ChainLock. - // - // An in-memory index of the "tree" of blocks we are currently aware of. - // This index includes forks and side-chains. - blockIndexByHash *collections.ConcurrentMap[BlockHash, *BlockNode] - // blockIndexByHeight is an in-memory map of block height to block nodes. This is - // used to quickly find the safe blocks from which the chain can be extended for PoS - blockIndexByHeight map[uint64]map[BlockHash]*BlockNode - // An in-memory slice of the blocks on the main chain only. The end of - // this slice is the best known tip that we have at any given time. - bestChain []*BlockNode - bestChainMap map[BlockHash]*BlockNode - - bestHeaderChain []*BlockNode - bestHeaderChainMap map[BlockHash]*BlockNode + blockIndex *BlockIndex + lowestBlockNotStored uint64 // We keep track of orphan blocks with the following data structures. Orphans // are not written to disk and are only cached in memory. 
Moreover we only keep @@ -577,7 +747,7 @@ type Blockchain struct { blockView *UtxoView // cache block view for each block - blockViewCache lru.Map[BlockHash, *BlockViewAndUtxoOps] + blockViewCache *lru.Cache[BlockHash, *BlockViewAndUtxoOps] // snapshot cache snapshotCache *SnapshotCache @@ -705,80 +875,67 @@ func getCheckpointBlockInfoFromProviderHelper(provider string) *CheckpointBlockI } func (bc *Blockchain) addNewBlockNodeToBlockIndex(blockNode *BlockNode) { - bc.blockIndexByHash.Set(*blockNode.Hash, blockNode) - if _, exists := bc.blockIndexByHeight[uint64(blockNode.Height)]; !exists { - bc.blockIndexByHeight[uint64(blockNode.Height)] = make(map[BlockHash]*BlockNode) - } - bc.blockIndexByHeight[uint64(blockNode.Height)][*blockNode.Hash] = blockNode + bc.blockIndex.addNewBlockNodeToBlockIndex(blockNode) } func (bc *Blockchain) CopyBlockIndexes() ( - _blockIndexByHash *collections.ConcurrentMap[BlockHash, *BlockNode], - _blockIndexByHeight map[uint64]map[BlockHash]*BlockNode, + _blockIndexByHash *lru.Cache[BlockHash, *BlockNode], ) { - newBlockIndexByHash := collections.NewConcurrentMap[BlockHash, *BlockNode]() - newBlockIndexByHeight := make(map[uint64]map[BlockHash]*BlockNode) - bc.blockIndexByHash.Iterate(func(kk BlockHash, vv *BlockNode) { - newBlockIndexByHash.Set(kk, vv) - blockHeight := uint64(vv.Height) - if _, exists := newBlockIndexByHeight[blockHeight]; !exists { - newBlockIndexByHeight[blockHeight] = make(map[BlockHash]*BlockNode) - } - newBlockIndexByHeight[blockHeight][kk] = vv - }) - return newBlockIndexByHash, newBlockIndexByHeight + newBlockIndexByHash, _ := lru.New[BlockHash, *BlockNode](MaxBlockIndexNodes) + for _, key := range bc.blockIndex.blockIndexByHash.Keys() { + val, _ := bc.blockIndex.blockIndexByHash.Get(key) + newBlockIndexByHash.Add(key, val) + } + return newBlockIndexByHash } -func (bc *Blockchain) constructBlockIndexByHeight() map[uint64]map[BlockHash]*BlockNode { - newBlockIndex := make(map[uint64]map[BlockHash]*BlockNode) - 
bc.blockIndexByHash.Iterate(func(_ BlockHash, blockNode *BlockNode) { - blockHeight := uint64(blockNode.Height) - if _, exists := newBlockIndex[blockHeight]; !exists { - newBlockIndex[blockHeight] = make(map[BlockHash]*BlockNode) - } - newBlockIndex[blockHeight][*blockNode.Hash] = blockNode - }) - return newBlockIndex +func (bc *Blockchain) GetBlockIndex() *BlockIndex { + return bc.blockIndex } +// TODO: read through to DB. func (bc *Blockchain) getAllBlockNodesIndexedAtHeight(blockHeight uint64) []*BlockNode { - return collections.MapValues(bc.blockIndexByHeight[blockHeight]) + return bc.blockIndex.GetBlockNodesByHeight(blockHeight) } func (bc *Blockchain) hasBlockNodesIndexedAtHeight(blockHeight uint64) bool { - blocksAtHeight, hasNestedMapAtHeight := bc.blockIndexByHeight[blockHeight] - if !hasNestedMapAtHeight { - return false - } - return len(blocksAtHeight) > 0 -} - -func (bc *Blockchain) CopyBestChain() ([]*BlockNode, map[BlockHash]*BlockNode) { - newBestChain := []*BlockNode{} - newBestChainMap := make(map[BlockHash]*BlockNode) - newBestChain = append(newBestChain, bc.bestChain...) - for kk, vv := range bc.bestChainMap { - newBestChainMap[kk] = vv - } - - return newBestChain, newBestChainMap -} - -func (bc *Blockchain) CopyBestHeaderChain() ([]*BlockNode, map[BlockHash]*BlockNode) { - newBestChain := []*BlockNode{} - newBestChainMap := make(map[BlockHash]*BlockNode) - newBestChain = append(newBestChain, bc.bestHeaderChain...) - for kk, vv := range bc.bestHeaderChainMap { - newBestChainMap[kk] = vv - } - - return newBestChain, newBestChainMap + blockNodes := bc.blockIndex.GetBlockNodesByHeight(blockHeight) + return len(blockNodes) > 0 } // IsFullyStored determines if there are block nodes that haven't been fully stored or processed in the best block chain. 
func (bc *Blockchain) IsFullyStored() bool { - if bc.ChainState() == SyncStateFullyCurrent { - for _, blockNode := range bc.bestChain { + // TODO: figure out how to iterate over best chain w/o having entire thing in memory. + chainState := bc.ChainState() + if chainState == SyncStateFullyCurrent || (chainState == SyncStateNeedBlocksss && + bc.headerTip().Height-bc.blockTip().Height < 10) { + // Get a sampling of blocks from the best chain and check if they are fully stored. + // We only need to check a few blocks to determine if the chain is fully stored. + blockTipHeight := uint64(bc.BlockTip().Height) + increment := blockTipHeight / 20 + if increment == 0 { + increment = 1 + } + blockHeights := []uint64{} + for ii := uint64(0); ii < blockTipHeight; ii += increment { + blockHeights = append(blockHeights, ii) + } + if blockTipHeight > 100 { + for ii := blockTipHeight - 20; ii < blockTipHeight; ii++ { + blockHeights = append(blockHeights, ii) + } + } + blockHeights = append(blockHeights, blockTipHeight) + blockHeightSet := NewSet(blockHeights) + for _, blockHeight := range blockHeightSet.ToSlice() { + blockNode, exists, err := bc.GetBlockFromBestChainByHeight(blockHeight, false) + if err != nil { + glog.Errorf("IsFullyStored: Problem getting block at height %d: %v", blockHeight, err) + return false + } + if !exists { + return false + } if !blockNode.Status.IsFullyProcessed() { return false } @@ -840,56 +997,56 @@ func (bc *Blockchain) _initChain() error { // to previous blocks we've read in and error if they don't. This works because // reading blocks in height order as we do here ensures that we'll always // add a block's parents, if they exist, before adding the block itself. 
+ //var err error + //if bc.postgres != nil { + // bc.blockIndexByHash, err = bc.postgres.GetBlockIndex() + //} else { + // bc.blockIndexByHash, err = GetBlockIndex(bc.db, false /*bitcoinNodes*/, bc.params) + //} + //if err != nil { + // return errors.Wrapf(err, "_initChain: Problem reading block index from db") + //} + //bc.blockIndexByHeight = bc.constructBlockIndexByHeight() + + // For postgres, we still load the entire block index into memory. This is because var err error + var tipNode *BlockNode if bc.postgres != nil { - bc.blockIndexByHash, err = bc.postgres.GetBlockIndex() - } else { - bc.blockIndexByHash, err = GetBlockIndex(bc.db, false /*bitcoinNodes*/, bc.params) - } - if err != nil { - return errors.Wrapf(err, "_initChain: Problem reading block index from db") - } - bc.blockIndexByHeight = bc.constructBlockIndexByHeight() - - // At this point the blockIndexByHash should contain a full node tree with all - // nodes pointing to valid parent nodes. - { - // Find the tip node with the best node hash. - tipNode, exists := bc.blockIndexByHash.Get(*bestBlockHash) + bc.blockIndex.blockIndexByHash, err = bc.postgres.GetBlockIndex() + var exists bool + tipNode, exists = bc.blockIndex.blockIndexByHash.Get(*bestBlockHash) if !exists { - return fmt.Errorf("_initChain(block): Best hash (%#v) not found in block index", bestBlockHash) + return fmt.Errorf("_initChain: Best hash (%#v) not found in block index", bestBlockHash) } - - // Walk back from the best node to the genesis block and store them all - // in bestChain. - bc.bestChain, err = GetBestChain(tipNode) - if err != nil { - return errors.Wrapf(err, "_initChain(block): Problem reading best chain from db") - } - for _, bestChainNode := range bc.bestChain { - bc.bestChainMap[*bestChainNode.Hash] = bestChainNode + } else { + var tipNodeExists bool + // For badger, we only need the tip block to get started. + // Weird hack required for the genesis block. 
+ if bestBlockHash.IsEqual(GenesisBlockHash) { + tipNode, tipNodeExists = bc.blockIndex.GetBlockNodeByHashAndHeight(bestBlockHash, 0) + } else { + tipNode, tipNodeExists, err = bc.blockIndex.GetBlockNodeByHashOnly(bestBlockHash) + if err != nil { + return errors.Wrapf(err, "_initChain: Problem reading best block from db") + } + if !tipNodeExists { + return fmt.Errorf("_initChain: Best hash (%#v) not found in block index", bestBlockHash) + } + // Walk back the last 24 hours of blocks. + currBlockCounter := 1 + for currBlockCounter < 3600*24 && tipNode.Header.PrevBlockHash != nil { + bc.blockIndex.GetBlockNodeByHashAndHeight(tipNode.Header.PrevBlockHash, tipNode.Header.Height-1) + currBlockCounter++ + } } - } - - // TODO: This code is a bit repetitive but this seemed clearer than factoring it out. - { - // Find the tip node with the best node hash. - tipNode, exists := bc.blockIndexByHash.Get(*bestHeaderHash) - if !exists { - return fmt.Errorf("_initChain(header): Best hash (%#v) not found in block index", bestHeaderHash) + if err = bc.blockIndex.LoadBlockIndexFromHeight(tipNode.Height, bc.params); err != nil { + return errors.Wrapf(err, "_initChain: Problem loading block index from db") } - // Walk back from the best node to the genesis block and store them all - // in bestChain. - bc.bestHeaderChain, err = GetBestChain(tipNode) - if err != nil { - return errors.Wrapf(err, "_initChain(header): Problem reading best chain from db") - } - for _, bestHeaderChainNode := range bc.bestHeaderChain { - bc.bestHeaderChainMap[*bestHeaderChainNode.Hash] = bestHeaderChainNode - } + // We start by simply setting the chain tip and header tip to the tip node. + bc.blockIndex.setTip(tipNode) + bc.blockIndex.setHeaderTip(tipNode) } - bc.isInitialized = true return nil @@ -931,20 +1088,12 @@ func (bc *Blockchain) _applyUncommittedBlocksToBestChain() error { } // Add the uncommitted blocks to the in-memory data structures. 
- if _, _, _, err := bc.tryApplyNewTip(uncommittedTipBlockNode, 0, lineageFromCommittedTip); err != nil { + if _, _, _, err = bc.tryApplyNewTip(uncommittedTipBlockNode, 0, lineageFromCommittedTip); err != nil { return errors.Wrapf(err, "_applyUncommittedBlocksToBestChain: ") } - ////////////////////////// Update the bestHeaderChain in-memory data structures ////////////////////////// - currentHeaderTip := bc.headerTip() - _, blocksToDetach, blocksToAttach := GetReorgBlocks(currentHeaderTip, uncommittedTipBlockNode) - bc.bestHeaderChain, bc.bestHeaderChainMap = updateBestChainInMemory( - bc.bestHeaderChain, - bc.bestHeaderChainMap, - blocksToDetach, - blocksToAttach, - ) - + bc.blockIndex.setTip(uncommittedTipBlockNode) + bc.blockIndex.setHeaderTip(uncommittedTipBlockNode) return nil } @@ -966,6 +1115,10 @@ func NewBlockchain( archivalMode bool, checkpointSyncingProviders []string, ) (*Blockchain, error) { + if err := RunBlockIndexMigrationOnce(db, params); err != nil { + return nil, errors.Wrapf(err, "NewBlockchain: Problem running block index migration") + } + trustedBlockProducerPublicKeys := make(map[PkMapKey]bool) for _, keyStr := range trustedBlockProducerPublicKeyStrs { pkBytes, _, err := Base58CheckDecode(keyStr) @@ -978,7 +1131,7 @@ func NewBlockchain( timer := &Timer{} timer.Initialize() - + blockViewCache, _ := lru.New[BlockHash, *BlockViewAndUtxoOps](100) // TODO: parameterize bc := &Blockchain{ db: db, postgres: postgres, @@ -991,13 +1144,8 @@ func NewBlockchain( eventManager: eventManager, archivalMode: archivalMode, - blockIndexByHash: collections.NewConcurrentMap[BlockHash, *BlockNode](), - blockIndexByHeight: make(map[uint64]map[BlockHash]*BlockNode), - bestChainMap: make(map[BlockHash]*BlockNode), - - bestHeaderChainMap: make(map[BlockHash]*BlockNode), - - blockViewCache: *lru.NewMap[BlockHash, *BlockViewAndUtxoOps](100), // TODO: parameterize + blockIndex: NewBlockIndex(db, snapshot, nil), // TODO: replace with actual tip. 
+ blockViewCache: blockViewCache, snapshotCache: NewSnapshotCache(), checkpointSyncingProviders: checkpointSyncingProviders, @@ -1028,247 +1176,8 @@ func NewBlockchain( return bc, nil } -// log2FloorMasks defines the masks to use when quickly calculating -// floor(log2(x)) in a constant log2(32) = 5 steps, where x is a uint32, using -// shifts. They are derived from (2^(2^x) - 1) * (2^(2^x)), for x in 4..0. -var log2FloorMasks = []uint32{0xffff0000, 0xff00, 0xf0, 0xc, 0x2} - -// fastLog2Floor calculates and returns floor(log2(x)) in a constant 5 steps. -func fastLog2Floor(n uint32) uint8 { - rv := uint8(0) - exponent := uint8(16) - for i := 0; i < 5; i++ { - if n&log2FloorMasks[i] != 0 { - rv += exponent - n >>= exponent - } - exponent >>= 1 - } - return rv -} - -// locateInventory returns the node of the block after the first known block in -// the locator along with the number of subsequent nodes needed to either reach -// the provided stop hash or the provided max number of entries. -// -// In addition, there are two special cases: -// -// - When no locators are provided, the stop hash is treated as a request for -// that block, so it will either return the node associated with the stop hash -// if it is known, or nil if it is unknown -// - When locators are provided, but none of them are known, nodes starting -// after the genesis block will be returned -// -// This is primarily a helper function for the locateBlocks and locateHeaders -// functions. -// -// This function MUST be called with the chain state lock held (for reads). -func locateInventory(locator []*BlockHash, stopHash *BlockHash, maxEntries uint32, - blockIndex *collections.ConcurrentMap[BlockHash, *BlockNode], bestChainList []*BlockNode, - bestChainMap map[BlockHash]*BlockNode) (*BlockNode, uint32) { - - // There are no block locators so a specific block is being requested - // as identified by the stop hash. 
- stopNode, stopNodeExists := blockIndex.Get(*stopHash) - if len(locator) == 0 { - if !stopNodeExists { - // No blocks with the stop hash were found so there is - // nothing to do. - return nil, 0 - } - return stopNode, 1 - } - - // Find the most recent locator block hash in the main chain. In the - // case none of the hashes in the locator are in the main chain, fall - // back to the genesis block. - startNode := bestChainList[0] - for _, hash := range locator { - node, bestChainContainsNode := bestChainMap[*hash] - if bestChainContainsNode { - startNode = node - break - } - } - - // Start at the block after the most recently known block. When there - // is no next block it means the most recently known block is the tip of - // the best chain, so there is nothing more to do. - nextNodeHeight := uint32(startNode.Header.Height) + 1 - if uint32(len(bestChainList)) <= nextNodeHeight { - return nil, 0 - } - startNode = bestChainList[nextNodeHeight] - - // Calculate how many entries are needed. - tip := bestChainList[len(bestChainList)-1] - total := uint32((tip.Header.Height - startNode.Header.Height) + 1) - if stopNodeExists && stopNode.Header.Height >= startNode.Header.Height { - - _, bestChainContainsStopNode := bestChainMap[*stopNode.Hash] - if bestChainContainsStopNode { - total = uint32((stopNode.Header.Height - startNode.Header.Height) + 1) - } - } - if total > maxEntries { - total = maxEntries - } - - return startNode, total -} - -// locateHeaders returns the headers of the blocks after the first known block -// in the locator until the provided stop hash is reached, or up to the provided -// max number of block headers. -// -// See the comment on the exported function for more details on special cases. -// -// This function MUST be called with the ChainLock held (for reads). 
-func locateHeaders(locator []*BlockHash, stopHash *BlockHash, maxHeaders uint32, - blockIndex *collections.ConcurrentMap[BlockHash, *BlockNode], bestChainList []*BlockNode, - bestChainMap map[BlockHash]*BlockNode) []*MsgDeSoHeader { - - // Find the node after the first known block in the locator and the - // total number of nodes after it needed while respecting the stop hash - // and max entries. - node, total := locateInventory(locator, stopHash, maxHeaders, - blockIndex, bestChainList, bestChainMap) - if total == 0 { - return nil - } - - // Populate and return the found headers. - headers, err := SafeMakeSliceWithLengthAndCapacity[*MsgDeSoHeader](0, uint64(total)) - if err != nil { - // TODO: do we really want to introduce an error here? - } - for ii := uint32(0); ii < total; ii++ { - headers = append(headers, node.Header) - if uint32(len(headers)) == total { - break - } - node = bestChainList[node.Header.Height+1] - } - return headers -} - -// LocateBestBlockChainHeaders returns the headers of the blocks after the first known block -// in the locator until the provided stop hash is reached, or up to a max of -// wire.MaxBlockHeadersPerMsg headers. Note that it returns the best headers -// considering only headers for which we have blocks (that is, it considers the -// best *block* chain we have rather than the best *header* chain). This is -// the correct thing to do because in general this function is called in order -// to serve a response to a peer's GetHeaders request. -// -// In addition, there are two special cases: -// -// - When no locators are provided, the stop hash is treated as a request for -// that header, so it will either return the header for the stop hash itself -// if it is known, or nil if it is unknown -// - When locators are provided, but none of them are known, headers starting -// after the genesis block will be returned -// -// This function is safe for concurrent access. 
-func (bc *Blockchain) LocateBestBlockChainHeaders( - locator []*BlockHash, stopHash *BlockHash, maxHeaders uint32) []*MsgDeSoHeader { - - bc.ChainLock.RLock() - defer bc.ChainLock.RUnlock() - headers := locateHeaders(locator, stopHash, maxHeaders, - bc.blockIndexByHash, bc.bestChain, bc.bestChainMap) - - return headers -} - -// LatestLocator returns a block locator for the passed block node. The passed -// node can be nil in which case the block locator for the current tip -// associated with the view will be returned. -// -// BlockLocator is used to help locate a specific block. The algorithm for -// building the block locator is to add the hashes in reverse order until -// the genesis block is reached. In order to keep the list of locator hashes -// to a reasonable number of entries, first the most recent previous 12 block -// hashes are added, then the step is doubled each loop iteration to -// exponentially decrease the number of hashes as a function of the distance -// from the block being located. -// -// For example, assume a block chain with a side chain as depicted below: -// -// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18 -// \-> 16a -> 17a -// -// The block locator for block 17a would be the hashes of blocks: -// [17a 16a 15 14 13 12 11 10 9 8 7 6 4 genesis] -// -// Caller is responsible for acquiring the ChainLock before calling this function. -func (bc *Blockchain) LatestLocator(tip *BlockNode) []*BlockHash { - - // Calculate the max number of entries that will ultimately be in the - // block locator. See the description of the algorithm for how these - // numbers are derived. - var maxEntries uint8 - if tip.Header.Height <= 12 { - maxEntries = uint8(tip.Header.Height) + 1 - } else { - // Requested hash itself + previous 10 entries + genesis block. - // Then floor(log2(height-10)) entries for the skip portion. 
- adjustedHeight := uint32(tip.Header.Height) - 10 - maxEntries = 12 + fastLog2Floor(adjustedHeight) - } - locator := make([]*BlockHash, 0, maxEntries) - - step := int32(1) - for tip != nil { - locator = append(locator, tip.Hash) - - // Nothing more to add once the genesis block has been added. - if tip.Header.Height == 0 { - break - } - - // Calculate height of previous node to include ensuring the - // final node is the genesis block. - height := int32(tip.Header.Height) - step - if height < 0 { - height = 0 - } - - // When the node is in the current chain view, all of its - // ancestors must be too, so use a much faster O(1) lookup in - // that case. Otherwise, fall back to walking backwards through - // the nodes of the other chain to the correct ancestor. - if _, exists := bc.bestHeaderChainMap[*tip.Hash]; exists { - tip = bc.bestHeaderChain[height] - } else { - tip = tip.Ancestor(uint32(height)) - } - - // Once 11 entries have been included, start doubling the - // distance between included hashes. - if len(locator) > 10 { - step *= 2 - } - } - - return locator -} - -func (bc *Blockchain) HeaderLocatorWithNodeHash(blockHash *BlockHash) ([]*BlockHash, error) { - // We can acquire the ChainLock because the only place this is called currently is from - // _handleHeaderBundle, which doesn't have the lock. - // If we do not acquire the lock, we may hit a concurrent map read write error which causes panic. - bc.ChainLock.RLock() - defer bc.ChainLock.RUnlock() - node, exists := bc.blockIndexByHash.Get(*blockHash) - if !exists { - return nil, fmt.Errorf("Blockchain.HeaderLocatorWithNodeHash: Node for hash %v is not in our blockIndexByHash", blockHash) - } - - return bc.LatestLocator(node), nil -} - -// LatestHeaderLocator calls LatestLocator in order to fetch a locator -// for the best header chain. +// LatestHeaderLocator returns a block locator for the current tip of the +// header chain.
func (bc *Blockchain) LatestHeaderLocator() []*BlockHash { // We can acquire the ChainLock here because all calls to this function happen in peer.go // and server.go, which don't hold the lock. @@ -1277,7 +1186,7 @@ func (bc *Blockchain) LatestHeaderLocator() []*BlockHash { defer bc.ChainLock.RUnlock() headerTip := bc.headerTip() - return bc.LatestLocator(headerTip) + return []*BlockHash{headerTip.Hash} } func (bc *Blockchain) GetBlockNodesToFetch( @@ -1294,7 +1203,13 @@ func (bc *Blockchain) GetBlockNodesToFetch( // If the tip of the best block chain is in the main header chain, make that // the start point for our fetch. - headerNodeStart, blockTipExistsInBestHeaderChain := bc.bestHeaderChainMap[*bestBlockTip.Hash] + bestBlockTipHeight := uint64(bestBlockTip.Height) + headerNodeStart, blockTipExistsInBestHeaderChain, err := bc.GetBlockFromBestChainByHashAndOptionalHeight( + bestBlockTip.Hash, &bestBlockTipHeight, true) + if err != nil { + glog.Errorf("GetBlockToFetch: Problem getting block by height: %v", err) + return nil + } if !blockTipExistsInBestHeaderChain { // If the hash of the tip of the best blockchain is not in the best header chain, then // this is a case where the header chain has forked off from the best block @@ -1305,7 +1220,7 @@ func (bc *Blockchain) GetBlockNodesToFetch( // StatusBlockProcessed so this loop is guaranteed to terminate successfully. headerNodeStart = bc.headerTip() for headerNodeStart != nil && (headerNodeStart.Status&StatusBlockProcessed) == 0 { - headerNodeStart = headerNodeStart.Parent + headerNodeStart = headerNodeStart.GetParent(bc.blockIndex) } if headerNodeStart == nil { @@ -1313,7 +1228,17 @@ func (bc *Blockchain) GetBlockNodesToFetch( // an error and set it to the genesis block. 
glog.Errorf("GetBlockToFetch: headerNode was nil after iterating " + "backward through best header chain; using genesis block") - headerNodeStart = bc.bestHeaderChain[0] + var err error + var genesisBlockExists bool + headerNodeStart, genesisBlockExists, err = bc.GetBlockFromBestChainByHeight(0, true) + if err != nil { + glog.Errorf("GetBlockToFetch: Problem getting genesis block: %v", err) + return nil + } + if !genesisBlockExists { + glog.Errorf("GetBlockToFetch: Genesis block not found") + return nil + } } } @@ -1323,14 +1248,22 @@ func (bc *Blockchain) GetBlockNodesToFetch( currentHeight := headerNodeStart.Height + 1 blockNodesToFetch := []*BlockNode{} heightLimit := maxHeight - if heightLimit >= uint32(len(bc.bestHeaderChain)) { - heightLimit = uint32(len(bc.bestHeaderChain) - 1) + if heightLimit >= bc.blockIndex.GetHeaderTip().Height { + heightLimit = bc.blockIndex.GetHeaderTip().Height - 1 } for currentHeight <= heightLimit && len(blockNodesToFetch) < numBlocks { // Get the current hash and increment the height. 
- currentNode := bc.bestHeaderChain[currentHeight] + currentNode, currentNodeExists, err := bc.GetBlockFromBestChainByHeight(uint64(currentHeight), true) + if err != nil { + glog.Errorf("GetBlockToFetch: Problem getting block by height: %v", err) + return nil + } + if !currentNodeExists { + glog.Errorf("GetBlockToFetch: Block at height %d not found", currentHeight) + return nil + } currentHeight++ if _, exists := blocksToIgnore[*currentNode.Hash]; exists { @@ -1344,55 +1277,33 @@ func (bc *Blockchain) GetBlockNodesToFetch( return blockNodesToFetch } -func (bc *Blockchain) HasHeader(headerHash *BlockHash) bool { - _, exists := bc.blockIndexByHash.Get(*headerHash) - return exists +func (bc *Blockchain) HasHeader(headerHash *BlockHash) (bool, error) { + _, exists, err := bc.blockIndex.GetBlockNodeByHashOnly(headerHash) + return exists, errors.Wrap(err, "Blockchain.HasHeader: ") } -func (bc *Blockchain) HeaderAtHeight(blockHeight uint32) *BlockNode { - if blockHeight >= uint32(len(bc.bestHeaderChain)) { - return nil - } - - return bc.bestHeaderChain[blockHeight] -} - -func (bc *Blockchain) HasBlock(blockHash *BlockHash) bool { - node, nodeExists := bc.blockIndexByHash.Get(*blockHash) - if !nodeExists { - glog.V(2).Infof("Blockchain.HasBlock: Node with hash %v does not exist in node index", blockHash) +func (bc *Blockchain) HasHeaderByHashAndHeight(headerHash *BlockHash, height uint64) bool { + if height > uint64(bc.headerTip().Height) { return false } - - if (node.Status & StatusBlockProcessed) == 0 { - glog.V(2).Infof("Blockchain.HasBlock: Node %v does not have StatusBlockProcessed so we don't have the block", node) - return false - } - - // Node exists with StatusBlockProcess set means we have it. 
- return true + _, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(headerHash, height) + return exists } -func (bc *Blockchain) HasBlockInBlockIndex(blockHash *BlockHash) bool { - bc.ChainLock.RLock() - defer bc.ChainLock.RUnlock() - - _, exists := bc.blockIndexByHash.Get(*blockHash) - return exists +// TODO: delete me? +func (bc *Blockchain) HeaderAtHeight(blockHeight uint32) (*BlockNode, bool, error) { + if blockHeight >= bc.blockIndex.GetHeaderTip().Height { + return nil, false, nil + } + return bc.GetBlockFromBestChainByHeight(uint64(blockHeight), true) } -// This needs to hold a lock on the blockchain because it read from an in-memory map that is -// not thread-safe. -func (bc *Blockchain) GetBlockHeaderFromIndex(blockHash *BlockHash) *MsgDeSoHeader { +func (bc *Blockchain) HasBlockInBlockIndex(blockHash *BlockHash) (bool, error) { bc.ChainLock.RLock() defer bc.ChainLock.RUnlock() - block, blockExists := bc.blockIndexByHash.Get(*blockHash) - if !blockExists { - return nil - } - - return block.Header + _, exists, err := bc.blockIndex.GetBlockNodeByHashOnly(blockHash) + return exists, errors.Wrap(err, "Blockchain.HasBlockInBlockIndex: ") } // Don't need a lock because blocks don't get removed from the db after they're added @@ -1406,16 +1317,6 @@ func (bc *Blockchain) GetBlock(blockHash *BlockHash) *MsgDeSoBlock { return blk } -func (bc *Blockchain) GetBlockAtHeight(height uint32) *MsgDeSoBlock { - numBlocks := uint32(len(bc.bestChain)) - - if height >= numBlocks { - return nil - } - - return bc.GetBlock(bc.bestChain[height].Hash) -} - // GetBlockNodeWithHash looks for a block node in the bestChain list that matches the hash. 
func (bc *Blockchain) GetBlockNodeWithHash(hash *BlockHash) *BlockNode { if hash == nil { @@ -1423,7 +1324,11 @@ func (bc *Blockchain) GetBlockNodeWithHash(hash *BlockHash) *BlockNode { } bc.ChainLock.RLock() defer bc.ChainLock.RUnlock() - return bc.bestChainMap[*hash] + bn, bnExists, err := bc.blockIndex.GetBlockNodeByHashOnly(hash) + if !bnExists || err != nil { + return nil + } + return bn } // isTipMaxed compares the tip height to the MaxSyncBlockHeight height. @@ -1446,14 +1351,16 @@ func (bc *Blockchain) isTipCurrent(tip *BlockNode) bool { return tip.Height >= bc.MaxSyncBlockHeight } - minChainWorkBytes, _ := hex.DecodeString(bc.params.MinChainWorkHex) - // Not current if the cumulative work is below the threshold. - if bc.params.IsPoWBlockHeight(uint64(tip.Height)) && tip.CumWork.Cmp(BytesToBigint(minChainWorkBytes)) < 0 { - //glog.V(2).Infof("Blockchain.isTipCurrent: Tip not current because "+ - //"CumWork (%v) is less than minChainWorkBytes (%v)", - //tip.CumWork, BytesToBigint(minChainWorkBytes)) - return false + if bc.params.IsPoWBlockHeight(uint64(tip.Height)) { + minChainWorkBytes, _ := hex.DecodeString(bc.params.MinChainWorkHex) + + if tip.CumWork.Cmp(BytesToBigint(minChainWorkBytes)) < 0 { + //glog.V(2).Infof("Blockchain.isTipCurrent: Tip not current because "+ + //"CumWork (%v) is less than minChainWorkBytes (%v)", + //tip.CumWork, BytesToBigint(minChainWorkBytes)) + return false + } } // Not current if the tip has a timestamp older than the maximum @@ -1586,7 +1493,27 @@ func (bc *Blockchain) checkArchivalMode() bool { } firstSnapshotHeight := bc.snapshot.CurrentEpochSnapshotMetadata.FirstSnapshotBlockHeight - for _, blockNode := range bc.bestChain { + // @diamondhands - can we spot check just a few blocks such as firstSnapshotHeight - 1, + // firstSnapshotHeight / 2 - 1, and firstSnapshotHeight / 4 - 1 to see if they are stored? + // We take a sampling of blocks to determine if we've downloaded all the blocks up to the first snapshot height. 
+ blockHeights := []uint64{} + increment := firstSnapshotHeight / 10 + for ii := uint64(0); ii < firstSnapshotHeight; ii += increment { + blockHeights = append(blockHeights, ii) + } + for ii := firstSnapshotHeight - 10; ii < firstSnapshotHeight; ii++ { + blockHeights = append(blockHeights, ii) + } + blockHeights = append(blockHeights, firstSnapshotHeight) + for _, height := range blockHeights { + blockNode, exists, err := bc.GetBlockFromBestChainByHeight(height, false) + if err != nil { + glog.Errorf("checkArchivalMode: Problem getting block by height: %v", err) + return false + } + if !exists { + return false + } if uint64(blockNode.Height) > firstSnapshotHeight { return false } @@ -1636,13 +1563,7 @@ func (bc *Blockchain) isHyperSyncCondition() bool { // main chain for blocks, which is why separate functions are required for // each of them. func (bc *Blockchain) headerTip() *BlockNode { - if len(bc.bestHeaderChain) == 0 { - return nil - } - - // Note this should always work because we should have the genesis block - // in here. - return bc.bestHeaderChain[len(bc.bestHeaderChain)-1] + return bc.blockIndex.GetHeaderTip() } func (bc *Blockchain) HeaderTip() *BlockNode { @@ -1674,39 +1595,135 @@ func (bc *Blockchain) Snapshot() *Snapshot { // invalidate and chop off the headers corresponding to those blocks and // their ancestors so the two generally stay in sync. func (bc *Blockchain) blockTip() *BlockNode { - var tip *BlockNode - - if len(bc.bestChain) == 0 { - return nil - } - - tip = bc.bestChain[len(bc.bestChain)-1] - - return tip + return bc.blockIndex.GetTip() } func (bc *Blockchain) BlockTip() *BlockNode { return bc.blockTip() } +// TODO: this won't work for now. Need to figure out how to handle this. 
func (bc *Blockchain) BestChain() []*BlockNode { - return bc.bestChain + panic("BestChain not supported.") +} + +func (bc *Blockchain) GetBlockFromBestChainByHashAndOptionalHeight( + blockHash *BlockHash, + optionalHeight *uint64, + useHeaderChain bool, +) (*BlockNode, bool, error) { + var bn *BlockNode + var exists bool + var err error + if optionalHeight != nil { + bn, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, *optionalHeight) + } else { + bn, exists, err = bc.blockIndex.GetBlockNodeByHashOnly(blockHash) + } + if err != nil { + return nil, false, err + } + if !exists { + return nil, false, nil + } + if bn.IsCommitted() { + return bn, true, nil // TODO: what do we do about header chain? they're not committed so we're going to + // have to get a bunch of parents in order to be sure it is part of the best header chain. I guess we could + // have a map, but kinda defeats the purpose of this refactor. + } + // TODO: is this legit? It seems like it's fair game... + if bc.isSyncing() && useHeaderChain && bn.IsHeaderValidated() { + return bn, true, nil + } + blockTip := bc.BlockTip() + if useHeaderChain { + blockTip = bc.HeaderTip() + } + if blockTip == nil { + return nil, false, fmt.Errorf("GetBlockFromBestChainByHashAndOptionalHeight: Block tip not found: use header chain: %v", useHeaderChain) + } + committedTip, exists := bc.GetCommittedTip() + if !exists { + return nil, false, errors.New("GetBlockFromBestChainByHashAndOptionalHeight: Committed tip not found") + } + if uint64(bn.Height) > uint64(blockTip.Height) || uint64(bn.Height) < uint64(committedTip.Height) { + return nil, false, nil + } + currNode := &BlockNode{} + *currNode = *blockTip + for currNode != nil && currNode.Height >= bn.Height { + if currNode.Height == bn.Height { + if currNode.Hash.IsEqual(blockHash) { + return currNode, true, nil + } + return nil, false, nil + } + currNode = currNode.GetParent(bc.blockIndex) + } + return nil, false, nil } +func (bc *Blockchain) 
GetBlockFromBestChainByHeight(height uint64, useHeaderChain bool) (*BlockNode, bool, error) { + // TODO: figure out an optimization for header chain handling uncommitted state. + if !useHeaderChain { + committedTip, exists := bc.GetCommittedTip() + if !exists { + return nil, false, nil + } + if height >= uint64(committedTip.Height) { + // For this, we can just loop back from the tip block. + currentNode := bc.blockIndex.GetTip() + for currentNode != nil { + if uint64(currentNode.Height) == height { + return currentNode, true, nil + } + if currentNode.Height < committedTip.Height { + break + } + currentNode = currentNode.GetParent(bc.blockIndex) + } + return nil, false, nil + } + } + blockNodes := bc.blockIndex.GetBlockNodesByHeight(height) + if len(blockNodes) == 0 { + return nil, false, nil + } + for _, blockNode := range blockNodes { + // If block node is committed, then we know it is + // in the best chain, whether we're looking at the header chain or not. + if blockNode.IsCommitted() { + return blockNode, true, nil + } + // TODO: this is crude and incorrect. We can have multiple headers + // at a specific height. It's possible that none of the blocks at + // this height are committed yet, but one of them is in the best chain. + // How can we figure it out? + if useHeaderChain && blockNode.IsHeaderValidated() { + return blockNode, true, nil + } + } + return nil, false, nil +} + +// TODO: need to figure out how to handle this for exchange api tests. 
func (bc *Blockchain) SetBestChain(bestChain []*BlockNode) { - bc.bestChain = bestChain + for _, blockNode := range bestChain { + bc.blockIndex.addNewBlockNodeToBlockIndex(blockNode) + if bc.blockIndex.GetTip() == nil { + bc.blockIndex.setTip(blockNode) + } else if bc.blockIndex.GetTip().Height < blockNode.Height { + bc.blockIndex.setTip(blockNode) + } + } } -func (bc *Blockchain) SetBestChainMap( - bestChain []*BlockNode, - bestChainMap map[BlockHash]*BlockNode, - blockIndexByHash *collections.ConcurrentMap[BlockHash, *BlockNode], - blockIndexByHeight map[uint64]map[BlockHash]*BlockNode, +func (bc *Blockchain) setBestChainMap( + blockIndexByHash *lru.Cache[BlockHash, *BlockNode], + tipNode *BlockNode, ) { - bc.bestChain = bestChain - bc.bestChainMap = bestChainMap - bc.blockIndexByHash = blockIndexByHash - bc.blockIndexByHeight = blockIndexByHeight + bc.blockIndex.blockIndexByHash = blockIndexByHash + bc.blockIndex.setTip(tipNode) } func (bc *Blockchain) _validateOrphanBlockPoW(desoBlock *MsgDeSoBlock) error { @@ -1851,26 +1868,35 @@ func (bc *Blockchain) MarkBlockInvalid(node *BlockNode, errOccurred RuleError) { //} } -func _FindCommonAncestor(node1 *BlockNode, node2 *BlockNode) *BlockNode { +// Note: we make some assumptions that we only care about ancestors in the best chain. +func (bc *Blockchain) _FindCommonAncestor(node1 *BlockNode, node2 *BlockNode) *BlockNode { if node1 == nil || node2 == nil { // If either node is nil then there can't be a common ancestor. return nil } - // Get the two nodes to be at the same height. + // If both nodes are at a height greater than the committed tip, then we know that + // we have valid parent pointers and can use the Ancestor function to get us to the right place.
if node1.Height > node2.Height { - node1 = node1.Ancestor(node2.Height) - } else if node1.Height < node2.Height { - node2 = node2.Ancestor(node1.Height) + node1 = node1.Ancestor(node2.Height, bc.blockIndex) + } else if node2.Height > node1.Height { + node2 = node2.Ancestor(node1.Height, bc.blockIndex) } // Iterate the nodes backward until they're either the same or we // reach the end of the lists. We only need to check node1 for nil // since they're the same height and we are iterating both back // in tandem. - for node1 != nil && !node1.Hash.IsEqual(node2.Hash) { - node1 = node1.Parent - node2 = node2.Parent + var exists bool + for !node1.Hash.IsEqual(node2.Hash) { + node1, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(node1.Header.PrevBlockHash, uint64(node1.Height-1)) + if !exists { + return nil + } + node2, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(node2.Header.PrevBlockHash, uint64(node2.Height-1)) + if !exists { + return nil + } } // By now either node1 == node2 and we found the common ancestor or @@ -1950,16 +1976,29 @@ func CheckTransactionSanity(txn *MsgDeSoTxn, blockHeight uint32, params *DeSoPar return nil } -func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockNode, _detachNodes []*BlockNode, _attachNodes []*BlockNode) { +func (bc *Blockchain) GetReorgBlocks(tip *BlockNode, newNode *BlockNode) ( + _commonAncestor *BlockNode, _detachNodes []*BlockNode, _attachNodes []*BlockNode) { + // TODO: finding common ancestors is very expensive for txindex when txindex is very far + // behind. Currently, it requires loading the entire chain into memory. // Find the common ancestor of this block and the main header chain. 
- commonAncestor := _FindCommonAncestor(tip, newNode) + commonAncestor := bc._FindCommonAncestor(tip, newNode) + + if commonAncestor == nil { + glog.Fatalf("No common ancestor found between tip and new node: tip hash (%v), newNode hash (%v)", tip.Hash, newNode.Hash) + return + } // Log a warning if the reorg is going to be a big one. - numBlocks := tip.Height - commonAncestor.Height - if numBlocks > 10 { - glog.Warningf("GetReorgBlocks: Proceeding with reorg of (%d) blocks from "+ - "block (%v) at height (%d) to block (%v) at height of (%d)", - numBlocks, tip, tip.Height, newNode, newNode.Height) + if tip != nil { + numBlocks := tip.Height - commonAncestor.Height + if numBlocks > 10 { + glog.Warningf("GetReorgBlocks: Proceeding with reorg of (%d) blocks from "+ + "block (%v) at height (%d) to block (%v) at height of (%d)", + numBlocks, tip, tip.Height, newNode, newNode.Height) + } + } else { + glog.Fatal("GetReorgBlocks: Tip is nil") + return } // Get the blocks to detach. Start at the tip and work backwards to the @@ -1969,8 +2008,15 @@ func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockN // detachBlocks will have the current tip as its first element and parents // of the tip thereafter. detachBlocks := []*BlockNode{} - for currentBlock := tip; *currentBlock.Hash != *commonAncestor.Hash; currentBlock = currentBlock.Parent { + currentBlock := &BlockNode{} + *currentBlock = *tip + for currentBlock != nil && *currentBlock.Hash != *commonAncestor.Hash { detachBlocks = append(detachBlocks, currentBlock) + var exists bool + currentBlock, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(currentBlock.Header.PrevBlockHash, uint64(currentBlock.Height-1)) + if !exists { + glog.Fatalf("GetReorgBlocks: Failed to find parent of block. Parent hash %v", currentBlock.Header.PrevBlockHash) + } } // Get the blocks to attach. 
Start at the new node and work backwards to @@ -1981,8 +2027,16 @@ func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockN // attachNodes will have the new node as its first element and work back to // the node right after the common ancestor as its last element. attachBlocks := []*BlockNode{} - for currentBlock := newNode; *currentBlock.Hash != *commonAncestor.Hash; currentBlock = currentBlock.Parent { + currentBlock = &BlockNode{} + *currentBlock = *newNode + for *currentBlock.Hash != *commonAncestor.Hash { attachBlocks = append(attachBlocks, currentBlock) + var exists bool + currentBlock, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(currentBlock.Header.PrevBlockHash, uint64(currentBlock.Height-1)) + if !exists { + // TODO: what should we do here? + glog.Fatal("GetReorgBlocks: Failed to find parent of block") + } } // Reverse attachBlocks so that the node right after the common ancestor // will be the first element and the node at the end of the list will be @@ -1994,14 +2048,14 @@ func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockN return commonAncestor, detachBlocks, attachBlocks } -func updateBestChainInMemory(mainChainList []*BlockNode, mainChainMap map[BlockHash]*BlockNode, detachBlocks []*BlockNode, attachBlocks []*BlockNode) ( - chainList []*BlockNode, chainMap map[BlockHash]*BlockNode) { +func updateBestChainInMemory(mainChainList []*BlockNode, mainChainMap *lru.Cache[BlockHash, *BlockNode], detachBlocks []*BlockNode, attachBlocks []*BlockNode) ( + chainList []*BlockNode, chainMap *lru.Cache[BlockHash, *BlockNode]) { // Remove the nodes we detached from the end of the best chain node list. 
tipIndex := len(mainChainList) - 1 for blockOffset := 0; blockOffset < len(detachBlocks); blockOffset++ { blockIndex := tipIndex - blockOffset - delete(mainChainMap, *mainChainList[blockIndex].Hash) + mainChainMap.Remove(*mainChainList[blockIndex].Hash) } mainChainList = mainChainList[:len(mainChainList)-len(detachBlocks)] @@ -2010,17 +2064,20 @@ func updateBestChainInMemory(mainChainList []*BlockNode, mainChainMap map[BlockH // first, with the new tip at the end. for _, attachNode := range attachBlocks { mainChainList = append(mainChainList, attachNode) - mainChainMap[*attachNode.Hash] = attachNode + mainChainMap.Add(*attachNode.Hash, attachNode) } return mainChainList, mainChainMap } // Caller must acquire the ChainLock for writing prior to calling this. -func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *BlockHash) (_isMainChain bool, _isOrphan bool, _err error) { +func (bc *Blockchain) processHeaderPoW( + blockHeader *MsgDeSoHeader, + headerHash *BlockHash, +) (_blockNode *BlockNode, _isMainChain bool, _isOrphan bool, _err error) { // Only accept the header if its height is below the PoS cutover height. if !bc.params.IsPoWBlockHeight(blockHeader.Height) { - return false, false, HeaderErrorBlockHeightAfterProofOfStakeCutover + return nil, false, false, HeaderErrorBlockHeightAfterProofOfStakeCutover } // Only accept headers if the best chain is still in PoW. Once the best chain reaches the final @@ -2028,16 +2085,16 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B // headers past this point because they will un-commit blocks that are already committed to the PoS // chain. if bc.BlockTip().Header.Height >= bc.params.GetFinalPoWBlockHeight() { - return false, false, HeaderErrorBestChainIsAtProofOfStakeCutover + return nil, false, false, HeaderErrorBestChainIsAtProofOfStakeCutover } // Start by checking if the header already exists in our node // index. If it does, then return an error. 
We should generally // expect that processHeaderPoW will only be called on headers we // haven't seen before. - _, nodeExists := bc.blockIndexByHash.Get(*headerHash) + blockNode, nodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(headerHash, blockHeader.Height) if nodeExists { - return false, false, HeaderErrorDuplicateHeader + return blockNode, false, false, HeaderErrorDuplicateHeader } // If we're here then it means we're processing a header we haven't @@ -2050,7 +2107,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B "MaxTstampOffsetSeconds %d. blockHeader.TstampSecs=%d; adjustedTime=%d", tstampDiff, bc.params.MaxTstampOffsetSeconds, blockHeader.GetTstampSecs(), bc.timeSource.AdjustedTime().Unix()) - return false, false, HeaderErrorBlockTooFarInTheFuture + return nil, false, false, HeaderErrorBlockTooFarInTheFuture } // Try to find this header's parent in our block index. @@ -2058,13 +2115,13 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B // can return early because we don't process unconnectedTxns. // TODO: Should we just return an error if the header is an orphan? if blockHeader.PrevBlockHash == nil { - return false, false, HeaderErrorNilPrevHash + return nil, false, false, HeaderErrorNilPrevHash } - parentNode, parentNodeExists := bc.blockIndexByHash.Get(*blockHeader.PrevBlockHash) + parentNode, parentNodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHeader.PrevBlockHash, blockHeader.Height-1) if !parentNodeExists { // This block is an orphan if its parent doesn't exist and we don't // process unconnectedTxns. - return false, true, nil + return nil, false, true, nil } // If the parent node is invalid then this header is invalid as well. Note that @@ -2072,7 +2129,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B // ValidateFailed. 
parentHeader := parentNode.Header if parentHeader == nil || (parentNode.Status&(StatusHeaderValidateFailed|StatusBlockValidateFailed)) != 0 { - return false, false, errors.Wrapf( + return nil, false, false, errors.Wrapf( HeaderErrorInvalidParent, "Parent header: %v, Status check: %v, Parent node status: %v, Parent node header: %v", parentHeader, (parentNode.Status&(StatusHeaderValidateFailed|StatusBlockValidateFailed)) != 0, parentNode.Status, @@ -2084,7 +2141,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B if blockHeader.Height != prevHeight+1 { glog.Errorf("processHeaderPoW: Height of block (=%d) is not equal to one greater "+ "than the parent height (=%d)", blockHeader.Height, prevHeight) - return false, false, HeaderErrorHeightInvalid + return nil, false, false, HeaderErrorHeightInvalid } // Make sure the block timestamp is greater than the previous block's timestamp. @@ -2115,24 +2172,24 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B "before timestamp of previous block %v", time.Unix(int64(blockHeader.GetTstampSecs()), 0), time.Unix(int64(parentHeader.GetTstampSecs()), 0)) - return false, false, HeaderErrorTimestampTooEarly + return nil, false, false, HeaderErrorTimestampTooEarly } // Check that the proof of work beats the difficulty as calculated from // the parent block. Note that if the parent block is in the block index // then it has necessarily had its difficulty validated, and so using it to // do this check makes sense. 
- diffTarget, err := CalcNextDifficultyTarget( - parentNode, blockHeader.Version, bc.params) + diffTarget, err := bc.CalcNextDifficultyTarget( + parentNode, blockHeader.Version) if err != nil { - return false, false, errors.Wrapf(err, + return nil, false, false, errors.Wrapf(err, "ProcessBlock: Problem computing difficulty "+ "target from parent block %s", hex.EncodeToString(parentNode.Hash[:])) } diffTargetBigint := HashToBigint(diffTarget) blockHashBigint := HashToBigint(headerHash) if diffTargetBigint.Cmp(blockHashBigint) < 0 { - return false, false, + return nil, false, false, errors.Wrapf(HeaderErrorBlockDifficultyAboveTarget, "Target: %v, Actual: %v", diffTarget, headerHash) } @@ -2177,9 +2234,8 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B if bc.isSyncing() { bc.addNewBlockNodeToBlockIndex(newNode) } else { - newBlockIndexByHash, newBlockIndexByHeight := bc.CopyBlockIndexes() - bc.blockIndexByHash = newBlockIndexByHash - bc.blockIndexByHeight = newBlockIndexByHeight + newBlockIndexByHash := bc.CopyBlockIndexes() + bc.blockIndex.blockIndexByHash = newBlockIndexByHash bc.addNewBlockNodeToBlockIndex(newNode) } @@ -2190,38 +2246,40 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B headerTip := bc.headerTip() if headerTip.CumWork.Cmp(newNode.CumWork) < 0 { isMainChain = true - - _, detachBlocks, attachBlocks := GetReorgBlocks(headerTip, newNode) - bc.bestHeaderChain, bc.bestHeaderChainMap = updateBestChainInMemory( - bc.bestHeaderChain, bc.bestHeaderChainMap, detachBlocks, attachBlocks) - - // Note that we don't store the best header hash here and so this is an - // in-memory-only adjustment. See the comment above on preventing attacks. + bc.blockIndex.setHeaderTip(newNode) } - return isMainChain, false, nil + return newNode, isMainChain, false, nil } // ProcessHeader is a wrapper around processHeaderPoW and processHeaderPoS, which do the leg-work. 
-func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *BlockHash, verifySignatures bool) (_isMainChain bool, _isOrphan bool, _err error) { +func (bc *Blockchain) ProcessHeader( + blockHeader *MsgDeSoHeader, + headerHash *BlockHash, + verifySignatures bool, +) (_blockNode *BlockNode, _isMainChain bool, _isOrphan bool, _err error) { bc.ChainLock.Lock() defer bc.ChainLock.Unlock() if blockHeader == nil { // If the header is nil then we return an error. Nothing we can do here. - return false, false, fmt.Errorf("ProcessHeader: Header is nil") + return nil, false, false, fmt.Errorf("ProcessHeader: Header is nil") } // If the header's height is after the PoS cut-over fork height, then we use the PoS header processing logic. // Otherwise, fall back to the PoW logic. if bc.params.IsPoSBlockHeight(blockHeader.Height) { - return bc.processHeaderPoS(blockHeader, verifySignatures) + return bc.processHeaderPoS(blockHeader, headerHash, verifySignatures) } return bc.processHeaderPoW(blockHeader, headerHash) } -func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { +func (bc *Blockchain) ProcessBlock( + desoBlock *MsgDeSoBlock, + hash *BlockHash, // hash is not required and will be computed if not provided, but speeds things up if provided. 
+ verifySignatures bool, +) (_isMainChain bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { bc.ChainLock.Lock() defer bc.ChainLock.Unlock() @@ -2230,17 +2288,29 @@ func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures boo return false, false, nil, fmt.Errorf("ProcessBlock: Block is nil") } + if hash == nil { + var err error + hash, err = desoBlock.Hash() + if err != nil { + return false, false, nil, errors.Wrapf(err, "ProcessBlock: Problem computing block hash") + } + } + // If the block's height is after the PoS cut-over fork height, then we use the PoS block processing logic. // Otherwise, fall back to the PoW logic. if bc.params.IsPoSBlockHeight(desoBlock.Header.Height) { - return bc.processBlockPoS(desoBlock, 1, verifySignatures) + return bc.processBlockPoS(desoBlock, hash, 1, verifySignatures) } - isMainChain, isOrphan, err := bc.processBlockPoW(desoBlock, verifySignatures) + isMainChain, isOrphan, err := bc.processBlockPoW(desoBlock, hash, verifySignatures) return isMainChain, isOrphan, nil, err } -func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, err error) { +func (bc *Blockchain) processBlockPoW( + desoBlock *MsgDeSoBlock, + blockHash *BlockHash, // hash is not required and will be computed if not provided, but speeds things up if provided. + verifySignatures bool, +) (_isMainChain bool, _isOrphan bool, err error) { // Only accept the block if its height is below the PoS cutover height. 
if !bc.params.IsPoWBlockHeight(desoBlock.Header.Height) { return false, false, RuleErrorBlockHeightAfterProofOfStakeCutover @@ -2254,9 +2324,11 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures if blockHeader == nil { return false, false, fmt.Errorf("ProcessBlock: Block header was nil") } - blockHash, err := blockHeader.Hash() - if err != nil { - return false, false, errors.Wrapf(err, "ProcessBlock: Problem computing block hash") + if blockHash == nil { + blockHash, err = blockHeader.Hash() + if err != nil { + return false, false, errors.Wrapf(err, "ProcessBlock: Problem computing block hash") + } } // If a trusted block producer public key is set, then we only accept blocks // if they have been signed by one of these public keys. @@ -2322,12 +2394,16 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures bc.timer.Start("Blockchain.ProcessBlock: BlockNode") // See if a node for the block exists in our node index. - nodeToValidate, nodeExists := bc.blockIndexByHash.Get(*blockHash) + // TODO: validate that current height - 1 > 0 + nodeToValidate, nodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, blockHeader.Height) // If no node exists for this block at all, then process the header // first before we do anything. This should create a node and set // the header validation status for it. if !nodeExists { - _, isOrphan, err := bc.processHeaderPoW(blockHeader, blockHash) + // Note: it's okay that we don't write the block node for the header + // to the db here as it happens below when we call + // PutHeightHashToNodeInfo + _, _, isOrphan, err := bc.processHeaderPoW(blockHeader, blockHash) if err != nil { // If an error occurred processing the header, then the header // should be marked as invalid, which should be sufficient. 
@@ -2343,7 +2419,8 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // Reset the pointers after having presumably added the header to the // block index. - nodeToValidate, nodeExists = bc.blockIndexByHash.Get(*blockHash) + // TODO: validate that current height - 1 > 0 + nodeToValidate, nodeExists = bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, blockHeader.Height) } // At this point if the node still doesn't exist or if the header's validation // failed then we should return an error for the block. Note that at this point @@ -2362,7 +2439,8 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // In this case go ahead and return early. If its parents are truly legitimate then we // should re-request it and its parents from a node and reprocess it // once it is no longer an orphan. - parentNode, parentNodeExists := bc.blockIndexByHash.Get(*blockHeader.PrevBlockHash) + // TODO: validate that current height - 1 > 0 + parentNode, parentNodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHeader.PrevBlockHash, blockHeader.Height-1) if !parentNodeExists || (parentNode.Status&StatusBlockProcessed) == 0 { return false, true, nil } @@ -2485,7 +2563,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // This is needed for disconnects, otherwise GetBlock() will fail (e.g. when we reorg). if err == nil { err = bc.db.Update(func(txn *badger.Txn) error { - if err := PutBlockWithTxn(txn, nil, desoBlock, bc.eventManager); err != nil { + if err := PutBlockWithTxn(txn, nil, desoBlock, blockHash, bc.eventManager); err != nil { return errors.Wrapf(err, "ProcessBlock: Problem putting block with txns") } return nil @@ -2504,7 +2582,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // set in PutBlockWithTxn. Block rewards are part of the state, and they should be identical to the ones // we've fetched during Hypersync. 
Is there an edge-case where for some reason they're not identical? Or // somehow ancestral records get corrupted? - if innerErr := PutBlockWithTxn(txn, bc.snapshot, desoBlock, bc.eventManager); innerErr != nil { + if innerErr := PutBlockWithTxn(txn, bc.snapshot, desoBlock, blockHash, bc.eventManager); innerErr != nil { return errors.Wrapf(err, "ProcessBlock: Problem calling PutBlock") } @@ -2613,6 +2691,11 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // update our data structures to actually make this connection. Do this // in a transaction so that it is atomic. if bc.postgres != nil { + if !nodeToValidate.IsCommitted() { + nodeToValidate.Status |= StatusBlockCommitted + bc.blockIndex.addNewBlockNodeToBlockIndex(nodeToValidate) + } + if err = bc.postgres.UpsertBlockAndTransactions(nodeToValidate, desoBlock); err != nil { return false, false, errors.Wrapf(err, "ProcessBlock: Problem upserting block and transactions") } @@ -2634,6 +2717,10 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures err = bc.db.Update(func(txn *badger.Txn) error { // This will update the node's status. bc.timer.Start("Blockchain.ProcessBlock: Transactions Db height & hash") + if !nodeToValidate.IsCommitted() { + nodeToValidate.Status |= StatusBlockCommitted + bc.blockIndex.addNewBlockNodeToBlockIndex(nodeToValidate) + } if innerErr := PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, nodeToValidate, false /*bitcoinNodes*/, bc.eventManager); innerErr != nil { return errors.Wrapf( innerErr, "ProcessBlock: Problem calling PutHeightHashToNodeInfo after validation") @@ -2680,8 +2767,11 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // Now that we've set the best chain in the db, update our in-memory data // structure to reflect this. Do a quick check first to make sure it's consistent. 
- lastIndex := len(bc.bestChain) - 1 - bestChainHash := bc.bestChain[lastIndex].Hash + bestChainTip := bc.blockIndex.GetTip() + if bestChainTip == nil { + return false, false, fmt.Errorf("ProcessBlock: Best chain tip is nil") + } + bestChainHash := bestChainTip.Hash if !bestChainHash.IsEqual(nodeToValidate.Header.PrevBlockHash) { return false, false, fmt.Errorf("ProcessBlock: Last block in bestChain "+ @@ -2691,15 +2781,13 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // If we're syncing there's no risk of concurrency issues. Otherwise, we // need to make a copy in order to be save. - if bc.isSyncing() { - bc.bestChain = append(bc.bestChain, nodeToValidate) - bc.bestChainMap[*nodeToValidate.Hash] = nodeToValidate - } else { - newBestChain, newBestChainMap := bc.CopyBestChain() - newBestChain = append(newBestChain, nodeToValidate) - newBestChainMap[*nodeToValidate.Hash] = nodeToValidate - bc.bestChain, bc.bestChainMap = newBestChain, newBestChainMap - } + // We no longer need to worry about whether we're syncing or not. Just + // set the tip. + //if bc.isSyncing() { + // bc.blockIndex.setTip(nodeToValidate) + //} else { + bc.blockIndex.setTip(nodeToValidate) + //} // This node is on the main chain so set this variable. isMainChain = true @@ -2755,7 +2843,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // Find the common ancestor of this block and the main chain. // TODO: Reorgs with postgres? - commonAncestor, detachBlocks, attachBlocks := GetReorgBlocks(currentTip, nodeToValidate) + commonAncestor, detachBlocks, attachBlocks := bc.GetReorgBlocks(currentTip, nodeToValidate) // Log a warning if the reorg is going to be a big one. numBlocks := currentTip.Height - commonAncestor.Height if numBlocks > 10 { @@ -2854,7 +2942,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // If the parent node has been marked as invalid then mark this node as // invalid as well. 
- if (attachNode.Parent.Status & StatusBlockValidateFailed) != 0 { + if (attachNode.GetParent(bc.blockIndex).Status & StatusBlockValidateFailed) != 0 { bc.MarkBlockInvalid(attachNode, RuleErrorPreviousBlockInvalid) continue } @@ -2923,6 +3011,14 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures if err := PutBestHashWithTxn(txn, bc.snapshot, newTipNode.Hash, ChainTypeDeSoBlock, bc.eventManager); err != nil { return err } + if !newTipNode.IsCommitted() { + newTipNode.Status |= StatusBlockCommitted + // update the block index to be safe. + bc.addNewBlockNodeToBlockIndex(newTipNode) + if err := PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, newTipNode, false, bc.eventManager); err != nil { + return err + } + } for _, detachNode := range detachBlocks { // Delete the utxo operations for the blocks we're detaching since we don't need @@ -2931,6 +3027,16 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures return errors.Wrapf(err, "ProcessBlock: Problem deleting utxo operations for block") } + // We also need to revert the committed state if applicable. + if detachNode.IsCommitted() { + detachNode.ClearCommittedStatus() + // update the block index to be safe. + bc.addNewBlockNodeToBlockIndex(detachNode) + if err = PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, detachNode, false, bc.eventManager); err != nil { + return errors.Wrapf(err, "ProcessBlock: Problem putting height hash to node info for detach node that is not committed.") + } + } + // Note we could be even more aggressive here by deleting the nodes and // corresponding blocks from the db here (i.e. not storing any side chain // data on the db). 
But this seems like a minor optimization that comes at @@ -2943,6 +3049,15 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures if err := PutUtxoOperationsForBlockWithTxn(txn, bc.snapshot, blockHeight, attachNode.Hash, utxoOpsForAttachBlocks[ii], bc.eventManager); err != nil { return errors.Wrapf(err, "ProcessBlock: Problem putting utxo operations for block") } + + if !attachNode.IsCommitted() { + attachNode.Status |= StatusBlockCommitted + // update the block index to be safe. + bc.addNewBlockNodeToBlockIndex(attachNode) + if err = PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, attachNode, false, bc.eventManager); err != nil { + return errors.Wrapf(err, "ProcessBlock: Problem putting height hash to node info for attach node that is not committed.") + } + } } // Write the modified utxo set to the view. @@ -2959,10 +3074,8 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // Now the db has been updated, update our in-memory best chain. Note that there // is no need to update the node index because it was updated as we went along. - newBestChain, newBestChainMap := bc.CopyBestChain() - newBestChain, newBestChainMap = updateBestChainInMemory( - newBestChain, newBestChainMap, detachBlocks, attachBlocks) - bc.bestChain, bc.bestChainMap = newBestChain, newBestChainMap + bc.blockIndex.setTip(newTipNode) + bc.blockIndex.setHeaderTip(newTipNode) // If we made it here then this block is on the main chain. isMainChain = true diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index b8a05309f..fb0bed9ca 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -618,7 +618,7 @@ func TestBasicTransferReorg(t *testing.T) { // Process all of the fork blocks on the original chain to make it // experience a reorg.
for _, forkBlock := range forkBlocks { - _, _, _, err := chain1.ProcessBlock(forkBlock, true /*verifySignatures*/) + _, _, _, err := chain1.ProcessBlock(forkBlock, nil, true /*verifySignatures*/) require.NoError(err) } @@ -661,7 +661,7 @@ func _shouldConnectBlock(blk *MsgDeSoBlock, t *testing.T, chain *Blockchain) { blockHash, _ := blk.Hash() verifySignatures := true - isMainChain, isOrphan, _, err := chain.ProcessBlock(blk, verifySignatures) + isMainChain, isOrphan, _, err := chain.ProcessBlock(blk, nil, verifySignatures) require.NoError(err) require.Falsef(isOrphan, "Block %v should not be an orphan", blockHash) require.Truef(isMainChain, "Block %v should be on the main chain", blockHash) @@ -707,7 +707,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockA1.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockA1.Header, headerHash, false) + _, isMainChain, isOrphan, err := chain.ProcessHeader(blockA1.Header, headerHash, false) require.NoError(err) require.True(isMainChain) require.False(isOrphan) @@ -723,7 +723,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockA2.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockA2.Header, headerHash, false) + _, isMainChain, isOrphan, err := chain.ProcessHeader(blockA2.Header, headerHash, false) require.NoError(err) require.True(isMainChain) require.False(isOrphan) @@ -739,7 +739,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockB1.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockB1.Header, headerHash, false) + _, isMainChain, isOrphan, err := chain.ProcessHeader(blockB1.Header, headerHash, false) require.NoError(err) // Should 
not be main chain yet require.False(isMainChain) @@ -756,7 +756,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockB2.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockB2.Header, headerHash, false) + _, isMainChain, isOrphan, err := chain.ProcessHeader(blockB2.Header, headerHash, false) require.NoError(err) // Should not be main chain yet require.False(isMainChain) @@ -773,7 +773,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockB3.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockB3.Header, headerHash, false) + _, isMainChain, isOrphan, err := chain.ProcessHeader(blockB3.Header, headerHash, false) require.NoError(err) // Should not be main chain yet require.True(isMainChain) @@ -826,7 +826,7 @@ func TestProcessBlockReorgBlocks(t *testing.T) { // Block b1 fmt.Println("Connecting block b1") require.Equal(uint64(3), GetUtxoNumEntries(db, chain.snapshot)) - isMainChain, isOrphan, _, err := chain.ProcessBlock(blockB1, verifySignatures) + isMainChain, isOrphan, _, err := chain.ProcessBlock(blockB1, nil, verifySignatures) require.NoError(err) require.Falsef(isOrphan, "Block b1 should not be an orphan") require.Falsef(isMainChain, "Block b1 should not be on the main chain") @@ -842,7 +842,7 @@ func TestProcessBlockReorgBlocks(t *testing.T) { // Block b2 fmt.Println("Connecting block b2") require.Equal(uint64(3), GetUtxoNumEntries(db, chain.snapshot)) - isMainChain, isOrphan, _, err := chain.ProcessBlock(blockB2, verifySignatures) + isMainChain, isOrphan, _, err := chain.ProcessBlock(blockB2, nil, verifySignatures) require.NoError(err) require.Falsef(isOrphan, "Block b2 should not be an orphan") require.Falsef(isMainChain, "Block b2 should not be on the main chain") @@ -1222,6 +1222,8 @@ func 
TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, @@ -1229,6 +1231,8 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { MaxDifficultyRetargetFactor: 2, } + bc.params = fakeParams + nodes := []*BlockNode{} diffsAsInts := []int64{} for ii := 0; ii < 13; ii++ { @@ -1236,11 +1240,11 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1248,8 +1252,9 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { // Blocks generating every 1 second, which is 2x too fast. TstampNanoSecs: SecondsToNanoSeconds(int64(ii)), }, - StatusNone, + StatusHeaderValidated, )) + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1273,11 +1278,11 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { diffsAsInts = []int64{} for ii := 13; ii < 30; ii++ { lastNode := nodes[ii-1] - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1285,8 +1290,9 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { // Blocks generating every 4 second, which is 2x too slow. 
TstampNanoSecs: SecondsToNanoSeconds(int64(ii * 4)), }, - StatusNone, + StatusHeaderValidated, )) + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1318,6 +1324,8 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, @@ -1325,6 +1333,8 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { MaxDifficultyRetargetFactor: 2, } + bc.params = fakeParams + nodes := []*BlockNode{} diffsAsInts := []int64{} for ii := 0; ii < 13; ii++ { @@ -1332,11 +1342,11 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1344,8 +1354,9 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { // Blocks generating every 1 second, which is 2x too fast. 
TstampNanoSecs: SecondsToNanoSeconds(int64(ii)), }, - StatusNone, + StatusHeaderValidated, )) + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1369,11 +1380,11 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { diffsAsInts = []int64{} for ii := 13; ii < 30; ii++ { lastNode := nodes[ii-1] - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1381,8 +1392,9 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { // Blocks generating every 8 second, which is >2x too slow. TstampNanoSecs: SecondsToNanoSeconds(int64(ii * 4)), }, - StatusNone, + StatusHeaderValidated, )) + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1414,6 +1426,8 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, @@ -1421,6 +1435,8 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { MaxDifficultyRetargetFactor: 2, } + bc.params = fakeParams + nodes := []*BlockNode{} diffsAsInts := []int64{} for ii := 0; ii < 13; ii++ { @@ -1428,11 +1444,11 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + 
NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1440,9 +1456,9 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { // Blocks generating all at once. TstampNanoSecs: SecondsToNanoSeconds(0), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1469,12 +1485,15 @@ func TestCalcNextDifficultyTargetJustRight(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, TimeBetweenBlocks: 2 * time.Second, MaxDifficultyRetargetFactor: 3, } + bc.params = fakeParams nodes := []*BlockNode{} diffsAsInts := []int64{} @@ -1483,11 +1502,11 @@ func TestCalcNextDifficultyTargetJustRight(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1495,9 +1514,9 @@ func TestCalcNextDifficultyTargetJustRight(t *testing.T) { // Blocks generating every 2 second, which is under the limit. 
TstampNanoSecs: SecondsToNanoSeconds(int64(ii * 2)), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1524,6 +1543,8 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, @@ -1531,6 +1552,8 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { MaxDifficultyRetargetFactor: 2, } + bc.params = fakeParams + nodes := []*BlockNode{} diffsAsInts := []int64{} for ii := 0; ii < 13; ii++ { @@ -1538,11 +1561,11 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1550,9 +1573,9 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { // Blocks generating every 1 second, which is 2x too fast. 
TstampNanoSecs: SecondsToNanoSeconds(int64(ii)), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1575,11 +1598,11 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { diffsAsInts = []int64{} for ii := 13; ii < 34; ii++ { lastNode := nodes[ii-1] - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1587,9 +1610,9 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { // Blocks generating every 3 seconds, which is slow but under the limit. TstampNanoSecs: SecondsToNanoSeconds(int64(ii) * 3), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1672,7 +1695,7 @@ func TestBadBlockSignature(t *testing.T) { // A bad signature with the right public key should fail. 
finalBlock1.BlockProducerInfo.PublicKey = senderPkBytes - _, _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, _, err = chain.ProcessBlock(finalBlock1, nil, true) require.Error(err) require.Contains(err.Error(), RuleErrorInvalidBlockProducerSIgnature) @@ -1681,20 +1704,20 @@ func TestBadBlockSignature(t *testing.T) { require.NoError(err) finalBlock1.BlockProducerInfo.PublicKey = blockSignerPkBytes finalBlock1.BlockProducerInfo.Signature = nil - _, _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, _, err = chain.ProcessBlock(finalBlock1, nil, true) require.Error(err) require.Contains(err.Error(), RuleErrorMissingBlockProducerSignature) // If all the BlockProducerInfo is missing, things should fail finalBlock1.BlockProducerInfo = nil - _, _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, _, err = chain.ProcessBlock(finalBlock1, nil, true) require.Error(err) require.Contains(err.Error(), RuleErrorMissingBlockProducerSignature) // Now let's add blockSignerPK to the map of trusted keys and confirm that the block processes. chain.trustedBlockProducerPublicKeys[MakePkMapKey(blockSignerPkBytes)] = true finalBlock1.BlockProducerInfo = blockProducerInfoCopy - _, _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, _, err = chain.ProcessBlock(finalBlock1, nil, true) require.NoError(err) _, _ = finalBlock1, db diff --git a/lib/connection_manager.go b/lib/connection_manager.go index 3245b0e0c..858b803de 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -10,8 +10,8 @@ import ( "github.com/btcsuite/btcd/addrmgr" "github.com/btcsuite/btcd/wire" - "github.com/decred/dcrd/container/lru" "github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" ) // connection_manager.go contains most of the logic for creating and managing @@ -53,7 +53,7 @@ type ConnectionManager struct { // TODO: seems like we don't use this. // Keep track of the nonces we've sent in our version messages so // we can prevent connections to ourselves. 
- sentNonces lru.Set[any] + sentNonces *lru.Cache[uint64, struct{}] // This section defines the data structures for storing all the // peers we're aware of. @@ -126,13 +126,14 @@ func NewConnectionManager( ValidateHyperSyncFlags(_hyperSync, _syncType) + sentNoncesCache, _ := lru.New[uint64, struct{}](1000) return &ConnectionManager{ srv: _srv, params: _params, listeners: _listeners, // We keep track of the last N nonces we've sent in order to detect // self connections. - sentNonces: *lru.NewSet[any](1000), + sentNonces: sentNoncesCache, //newestBlock: _newestBlock, // Initialize the peer data structures. diff --git a/lib/constants.go b/lib/constants.go index fe23b8674..56348de8c 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1956,5 +1956,7 @@ const DefaultTestnetCheckpointProvider = "https://test.deso.org" const RoutePathGetCommittedTipBlockInfo = "/api/v0/get-committed-tip-block-info" -// Constants that was removed from newer version of Btcec +const BlockIndexMigrationFileName = "block_index_migration.txt" + +// BtcecPubKeyBytesLenUncompressed is a constant that was removed from newer version of Btcec const BtcecPubKeyBytesLenUncompressed = 65 diff --git a/lib/db_utils.go b/lib/db_utils.go index cf7e214e5..013048655 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -604,7 +604,11 @@ type DBPrefixes struct { // When reading and writing data to this prefixes, please acquire the snapshotDbMutex in the snapshot. PrefixHypersyncSnapshotDBPrefix []byte `prefix_id:"[97]"` - // NEXT_TAG: 98 + // PrefixHashToHeight is used to store the height of a block given its hash. + // This helps us map a block hash to its height so we can look up the full info + // in PrefixHeightHashToNodeInfo. + PrefixHashToHeight []byte `prefix_id:"[98]"` + // NEXT_TAG: 99 } // DecodeStateKey decodes a state key into a DeSoEncoder type. 
This is useful for encoders which don't have a stored @@ -1138,7 +1142,7 @@ func DBSetWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, value []byte, eve return errors.Wrapf(err, "DBSetWithTxn: Problem preparing ancestral record") } // Now save the newest record to cache. - snap.DatabaseCache.Put(keyString, value) + snap.DatabaseCache.Add(keyString, value) if !snap.disableChecksum { // We have to remove the previous value from the state checksum. @@ -1237,7 +1241,7 @@ func DBDeleteWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, eventManager * return errors.Wrapf(err, "DBDeleteWithTxn: Problem preparing ancestral record") } // Now delete the past record from the cache. - snap.DatabaseCache.Delete(keyString) + snap.DatabaseCache.Remove(keyString) // We have to remove the previous value from the state checksum. // Because checksum is commutative, we can safely remove the past value here. if !snap.disableChecksum { @@ -2806,7 +2810,7 @@ func DBGetAccessGroupExistenceByAccessGroupIdWithTxn(txn *badger.Txn, snap *Snap // Lookup the snapshot cache and check if we've already stored a value there. 
if isState { - if exists := snap.DatabaseCache.Exists(keyString); exists { + if exists := snap.DatabaseCache.Contains(keyString); exists { return true, nil } } @@ -5044,13 +5048,22 @@ func GetBlock(blockHash *BlockHash, handle *badger.DB, snap *Snapshot) (*MsgDeSo return blockRet, nil } -func PutBlockHashToBlockWithTxn(txn *badger.Txn, snap *Snapshot, block *MsgDeSoBlock, eventManager *EventManager) error { +func PutBlockHashToBlockWithTxn( + txn *badger.Txn, + snap *Snapshot, + blockHash *BlockHash, + block *MsgDeSoBlock, + eventManager *EventManager, +) error { if block.Header == nil { return fmt.Errorf("PutBlockHashToBlockWithTxn: Header was nil in block %v", block) } - blockHash, err := block.Header.Hash() - if err != nil { - return errors.Wrap(err, "PutBlockHashToBlockWithTxn: Problem hashing header: ") + if blockHash == nil { + var err error + blockHash, err = block.Header.Hash() + if err != nil { + return errors.Wrap(err, "PutBlockHashToBlockWithTxn: Problem hashing header: ") + } } blockKey := BlockHashToBlockKey(blockHash) data, err := block.ToBytes(false) @@ -5070,12 +5083,14 @@ func PutBlockHashToBlockWithTxn(txn *badger.Txn, snap *Snapshot, block *MsgDeSoB return nil } -func PutBlockWithTxn(txn *badger.Txn, snap *Snapshot, desoBlock *MsgDeSoBlock, eventManager *EventManager) error { - blockHash, err := desoBlock.Header.Hash() - if err != nil { - return errors.Wrapf(err, "PutBlockWithTxn: Problem hashing header: ") - } - if err = PutBlockHashToBlockWithTxn(txn, snap, desoBlock, eventManager); err != nil { +func PutBlockWithTxn( + txn *badger.Txn, + snap *Snapshot, + desoBlock *MsgDeSoBlock, + blockHash *BlockHash, + eventManager *EventManager, +) error { + if err := PutBlockHashToBlockWithTxn(txn, snap, blockHash, desoBlock, eventManager); err != nil { return errors.Wrap(err, "PutBlockWithTxn: Problem putting block hash to block") } blockRewardTxn := desoBlock.Txns[0] @@ -5096,7 +5111,7 @@ func PutBlockWithTxn(txn *badger.Txn, snap *Snapshot, desoBlock 
*MsgDeSoBlock, e pkMapKey := pkMapKeyIter blockRewardKey := PublicKeyBlockHashToBlockRewardKey(pkMapKey[:], blockHash) - if err = DBSetWithTxn(txn, snap, blockRewardKey, EncodeUint64(blockReward), eventManager); err != nil { + if err := DBSetWithTxn(txn, snap, blockRewardKey, EncodeUint64(blockReward), eventManager); err != nil { return err } } @@ -5104,9 +5119,15 @@ func PutBlockWithTxn(txn *badger.Txn, snap *Snapshot, desoBlock *MsgDeSoBlock, e return nil } -func PutBlock(handle *badger.DB, snap *Snapshot, desoBlock *MsgDeSoBlock, eventManager *EventManager) error { +func PutBlock( + handle *badger.DB, + snap *Snapshot, + desoBlock *MsgDeSoBlock, + blockHash *BlockHash, + eventManager *EventManager, +) error { err := handle.Update(func(txn *badger.Txn) error { - return PutBlockWithTxn(txn, snap, desoBlock, eventManager) + return PutBlockWithTxn(txn, snap, desoBlock, blockHash, eventManager) }) return err @@ -5188,6 +5209,13 @@ func _heightHashToNodeIndexPrefix(bitcoinNodes bool) []byte { return prefix } +func _heightHashToNodePrefixByHeight(height uint32, bitcoinNodes bool) []byte { + prefix := _heightHashToNodeIndexPrefix(bitcoinNodes) + heightBytes := make([]byte, 4) + binary.BigEndian.PutUint32(heightBytes[:], height) + return append(prefix, heightBytes[:]...) +} + func _heightHashToNodeIndexKey(height uint32, hash *BlockHash, bitcoinNodes bool) []byte { prefix := _heightHashToNodeIndexPrefix(bitcoinNodes) @@ -5199,6 +5227,12 @@ func _heightHashToNodeIndexKey(height uint32, hash *BlockHash, bitcoinNodes bool return key } +func _hashToHeightIndexKey(hash *BlockHash) []byte { + key := append([]byte{}, Prefixes.PrefixHashToHeight...) + key = append(key, hash[:]...) 
+ return key +} + func GetHeightHashToNodeInfoWithTxn(txn *badger.Txn, snap *Snapshot, height uint32, hash *BlockHash, bitcoinNodes bool) *BlockNode { @@ -5238,9 +5272,44 @@ func PutHeightHashToNodeInfoWithTxn(txn *badger.Txn, snap *Snapshot, if err := DBSetWithTxn(txn, snap, key, serializedNode, eventManager); err != nil { return err } + + hashToHeightKey := _hashToHeightIndexKey(node.Hash) + if err = DBSetWithTxn(txn, snap, hashToHeightKey, UintToBuf(uint64(node.Height)), eventManager); err != nil { + return err + } + return nil } +func PutHashToHeightBatch(handle *badger.DB, snap *Snapshot, hashToHeight map[BlockHash]uint32, eventManager *EventManager) error { + return handle.Update(func(txn *badger.Txn) error { + for hash, height := range hashToHeight { + key := _hashToHeightIndexKey(&hash) + if err := DBSetWithTxn(txn, snap, key, UintToBuf(uint64(height)), eventManager); err != nil { + return errors.Wrap(err, "PutHashToHeightBatch: Problem setting hash to height") + } + } + return nil + }) +} + +func GetHeightForHash(db *badger.DB, snap *Snapshot, hash *BlockHash) (uint64, error) { + var height uint64 + err := db.View(func(txn *badger.Txn) error { + key := _hashToHeightIndexKey(hash) + heightBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + return err + } + height, _ = Uvarint(heightBytes) + return nil + }) + if err != nil { + return 0, err + } + return height, nil +} + func PutHeightHashToNodeInfoBatch(handle *badger.DB, snap *Snapshot, nodes []*BlockNode, bitcoinNodes bool, eventManager *EventManager) error { @@ -5308,7 +5377,7 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, diffTarget, BytesToBigint(ExpectedWorkForBlockHash(diffTarget)[:]), // CumWork genesisBlock.Header, // Header - StatusHeaderValidated|StatusBlockProcessed|StatusBlockStored|StatusBlockValidated, // Status + StatusHeaderValidated|StatusBlockProcessed|StatusBlockStored|StatusBlockValidated|StatusBlockCommitted, // Status ) // Set the fields in the db 
to reflect the current state of our chain. @@ -5324,7 +5393,7 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting genesis block hash into db for block chain") } // Add the genesis block to the (hash -> block) index. - if err := PutBlockWithTxn(txn, snap, genesisBlock, eventManager); err != nil { + if err := PutBlockWithTxn(txn, snap, genesisBlock, blockHash, eventManager); err != nil { return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting genesis block into db") } // Add the genesis block to the (height, hash -> node info) index in the db. @@ -5544,9 +5613,170 @@ func GetBlockIndex(handle *badger.DB, bitcoinNodes bool, params *DeSoParams) ( return blockIndex, nil } +func (bi *BlockIndex) LoadBlockIndexFromHeight(height uint32, params *DeSoParams) error { + prefix := _heightHashToNodePrefixByHeight(height, false) + + return bi.db.View(func(txn *badger.Txn) error { + opts := badger.DefaultIteratorOptions + nodeIterator := txn.NewIterator(opts) + defer nodeIterator.Close() + for nodeIterator.Seek(prefix); nodeIterator.ValidForPrefix(prefix); nodeIterator.Next() { + var blockNode *BlockNode + + // Don't bother checking the key. We assume that the key lines up + // with what we've stored in the value in terms of (height, block hash). + item := nodeIterator.Item() + err := item.Value(func(blockNodeBytes []byte) error { + // Deserialize the block node. + var err error + // TODO: There is room for optimization here by pre-allocating a + // contiguous list of block nodes and then populating that list + // rather than having each blockNode be a stand-alone allocation. + blockNode, err = DeserializeBlockNode(blockNodeBytes) + if err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + + // If we got here it means we read a blockNode successfully. Store it + // into our node index. 
+ bi.addNewBlockNodeToBlockIndex(blockNode) + + // Find the parent of this block, which should already have been read + // in and connect it. Skip the genesis block, which has height 0. Also + // skip the block if its PrevBlockHash is empty, which will be true for + // the BitcoinStartBlockNode. + // + // TODO: There is room for optimization here by keeping a reference to + // the last node we've iterated over and checking if that node is the + // parent. Doing this would avoid an expensive hashmap check to get + // the parent by its block hash. + if blockNode.Height == 0 || (*blockNode.Header.PrevBlockHash == BlockHash{}) { + continue + } + if parent, ok := bi.GetBlockNodeByHashAndHeight(blockNode.Header.PrevBlockHash, uint64(blockNode.Height)); ok { + // We found the parent node so connect it. + blockNode.Parent = parent + } else { + // If we're syncing a DeSo node and we hit a PoS block, we expect there to + // be orphan blocks in the block index. In this case, we don't throw an error. + if params.IsPoSBlockHeight(uint64(blockNode.Height)) { + continue + } + // In this case we didn't find the parent so error. There shouldn't + // be any unconnectedTxns in our block index. + return fmt.Errorf("GetBlockIndex: Could not find parent for blockNode: %+v", blockNode) + } + } + return nil + }) +} + +func RunBlockIndexMigration(handle *badger.DB, snapshot *Snapshot, eventManager *EventManager, params *DeSoParams) error { + return handle.Update(func(txn *badger.Txn) error { + prefix := _heightHashToNodeIndexPrefix(false) + opts := badger.DefaultIteratorOptions + opts.Prefix = prefix + // We don't need values for this migration. + opts.PrefetchValues = false + nodeIterator := txn.NewIterator(opts) + defer nodeIterator.Close() + hashToHeightMap := make(map[BlockHash]uint32) + // Just in case we need it, get the height of the best hash. 
+ bestHash := DbGetBestHash(handle, snapshot, ChainTypeDeSoBlock) + var bestHashHeight uint32 + for nodeIterator.Seek(prefix); nodeIterator.ValidForPrefix(prefix); nodeIterator.Next() { + item := nodeIterator.Item().Key() + + // Parse the key to get the height and hash. + height := binary.BigEndian.Uint32(item[1:5]) + hash := BlockHash{} + copy(hash[:], item[5:]) + hashToHeightMap[hash] = height + if bestHash != nil && bestHash.IsEqual(&hash) { + bestHashHeight = height + } + if len(hashToHeightMap) < 10000 { + continue + } + innerErr := PutHashToHeightBatch(handle, snapshot, hashToHeightMap, eventManager) + if innerErr != nil { + return errors.Wrap(innerErr, "RunBlockIndexMigration: Problem putting hash to height") + } + hashToHeightMap = make(map[BlockHash]uint32) + } + if len(hashToHeightMap) > 0 { + innerErr := PutHashToHeightBatch(handle, snapshot, hashToHeightMap, eventManager) + if innerErr != nil { + return errors.Wrap(innerErr, "RunBlockIndexMigration: Problem putting hash to height") + } + } + // If we don't have a best hash, then we certainly haven't hit the first pos block height. + if bestHash == nil { + return nil + } + // TODO: get best chain up to PoS Cutover height and set all blocks in that chain to committed. + firstPoSBlockHeight := params.GetFirstPoSBlockHeight() + // Look up blocks at cutover height. + prefixKey := _heightHashToNodePrefixByHeight(uint32(firstPoSBlockHeight), false) + _, valsFound, err := _enumerateKeysForPrefixWithTxn(txn, prefixKey, false) + if err != nil { + return errors.Wrap(err, "RunBlockIndexMigration: Problem enumerating keys for prefix") + } + if len(valsFound) > 1 { + return fmt.Errorf("RunBlockIndexMigration: More than one block found at PoS cutover height") + } + var blockNode *BlockNode + // In this case, we need to pull the best hash from the DB and iterate backwards. 
+ if len(valsFound) == 0 { + blockNode = GetHeightHashToNodeInfoWithTxn(txn, snapshot, bestHashHeight, bestHash, false) + if blockNode == nil { + return fmt.Errorf("RunBlockIndexMigration: block with Best hash (%v) and height (%v) not found", bestHash, bestHashHeight) + } + } else { + blockNode, err = DeserializeBlockNode(valsFound[0]) + if err != nil { + return errors.Wrap(err, "RunBlockIndexMigration: Problem deserializing block node for pos cutover") + } + } + var blockNodeBatch []*BlockNode + for blockNode != nil { + if !blockNode.IsCommitted() { + blockNode.Status |= StatusBlockCommitted + } + // TODO: make sure I don't need a copy. + blockNodeBatch = append(blockNodeBatch, blockNode) + if len(blockNodeBatch) < 10000 { + continue + } + err = PutHeightHashToNodeInfoBatch(handle, snapshot, blockNodeBatch, false /*bitcoinNodes*/, eventManager) + if err != nil { + return errors.Wrap(err, "RunBlockIndexMigration: Problem putting block node batch") + } + parentBlockNode := GetHeightHashToNodeInfoWithTxn(txn, snapshot, blockNode.Height, blockNode.Hash, false /*bitcoinNodes*/) + if blockNode.Height > 0 && parentBlockNode == nil { + return errors.New("RunBlockIndexMigration: Parent block node not found") + } + blockNode = parentBlockNode + } + err = PutHeightHashToNodeInfoBatch(handle, snapshot, blockNodeBatch, false /*bitcoinNodes*/, eventManager) + if err != nil { + return errors.Wrap(err, "RunBlockIndexMigration: Problem putting block node batch") + } + return nil + }) +} + +// TODO: refactor to actually get the whole best chain if that's +// what someone wants. It'll take a while, but whatever. func GetBestChain(tipNode *BlockNode) ([]*BlockNode, error) { reversedBestChain := []*BlockNode{} - for tipNode != nil { + maxBestChainInitLength := 3600 * 100 // Cache up to 100 hours of blocks. 
+ for tipNode != nil && len(reversedBestChain) < maxBestChainInitLength { if (tipNode.Status&StatusBlockValidated) == 0 && (tipNode.Status&StatusBitcoinHeaderValidated) == 0 { diff --git a/lib/load_test.go b/lib/load_test.go index 2092ad91c..78a3b056e 100644 --- a/lib/load_test.go +++ b/lib/load_test.go @@ -189,7 +189,7 @@ func TestComputeMaxTPS(t *testing.T) { _, _ = newParams, newDB timeStart := time.Now() for _, blockToConnect := range blocksMined { - _, _, _, err := newChain.ProcessBlock(blockToConnect, true /*verifySignatures*/) + _, _, _, err := newChain.ProcessBlock(blockToConnect, nil, true /*verifySignatures*/) require.NoError(err) } elapsedSecs := (time.Since(timeStart)).Seconds() @@ -236,7 +236,7 @@ func TestConnectBlocksLoadTest(t *testing.T) { pprof.StartCPUProfile(ff) timeStart := time.Now() for _, blockToConnect := range blocksMined { - _, _, _, err := newChain.ProcessBlock(blockToConnect, false /*verifySignatures*/) + _, _, _, err := newChain.ProcessBlock(blockToConnect, nil, false /*verifySignatures*/) require.NoError(err) } elapsedSecs := (time.Since(timeStart)).Seconds() diff --git a/lib/miner.go b/lib/miner.go index 67ed8976a..6703cd554 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -208,6 +208,7 @@ func (desoMiner *DeSoMiner) MineAndProcessSingleBlock(threadIndex uint32, mempoo desoMiner.BlockProducer.chain.blockTip().Header) scs := spew.ConfigState{DisableMethods: true, Indent: " ", DisablePointerAddresses: true} glog.V(1).Infof(scs.Sdump(blockToMine)) + // Sanitize the block for the comparison we're about to do. We need to do // this because the comparison function below will think they're different // if one has nil and one has an empty list. Annoying, but this solves the @@ -248,7 +249,7 @@ func (desoMiner *DeSoMiner) MineAndProcessSingleBlock(threadIndex uint32, mempoo verifySignatures := true // TODO(miner): Replace with a call to SubmitBlock. 
isMainChain, isOrphan, _, err := desoMiner.BlockProducer.chain.ProcessBlock( - blockToMine, verifySignatures) + blockToMine, nil, verifySignatures) glog.V(2).Infof("Called ProcessBlock: isMainChain=(%v), isOrphan=(%v), err=(%v)", isMainChain, isOrphan, err) if err != nil { diff --git a/lib/network_manager.go b/lib/network_manager.go index 0bfcf101a..a77974800 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -11,11 +11,11 @@ import ( "github.com/btcsuite/btcd/addrmgr" "github.com/btcsuite/btcd/wire" - "github.com/decred/dcrd/container/lru" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" "github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" ) @@ -69,7 +69,7 @@ type NetworkManager struct { NonValidatorInboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] // Cache of nonces used during handshake. - usedNonces lru.Set[uint64] + usedNonces *lru.Cache[uint64, struct{}] // The address manager keeps track of peer addresses we're aware of. 
When // we need to connect to a new outbound peer, it chooses one of the addresses @@ -121,7 +121,7 @@ func NewNetworkManager( minTxFeeRateNanosPerKB uint64, nodeServices ServiceFlag, ) *NetworkManager { - + usedNoncesCache, _ := lru.New[uint64, struct{}](1000) return &NetworkManager{ params: params, srv: srv, @@ -136,7 +136,7 @@ func NewNetworkManager( ValidatorOutboundIndex: collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](), NonValidatorOutboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), NonValidatorInboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), - usedNonces: *lru.NewSet[uint64](1000), + usedNonces: usedNoncesCache, connectIps: connectIps, persistentIpToRemoteNodeIdsMap: collections.NewConcurrentMap[string, RemoteNodeId](), activeValidatorsMap: collections.NewConcurrentMap[bls.SerializedPublicKey, consensus.Validator](), @@ -261,7 +261,7 @@ func (nm *NetworkManager) _handleVersionMessage(origin *Peer, desoMsg DeSoMessag // If we've seen this nonce before then return an error since this is a connection from ourselves. msgNonce := verMsg.Nonce if nm.usedNonces.Contains(msgNonce) { - nm.usedNonces.Delete(msgNonce) + nm.usedNonces.Remove(msgNonce) glog.Errorf("NetworkManager.handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ "nonce collision, nonce (%v)", origin.ID, msgNonce) nm.Disconnect(rn, "nonce collision") @@ -277,7 +277,7 @@ func (nm *NetworkManager) _handleVersionMessage(origin *Peer, desoMsg DeSoMessag return } - nm.usedNonces.Put(responseNonce) + nm.usedNonces.Add(responseNonce, struct{}{}) } // _handleVerackMessage is called when a new verack message is received. 
@@ -1248,7 +1248,7 @@ func (nm *NetworkManager) InitiateHandshake(rn *RemoteNode) { glog.Errorf("NetworkManager.InitiateHandshake: Error initiating handshake: %v", err) nm.Disconnect(rn, fmt.Sprintf("error initiating handshake: %v", err)) } - nm.usedNonces.Put(nonce) + nm.usedNonces.Add(nonce, struct{}{}) } // handleHandshakeComplete is called on a completed handshake with a RemoteNodes. diff --git a/lib/peer.go b/lib/peer.go index 9296db5bf..27b56a8b5 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -9,10 +9,9 @@ import ( "sync/atomic" "time" - "github.com/hashicorp/golang-lru/v2" - "github.com/btcsuite/btcd/wire" "github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" ) @@ -330,7 +329,12 @@ func (pp *Peer) HelpHandleInv(msg *MsgDeSoInv) { } else if invVect.Type == InvTypeBlock { // For blocks, we check that the hash isn't known to us either in our // main header chain or in side chains. - if pp.srv.blockchain.HasHeader(¤tHash) { + exists, err := pp.srv.blockchain.HasHeader(¤tHash) + if exists { + continue + } + if err != nil { + glog.Errorf("Server._handleInv: Error checking if block exists: %v", err) continue } @@ -339,7 +343,7 @@ func (pp *Peer) HelpHandleInv(msg *MsgDeSoInv) { // If we made it here, it means the inventory was added to one of the // lists so mark it as processed on the Server. - pp.srv.inventoryBeingProcessed.Put(*invVect) + pp.srv.inventoryBeingProcessed.Add(*invVect, struct{}{}) } // If there were any transactions we don't yet have, request them using diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 7a60e84a0..ec7a00897 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -38,68 +38,76 @@ import ( // StatusHeaderValidated or StatusHeaderValidateFailed. // 5. Exit early if the's view is less than the current header chain's tip. // 6. Reorg the best header chain if the header's view is higher than the current tip. 
-func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, verifySignatures bool) ( - _isMainChain bool, _isOrphan bool, _err error, +func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, headerHash *BlockHash, verifySignatures bool) ( + _blockNode *BlockNode, _isMainChain bool, _isOrphan bool, _err error, ) { if !bc.params.IsPoSBlockHeight(header.Height) { - return false, false, errors.Errorf( + return nil, false, false, errors.Errorf( "processHeaderPoS: Header height %d is less than the ProofOfStake2ConsensusCutoverBlockHeight %d", header.Height, bc.params.GetFirstPoSBlockHeight(), ) } - headerHash, err := header.Hash() - if err != nil { - return false, false, errors.Wrapf(err, "processHeaderPoS: Problem hashing header") - } - // If the incoming header is already part of the best header chain, then we can exit early. // The header is not part of a fork, and is already an ancestor of the current header chain tip. - if _, isInBestHeaderChain := bc.bestHeaderChainMap[*headerHash]; isInBestHeaderChain { - return true, false, nil + // Here we explicitly check the bestHeaderChain.ChainMap to make sure the in-memory struct is properly + // updated. This is necessary because the block index may have been updated with the header but the + // bestHeaderChain.ChainMap may not have been updated yet. + blockNode, isInBestHeaderChain, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(headerHash, &header.Height, true) + if err != nil { + return nil, false, false, + errors.Wrapf(err, "processHeaderPoS: Problem getting block from best chain by hash: ") + } + if isInBestHeaderChain { + return blockNode, true, false, nil } // If the incoming header is part of a reorg that uncommits the committed tip from the best chain, // then we exit early. Such headers are invalid and should not be synced. 
committedBlockchainTip, _ := bc.GetCommittedTip() if committedBlockchainTip != nil && committedBlockchainTip.Header.Height >= header.Height { - return false, false, errors.New("processHeaderPoS: Header conflicts with committed tip") + return nil, false, false, errors.New("processHeaderPoS: Header conflicts with committed tip") } // Validate the header and index it in the block index. blockNode, isOrphan, err := bc.validateAndIndexHeaderPoS(header, headerHash, verifySignatures) if err != nil { - return false, false, errors.Wrapf(err, "processHeaderPoS: Problem validating and indexing header: ") + return blockNode, false, false, errors.Wrapf(err, "processHeaderPoS: Problem validating and indexing header: ") } - // Now that we know we have a valid header, we check the block index for it any orphan children for it - // and heal the parent pointers for all of them. - bc.healPointersForOrphanChildren(blockNode) + // Don't worry about healing orphan children when we're syncing. + //if !bc.isSyncing() { + // // Now that we know we have a valid header, we check the block index for it any orphan children for it + // // and heal the parent pointers for all of them. + // bc.healPointersForOrphanChildren(blockNode) + //} // Exit early if the header is an orphan. if isOrphan { - return false, true, nil + return blockNode, false, true, nil } // Exit early if the header's view is less than the current header chain's tip. The header is not // the new tip for the best header chain. currentTip := bc.headerTip() if header.ProposedInView <= currentTip.Header.ProposedInView { - return false, false, nil + return blockNode, false, false, nil } + bc.blockIndex.setHeaderTip(blockNode) + // The header is not an orphan and has a higher view than the current tip. We reorg the header chain // and apply the incoming header as the new tip. 
- _, blocksToDetach, blocksToAttach := GetReorgBlocks(currentTip, blockNode) - bc.bestHeaderChain, bc.bestHeaderChainMap = updateBestChainInMemory( - bc.bestHeaderChain, - bc.bestHeaderChainMap, - blocksToDetach, - blocksToAttach, - ) + //_, blocksToDetach, blocksToAttach := bc.GetReorgBlocks(currentTip, blockNode) + //bc.bestHeaderChain.Chain, bc.bestHeaderChain.ChainMap = updateBestChainInMemory( + // bc.bestHeaderChain.Chain, + // bc.bestHeaderChain.ChainMap, + // blocksToDetach, + // blocksToAttach, + //) // Success. The header is at the tip of the best header chain. - return true, false, nil + return blockNode, true, false, nil } // healPointersForOrphanChildren fixes an inconsistency in the block index that may have @@ -108,37 +116,37 @@ func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, verifySignatures b // later on, we not only need to store the parent in the block index but also need to update the // pointer from the orphan block's BlockNode to the parent. We do that dynamically here as we // process headers. -func (bc *Blockchain) healPointersForOrphanChildren(blockNode *BlockNode) { - // Fetch all potential children of this blockNode from the block index. - blockNodesAtNextHeight, exists := bc.blockIndexByHeight[blockNode.Header.Height+1] - if !exists { - // No children of this blockNode exist in the block index. Exit early. - return - } - - // Iterate through all block nodes at the next block height and update their parent pointers. - for _, blockNodeAtNextHeight := range blockNodesAtNextHeight { - // Check if it's a child of the parent block node. - if !blockNodeAtNextHeight.Header.PrevBlockHash.IsEqual(blockNode.Hash) { - continue - } - - // Check if it has its parent pointer set. If it does, then we exit early. - if blockNodeAtNextHeight.Parent != nil { - continue - } - - // If the parent block node is not set, then we set it to the parent block node. 
- blockNodeAtNextHeight.Parent = blockNode - } -} +//func (bc *Blockchain) healPointersForOrphanChildren(blockNode *BlockNode) { +// // Fetch all potential children of this blockNode from the block index. +// blockNodesAtNextHeight := bc.blockIndex.GetBlockNodesByHeight(blockNode.Header.Height + 1) +// exists := len(blockNodesAtNextHeight) > 0 +// if !exists { +// // No children of this blockNode exist in the block index. Exit early. +// return +// } +// +// // Iterate through all block nodes at the next block height and update their parent pointers. +// for _, blockNodeAtNextHeight := range blockNodesAtNextHeight { +// // Check if it's a child of the parent block node. +// if !blockNodeAtNextHeight.Header.PrevBlockHash.IsEqual(blockNode.Hash) { +// continue +// } +// +// // Check if it has its parent pointer set. If it does, then we exit early. +// if blockNodeAtNextHeight.Parent != nil { +// continue +// } +// +// // If the parent block node is not set, then we set it to the parent block node. +// blockNodeAtNextHeight.Parent = blockNode +// } +//} func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHash *BlockHash, verifySignatures bool) ( _headerBlockNode *BlockNode, _isOrphan bool, _err error, ) { // Look up the header in the block index to check if it has already been validated and indexed. - blockNode, blockNodeExists := bc.blockIndexByHash.Get(*headerHash) - + blockNode, blockNodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(headerHash, header.Height) // ------------------------------------ Base Cases ----------------------------------- // // The header is already validated. Exit early. @@ -157,7 +165,8 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas } // The header is an orphan. No need to store it in the block index. Exit early. 
- parentBlockNode, parentBlockNodeExists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + // TODO: validate that height - 1 > 0 + parentBlockNode, parentBlockNodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !parentBlockNodeExists { return nil, true, nil } @@ -191,14 +200,14 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas // is also not valid. if parentBlockNode.IsHeaderValidateFailed() { return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( - header, errors.New("validateAndIndexHeaderPoS: Parent header failed validations"), + header, headerHash, errors.New("validateAndIndexHeaderPoS: Parent header failed validations"), ) } // Verify that the header is properly formed. - if err := bc.isValidBlockHeaderPoS(header); err != nil { + if err = bc.isValidBlockHeaderPoS(header); err != nil { return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( - header, errors.New("validateAndIndexHeaderPoS: Header failed validations"), + header, headerHash, errors.New("validateAndIndexHeaderPoS: Header failed validations"), ) } @@ -210,13 +219,13 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas } if !isValidRandomSeedSignature { return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( - header, errors.New("validateAndIndexHeaderPoS: Header has invalid random seed signature"), + header, headerHash, errors.New("validateAndIndexHeaderPoS: Header has invalid random seed signature"), ) } } // Store it as HeaderValidated now that it has passed all validations. 
- blockNode, err = bc.storeValidatedHeaderInBlockIndex(header) + blockNode, err = bc.storeValidatedHeaderInBlockIndex(header, headerHash) if err != nil { return nil, false, errors.Wrapf(err, "validateAndIndexHeaderPoS: Problem adding header to block index: ") } @@ -240,8 +249,11 @@ func (bc *Blockchain) ProcessBlockPoS(block *MsgDeSoBlock, currentView uint64, v if block == nil { return false, false, nil, fmt.Errorf("ProcessBlockPoS: Block is nil") } - - return bc.processBlockPoS(block, currentView, verifySignatures) + blockHash, err := block.Hash() + if err != nil { + return false, false, nil, errors.Wrap(err, "ProcessBlockPoS: Problem hashing block") + } + return bc.processBlockPoS(block, blockHash, currentView, verifySignatures) } // processBlockPoS runs the Fast-HotStuff block connect and commit rule as follows: @@ -253,7 +265,12 @@ func (bc *Blockchain) ProcessBlockPoS(block *MsgDeSoBlock, currentView uint64, v // 5. Try to apply the incoming block as the tip (performing reorgs as necessary). If it can't be applied, exit here. // 6. Run the commit rule - If applicable, flushes the incoming block's grandparent to the DB // 7. Notify listeners via the EventManager of which blocks have been removed and added. -func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, verifySignatures bool) ( +func (bc *Blockchain) processBlockPoS( + block *MsgDeSoBlock, + blockHash *BlockHash, + currentView uint64, + verifySignatures bool, +) ( _success bool, _isOrphan bool, _missingBlockHashes []*BlockHash, @@ -268,14 +285,18 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v } // If we can't hash the block, we can never store in the block index and we should throw it out immediately. 
- if _, err := block.Hash(); err != nil { - return false, false, nil, errors.Wrapf(err, "processBlockPoS: Problem hashing block") + if blockHash == nil { + var err error + blockHash, err = block.Hash() + if err != nil { + return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem hashing block") + } } // In hypersync archival mode, we may receive blocks that have already been processed and committed during state // synchronization. However, we may want to store these blocks in the db for archival purposes. We check if the // block we're dealing with is an archival block. If it is, we store it and return early. - if success, err := bc.checkAndStoreArchivalBlock(block); err != nil { + if success, err := bc.checkAndStoreArchivalBlock(block, blockHash); err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem checking and storing archival block") } else if success { return true, false, nil, nil @@ -300,7 +321,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // on our best chain. Try to process the orphan by running basic validations. // If it passes basic integrity checks, we'll store it with the hope that we // will eventually get a parent that connects to our best chain. - return false, true, missingBlockHashes, bc.processOrphanBlockPoS(block) + return false, true, missingBlockHashes, bc.processOrphanBlockPoS(block, blockHash) } if err != nil { @@ -310,7 +331,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // We expect the utxoView for the parent block to be valid because we check that all ancestor blocks have // been validated. - parentUtxoViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash) + parentUtxoViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash, block.Header.Height-1) if err != nil { // This should never happen. 
If the parent is validated and extends from the tip, then we should // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. @@ -320,7 +341,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v parentUtxoView := parentUtxoViewAndUtxoOps.UtxoView // First, we perform a validation of the leader and the QC to prevent spam. // If the block fails this check, we throw it away. - passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, parentUtxoView, verifySignatures) + passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, blockHash, parentUtxoView, verifySignatures) if err != nil { // If we hit an error, we can't store it since we're not sure if it passed the spam prevention check. return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem validating leader and QC") @@ -331,7 +352,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v } // Validate the block and store it in the block index. The block is guaranteed to not be an orphan. - blockNode, err := bc.validateAndIndexBlockPoS(block, parentUtxoView, verifySignatures) + blockNode, err := bc.validateAndIndexBlockPoS(block, blockHash, parentUtxoView, verifySignatures) if err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem validating block: ") @@ -354,7 +375,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // header and applying it to the header chain will result in the two chains being out of // sync. The header chain is less critical and mutations to it are reversible. So we attempt // to mutate it first before attempting to mutate the block chain. 
- if _, _, err = bc.processHeaderPoS(block.Header, verifySignatures); err != nil { + if _, _, _, err = bc.processHeaderPoS(block.Header, blockHash, verifySignatures); err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem processing header") } @@ -401,7 +422,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // Now that we've processed this block, we check for any blocks that were previously // stored as orphans, which are children of this block. We can process them now. - blockNodesAtNextHeight := bc.blockIndexByHeight[uint64(blockNode.Height)+1] + blockNodesAtNextHeight := bc.blockIndex.GetBlockNodesByHeight(uint64(blockNode.Height) + 1) for _, blockNodeAtNextHeight := range blockNodesAtNextHeight { if blockNodeAtNextHeight.Header.PrevBlockHash.IsEqual(blockNode.Hash) && blockNodeAtNextHeight.IsStored() && @@ -415,7 +436,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v } var appliedNewTipOrphan bool if appliedNewTipOrphan, _, _, err = bc.processBlockPoS( - orphanBlock, currentView, verifySignatures); err != nil { + orphanBlock, blockNodeAtNextHeight.Hash, currentView, verifySignatures); err != nil { glog.Errorf("processBlockPoS: Problem validating orphan block %v", blockNodeAtNextHeight.Hash) continue } @@ -434,7 +455,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // As a spam-prevention measure, we will not store a block if it fails the QC or leader check // and simply throw it away. If it fails the other integrity checks, we'll store it // as validate failed. -func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { +func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock, blockHash *BlockHash) error { // Construct a UtxoView, so we can perform the QC and leader checks. 
utxoView := bc.GetCommittedTipView() @@ -544,32 +565,32 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // As a spam-prevention measure, we just throw away this block and don't store it. return nil } - if err != nil { - return errors.Wrap(err, "processOrphanBlockPoS: Problem getting snapshot global params") + + if blockHash == nil { + blockHash, err = block.Header.Hash() + if err != nil { + return errors.Wrap(err, "processOrphanBlockPoS: Problem hashing block") + } } + // All blocks should pass the basic integrity validations, which ensure the block // is not malformed. If the block is malformed, we should store it as ValidateFailed. if err = bc.isProperlyFormedBlockPoS(block); err != nil { - if _, innerErr := bc.storeValidateFailedBlockInBlockIndex(block); innerErr != nil { + if _, innerErr := bc.storeValidateFailedBlockInBlockIndex(block, blockHash); innerErr != nil { return errors.Wrapf(innerErr, "processOrphanBlockPoS: Problem adding validate failed block to block index: %v", err) } return nil } // Add to blockIndexByHash with status STORED only as we are not sure if it's valid yet. - _, err = bc.storeBlockInBlockIndex(block) + _, err = bc.storeBlockInBlockIndex(block, blockHash) return errors.Wrap(err, "processBlockPoS: Problem adding block to block index: ") } // checkAndStoreArchivalBlock is a helper function that takes in a block and checks if it's an archival block. // If it is, it stores the block in the db and returns true. If it's not, it returns false, or false and an error. -func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock) (_success bool, _err error) { - // First, get the block hash and lookup the block index. 
- blockHash, err := block.Hash() - if err != nil { - return false, errors.Wrap(err, "checkAndStoreArchivalBlock: Problem hashing block") - } - blockNode, exists := bc.blockIndexByHash.Get(*blockHash) +func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock, blockHash *BlockHash) (_success bool, _err error) { + blockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, block.Header.Height) // If the blockNode doesn't exist, or the block is not committed, or it's already stored, then we're not dealing // with an archival block. Archival blocks must have an existing blockNode, be committed, and not be stored. if !exists || !blockNode.IsCommitted() || blockNode.IsStored() { @@ -578,8 +599,7 @@ func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock) (_success // If we get to this point, we're dealing with an archival block, so we'll attempt to store it. // This means, this block node is already marked as COMMITTED and VALIDATED, and we just need to store it. - _, err = bc.storeBlockInBlockIndex(block) - if err != nil { + if _, err := bc.storeBlockInBlockIndex(block, blockHash); err != nil { return false, errors.Wrap(err, "checkAndStoreArchivalBlock: Problem storing block in block index") } return true, nil @@ -587,9 +607,9 @@ func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock) (_success // storeValidateFailedBlockWithWrappedError is a helper function that takes in a block and an error and // stores the block in the block index with status VALIDATE_FAILED. It returns the resulting BlockNode. 
-func (bc *Blockchain) storeValidateFailedBlockWithWrappedError(block *MsgDeSoBlock, outerErr error) ( +func (bc *Blockchain) storeValidateFailedBlockWithWrappedError(block *MsgDeSoBlock, hash *BlockHash, outerErr error) ( *BlockNode, error) { - blockNode, innerErr := bc.storeValidateFailedBlockInBlockIndex(block) + blockNode, innerErr := bc.storeValidateFailedBlockInBlockIndex(block, hash) if innerErr != nil { return nil, errors.Wrapf(innerErr, "storeValidateFailedBlockWithWrappedError: Problem adding validate failed block to block index: %v", @@ -600,6 +620,7 @@ func (bc *Blockchain) storeValidateFailedBlockWithWrappedError(block *MsgDeSoBlo func (bc *Blockchain) validateLeaderAndQC( block *MsgDeSoBlock, + blockHash *BlockHash, parentUtxoView *UtxoView, verifySignatures bool, ) (_passedSpamPreventionCheck bool, _err error) { @@ -621,7 +642,7 @@ func (bc *Blockchain) validateLeaderAndQC( "validateLeaderAndQC: Problem getting snapshot epoch number for epoch #%d", currentEpochEntry.EpochNumber) } - isValidPartialSig, err := parentUtxoView.hasValidProposerPartialSignaturePoS(block, snapshotAtEpochNumber) + isValidPartialSig, err := parentUtxoView.hasValidProposerPartialSignaturePoS(block, blockHash, snapshotAtEpochNumber) if err != nil { return false, errors.Wrap(err, "validateLeaderAndQC: Problem validating proposer partial sig") @@ -679,15 +700,16 @@ func (bc *Blockchain) validateLeaderAndQC( // return the new BlockNode. // - Error case: Something goes wrong that doesn't result in the block being marked VALIDATE or VALIDATE_FAILED. In // this case, we will add the block to the block index with status STORED and return the BlockNode. 
-func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoView *UtxoView, verifySignatures bool) ( - *BlockNode, error) { - blockHash, err := block.Header.Hash() - if err != nil { - return nil, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem hashing block %v", block) - } +func (bc *Blockchain) validateAndIndexBlockPoS( + block *MsgDeSoBlock, + blockHash *BlockHash, + parentUtxoView *UtxoView, + verifySignatures bool, +) (*BlockNode, error) { // Base case - Check if the block is validated or validate failed. If so, we can return early. - blockNode, exists := bc.blockIndexByHash.Get(*blockHash) + // TODO: validate height doesn't overflow uint32 + blockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, block.Header.Height) if exists && (blockNode.IsValidateFailed() || blockNode.IsValidated()) { return blockNode, nil } @@ -708,7 +730,7 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi // Run the validation for the parent and update the block index with the parent's status. We first // check if the parent has a cached status. If so, we use the cached status. Otherwise, we run // the full validation algorithm on it, then index it and use the result. - parentBlockNode, err := bc.validatePreviouslyIndexedBlockPoS(block.Header.PrevBlockHash, verifySignatures) + parentBlockNode, err := bc.validatePreviouslyIndexedBlockPoS(block.Header.PrevBlockHash, block.Header.Height-1, verifySignatures) if err != nil { return blockNode, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem validating previously indexed block: ") } @@ -717,13 +739,13 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi // this block as ValidateFailed. If the parent is not ValidateFailed, we ONLY store the block and move on. // We don't want to store it as ValidateFailed because we don't know if it's actually invalid. 
if parentBlockNode.IsValidateFailed() { - return bc.storeValidateFailedBlockWithWrappedError(block, errors.New("parent block is ValidateFailed")) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, errors.New("parent block is ValidateFailed")) } // If the parent block still has a Stored status, it means that we weren't able to validate it // despite trying. The current block will also be stored as a Stored block. if !parentBlockNode.IsValidated() { - return bc.storeBlockInBlockIndex(block) + return bc.storeBlockInBlockIndex(block, blockHash) } // Validate the block's random seed signature @@ -731,14 +753,14 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi isValidRandomSeedSignature, err := bc.hasValidProposerRandomSeedSignaturePoS(block.Header) if err != nil { var innerErr error - blockNode, innerErr = bc.storeBlockInBlockIndex(block) + blockNode, innerErr = bc.storeBlockInBlockIndex(block, blockHash) if innerErr != nil { return nil, errors.Wrapf(innerErr, "validateAndIndexBlockPoS: Problem adding block to block index: %v", err) } return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem validating random seed signature") } if !isValidRandomSeedSignature { - return bc.storeValidateFailedBlockWithWrappedError(block, errors.New("invalid random seed signature")) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, errors.New("invalid random seed signature")) } } @@ -746,15 +768,15 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi serializedBlock, err := block.ToBytes(false) if err != nil { return bc.storeValidateFailedBlockWithWrappedError( - block, errors.Wrap(err, "validateAndIndexBlockPoS: Problem serializing block")) + block, blockHash, errors.Wrap(err, "validateAndIndexBlockPoS: Problem serializing block")) } if uint64(len(serializedBlock)) > parentUtxoView.GetCurrentGlobalParamsEntry().MaxBlockSizeBytesPoS { - return 
bc.storeValidateFailedBlockWithWrappedError(block, RuleErrorBlockTooBig) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, RuleErrorBlockTooBig) } // Check if the block is properly formed and passes all basic validations. if err = bc.isValidBlockPoS(block); err != nil { - return bc.storeValidateFailedBlockWithWrappedError(block, err) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, err) } // Connect this block to the parent block's UtxoView. @@ -765,7 +787,7 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi // If we fail to connect the block, then it means the block is invalid. We should store it as ValidateFailed. if _, err = parentUtxoView.ConnectBlock(block, txHashes, verifySignatures, nil, block.Header.Height); err != nil { // If it doesn't connect, we want to mark it as ValidateFailed. - return bc.storeValidateFailedBlockWithWrappedError(block, err) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, err) } // If the block is too far in the future, we leave it as STORED and return early. @@ -774,11 +796,11 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem checking block timestamp") } if failsTimestampDriftCheck { - return bc.storeBlockInBlockIndex(block) + return bc.storeBlockInBlockIndex(block, blockHash) } // We can now add this block to the block index since we have performed all basic validations. - blockNode, err = bc.storeValidatedBlockInBlockIndex(block) + blockNode, err = bc.storeValidatedBlockInBlockIndex(block, blockHash) if err != nil { return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem adding block to block index: ") } @@ -789,10 +811,11 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi // cached block, and runs the validateAndIndexBlockPoS algorithm on it. It returns the resulting BlockNode. 
func (bc *Blockchain) validatePreviouslyIndexedBlockPoS( blockHash *BlockHash, + blockHeight uint64, verifySignatures bool, ) (*BlockNode, error) { // Check if the block is already in the block index. If so, we check its current status first. - blockNode, exists := bc.blockIndexByHash.Get(*blockHash) + blockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, blockHeight) if !exists { // We should never really hit this if the block has already been cached in the block index first. // We check here anyway to be safe. @@ -814,7 +837,7 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS( return nil, errors.Wrapf(err, "validatePreviouslyIndexedBlockPoS: Problem fetching block from DB") } // Build utxoView for the block's parent. - parentUtxoViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash) + parentUtxoViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash, block.Header.Height-1) if err != nil { // This should never happen. If the parent is validated and extends from the tip, then we should // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. @@ -823,7 +846,7 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS( parentUtxoView := parentUtxoViewAndUtxoOps.UtxoView // If the block isn't validated or validate failed, we need to run the anti-spam checks on it. - passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, parentUtxoView, verifySignatures) + passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, blockHash, parentUtxoView, verifySignatures) if err != nil { // If we hit an error, that means there was an intermittent issue when trying to // validate the QC or the leader. 
@@ -832,7 +855,7 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS( if !passedSpamPreventionCheck { // If the QC or Leader check failed, we'll never accept this block, but we've already stored it, // so we need to mark it as ValidateFailed. - blockNode, err = bc.storeValidateFailedBlockInBlockIndex(block) + blockNode, err = bc.storeValidateFailedBlockInBlockIndex(block, blockHash) if err != nil { return nil, errors.Wrap(err, "validatePreviouslyIndexedBlockPoS: Problem adding validate failed block to block index") @@ -841,7 +864,7 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS( } // We run the full validation algorithm on the block. - return bc.validateAndIndexBlockPoS(block, parentUtxoView, verifySignatures) + return bc.validateAndIndexBlockPoS(block, blockHash, parentUtxoView, verifySignatures) } // isValidBlockPoS performs all basic block integrity checks. Any error @@ -890,7 +913,7 @@ func (bc *Blockchain) isValidBlockHeaderPoS(header *MsgDeSoHeader) error { // greater than its parent's timestamp. func (bc *Blockchain) isBlockTimestampValidRelativeToParentPoS(header *MsgDeSoHeader) error { // Validate that the timestamp is not less than its parent. - parentBlockNode, exists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + parentBlockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. @@ -1050,7 +1073,7 @@ func (bc *Blockchain) hasValidBlockHeightPoS(header *MsgDeSoHeader) error { return RuleErrorPoSBlockBeforeCutoverHeight } // Validate that the block height is exactly one greater than its parent. 
- parentBlockNode, exists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + parentBlockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. @@ -1065,7 +1088,7 @@ func (bc *Blockchain) hasValidBlockHeightPoS(header *MsgDeSoHeader) error { // hasValidBlockViewPoS validates the view for a given block header func (bc *Blockchain) hasValidBlockViewPoS(header *MsgDeSoHeader) error { // Validate that the view is greater than the latest uncommitted block. - parentBlockNode, exists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + parentBlockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. @@ -1094,7 +1117,7 @@ func (bc *Blockchain) hasValidBlockViewPoS(header *MsgDeSoHeader) error { func (bc *Blockchain) hasValidProposerRandomSeedSignaturePoS(header *MsgDeSoHeader) (bool, error) { // Validate that the leader proposed a valid random seed signature. - parentBlock, exists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + parentBlock, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. 
@@ -1115,8 +1138,20 @@ func (bc *Blockchain) hasValidProposerRandomSeedSignaturePoS(header *MsgDeSoHead return isVerified, nil } -func (bav *UtxoView) hasValidProposerPartialSignaturePoS(block *MsgDeSoBlock, snapshotAtEpochNumber uint64) ( - bool, error) { +func (bav *UtxoView) hasValidProposerPartialSignaturePoS( + block *MsgDeSoBlock, + blockHash *BlockHash, + snapshotAtEpochNumber uint64, +) (bool, error) { + // If we aren't provided a hash, we can just compute it on the fly. + // It's more efficient for us not to recompute the hash though, so we only do it if we have to. + if blockHash == nil { + var err error + blockHash, err = block.Hash() + if err != nil { + return false, errors.Wrapf(err, "hasValidProposerPartialSignaturePoS: Problem hashing block") + } + } votingPublicKey := block.Header.ProposerVotingPublicKey proposerPartialSig := block.Header.ProposerVotePartialSignature // If the proposer partial sig is nil, we can't validate it. That's an error. @@ -1140,11 +1175,6 @@ func (bav *UtxoView) hasValidProposerPartialSignaturePoS(block *MsgDeSoBlock, sn if !snapshotBlockProposerValidatorEntry.VotingPublicKey.Eq(votingPublicKey) { return false, nil } - // Get the block's hash - blockHash, err := block.Header.Hash() - if err != nil { - return false, errors.Wrapf(err, "hasValidProposerPartialSignaturePoS: Problem hashing block") - } // Now that we have the snapshot validator entry and validated that the // voting public key from this block's header matches the snapshotted // voting public key, we can validate the partial sig. 
@@ -1314,16 +1344,18 @@ func (bc *Blockchain) getStoredLineageFromCommittedTip(header *MsgDeSoHeader) ( _missingBlockHashes []*BlockHash, _err error, ) { - highestCommittedBlock, idx := bc.GetCommittedTip() - if idx == -1 || highestCommittedBlock == nil { + highestCommittedBlock, exists := bc.GetCommittedTip() + if !exists || highestCommittedBlock == nil { return nil, nil, errors.New("getStoredLineageFromCommittedTip: No committed blocks found") } currentHash := header.PrevBlockHash.NewBlockHash() + currentHeight := header.Height - 1 ancestors := []*BlockNode{} prevHeight := header.Height prevView := header.GetView() for { - currentBlock, exists := bc.blockIndexByHash.Get(*currentHash) + // TODO(review): confirm currentHeight is decremented on each iteration as we walk back through + // ancestors; otherwise lookups beyond the first parent would query the wrong height. + currentBlock, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(currentHash, currentHeight) if !exists { return nil, []*BlockHash{currentHash}, RuleErrorMissingAncestorBlock } @@ -1361,20 +1393,26 @@ func (bc *Blockchain) getStoredLineageFromCommittedTip(header *MsgDeSoHeader) ( // getOrCreateBlockNodeFromBlockIndex returns the block node from the block index if it exists. // Otherwise, it creates a new block node and adds it to the blockIndexByHash and blockIndexByHeight. -func (bc *Blockchain) getOrCreateBlockNodeFromBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { - hash, err := block.Header.Hash() - if err != nil { - return nil, errors.Wrapf(err, "getOrCreateBlockNodeFromBlockIndex: Problem hashing block %v", block) +func (bc *Blockchain) getOrCreateBlockNodeFromBlockIndex(block *MsgDeSoBlock, hash *BlockHash) (*BlockNode, error) { + // If we aren't provided a hash, we can just compute it on the fly. + // It's more efficient for us not to recompute the hash though, so we only do it if we have to.
+ if hash == nil { + var err error + hash, err = block.Hash() + if err != nil { + return nil, errors.Wrapf(err, "storeBlockInBlockIndex: Problem hashing block") + } } - blockNode, _ := bc.blockIndexByHash.Get(*hash) - prevBlockNode, _ := bc.blockIndexByHash.Get(*block.Header.PrevBlockHash) + blockNode, _ := bc.blockIndex.GetBlockNodeByHashAndHeight(hash, block.Header.Height) if blockNode != nil { // If the block node already exists, we should set its parent if it doesn't have one already. if blockNode.Parent == nil { + prevBlockNode, _ := bc.blockIndex.GetBlockNodeByHashAndHeight(block.Header.PrevBlockHash, block.Header.Height-1) blockNode.Parent = prevBlockNode } return blockNode, nil } + prevBlockNode, _ := bc.blockIndex.GetBlockNodeByHashAndHeight(block.Header.PrevBlockHash, block.Header.Height-1) newBlockNode := NewBlockNode(prevBlockNode, hash, uint32(block.Header.Height), nil, nil, block.Header, StatusNone) bc.addNewBlockNodeToBlockIndex(newBlockNode) return newBlockNode, nil @@ -1382,8 +1420,8 @@ func (bc *Blockchain) getOrCreateBlockNodeFromBlockIndex(block *MsgDeSoBlock) (* // storeBlockInBlockIndex upserts the blocks into the in-memory block index & badger and updates its status to // StatusBlockStored. 
It also writes the block to the block index in badger -func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}) +func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}, hash) if err != nil { return nil, errors.Wrapf(err, "storeValidatedHeaderInBlockIndex: Problem getting or creating block node") } @@ -1401,8 +1439,8 @@ func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader) (* return blockNode, nil } -func (bc *Blockchain) storeValidateFailedHeaderInBlockIndexWithWrapperError(header *MsgDeSoHeader, wrapperError error) error { - if _, innerErr := bc.storeValidateFailedHeaderInBlockIndex(header); innerErr != nil { +func (bc *Blockchain) storeValidateFailedHeaderInBlockIndexWithWrapperError(header *MsgDeSoHeader, hash *BlockHash, wrapperError error) error { + if _, innerErr := bc.storeValidateFailedHeaderInBlockIndex(header, hash); innerErr != nil { return errors.Wrapf(innerErr, "%v", wrapperError) } return wrapperError @@ -1410,8 +1448,8 @@ func (bc *Blockchain) storeValidateFailedHeaderInBlockIndexWithWrapperError(head // storeValidateFailedHeaderInBlockIndex stores the header in the block index only and sets its status to // StatusHeaderValidateFailed. It does not write the header to the DB. 
-func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeader) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}) +func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeader, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}, hash) if err != nil { return nil, errors.Wrapf(err, "storeValidateFailedHeaderInBlockIndex: Problem getting or creating block node") } @@ -1432,8 +1470,8 @@ func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeade // storeBlockInBlockIndex upserts the blocks into the in-memory block index & badger and updates its status to // StatusBlockStored. It also writes the block to the block index in badger // by calling upsertBlockAndBlockNodeToDB. -func (bc *Blockchain) storeBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block) +func (bc *Blockchain) storeBlockInBlockIndex(block *MsgDeSoBlock, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block, hash) if err != nil { return nil, errors.Wrapf(err, "storeBlockInBlockIndex: Problem getting or creating block node") } @@ -1453,8 +1491,8 @@ func (bc *Blockchain) storeBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, e // status to StatusBlockValidated. If it does not have the status StatusBlockStored already, we add that as we // will store the block in the DB after updating its status. It also writes the block to the block index in // badger by calling upsertBlockAndBlockNodeToDB. 
-func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block) +func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block, hash) if err != nil { return nil, errors.Wrapf(err, "storeValidatedBlockInBlockIndex: Problem getting or creating block node") } @@ -1483,8 +1521,8 @@ func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock) (*Blo // status to StatusBlockValidateFailed. If it does not have the status StatusBlockStored already, we add that as we // will store the block in the DB after updating its status. It also writes the block to the block index in badger // by calling upsertBlockAndBlockNodeToDB. -func (bc *Blockchain) storeValidateFailedBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block) +func (bc *Blockchain) storeValidateFailedBlockInBlockIndex(block *MsgDeSoBlock, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block, hash) if err != nil { return nil, errors.Wrapf(err, "storeValidateFailedBlockInBlockIndex: Problem getting or creating block node") } @@ -1517,7 +1555,8 @@ func (bc *Blockchain) upsertBlockAndBlockNodeToDB(block *MsgDeSoBlock, blockNode // Store the block in badger err := bc.db.Update(func(txn *badger.Txn) error { if storeFullBlock { - if innerErr := PutBlockHashToBlockWithTxn(txn, bc.snapshot, block, bc.eventManager); innerErr != nil { + if innerErr := PutBlockHashToBlockWithTxn( + txn, bc.snapshot, blockNode.Hash, block, bc.eventManager); innerErr != nil { return errors.Wrapf(innerErr, "upsertBlockAndBlockNodeToDB: Problem calling PutBlockHashToBlockWithTxn") } } @@ -1634,8 +1673,7 @@ func (bc *Blockchain) shouldReorg(blockNode *BlockNode, currentView uint64) bool // 
addTipBlockToBestChain adds the block as the new tip of the best chain. func (bc *Blockchain) addTipBlockToBestChain(blockNode *BlockNode) { - bc.bestChain = append(bc.bestChain, blockNode) - bc.bestChainMap[*blockNode.Hash] = blockNode + bc.blockIndex.setTip(blockNode) } // removeTipBlockFromBestChain removes the current tip from the best chain. It @@ -1644,9 +1682,9 @@ func (bc *Blockchain) addTipBlockToBestChain(blockNode *BlockNode) { // the bestChain slice and bestChainMap map. func (bc *Blockchain) removeTipBlockFromBestChain() *BlockNode { // Remove the last block from the best chain. - lastBlock := bc.bestChain[len(bc.bestChain)-1] - delete(bc.bestChainMap, *lastBlock.Hash) - bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] + lastBlock := bc.blockIndex.GetTip() + // NOTE(review): GetParent may return nil if the tip's parent has not been set up; confirm + // that setTip(nil) is safe here, or guard against a missing parent before updating the tip. + bc.blockIndex.setTip(lastBlock.GetParent(bc.blockIndex)) return lastBlock } @@ -1657,26 +1695,27 @@ func (bc *Blockchain) runCommitRuleOnBestChain(verifySignatures bool) error { currentBlock := bc.BlockTip() // If we can commit the grandparent, commit it. // Otherwise, we can't commit it and return nil. - blockToCommit, canCommit := bc.canCommitGrandparent(currentBlock) + blockNodeToCommit, canCommit := bc.canCommitGrandparent(currentBlock) if !canCommit { return nil } // Find all uncommitted ancestors of block to commit - _, idx := bc.GetCommittedTip() - if idx == -1 { + committedTip, exists := bc.GetCommittedTip() + if !exists { // This is an edge case we'll never hit in practice since all the PoW blocks // are committed.
return errors.New("runCommitRuleOnBestChain: No committed blocks found") } uncommittedAncestors := []*BlockNode{} - for ii := idx + 1; ii < len(bc.bestChain); ii++ { - uncommittedAncestors = append(uncommittedAncestors, bc.bestChain[ii]) - if bc.bestChain[ii].Hash.IsEqual(blockToCommit) { - break - } + currentNode := &BlockNode{} + *currentNode = *blockNodeToCommit + for currentNode.Height > committedTip.Height { + uncommittedAncestors = append(uncommittedAncestors, currentNode) + currentNode = currentNode.GetParent(bc.blockIndex) } + uncommittedAncestors = collections.Reverse(uncommittedAncestors) for ii := 0; ii < len(uncommittedAncestors); ii++ { - if err := bc.commitBlockPoS(uncommittedAncestors[ii].Hash, verifySignatures); err != nil { + if err := bc.commitBlockPoS(uncommittedAncestors[ii].Hash, uint64(uncommittedAncestors[ii].Height), verifySignatures); err != nil { return errors.Wrapf(err, "runCommitRuleOnBestChain: Problem committing block %v", uncommittedAncestors[ii].Hash.String()) } @@ -1689,18 +1728,28 @@ func (bc *Blockchain) runCommitRuleOnBestChain(verifySignatures bool) error { // between the grandparent and parent of the new block, meaning the grandparent and parent // are proposed in consecutive views, and the "parent" is an ancestor of the incoming block // (not necessarily consecutive views). Additionally, the grandparent must not already be committed. -func (bc *Blockchain) canCommitGrandparent(currentBlock *BlockNode) (_grandparentBlockHash *BlockHash, _canCommit bool, +func (bc *Blockchain) canCommitGrandparent(currentBlock *BlockNode) ( + _grandparentBlockNode *BlockNode, + _canCommit bool, ) { // TODO: Is it sufficient that the current block's header points to the parent // or does it need to have something to do with the QC? 
- parent := bc.bestChainMap[*currentBlock.Header.PrevBlockHash] - grandParent := bc.bestChainMap[*parent.Header.PrevBlockHash] + parent, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(currentBlock.Header.PrevBlockHash, uint64(currentBlock.Height-1)) + if !exists { + glog.Errorf("canCommitGrandparent: Parent block %v not found in best chain map", currentBlock.Header.PrevBlockHash.String()) + return nil, false + } + grandParent, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(parent.Header.PrevBlockHash, uint64(parent.Height-1)) + if !exists { + glog.Errorf("canCommitGrandparent: Grandparent block %v not found in best chain map", parent.Header.PrevBlockHash.String()) + return nil, false + } if grandParent.IsCommitted() { return nil, false } if grandParent.Header.ProposedInView+1 == parent.Header.ProposedInView { // Then we can run the commit rule up to the grandparent! - return grandParent.Hash, true + return grandParent, true } return nil, false } @@ -1708,9 +1757,9 @@ func (bc *Blockchain) canCommitGrandparent(currentBlock *BlockNode) (_grandparen // commitBlockPoS commits the block with the given hash. Specifically, this updates the // BlockStatus to include StatusBlockCommitted and flushes the view after connecting the block // to the DB and updates relevant badger indexes with info about the block. -func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool) error { +func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, blockHeight uint64, verifySignatures bool) error { // block must be in the best chain. we grab the block node from there. 
- blockNode, exists := bc.bestChainMap[*blockHash] + blockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, blockHeight) if !exists { return errors.Errorf("commitBlockPoS: Block %v not found in best chain map", blockHash.String()) } @@ -1720,7 +1769,7 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool return errors.Errorf("commitBlockPoS: Block %v is already committed", blockHash.String()) } // Connect a view up to block we are committing. - utxoViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*blockHash) + utxoViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*blockHash, uint64(blockNode.Height)) if err != nil { return errors.Wrapf(err, "commitBlockPoS: Problem initializing UtxoView: ") } @@ -1839,7 +1888,10 @@ func (bc *Blockchain) GetUncommittedBlocks(tipHash *BlockHash) ([]*BlockNode, er } bc.ChainLock.RLock() defer bc.ChainLock.RUnlock() - tipBlock, exists := bc.bestChainMap[*tipHash] + tipBlock, exists, err := bc.blockIndex.GetBlockNodeByHashOnly(tipHash) + if err != nil { + return nil, errors.Wrapf(err, "GetUncommittedBlocks: Problem getting block %v", tipHash.String()) + } if !exists { return nil, errors.Errorf("GetUncommittedBlocks: Block %v not found in best chain map", tipHash.String()) } @@ -1855,9 +1907,9 @@ func (bc *Blockchain) GetUncommittedBlocks(tipHash *BlockHash) ([]*BlockNode, er if currentParentHash == nil { return nil, errors.Errorf("GetUncommittedBlocks: Block %v has nil PrevBlockHash", currentBlock.Hash) } - currentBlock, _ = bc.blockIndexByHash.Get(*currentParentHash) + currentBlock, _ = bc.blockIndex.GetBlockNodeByHashAndHeight(currentParentHash, currentBlock.Header.Height-1) if currentBlock == nil { - return nil, errors.Errorf("GetUncommittedBlocks: Block %v not found in block index", currentBlock.Hash) + return nil, errors.Errorf("GetUncommittedBlocks: Block %v not found in block index", currentParentHash) } } return collections.Reverse(uncommittedBlockNodes), nil @@ 
-1892,18 +1944,17 @@ func (viewAndUtxoOps *BlockViewAndUtxoOps) Copy() *BlockViewAndUtxoOps { // GetUncommittedTipView builds a UtxoView to the uncommitted tip. func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks - blockViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*bc.BlockTip().Hash) + blockTip := bc.BlockTip() + blockViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*blockTip.Hash, uint64(blockTip.Height)) if err != nil { return nil, errors.Wrapf(err, "GetUncommittedTipView: Problem getting UtxoView at block hash") } return blockViewAndUtxoOps.UtxoView, nil } -func (bc *Blockchain) getCachedBlockViewAndUtxoOps(blockHash BlockHash) (*BlockViewAndUtxoOps, error, bool) { - if viewAndUtxoOpsAtHash, exists := bc.blockViewCache.Get(blockHash); exists { - return viewAndUtxoOpsAtHash, nil, true - } - return nil, nil, false +func (bc *Blockchain) getCachedBlockViewAndUtxoOps(blockHash BlockHash) (*BlockViewAndUtxoOps, bool) { + viewAndUtxoOpsAtHash, exists := bc.blockViewCache.Get(blockHash) + return viewAndUtxoOpsAtHash, exists } // GetUtxoViewAndUtxoOpsAtBlockHash builds a UtxoView to the block provided and returns a BlockViewAndUtxoOps @@ -1912,18 +1963,19 @@ func (bc *Blockchain) getCachedBlockViewAndUtxoOps(blockHash BlockHash) (*BlockV // all uncommitted ancestors of this block. Then it checks the block view cache to see if we have already // computed this view. If not, connecting the uncommitted ancestor blocks and saving to the cache. The // returned UtxoOps and FullBlock should NOT be modified. -func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( +func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash, blockHeight uint64) ( *BlockViewAndUtxoOps, error) { // Always fetch the lineage from the committed tip to the block provided first to // ensure that a valid UtxoView is returned. 
uncommittedAncestors := []*BlockNode{} - currentBlock, _ := bc.blockIndexByHash.Get(blockHash) + currentBlock, _ := bc.blockIndex.GetBlockNodeByHashAndHeight(&blockHash, blockHeight) if currentBlock == nil { return nil, errors.Errorf("GetUtxoViewAndUtxoOpsAtBlockHash: Block %v not found in block index", blockHash) } - highestCommittedBlock, _ := bc.GetCommittedTip() - if highestCommittedBlock == nil { + highestCommittedBlock, exists := bc.GetCommittedTip() + glog.Infof("Highest committed block: %v", highestCommittedBlock) + if !exists || highestCommittedBlock == nil { return nil, errors.Errorf("GetUtxoViewAndUtxoOpsAtBlockHash: No committed blocks found") } // If the provided block is committed, we need to make sure it's the committed tip. @@ -1940,7 +1992,7 @@ func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( if currentParentHash == nil { return nil, errors.Errorf("GetUtxoViewAndUtxoOpsAtBlockHash: Block %v has nil PrevBlockHash", currentBlock.Hash) } - currentBlock, _ = bc.blockIndexByHash.Get(*currentParentHash) + currentBlock, _ = bc.blockIndex.GetBlockNodeByHashAndHeight(currentParentHash, currentBlock.Header.Height-1) if currentBlock == nil { return nil, errors.Errorf("GetUtxoViewAndUtxoOpsAtBlockHash: Block %v not found in block index", currentParentHash) } @@ -1948,15 +2000,8 @@ func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( return nil, errors.Errorf( "GetUtxoViewAndUtxoOpsAtBlockHash: extends from a committed block that isn't the committed tip") } - if currentBlock.IsCommitted() && !currentBlock.Hash.IsEqual(highestCommittedBlock.Hash) { - return nil, errors.Errorf( - "GetUtxoViewAndUtxoOpsAtBlockHash: extends from a committed block that isn't the committed tip") - } - } - viewAndUtxoOpsAtHash, err, exists := bc.getCachedBlockViewAndUtxoOps(blockHash) - if err != nil { - return nil, errors.Wrapf(err, "GetUtxoViewAndUtxoOpsAtBlockHash: Problem getting cached BlockViewAndUtxoOps") } + 
viewAndUtxoOpsAtHash, exists := bc.getCachedBlockViewAndUtxoOps(blockHash) if exists { viewAndUtxoOpsCopy := viewAndUtxoOpsAtHash.Copy() return viewAndUtxoOpsCopy, nil @@ -1970,6 +2015,8 @@ func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( var utxoOps [][]*UtxoOperation var fullBlock *MsgDeSoBlock for ii := len(uncommittedAncestors) - 1; ii >= 0; ii-- { + glog.Infof("Connecting block %v", uncommittedAncestors[ii]) + var err error // We need to get these blocks from badger fullBlock, err = GetBlock(uncommittedAncestors[ii].Hash, bc.db, bc.snapshot) if err != nil { @@ -1990,7 +2037,7 @@ func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( utxoView.TipHash = &blockHash // Save a copy of the UtxoView to the cache. copiedView := utxoView.CopyUtxoView() - bc.blockViewCache.Put(blockHash, &BlockViewAndUtxoOps{ + bc.blockViewCache.Add(blockHash, &BlockViewAndUtxoOps{ UtxoView: copiedView, UtxoOps: utxoOps, Block: fullBlock, @@ -2003,13 +2050,16 @@ func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( } // GetCommittedTip returns the highest committed block and its index in the best chain. -func (bc *Blockchain) GetCommittedTip() (*BlockNode, int) { - for ii := len(bc.bestChain) - 1; ii >= 0; ii-- { - if bc.bestChain[ii].IsCommitted() { - return bc.bestChain[ii], ii +func (bc *Blockchain) GetCommittedTip() (*BlockNode, bool) { + // iterate backwards from the tip node + currentNode := bc.blockIndex.GetTip() + for currentNode != nil { + if currentNode.IsCommitted() { + return currentNode, true } + currentNode = currentNode.GetParent(bc.blockIndex) } - return nil, -1 + return nil, false } // GetSafeBlocks returns all headers of blocks from which the chain can safely extend. @@ -2034,9 +2084,9 @@ func (bc *Blockchain) GetSafeBlocks() ([]*MsgDeSoHeader, error) { func (bc *Blockchain) getSafeBlockNodes() ([]*BlockNode, error) { // First get committed tip. 
- committedTip, idx := bc.GetCommittedTip() - if idx == -1 || committedTip == nil { - return nil, errors.New("getSafeBlockNodes: No committed blocks found") + committedTip, exists := bc.GetCommittedTip() + if !exists || committedTip == nil { + return []*BlockNode{}, nil } // Now get all blocks from the committed tip to the best chain tip. safeBlocks := []*BlockNode{committedTip} @@ -2103,8 +2153,8 @@ func (bc *Blockchain) GetProofOfStakeGenesisQuorumCertificate() (*QuorumCertific func (bc *Blockchain) GetFinalCommittedPoWBlock() (*BlockNode, error) { // Fetch the block node for the cutover block - blockNodes, blockNodesExist := bc.blockIndexByHeight[bc.params.GetFinalPoWBlockHeight()] - if !blockNodesExist { + blockNodes := bc.blockIndex.GetBlockNodesByHeight(bc.params.GetFinalPoWBlockHeight()) + if len(blockNodes) == 0 { return nil, errors.Errorf("Error fetching cutover block nodes before height %d", bc.params.GetFinalPoWBlockHeight()) } diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 5553887c4..7e3ee7430 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2,6 +2,7 @@ package lib import ( "bytes" + "encoding/hex" "fmt" "math" "math/rand" @@ -251,8 +252,8 @@ func TestHasValidBlockHeight(t *testing.T) { ValidatorsVoteQC: nil, ValidatorsTimeoutAggregateQC: nil, }, StatusBlockStored|StatusBlockValidated) - bc.bestChain = []*BlockNode{genesisBlock} - bc.blockIndexByHash.Set(*genesisBlock.Hash, genesisBlock) + bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{*genesisBlock.Hash: genesisBlock}) + bc.blockIndex.blockIndexByHash.Add(*genesisBlock.Hash, genesisBlock) // Create a block with a valid header. 
randomPayload := RandomBytes(256) randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) @@ -301,7 +302,8 @@ func TestHasValidBlockHeight(t *testing.T) { require.Equal(t, err, RuleErrorInvalidPoSBlockHeight) block.Header.Height = 2 - bc.blockIndexByHash = collections.NewConcurrentMap[BlockHash, *BlockNode]() + // TODO: make sure setting to genesis block works. + bc.blockIndex = NewBlockIndex(bc.db, bc.snapshot, genesisBlock) err = bc.hasValidBlockHeightPoS(block.Header) require.Equal(t, err, RuleErrorMissingParentBlock) } @@ -317,19 +319,19 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { t.Cleanup(resetGlobalDeSoParams) hash1 := NewBlockHash(RandomBytes(32)) hash2 := NewBlockHash(RandomBytes(32)) - genesisNode := NewBlockNode(nil, hash1, 1, nil, nil, &MsgDeSoHeader{ + genesisNode := NewBlockNode(nil, hash1, 0, nil, nil, &MsgDeSoHeader{ Version: 2, - Height: 1, + Height: 0, ProposedInView: 1, }, StatusBlockStored|StatusBlockValidated) - block2 := NewBlockNode(genesisNode, hash2, 2, nil, nil, &MsgDeSoHeader{ + block2 := NewBlockNode(genesisNode, hash2, 1, nil, nil, &MsgDeSoHeader{ Version: 2, - Height: 2, + Height: 1, ProposedInView: 2, ValidatorsVoteQC: nil, ValidatorsTimeoutAggregateQC: nil, }, StatusBlockStored|StatusBlockValidated) - bc.blockIndexByHash = collections.NewConcurrentMapFromMap(map[BlockHash]*BlockNode{ + bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{ *hash1: genesisNode, *hash2: block2, }) @@ -372,22 +374,21 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { }, }, } - blockNode, err := bc.storeBlockInBlockIndex(block) - require.NoError(t, err) newHash, err := block.Hash() require.NoError(t, err) + blockNode, err := bc.storeBlockInBlockIndex(block, newHash) + require.NoError(t, err) // Check the block index by hash - blockNodeFromIndex, exists := bc.blockIndexByHash.Get(*newHash) + blockNodeFromIndex, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(newHash, uint64(blockNode.Height)) require.True(t, exists) 
require.True(t, blockNodeFromIndex.Hash.IsEqual(blockNode.Hash)) require.Equal(t, blockNodeFromIndex.Height, uint32(2)) require.True(t, blockNodeFromIndex.IsStored()) require.False(t, blockNodeFromIndex.IsValidated()) // Check the block index by height - byHeightBlockNodes, exists := bc.blockIndexByHeight[2] - require.True(t, exists) + byHeightBlockNodes := bc.blockIndex.GetBlockNodesByHeight(2) require.Len(t, byHeightBlockNodes, 1) - require.True(t, byHeightBlockNodes[*newHash].Hash.IsEqual(newHash)) + require.True(t, byHeightBlockNodes[0].Hash.IsEqual(newHash)) require.True(t, bc.hasBlockNodesIndexedAtHeight(2)) require.Len(t, bc.getAllBlockNodesIndexedAtHeight(2), 1) // Check the DB for the block @@ -399,20 +400,19 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { require.NoError(t, err) require.True(t, bytes.Equal(uncommittedBytes, origBlockBytes)) // Okay now we update the status of the block to include validated. - blockNode, err = bc.storeValidatedBlockInBlockIndex(block) + blockNode, err = bc.storeValidatedBlockInBlockIndex(block, newHash) require.NoError(t, err) - blockNodeFromIndex, exists = bc.blockIndexByHash.Get(*newHash) + blockNodeFromIndex, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(newHash, uncommittedBlock.Header.Height) require.True(t, exists) require.True(t, blockNodeFromIndex.Hash.IsEqual(blockNode.Hash)) require.Equal(t, blockNodeFromIndex.Height, uint32(2)) require.True(t, blockNodeFromIndex.IsStored()) require.True(t, blockNodeFromIndex.IsValidated()) // Check the block index by height. 
- byHeightBlockNodes, exists = bc.blockIndexByHeight[2] - require.True(t, exists) + byHeightBlockNodes = bc.blockIndex.GetBlockNodesByHeight(2) require.Len(t, byHeightBlockNodes, 1) - require.True(t, byHeightBlockNodes[*newHash].Hash.IsEqual(newHash)) - require.True(t, byHeightBlockNodes[*newHash].IsValidated()) + require.True(t, byHeightBlockNodes[0].Hash.IsEqual(newHash)) + require.True(t, byHeightBlockNodes[0].IsValidated()) require.True(t, bc.hasBlockNodesIndexedAtHeight(2)) require.Len(t, bc.getAllBlockNodesIndexedAtHeight(2), 1) @@ -425,28 +425,28 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { require.False(t, updatedBlockHash.IsEqual(newHash)) // Okay now put this new block in there. - blockNode, err = bc.storeBlockInBlockIndex(block) + blockNode, err = bc.storeBlockInBlockIndex(block, updatedBlockHash) require.NoError(t, err) // Make sure the blockIndexByHash is correct. - updatedBlockNode, exists := bc.blockIndexByHash.Get(*updatedBlockHash) + updatedBlockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(updatedBlockHash, uint64(blockNode.Height)) require.True(t, exists) require.True(t, updatedBlockNode.Hash.IsEqual(updatedBlockHash)) require.Equal(t, updatedBlockNode.Height, uint32(2)) require.True(t, updatedBlockNode.IsStored()) require.False(t, updatedBlockNode.IsValidated()) // Make sure the blockIndexByHeight is correct - byHeightBlockNodes, exists = bc.blockIndexByHeight[2] + byHeightBlockNodes = bc.blockIndex.GetBlockNodesByHeight(2) require.True(t, exists) require.Len(t, byHeightBlockNodes, 2) - require.True(t, byHeightBlockNodes[*newHash].Hash.IsEqual(newHash)) - require.True(t, byHeightBlockNodes[*updatedBlockHash].Hash.IsEqual(updatedBlockHash)) + require.True(t, byHeightBlockNodes[0].Hash.IsEqual(newHash) || byHeightBlockNodes[1].Hash.IsEqual(newHash)) + require.True(t, byHeightBlockNodes[0].Hash.IsEqual(updatedBlockHash) || byHeightBlockNodes[1].Hash.IsEqual(updatedBlockHash)) require.True(t, 
bc.hasBlockNodesIndexedAtHeight(2)) require.Len(t, bc.getAllBlockNodesIndexedAtHeight(2), 2) // If we're missing a field in the header, we should get an error // as we can't compute the hash. block.Header.ProposerVotingPublicKey = nil - _, err = bc.storeBlockInBlockIndex(block) + _, err = bc.storeBlockInBlockIndex(block, nil) require.Error(t, err) } @@ -473,11 +473,7 @@ func TestHasValidBlockViewPoS(t *testing.T) { ValidatorsVoteQC: nil, ValidatorsTimeoutAggregateQC: nil, }, StatusBlockStored|StatusBlockValidated) - bc.bestChain = []*BlockNode{ - genesisNode, - block2, - } - bc.blockIndexByHash = collections.NewConcurrentMapFromMap(map[BlockHash]*BlockNode{ + bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{ *hash1: genesisNode, *hash2: block2, }) @@ -809,8 +805,7 @@ func TestGetLineageFromCommittedTip(t *testing.T) { Height: 1, ProposedInView: 1, }, StatusBlockStored|StatusBlockValidated|StatusBlockCommitted) - bc.bestChain = []*BlockNode{genesisNode} - bc.blockIndexByHash = collections.NewConcurrentMapFromMap(map[BlockHash]*BlockNode{ + bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{ *hash1: genesisNode, }) block := &MsgDeSoBlock{ @@ -846,8 +841,8 @@ func TestGetLineageFromCommittedTip(t *testing.T) { ProposedInView: 2, PrevBlockHash: hash1, }, StatusBlockStored|StatusBlockValidated|StatusBlockCommitted) - bc.bestChain = append(bc.bestChain, block2) - bc.blockIndexByHash.Set(*hash2, block2) + bc.blockIndex.setTip(block2) + bc.blockIndex.blockIndexByHash.Add(*hash2, block2) ancestors, missingBlockHashes, err = bc.getStoredLineageFromCommittedTip(block.Header) require.Error(t, err) require.Equal(t, err, RuleErrorDoesNotExtendCommittedTip) @@ -1237,21 +1232,27 @@ func TestShouldReorg(t *testing.T) { hash1 := NewBlockHash(RandomBytes(32)) hash2 := NewBlockHash(RandomBytes(32)) hash3 := NewBlockHash(RandomBytes(32)) - bc.bestChain = []*BlockNode{ + chain := []*BlockNode{ { Hash: hash1, Status: StatusBlockStored | StatusBlockValidated | 
StatusBlockCommitted, + Height: 0, }, { Hash: hash3, Status: StatusBlockStored | StatusBlockValidated, + Height: 1, }, } + bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{ + *hash1: chain[0], + *hash3: chain[1], + }) newBlock := &BlockNode{ Header: &MsgDeSoHeader{ ProposedInView: 2, - PrevBlockHash: bc.bestChain[1].Hash, + PrevBlockHash: chain[1].Hash, }, } @@ -1275,7 +1276,7 @@ func TestShouldReorg(t *testing.T) { // 1. Simple reorg. Just replacing the uncommitted tip. // 2. Create a longer chain and reorg to it. // 3. Make sure no reorg when current view is greater than block's view -// 4. Super happy path of simply extending current uncommitted tip. +// 4. Super happy path of simply extending current uncommitted tip. func TestTryApplyNewTip(t *testing.T) { setBalanceModelBlockHeights(t) bc, _, _ := NewTestBlockchain(t) @@ -1319,9 +1320,9 @@ func TestTryApplyNewTip(t *testing.T) { bc.addTipBlockToBestChain(bn1) bc.addTipBlockToBestChain(bn2) bc.addTipBlockToBestChain(bn3) - bc.blockIndexByHash.Set(*hash1, bn1) - bc.blockIndexByHash.Set(*hash2, bn2) - bc.blockIndexByHash.Set(*hash3, bn3) + bc.blockIndex.blockIndexByHash.Add(*hash1, bn1) + bc.blockIndex.blockIndexByHash.Add(*hash2, bn2) + bc.blockIndex.blockIndexByHash.Add(*hash3, bn3) // Simple reorg. Just replacing the uncommitted tip. newBlock := &MsgDeSoBlock{ @@ -1337,44 +1338,47 @@ func TestTryApplyNewTip(t *testing.T) { ancestors, _, err := bc.getStoredLineageFromCommittedTip(newBlock.Header) require.NoError(t, err) checkBestChainForHash := func(hash *BlockHash) bool { - return collections.Any(bc.bestChain, func(bn *BlockNode) bool { - return bn.Hash.IsEqual(hash) - }) + _, exists, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(hash, nil, false) + require.NoError(t, err) + return exists } // Try to apply newBlock as tip. This should succeed.
newBlockNode := &BlockNode{ Header: newBlock.Header, Hash: newBlockHash, + Height: 4, } appliedNewTip, connectedBlockHashes, disconnectedBlockHashes, err := bc.tryApplyNewTip(newBlockNode, 9, ancestors) require.NoError(t, err) require.True(t, appliedNewTip) // hash 3 should no longer be in the best chain or best chain map - _, hash3ExistsInBestChainMap := bc.bestChainMap[*hash3] + _, hash3ExistsInBestChainMap, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(hash3, nil, false) + require.NoError(t, err) require.False(t, hash3ExistsInBestChainMap) require.False(t, checkBestChainForHash(hash3)) require.Len(t, connectedBlockHashes, 1) require.Len(t, disconnectedBlockHashes, 1) // newBlock should be in the best chain and the best chain map and should be the tip. - _, newBlockExistsInBestChainMap := bc.bestChainMap[*newBlockHash] + _, newBlockExistsInBestChainMap, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(newBlockHash, nil, false) + require.NoError(t, err) require.True(t, newBlockExistsInBestChainMap) require.True(t, checkBestChainForHash(newBlockHash)) require.True(t, bc.BlockTip().Hash.IsEqual(newBlockHash)) // Make sure block 2 and block 1 are still in the best chain. - _, hash2ExistsInBestChainMap := bc.bestChainMap[*hash2] + _, hash2ExistsInBestChainMap, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(hash2, nil, false) + require.NoError(t, err) require.True(t, hash2ExistsInBestChainMap) require.True(t, checkBestChainForHash(hash2)) - _, hash1ExistsInBestChainMap := bc.bestChainMap[*hash1] + _, hash1ExistsInBestChainMap := bc.blockIndex.blockIndexByHash.Get(*hash1) require.True(t, hash1ExistsInBestChainMap) require.True(t, checkBestChainForHash(hash1)) // Remove newBlock from the best chain and block index to reset the state. 
- bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] - delete(bc.bestChainMap, *newBlockHash) + bc.blockIndex.setTip(bc.blockIndex.GetTip().GetParent(bc.blockIndex)) // Add block 3 back bc.addTipBlockToBestChain(bn3) @@ -1403,14 +1407,16 @@ func TestTryApplyNewTip(t *testing.T) { Height: 6, }, } - bc.blockIndexByHash.Set(*hash4, bn4) - bc.blockIndexByHash.Set(*hash5, bn5) + bc.blockIndex.blockIndexByHash.Add(*hash4, bn4) + bc.blockIndex.blockIndexByHash.Add(*hash5, bn5) // Set new block's parent to hash5 newBlockNode.Header.PrevBlockHash = hash5 newBlockNode.Header.ProposedInView = 7 newBlockNode.Header.Height = 7 newBlockNode.Height = 7 + // Clear parent out for safety. + newBlockNode.Parent = nil require.NoError(t, err) ancestors, _, err = bc.getStoredLineageFromCommittedTip(newBlock.Header) require.NoError(t, err) @@ -1422,19 +1428,23 @@ func TestTryApplyNewTip(t *testing.T) { // newBlockHash should be tip. require.True(t, bc.BlockTip().Hash.IsEqual(newBlockHash)) // hash 3 should no longer be in the best chain or best chain map - _, hash3ExistsInBestChainMap = bc.bestChainMap[*hash3] + _, hash3ExistsInBestChainMap, err = bc.GetBlockFromBestChainByHashAndOptionalHeight(hash3, nil, false) + require.NoError(t, err) require.False(t, hash3ExistsInBestChainMap) require.False(t, checkBestChainForHash(hash3)) // hash 2 should no longer be in the best chain or best chain map - _, hash2ExistsInBestChainMap = bc.bestChainMap[*hash2] + _, hash2ExistsInBestChainMap, err = bc.GetBlockFromBestChainByHashAndOptionalHeight(hash2, nil, false) + require.NoError(t, err) require.False(t, hash2ExistsInBestChainMap) require.False(t, checkBestChainForHash(hash2)) // hash 4 should be in the best chain and the best chain map - _, hash4ExistsInBestChainMap := bc.bestChainMap[*hash4] + _, hash4ExistsInBestChainMap, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(hash4, nil, false) + require.NoError(t, err) require.True(t, hash4ExistsInBestChainMap) require.True(t, 
checkBestChainForHash(hash4)) // hash 5 should be in the best chain and the best chain map - _, hash5ExistsInBestChainMap := bc.bestChainMap[*hash5] + _, hash5ExistsInBestChainMap, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(hash5, nil, false) + require.NoError(t, err) require.True(t, hash5ExistsInBestChainMap) require.True(t, checkBestChainForHash(hash5)) @@ -1442,11 +1452,12 @@ func TestTryApplyNewTip(t *testing.T) { require.Len(t, connectedBlockHashes, 3) require.Len(t, disconnectedBlockHashes, 2) - // Reset the state of the best chain. - delete(bc.bestChainMap, *hash4) - delete(bc.bestChainMap, *hash5) - delete(bc.bestChainMap, *newBlockHash) - bc.bestChain = bc.bestChain[:len(bc.bestChain)-3] + // Reset the state of the best chain - parent of newBlock + //bc.bestChain.ChainMap.Remove(*hash4) + //bc.bestChain.ChainMap.Remove(*hash5) + //bc.bestChain.ChainMap.Remove(*newBlockHash) + //bc.bestChain.Chain = bc.bestChain.Chain[:len(bc.bestChain.Chain)-3] + bc.blockIndex.setTip(newBlockNode.GetParent(bc.blockIndex)) // Add block 2 and 3 back. bc.addTipBlockToBestChain(bn2) @@ -1516,8 +1527,8 @@ func TestCanCommitGrandparent(t *testing.T) { PrevBlockHash: hash1, }, } - bc.bestChainMap[*hash1] = bn1 - bc.bestChainMap[*hash2] = bn2 + bc.blockIndex.addNewBlockNodeToBlockIndex(bn1) + bc.blockIndex.addNewBlockNodeToBlockIndex(bn2) // define incoming block hash3 := NewBlockHash(RandomBytes(32)) @@ -1536,7 +1547,7 @@ func TestCanCommitGrandparent(t *testing.T) { // (meaning they are in consecutive views). So we should be able // to commit bn1. grandparentHash, canCommit := bc.canCommitGrandparent(bn3) - require.True(t, hash1.IsEqual(grandparentHash)) + require.True(t, hash1.IsEqual(grandparentHash.Hash)) require.True(t, canCommit) // Update bn1 to be committed. 
We no longer can run the commit since bn1 is already @@ -1611,7 +1622,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { // Add one more block to the best chain, but have the view be further in the future. // this should trigger a commit on block 2. - blockTemplate4 := _generateBlockAndAddToBestChain(testMeta, 14, 20, 429) + blockTemplate4 := _generateBlockAndAddToBestChain(testMeta, 15, 20, 429) err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) @@ -1623,7 +1634,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { // Okay so add block 5 to the best chain. This should NOT trigger a commit on block 3 // as block 4 is not a direct child of block 3 based on its view. - blockTemplate5 := _generateBlockAndAddToBestChain(testMeta, 15, 21, 654) + blockTemplate5 := _generateBlockAndAddToBestChain(testMeta, 16, 21, 654) err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) @@ -1634,9 +1645,9 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2}, []*BlockHash{blockHash3, blockHash4, blockHash5}, blockHash2) // If we now add a block that is a descendent of block 5, we should be able to commit - // blocks 3 and 4 as block 4 and 5 possess a direct parent child relationship and + // blocks 3 and 4 as block 4 and 5 possess a direct parent child relationship, and // we have a descendent of block 5. - blockTemplate6 := _generateBlockAndAddToBestChain(testMeta, 16, 22, 912) + blockTemplate6 := _generateBlockAndAddToBestChain(testMeta, 17, 22, 912) require.NoError(t, err) err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) @@ -1659,7 +1670,8 @@ func _verifyCommitRuleHelper(testMeta *TestMeta, committedBlocks []*BlockHash, u } for _, committedHash := range committedBlocks { // Okay so let's make sure the block is committed. 
- blockNode, exists := testMeta.chain.bestChainMap[*committedHash] + blockNode, exists, err := testMeta.chain.GetBlockFromBestChainByHashAndOptionalHeight(committedHash, nil, false) + require.NoError(testMeta.t, err) require.True(testMeta.t, exists) require.True(testMeta.t, blockNode.IsCommitted()) @@ -1684,7 +1696,8 @@ func _verifyCommitRuleHelper(testMeta *TestMeta, committedBlocks []*BlockHash, u } for _, uncommittedBlockHash := range uncommittedBlocks { // Okay so let's make sure the block is uncommitted. - blockNode, exists := testMeta.chain.bestChainMap[*uncommittedBlockHash] + blockNode, exists, err := testMeta.chain.GetBlockFromBestChainByHashAndOptionalHeight(uncommittedBlockHash, nil, false) + require.NoError(testMeta.t, err) require.True(testMeta.t, exists) require.False(testMeta.t, blockNode.IsCommitted()) // TODO: Verify DB results?? Kinda silly to make sure everything is missing. @@ -1839,7 +1852,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { futureBlockHash, err = futureBlock.Hash() require.NoError(t, err) - futureBlockNode, exists := testMeta.chain.blockIndexByHash.Get(*futureBlockHash) + futureBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(futureBlockHash, futureBlock.Header.Height) require.True(t, exists) require.False(t, futureBlockNode.IsCommitted()) require.True(t, futureBlockNode.IsStored()) @@ -1848,10 +1861,12 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { } var timeoutBlockHash *BlockHash + var timeoutBlockHeight uint64 { // Okay let's timeout view 15 var timeoutBlock *MsgDeSoBlock timeoutBlock = _generateRealBlock(testMeta, 15, 16, 381, blockHash3, true) + timeoutBlockHeight = timeoutBlock.Header.Height success, _, _, err := testMeta.chain.ProcessBlockPoS(timeoutBlock, 15, true) fmt.Println(err) require.True(t, success) @@ -1874,12 +1889,18 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { // Timeout block will no longer be in best chain, and will still be in an 
uncommitted state in the block index _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2}, []*BlockHash{blockHash3, reorgBlockHash}, blockHash2) _verifyRandomSeedHashHelper(testMeta, reorgBlock) - _, exists := testMeta.chain.bestChainMap[*timeoutBlockHash] + _, exists, err := testMeta.chain.GetBlockFromBestChainByHashAndOptionalHeight(timeoutBlockHash, nil, false) + require.NoError(t, err) require.False(t, exists) - timeoutBlockNode, exists := testMeta.chain.blockIndexByHash.Get(*timeoutBlockHash) + timeoutBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(timeoutBlockHash, timeoutBlockHeight) require.True(t, exists) require.False(t, timeoutBlockNode.IsCommitted()) + + // The reorg block hash should be in the block index now. + reorgBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(reorgBlockHash, reorgBlock.Header.Height) + require.True(t, exists) + require.True(t, reorgBlockNode.IsStored()) } var dummyParentBlockHash, orphanBlockHash *BlockHash { @@ -1905,7 +1926,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.Len(t, missingBlockHashes, 1) require.True(t, missingBlockHashes[0].IsEqual(dummyParentBlockHash)) require.NoError(t, err) - orphanBlockInIndex, orphanBlockExists := testMeta.chain.blockIndexByHash.Get(*orphanBlockHash) + orphanBlockInIndex, orphanBlockExists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(orphanBlockHash, orphanBlock.Header.Height) require.True(t, orphanBlockExists) require.NotNil(t, orphanBlockInIndex) require.True(t, orphanBlockInIndex.IsStored()) @@ -1918,7 +1939,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.Len(t, missingBlockHashes, 0) require.NoError(t, err) - orphanBlockInIndex, orphanBlockExists = testMeta.chain.blockIndexByHash.Get(*orphanBlockHash) + orphanBlockInIndex, orphanBlockExists = testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(orphanBlockHash, orphanBlock.Header.Height) require.True(t, 
orphanBlockExists) require.NotNil(t, orphanBlockInIndex) require.True(t, orphanBlockInIndex.IsStored()) @@ -1944,7 +1965,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.True(t, missingBlockHashes[0].IsEqual(randomHash)) require.NoError(t, err) - malformedOrphanBlockInIndex, malformedOrphanBlockExists := testMeta.chain.blockIndexByHash.Get(*malformedOrphanBlockHash) + malformedOrphanBlockInIndex, malformedOrphanBlockExists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(malformedOrphanBlockHash, malformedOrphanBlock.Header.Height) require.True(t, malformedOrphanBlockExists) require.True(t, malformedOrphanBlockInIndex.IsValidateFailed()) require.True(t, malformedOrphanBlockInIndex.IsStored()) @@ -1982,11 +2003,12 @@ func TestGetSafeBlocks(t *testing.T) { testMeta := NewTestPoSBlockchainWithValidators(t) committedHash := testMeta.chain.BlockTip().Hash var block1 *MsgDeSoBlock + fmt.Println("HEX STUFF: ", hex.EncodeToString(committedHash[:])) block1 = _generateRealBlock(testMeta, uint64(testMeta.savedHeight), uint64(testMeta.savedHeight), 1723, committedHash, false) block1Hash, err := block1.Hash() require.NoError(t, err) // Add block 1 w/ stored and validated - bn1, err := testMeta.chain.storeValidatedBlockInBlockIndex(block1) + bn1, err := testMeta.chain.storeValidatedBlockInBlockIndex(block1, nil) require.NoError(t, err) require.True(t, bn1.Hash.IsEqual(block1Hash)) // Create block 2 w/ block 1 as parent and add it to the block index w/ stored & validated @@ -1994,13 +2016,13 @@ func TestGetSafeBlocks(t *testing.T) { block2 = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+1), uint64(testMeta.savedHeight+1), 1293, block1Hash, false) block2Hash, err := block2.Hash() require.NoError(t, err) - bn2, err := testMeta.chain.storeValidatedBlockInBlockIndex(block2) + bn2, err := testMeta.chain.storeValidatedBlockInBlockIndex(block2, nil) require.NoError(t, err) require.True(t, bn2.Hash.IsEqual(block2Hash)) // Add block 3 only as 
stored and validated var block3 *MsgDeSoBlock block3 = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+2), uint64(testMeta.savedHeight+2), 1372, block2Hash, false) - bn3, err := testMeta.chain.storeValidatedBlockInBlockIndex(block3) + bn3, err := testMeta.chain.storeValidatedBlockInBlockIndex(block3, nil) require.NoError(t, err) block3Hash, err := block3.Hash() require.NoError(t, err) @@ -2008,7 +2030,7 @@ func TestGetSafeBlocks(t *testing.T) { // Add block 3' only as stored var block3Prime *MsgDeSoBlock block3Prime = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+2), uint64(testMeta.savedHeight+3), 137175, block2Hash, false) - bn3Prime, err := testMeta.chain.storeBlockInBlockIndex(block3Prime) + bn3Prime, err := testMeta.chain.storeBlockInBlockIndex(block3Prime, nil) require.NoError(t, err) block3PrimeHash, err := block3Prime.Hash() require.NoError(t, err) @@ -2019,7 +2041,7 @@ func TestGetSafeBlocks(t *testing.T) { block5.Header.Height = uint64(testMeta.savedHeight + 5) block5Hash, err := block5.Hash() require.NoError(t, err) - _, err = testMeta.chain.storeValidatedBlockInBlockIndex(block5) + _, err = testMeta.chain.storeValidatedBlockInBlockIndex(block5, nil) require.NoError(t, err) // Okay let's get the safe blocks. safeBlocks, err := testMeta.chain.GetSafeBlocks() @@ -2040,7 +2062,7 @@ func TestGetSafeBlocks(t *testing.T) { require.False(t, _checkSafeBlocksForBlockHash(block5Hash, safeBlocks)) // Update block 3 prime to be validated and it should now be a safe block. - bn3Prime, err = testMeta.chain.storeValidatedBlockInBlockIndex(block3Prime) + bn3Prime, err = testMeta.chain.storeValidatedBlockInBlockIndex(block3Prime, nil) require.NoError(t, err) require.True(t, bn3Prime.IsValidated()) safeBlocks, err = testMeta.chain.GetSafeBlocks() @@ -2062,12 +2084,12 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Give the block a random parent, so it is truly an orphan. 
realBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) updateProposerVotePartialSignatureForBlock(testMeta, realBlock) - err := testMeta.chain.processOrphanBlockPoS(realBlock) + err := testMeta.chain.processOrphanBlockPoS(realBlock, nil) require.NoError(t, err) // Get the block node from the block index. blockHash, err := realBlock.Hash() require.NoError(t, err) - blockNode, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + blockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, realBlock.Header.Height) require.True(t, exists) require.True(t, blockNode.IsStored()) require.False(t, blockNode.IsValidateFailed()) @@ -2083,12 +2105,12 @@ func TestProcessOrphanBlockPoS(t *testing.T) { realBlock.Header.Version = 1 updateProposerVotePartialSignatureForBlock(testMeta, realBlock) // There should be no error, but the block should be marked as ValidateFailed. - err := testMeta.chain.processOrphanBlockPoS(realBlock) + err := testMeta.chain.processOrphanBlockPoS(realBlock, nil) require.NoError(t, err) // Get the block node from the block index. blockHash, err := realBlock.Hash() require.NoError(t, err) - blockNode, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + blockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, realBlock.Header.Height) require.True(t, exists) require.True(t, blockNode.IsStored()) require.True(t, blockNode.IsValidateFailed()) @@ -2110,12 +2132,12 @@ func TestProcessOrphanBlockPoS(t *testing.T) { realBlock.Header.ProposerVotingPublicKey = _generateRandomBLSPrivateKey(t).PublicKey() updateProposerVotePartialSignatureForBlock(testMeta, realBlock) // There should be no error, but the block should be marked as ValidateFailed. - err = testMeta.chain.processOrphanBlockPoS(realBlock) + err = testMeta.chain.processOrphanBlockPoS(realBlock, nil) require.NoError(t, err) // Get the block node from the block index. 
blockHash, err := realBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, realBlock.Header.Height) require.False(t, exists) } @@ -2162,12 +2184,12 @@ func TestProcessOrphanBlockPoS(t *testing.T) { } updateProposerVotePartialSignatureForBlock(testMeta, realBlock) // There should be no error, but the block should be marked as ValidateFailed. - err = testMeta.chain.processOrphanBlockPoS(realBlock) + err = testMeta.chain.processOrphanBlockPoS(realBlock, nil) require.NoError(t, err) // Get the block node from the block index. blockHash, err := realBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, realBlock.Header.Height) require.False(t, exists) } { @@ -2180,12 +2202,12 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Give the block a random parent, so it is truly an orphan. nextEpochBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) updateProposerVotePartialSignatureForBlock(testMeta, nextEpochBlock) - err = testMeta.chain.processOrphanBlockPoS(nextEpochBlock) + err = testMeta.chain.processOrphanBlockPoS(nextEpochBlock, nil) require.NoError(t, err) // Get the block node from the block index. 
blockHash, err := nextEpochBlock.Hash() require.NoError(t, err) - blockNode, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + blockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, nextEpochBlock.Header.Height) require.True(t, exists) require.True(t, blockNode.IsStored()) require.False(t, blockNode.IsValidateFailed()) @@ -2205,12 +2227,12 @@ func TestProcessOrphanBlockPoS(t *testing.T) { nextEpochBlock.Header.ProposerVotingPublicKey = _generateRandomBLSPrivateKey(t).PublicKey() updateProposerVotePartialSignatureForBlock(testMeta, nextEpochBlock) // There should be no error, but the block should be marked as ValidateFailed. - err = testMeta.chain.processOrphanBlockPoS(nextEpochBlock) + err = testMeta.chain.processOrphanBlockPoS(nextEpochBlock, nil) require.NoError(t, err) // Get the block node from the block index. blockHash, err := nextEpochBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, nextEpochBlock.Header.Height) require.False(t, exists) } { @@ -2224,7 +2246,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { nextEpochBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) updateProposerVotePartialSignatureForBlock(testMeta, nextEpochBlock) // Update the QC to not have a supermajority. - err = testMeta.chain.processOrphanBlockPoS(nextEpochBlock) + err = testMeta.chain.processOrphanBlockPoS(nextEpochBlock, nil) require.NoError(t, err) // Update the QC to not have a supermajority. // Get all the bls keys for the validators that aren't the leader. 
@@ -2256,12 +2278,12 @@ func TestProcessOrphanBlockPoS(t *testing.T) { Signature: aggregatedSignature, } updateProposerVotePartialSignatureForBlock(testMeta, nextEpochBlock) - err = testMeta.chain.processOrphanBlockPoS(nextEpochBlock) + err = testMeta.chain.processOrphanBlockPoS(nextEpochBlock, nil) require.NoError(t, err) // Get the block node from the block index. blockHash, err := nextEpochBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, nextEpochBlock.Header.Height) require.False(t, exists) } { @@ -2285,12 +2307,12 @@ func TestProcessOrphanBlockPoS(t *testing.T) { twoEpochsInFutureBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) updateProposerVotePartialSignatureForBlock(testMeta, twoEpochsInFutureBlock) // We should get an error that this block is too far in the future. - err = testMeta.chain.processOrphanBlockPoS(twoEpochsInFutureBlock) + err = testMeta.chain.processOrphanBlockPoS(twoEpochsInFutureBlock, nil) require.Error(t, err) // The block shouldn't be in the block index. blockHash, err := twoEpochsInFutureBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, twoEpochsInFutureBlock.Header.Height) require.False(t, exists) } { @@ -2302,12 +2324,12 @@ func TestProcessOrphanBlockPoS(t *testing.T) { require.NoError(t, err) var prevEpochBlock *MsgDeSoBlock prevEpochBlock = _generateRealBlock(testMeta, prevEpochEntry.FinalBlockHeight, prevEpochEntry.FinalBlockHeight, 17283, testMeta.chain.BlockTip().Hash, false) - err = testMeta.chain.processOrphanBlockPoS(prevEpochBlock) + err = testMeta.chain.processOrphanBlockPoS(prevEpochBlock, nil) require.NoError(t, err) // The block should be in the block index. 
blockHash, err := prevEpochBlock.Hash() require.NoError(t, err) - blockNode, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + blockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, prevEpochBlock.Header.Height) require.True(t, exists) require.True(t, blockNode.IsStored()) require.False(t, blockNode.IsValidateFailed()) @@ -2323,7 +2345,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { utxoView := _newUtxoView(testMeta) snapshotEpochNumber, err := utxoView.GetCurrentSnapshotEpochNumber() require.NoError(t, err) - isValid, err := utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + isValid, err := utxoView.hasValidProposerPartialSignaturePoS(realBlock, nil, snapshotEpochNumber) require.NoError(t, err) require.True(t, isValid) @@ -2331,7 +2353,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { realVotingPublicKey := realBlock.Header.ProposerVotingPublicKey { realBlock.Header.ProposerVotingPublicKey = _generateRandomBLSPrivateKey(t).PublicKey() - isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, nil, snapshotEpochNumber) require.NoError(t, err) require.False(t, isValid) // Reset the proposer voting public key @@ -2343,7 +2365,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { incorrectPayload := consensus.GetVoteSignaturePayload(13, testMeta.chain.BlockTip().Hash) realBlock.Header.ProposerVotePartialSignature, err = testMeta.blsPubKeyToBLSKeyMap[realBlock.Header.ProposerVotingPublicKey.ToString()].Sign(incorrectPayload[:]) - isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, nil, snapshotEpochNumber) require.NoError(t, err) require.False(t, isValid) } @@ -2356,7 +2378,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { 
correctPayload := consensus.GetVoteSignaturePayload(12, realBlockHash) wrongPrivateKey := _generateRandomBLSPrivateKey(t) realBlock.Header.ProposerVotePartialSignature, err = wrongPrivateKey.Sign(correctPayload[:]) - isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, nil, snapshotEpochNumber) require.NoError(t, err) require.False(t, isValid) } @@ -2375,7 +2397,7 @@ func TestHasValidProposerRandomSeedSignaturePoS(t *testing.T) { require.NoError(t, err) realBlockHash, err := realBlock.Hash() require.NoError(t, err) - realBlockNode, exists := testMeta.chain.blockIndexByHash.Get(*realBlockHash) + realBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(realBlockHash, realBlock.Header.Height) require.True(t, exists) require.True(t, realBlockNode.IsStored()) require.False(t, realBlockNode.IsValidateFailed()) @@ -2461,13 +2483,13 @@ func _generateRealBlockWithFailingTxn(testMeta *TestMeta, blockHeight uint64, vi } // TODO: Get real seed signature. 
- prevBlock, exists := testMeta.chain.blockIndexByHash.Get(*prevBlockHash) + prevBlock, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(prevBlockHash, blockHeight-1) require.True(testMeta.t, exists) // Always update the testMeta latestBlockView - latestBlockViewAndUtxoOps, err := testMeta.chain.GetUtxoViewAndUtxoOpsAtBlockHash(*prevBlockHash) + latestBlockViewAndUtxoOps, err := testMeta.chain.GetUtxoViewAndUtxoOpsAtBlockHash(*prevBlockHash, blockHeight-1) require.NoError(testMeta.t, err) latestBlockView := latestBlockViewAndUtxoOps.UtxoView - latestBlockNode, latestBlockNodeExists := testMeta.chain.blockIndexByHash.Get(*prevBlockHash) + latestBlockNode, latestBlockNodeExists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(prevBlockHash, blockHeight-1) require.True(testMeta.t, latestBlockNodeExists) latestBlockHeight := latestBlockNode.Height testMeta.posMempool.UpdateLatestBlock(latestBlockView, uint64(latestBlockHeight)) @@ -2521,10 +2543,10 @@ func _generateDummyBlock(testMeta *TestMeta, blockHeight uint64, view uint64, se require.NoError(testMeta.t, err) // Add block to block index. - blockNode, err := testMeta.chain.storeBlockInBlockIndex(msgDesoBlock) + blockNode, err := testMeta.chain.storeBlockInBlockIndex(msgDesoBlock, nil) require.NoError(testMeta.t, err) require.True(testMeta.t, blockNode.IsStored()) - _, exists := testMeta.chain.blockIndexByHash.Get(*newBlockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(newBlockHash, msgDesoBlock.Header.Height) require.True(testMeta.t, exists) // Remove the transactions from this block from the mempool. // This prevents nonce reuse issues when trying to make failing blocks. @@ -2537,17 +2559,20 @@ func _generateDummyBlock(testMeta *TestMeta, blockHeight uint64, view uint64, se // _generateBlockAndAddToBestChain generates a BlockTemplate by calling _generateRealBlock and then adds it to the // best chain. Finally it updates the PosMempool's latest block view. 
func _generateBlockAndAddToBestChain(testMeta *TestMeta, blockHeight uint64, view uint64, seed int64) *MsgDeSoBlock { - blockTemplate := _generateRealBlock(testMeta, blockHeight, view, seed, testMeta.chain.BlockTip().Hash, false) + prevBlockNode, exists, err := testMeta.chain.GetBlockFromBestChainByHeight(blockHeight-1, false) + require.NoError(testMeta.t, err) + require.True(testMeta.t, exists) + blockTemplate := _generateRealBlock(testMeta, blockHeight, view, seed, prevBlockNode.Hash, false) var msgDesoBlock *MsgDeSoBlock msgDesoBlock = blockTemplate newBlockHash, err := msgDesoBlock.Hash() require.NoError(testMeta.t, err) // Add block to block index. - blockNode, err := testMeta.chain.storeValidatedBlockInBlockIndex(msgDesoBlock) + blockNode, err := testMeta.chain.storeValidatedBlockInBlockIndex(msgDesoBlock, nil) require.NoError(testMeta.t, err) require.True(testMeta.t, blockNode.IsStored()) require.True(testMeta.t, blockNode.IsValidated()) - newBlockNode, exists := testMeta.chain.blockIndexByHash.Get(*newBlockHash) + newBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(newBlockHash, msgDesoBlock.Header.Height) require.True(testMeta.t, exists) testMeta.chain.addTipBlockToBestChain(newBlockNode) // Update the latest block view @@ -2678,7 +2703,7 @@ func _getFullRealBlockTemplate( // Get leader voting private key. leaderVotingPrivateKey := testMeta.pubKeyToBLSKeyMap[leaderPublicKey] // Get hash of last block - chainTip, _ := testMeta.chain.blockIndexByHash.Get(*blockTemplate.Header.PrevBlockHash) + chainTip, _ := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockTemplate.Header.PrevBlockHash, blockTemplate.Header.Height-1) chainTipHash := chainTip.Hash // Get the vote signature payload // Hack to get view numbers working properly w/ PoW blocks. 
@@ -2819,6 +2844,7 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { for ii := 0; ii < 10; ii++ { _, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, oldPool) require.NoError(t, err) + fmt.Println("CHAIN TIP: ", chain.BlockTip().Hash) } m0PubBytes, _, _ := Base58CheckDecode(m0Pub) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index ea731def3..ceb3923e8 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -250,7 +250,10 @@ func (fc *FastHotStuffConsensus) handleBlockProposalEvent( // Fetch the parent block parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) - parentBlock, parentBlockExists := fc.blockchain.blockIndexByHash.Get(*parentBlockHash) + parentBlock, parentBlockExists, err := fc.blockchain.blockIndex.GetBlockNodeByHashOnly(parentBlockHash) + if err != nil { + return errors.Errorf("Error fetching parent block: %v", parentBlockHash) + } if !parentBlockExists { return errors.Errorf("Error fetching parent block: %v", parentBlockHash) } @@ -314,7 +317,7 @@ func (fc *FastHotStuffConsensus) handleBlockProposalEvent( } // Process the block locally - missingBlockHashes, err := fc.tryProcessBlockAsNewTip(blockProposal) + missingBlockHashes, err := fc.tryProcessBlockAsNewTip(blockProposal, blockHash) if err != nil { return errors.Errorf("Error processing block locally: %v", err) } @@ -487,7 +490,8 @@ func (fc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo tipBlockHash := BlockHashFromConsensusInterface(event.TipBlockHash) // Fetch the HighQC from the Blockchain struct - tipBlockNode, tipBlockExists := fc.blockchain.blockIndexByHash.Get(*tipBlockHash) + // TODO: validate that TipHeight is a uint32 + tipBlockNode, tipBlockExists := fc.blockchain.blockIndex.GetBlockNodeByHashAndHeight(tipBlockHash, event.TipBlockHeight) if !tipBlockExists { return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error fetching tip block: %v", tipBlockHash) } @@ -558,13 +562,17 
@@ func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoVa // If we don't have the highQC's block on hand, then we need to request it from the peer. We do // that first before storing the timeout message locally in the FastHotStuffEventLoop. This // prevents spamming of timeout messages by peers. - if !fc.blockchain.HasBlockInBlockIndex(msg.HighQC.BlockHash) { - err := errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Missing highQC's block: %v", msg.HighQC.BlockHash) + hasBlockInBlockIndex, err := fc.blockchain.HasBlockInBlockIndex(msg.HighQC.BlockHash) + if err != nil { + return nil, errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorTimeout: Error fetching block: ") + } + if !hasBlockInBlockIndex { + err = errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Missing highQC's block: %v", msg.HighQC.BlockHash) return []*BlockHash{msg.HighQC.BlockHash}, err } // Process the timeout message locally in the FastHotStuffEventLoop - if err := fc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { + if err = fc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { // If we can't process the timeout locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. 
glog.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout msg: %v", err) @@ -575,7 +583,11 @@ func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoVa return nil, nil } -func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) (missingBlockHashes []*BlockHash, _err error) { +func (fc *FastHotStuffConsensus) HandleBlock( + pp *Peer, + msg *MsgDeSoBlock, + blockHash *BlockHash, +) (missingBlockHashes []*BlockHash, _err error) { glog.V(2).Infof("FastHotStuffConsensus.HandleBlock: Received block: \n%s", msg.String()) glog.V(2).Infof("FastHotStuffConsensus.HandleBlock: %s", fc.fastHotStuffEventLoop.ToString()) @@ -597,7 +609,7 @@ func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) (missi // Try to apply the block as the new tip of the blockchain. If the block is an orphan, then // we will get back a list of missing ancestor block hashes. We can fetch the missing blocks // from the network and retry. - missingBlockHashes, err := fc.tryProcessBlockAsNewTip(msg) + missingBlockHashes, err := fc.tryProcessBlockAsNewTip(msg, blockHash) if err != nil { // If we get an error here, it means something went wrong with the block processing algorithm. // Nothing we can do to recover here. 
@@ -627,10 +639,14 @@ func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) (missi // // Reference Implementation: // https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L573 -func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]*BlockHash, error) { +func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip( + block *MsgDeSoBlock, + blockHash *BlockHash, +) ([]*BlockHash, error) { // Try to apply the block locally as the new tip of the blockchain successfullyAppliedNewTip, _, missingBlockHashes, err := fc.blockchain.processBlockPoS( block, // Pass in the block itself + blockHash, fc.fastHotStuffEventLoop.GetCurrentView(), // Pass in the current view to ensure we don't process a stale block true, // Make sure we verify signatures in the block ) @@ -693,7 +709,7 @@ func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ return nil, errors.Errorf("Error hashing tip block: %v", err) } - utxoViewAndUtxoOps, err := fc.blockchain.GetUtxoViewAndUtxoOpsAtBlockHash(*tipBlockHash) + utxoViewAndUtxoOps, err := fc.blockchain.GetUtxoViewAndUtxoOpsAtBlockHash(*tipBlockHash, tipBlock.Height) if err != nil { return nil, errors.Errorf("Error fetching UtxoView for tip block: %v", err) } @@ -733,13 +749,16 @@ func (fc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) // Fetch the parent block - parentBlock, parentBlockExists := fc.blockchain.blockIndexByHash.Get(*parentBlockHash) + parentBlock, parentBlockExists, err := fc.blockchain.blockIndex.GetBlockNodeByHashOnly(parentBlockHash) + if err != nil { + return nil, errors.Errorf("Error fetching parent block: %v", parentBlockHash) + } if !parentBlockExists { return nil, errors.Errorf("Error fetching parent block: %v", parentBlockHash) } // Build a UtxoView at the parent block - parentUtxoViewAndUtxoOps, err 
:= fc.blockchain.GetUtxoViewAndUtxoOpsAtBlockHash(*parentBlockHash) + parentUtxoViewAndUtxoOps, err := fc.blockchain.GetUtxoViewAndUtxoOpsAtBlockHash(*parentBlockHash, uint64(parentBlock.Height)) if err != nil { // This should never happen as long as the parent block is a descendant of the committed tip. return nil, errors.Errorf("Error fetching UtxoView for parent block: %v", parentBlockHash) diff --git a/lib/pos_consensus_test.go b/lib/pos_consensus_test.go index 130aa901c..8010e5aeb 100644 --- a/lib/pos_consensus_test.go +++ b/lib/pos_consensus_test.go @@ -102,6 +102,11 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { currentView := blockHeader.ValidatorsVoteQC.GetView() + 1 nextView := currentView + 1 + blockIndex := NewBlockIndex(nil, nil, nil) + blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{ + *blockHash: {Header: blockHeader, Height: uint32(blockHeader.Height), Hash: blockHash}, + }) + // Create a mock consensus fastHotStuffConsensus := FastHotStuffConsensus{ lock: sync.RWMutex{}, @@ -111,11 +116,9 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { }, params: &DeSoTestnetParams, blockchain: &Blockchain{ - ChainLock: deadlock.RWMutex{}, - blockIndexByHash: collections.NewConcurrentMapFromMap(map[BlockHash]*BlockNode{ - *blockHash: {Header: blockHeader}, - }), - params: &DeSoTestnetParams, + ChainLock: deadlock.RWMutex{}, + blockIndex: blockIndex, + params: &DeSoTestnetParams, }, fastHotStuffEventLoop: &consensus.MockFastHotStuffEventLoop{ OnIsInitialized: alwaysReturnTrue, diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 6895c7e30..5592553f0 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -8,10 +8,9 @@ import ( "sync/atomic" "time" - "github.com/decred/dcrd/container/lru" - "github.com/dgraph-io/badger/v3" "github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" ) @@ -183,11 +182,11 @@ type PosMempool struct { // recentBlockTxnCache is an LRU KV cache 
used to track the transaction that have been included in blocks. // This cache is used to power logic that waits for a transaction to either be validated in the mempool // or be included in a block. - recentBlockTxnCache lru.Set[BlockHash] + recentBlockTxnCache *lru.Cache[BlockHash, struct{}] // recentRejectedTxnCache is a cache to store the txns that were recently rejected so that we can return better // errors for them. - recentRejectedTxnCache lru.Map[BlockHash, error] + recentRejectedTxnCache *lru.Cache[BlockHash, error] } func NewPosMempool() *PosMempool { @@ -234,8 +233,8 @@ func (mp *PosMempool) Init( mp.mempoolBackupIntervalMillis = mempoolBackupIntervalMillis mp.maxValidationViewConnects = maxValidationViewConnects mp.transactionValidationRefreshIntervalMillis = transactionValidationRefreshIntervalMillis - mp.recentBlockTxnCache = *lru.NewSet[BlockHash](100000) // cache 100K latest txns from blocks. - mp.recentRejectedTxnCache = *lru.NewMap[BlockHash, error](100000) // cache 100K rejected txns. + mp.recentBlockTxnCache, _ = lru.New[BlockHash, struct{}](100000) // cache 100K latest txns from blocks. + mp.recentRejectedTxnCache, _ = lru.New[BlockHash, error](100000) // cache 100K rejected txns. // Recreate and initialize the transaction register and the nonce tracker. 
mp.txnRegister = NewTransactionRegister() @@ -486,11 +485,11 @@ func (mp *PosMempool) AddTransaction(txn *MsgDeSoTxn, txnTimestamp time.Time) er } func (mp *PosMempool) addTxnHashToRecentBlockCache(txnHash BlockHash) { - mp.recentBlockTxnCache.Put(txnHash) + mp.recentBlockTxnCache.Add(txnHash, struct{}{}) } func (mp *PosMempool) deleteTxnHashFromRecentBlockCache(txnHash BlockHash) { - mp.recentBlockTxnCache.Delete(txnHash) + mp.recentBlockTxnCache.Remove(txnHash) } func (mp *PosMempool) isTxnHashInRecentBlockCache(txnHash BlockHash) bool { return mp.recentBlockTxnCache.Contains(txnHash) @@ -847,7 +846,7 @@ func (mp *PosMempool) validateTransactions() error { // Mark the txn as invalid and add an error to the cache so we can return it to the user if they // try to resubmit it. txn.SetValidated(false) - mp.recentRejectedTxnCache.Put(*txn.Hash, err) + mp.recentRejectedTxnCache.Add(*txn.Hash, err) // Try to remove the transaction with a lock. mp.removeTransaction(txn, true) diff --git a/lib/postgres.go b/lib/postgres.go index a80000674..9c9ba432f 100644 --- a/lib/postgres.go +++ b/lib/postgres.go @@ -6,7 +6,7 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/deso-protocol/core/collections" + "github.com/hashicorp/golang-lru/v2" "net/url" "regexp" "strings" @@ -1298,6 +1298,8 @@ func (postgres *Postgres) UpsertBlockTx(tx *pg.Tx, blockNode *BlockNode) error { // The genesis block has a nil parent if blockNode.Parent != nil { block.ParentHash = blockNode.Parent.Hash + } else if !blockNode.Header.PrevBlockHash.IsEqual(GenesisBlockHash) { + block.ParentHash = blockNode.Header.PrevBlockHash } _, err := tx.Model(block).WherePK().OnConflict("(hash) DO UPDATE").Insert() @@ -1305,16 +1307,16 @@ func (postgres *Postgres) UpsertBlockTx(tx *pg.Tx, blockNode *BlockNode) error { } // GetBlockIndex gets all the PGBlocks and creates a map of BlockHash to BlockNode as needed by blockchain.go -func (postgres *Postgres) GetBlockIndex() (*collections.ConcurrentMap[BlockHash, 
*BlockNode], error) { +func (postgres *Postgres) GetBlockIndex() (*lru.Cache[BlockHash, *BlockNode], error) { var blocks []PGBlock err := postgres.db.Model(&blocks).Select() if err != nil { return nil, err } - blockMap := collections.NewConcurrentMap[BlockHash, *BlockNode]() + blockMap, _ := lru.New[BlockHash, *BlockNode](MaxBlockIndexNodes) for _, block := range blocks { - blockMap.Set(*block.Hash, &BlockNode{ + blockMap.Add(*block.Hash, &BlockNode{ Hash: block.Hash, Height: uint32(block.Height), DifficultyTarget: block.DifficultyTarget, @@ -1333,17 +1335,18 @@ func (postgres *Postgres) GetBlockIndex() (*collections.ConcurrentMap[BlockHash, } // Setup parent pointers - blockMap.Iterate(func(key BlockHash, blockNode *BlockNode) { + for _, key := range blockMap.Keys() { + blockNode, _ := blockMap.Get(key) // Genesis block has nil parent parentHash := blockNode.Header.PrevBlockHash if parentHash != nil { parent, exists := blockMap.Get(*parentHash) - if !exists { + if !exists && blockNode.Height > 0 { glog.Fatal("Parent block not found in block map") } blockNode.Parent = parent } - }) + } return blockMap, nil } diff --git a/lib/server.go b/lib/server.go index 78d72a8e3..47638cf2e 100644 --- a/lib/server.go +++ b/lib/server.go @@ -4,27 +4,25 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/deso-protocol/go-deadlock" + "github.com/dgraph-io/badger/v3" "net" + "path/filepath" "reflect" "runtime" "strings" "sync/atomic" "time" - "github.com/btcsuite/btcd/wire" - "github.com/deso-protocol/core/collections" - "github.com/deso-protocol/core/consensus" - - "github.com/decred/dcrd/container/lru" - "github.com/DataDog/datadog-go/v5/statsd" - "github.com/btcsuite/btcd/addrmgr" chainlib "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" - "github.com/dgraph-io/badger/v3" + "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/consensus" + "github.com/deso-protocol/go-deadlock" 
"github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" ) @@ -90,7 +88,8 @@ type Server struct { // adding it to this map and checking this map before replying will make it // so that we only send a reply to the first peer that sent us the inv, which // is more efficient. - inventoryBeingProcessed lru.Set[InvVect] + inventoryBeingProcessed *lru.Cache[InvVect, struct{}] + // hasRequestedSync indicates whether we've bootstrapped our mempool // by requesting all mempool transactions from a // peer. It's initially false @@ -228,7 +227,7 @@ func (srv *Server) _removeRequest(hash *BlockHash) { Type: InvTypeTx, Hash: *hash, } - srv.inventoryBeingProcessed.Delete(*invVect) + srv.inventoryBeingProcessed.Remove(*invVect) } // dataLock must be acquired for writing before calling this function. @@ -363,6 +362,25 @@ func ValidateHyperSyncFlags(isHypersync bool, syncType NodeSyncType) { } } +func RunBlockIndexMigrationOnce(db *badger.DB, params *DeSoParams) error { + blockIndexMigrationFileName := filepath.Join(db.Opts().Dir, BlockIndexMigrationFileName) + glog.V(2).Info("FileName: ", blockIndexMigrationFileName) + hasRunMigration, err := ReadBoolFromFile(blockIndexMigrationFileName) + if err == nil && hasRunMigration { + glog.V(2).Info("Block index migration has already been run") + return nil + } + glog.V(2).Info("Running block index migration") + if err = RunBlockIndexMigration(db, nil, nil, params); err != nil { + return errors.Wrapf(err, "Problem running block index migration") + } + if err = SaveBoolToFile(blockIndexMigrationFileName, true); err != nil { + return errors.Wrapf(err, "Problem saving block index migration file") + } + glog.V(2).Info("Block index migration complete") + return nil +} + // NewServer initializes all of the internal data structures. Right now this basically // looks as follows: // - ConnectionManager starts and keeps track of peers. 
@@ -687,7 +705,8 @@ func NewServer( srv.blockProducer = _blockProducer srv.incomingMessages = _incomingMessages // Make this hold a multiple of what we hold for individual peers. - srv.inventoryBeingProcessed = *lru.NewSet[InvVect](maxKnownInventory) + srv.inventoryBeingProcessed, _ = lru.New[InvVect, struct{}](maxKnownInventory) + srv.requestTimeoutSeconds = 10 srv.statsdClient = statsd @@ -753,7 +772,21 @@ func (srv *Server) _handleGetHeaders(pp *Peer, msg *MsgDeSoGetHeaders) { if pp.NegotiatedProtocolVersion >= ProtocolVersion2 { maxHeadersPerMsg = MaxHeadersPerMsgPos } - headers := srv.blockchain.LocateBestBlockChainHeaders(msg.BlockLocator, msg.StopHash, maxHeadersPerMsg) + // FIXME: We can eliminate the call to LocateBestBlockChainHeaders and do a much + // simpler "shortcut" version that doesn't require complicated tree-traversal bs. + // The shortcut would be to just return all headers starting from msg.BlockLocator[0] + // up to msg.StopHash or maxHeadersPerMsg, whichever comes first. This would allow + // other nodes to sync from us and *keep* in sync with us, while allowing us to delete + // ALL of the complicated logic around locators and the best header chain. This all works + // because msg.BlockLocator[0] is the requesting-node's tip hash. The rest of the + // hashes, and all of the locator bs, are only needed to resolve forks, which can't + // happen with PoS anymore. + //headers := srv.blockchain.LocateBestBlockChainHeaders(msg.BlockLocator, msg.StopHash, maxHeadersPerMsg) + + headers, err := srv.GetHeadersForLocatorAndStopHash(msg.BlockLocator, msg.StopHash, maxHeadersPerMsg) + if err != nil { + glog.Errorf("Server._handleGetHeadersMessage: Error getting headers: %v", err) + } // Send found headers to the requesting peer. 
blockTip := srv.blockchain.blockTip() @@ -767,6 +800,51 @@ func (srv *Server) _handleGetHeaders(pp *Peer, msg *MsgDeSoGetHeaders) { headers, blockTip.Hash, blockTip.Height, pp) } +func (srv *Server) GetHeadersForLocatorAndStopHash( + locator []*BlockHash, + stopHash *BlockHash, + maxHeadersPerMsg uint32, +) ([]*MsgDeSoHeader, error) { + var headers []*MsgDeSoHeader + + stopNode, stopNodeExists, stopNodeError := srv.blockchain.GetBlockFromBestChainByHashAndOptionalHeight(stopHash, nil, true) + // Special case when there is no block locator provided but only a stop hash. + if len(locator) == 0 { + if stopNodeError != nil || !stopNodeExists || stopNode == nil { + return nil, fmt.Errorf("GetHeadersForLocatorAndStopHash: Stop hash provided but no stop node found") + } + return []*MsgDeSoHeader{stopNode.Header}, nil + } + startNode, startNodeExists, startNodeError := srv.blockchain.GetBlockFromBestChainByHashAndOptionalHeight(locator[0], nil, true) + if startNodeError != nil || !startNodeExists || startNode == nil { + return nil, fmt.Errorf("GetHeadersForLocatorAndStopHash: Start hash provided but no start node found") + } + nextNodeHeight := startNode.Header.Height + 1 + nextNode, nextNodeExists, nextNodeError := srv.blockchain.GetBlockFromBestChainByHeight(nextNodeHeight, true) + if nextNodeError != nil { + return nil, fmt.Errorf("GetHeadersForLocatorAndStopHash: Error getting start node by height: %v", startNodeError) + } + if !nextNodeExists || nextNode == nil { + return nil, nil + } + for ii := uint32(0); ii < maxHeadersPerMsg; ii++ { + headers = append(headers, nextNode.Header) + if stopNode != nil && nextNode.Hash.IsEqual(stopNode.Hash) { + break + } + nextNode, nextNodeExists, nextNodeError = srv.blockchain.GetBlockFromBestChainByHeight( + nextNode.Header.Height+1, true) + if nextNodeError != nil { + glog.Errorf("Server._handleGetHeadersMessage: Error getting next node by height: %v", nextNodeError) + break + } + if !nextNodeExists || nextNode == nil { + break + 
} + } + return headers, nil +} + // GetSnapshot is used for sending MsgDeSoGetSnapshot messages to peers. We will // check if the passed peer has been assigned to an in-progress prefix and if so, // we will request a snapshot data chunk from them. Otherwise, we will assign a @@ -862,7 +940,17 @@ func (srv *Server) GetBlocksToStore(pp *Peer) { } // Go through the block nodes in the blockchain and download the blocks if they're not stored. - for _, blockNode := range srv.blockchain.bestChain { + for ii := uint32(srv.blockchain.lowestBlockNotStored); ii <= srv.blockchain.blockTip().Height; ii++ { + // TODO: this may be really slow. + blockNode, exists, err := srv.blockchain.GetBlockFromBestChainByHeight(uint64(ii), true) + if err != nil { + glog.Errorf("GetBlocksToStore: Error getting block from best chain by height: %v", err) + return + } + if !exists { + glog.Errorf("GetBlocksToStore: Block at height %v not found in best chain", ii) + return + } // We find the first block that's not stored and get ready to download blocks starting from this block onwards. if blockNode.Status&StatusBlockStored == 0 { maxBlocksInFlight := MaxBlocksInFlight @@ -872,28 +960,37 @@ func (srv *Server) GetBlocksToStore(pp *Peer) { maxBlocksInFlight = MaxBlocksInFlightPoS } + srv.blockchain.lowestBlockNotStored = uint64(blockNode.Height) numBlocksToFetch := maxBlocksInFlight - len(pp.requestedBlocks) - currentHeight := int(blockNode.Height) + currentHeight := uint64(blockNode.Height) blockNodesToFetch := []*BlockNode{} // In case there are blocks at tip that are already stored (which shouldn't really happen), we'll not download them. - var heightLimit int - for heightLimit = len(srv.blockchain.bestChain) - 1; heightLimit >= 0; heightLimit-- { - if !srv.blockchain.bestChain[heightLimit].Status.IsFullyProcessed() { + // We filter those out in the loop below by checking IsFullyProcessed. + // Find the blocks that we should download. 
+ for len(blockNodesToFetch) < numBlocksToFetch { + if currentHeight > uint64(srv.blockchain.blockTip().Height) { break } - } - - // Find the blocks that we should download. - for currentHeight <= heightLimit && - len(blockNodesToFetch) < numBlocksToFetch { - // Get the current hash and increment the height. Genesis has height 0, so currentHeight corresponds to // the array index. - currentNode := srv.blockchain.bestChain[currentHeight] + // TODO: this may be really slow. + currentNode, currNodeExists, err := srv.blockchain.GetBlockFromBestChainByHeight(currentHeight, true) + if err != nil { + glog.Errorf("GetBlocksToStore: Error getting block from best chain by height: %v", err) + return + } + if !currNodeExists { + glog.Errorf("GetBlocksToStore: Block at height %v not found in best chain", currentHeight) + return + } currentHeight++ + // If this node is already fully processed, then we don't need to download it. + if currentNode.Status.IsFullyProcessed() { + break + } // If we've already requested this block then we don't request it again. - if _, exists := pp.requestedBlocks[*currentNode.Hash]; exists { + if _, exists = pp.requestedBlocks[*currentNode.Hash]; exists { continue } @@ -1013,14 +1110,20 @@ func (srv *Server) shouldVerifySignatures(header *MsgDeSoHeader, isHeaderChain b if checkpointBlockInfo == nil { return true, false } - var hasSeenCheckpointBlockHash bool - var checkpointBlockNode *BlockNode + // TODO: @diamondhands - why can't we move this up in this function? It seems like we can avoid + // checking if we have the checkpoint block node if the header we're processing is below the height. + // This will save us 17-18% of the time it takes to process headers. + // If the current header has a height below the checkpoint block height, we should skip signature verification + // even if we've seen the checkpoint block hash. 
+ if header.Height < checkpointBlockInfo.Height { + return false, false + } srv.blockchain.ChainLock.RLock() defer srv.blockchain.ChainLock.RUnlock() - if isHeaderChain { - checkpointBlockNode, hasSeenCheckpointBlockHash = srv.blockchain.bestHeaderChainMap[*checkpointBlockInfo.Hash] - } else { - checkpointBlockNode, hasSeenCheckpointBlockHash = srv.blockchain.bestChainMap[*checkpointBlockInfo.Hash] + checkpointBlockNode, hasSeenCheckpointBlockHash, err := srv.blockchain.GetBlockFromBestChainByHashAndOptionalHeight( + checkpointBlockInfo.Hash, &checkpointBlockInfo.Height, isHeaderChain) + if err != nil { + glog.Fatalf("shouldVerifySignatures: Problem getting checkpoint block node from best chain: %v", err) } // If we haven't seen the checkpoint block hash yet, we skip signature verification. if !hasSeenCheckpointBlockHash { @@ -1031,11 +1134,6 @@ func (srv *Server) shouldVerifySignatures(header *MsgDeSoHeader, isHeaderChain b } return false, false } - // If the current header has a height below the checkpoint block height, we should skip signature verification - // even if we've seen the checkpoint block hash. - if header.Height < checkpointBlockInfo.Height { - return false, false - } // Make sure that the header in the best chain map has the correct height, otherwise we need to disconnect this peer. 
if uint64(checkpointBlockNode.Height) != checkpointBlockInfo.Height { return true, true @@ -1048,13 +1146,11 @@ func (srv *Server) getCheckpointSyncingStatus(isHeaders bool) string { if checkpointBlockInfo == nil { return "" } - hasSeenCheckPointBlockHash := false - srv.blockchain.ChainLock.RLock() - defer srv.blockchain.ChainLock.RUnlock() - if isHeaders { - _, hasSeenCheckPointBlockHash = srv.blockchain.bestHeaderChainMap[*checkpointBlockInfo.Hash] - } else { - _, hasSeenCheckPointBlockHash = srv.blockchain.bestChainMap[*checkpointBlockInfo.Hash] + _, hasSeenCheckPointBlockHash, err := srv.blockchain.GetBlockFromBestChainByHashAndOptionalHeight( + checkpointBlockInfo.Hash, &checkpointBlockInfo.Height, isHeaders) + + if err != nil { + glog.Fatalf("getCheckpointSyncingStatus: Problem getting checkpoint block node from best chain: %v", err) } if !hasSeenCheckPointBlockHash { return fmt.Sprintf("", checkpointBlockInfo.String()) @@ -1079,6 +1175,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // Start by processing all the headers given to us. They should start // right after the tip of our header chain ideally. While going through them // tally up the number that we actually process. + var blockNodeBatch []*BlockNode for ii, headerReceived := range msg.Headers { // If we've set a maximum height for node sync and we've reached it, // then we will not process any more headers. @@ -1091,7 +1188,8 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // have this issue. Hitting duplicates after we're done syncing is // fine and can happen in certain cases. 
headerHash, _ := headerReceived.Hash() - if srv.blockchain.HasHeader(headerHash) { + hasHeader := srv.blockchain.HasHeaderByHashAndHeight(headerHash, headerReceived.Height) + if hasHeader { if srv.blockchain.isSyncing() { glog.Warningf("Server._handleHeaderBundle: Duplicate header %v received from peer %v "+ @@ -1117,6 +1215,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // If we get here then we have a header we haven't seen before. // check if we need to verify signatures + // TODO: we can add some logic into should verify signatures to avoid trying to get the checkpoint block node. verifySignatures, shouldDisconnect := srv.shouldVerifySignatures(headerReceived, true) if shouldDisconnect { glog.Errorf("Server._handleHeaderBundle: Disconnecting peer %v in state %s because a mismatch was "+ @@ -1129,7 +1228,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // Process the header, as we haven't seen it before, set verifySignatures to false // if we're in the process of syncing. - _, isOrphan, err := srv.blockchain.ProcessHeader(headerReceived, headerHash, verifySignatures) + blockNode, _, isOrphan, err := srv.blockchain.ProcessHeader(headerReceived, headerHash, verifySignatures) numLogHeaders := 2000 if ii%numLogHeaders == 0 { @@ -1149,9 +1248,29 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { pp, srv.blockchain.chainState(), err, isOrphan) pp.Disconnect("Error processing header") + // Just to be safe, we flush all the headers we just got even tho we have a header. 
+ currTime := time.Now() + if err = PutHeightHashToNodeInfoBatch( + srv.blockchain.db, srv.snapshot, blockNodeBatch, false /*bitcoinNodes*/, srv.eventManager); err != nil { + glog.Errorf("Server._handleHeaderBundle: Problem writing block nodes to db, error: (%v)", err) + return + } + glog.V(0).Info("Server._handleHeaderBundle: PutHeightHashToNodeInfoBatch took: ", time.Since(currTime)) return } + + // Append the block node to the block node batch. + if blockNode != nil { + blockNodeBatch = append(blockNodeBatch, blockNode) + } } + currTime := time.Now() + if err := PutHeightHashToNodeInfoBatch( + srv.blockchain.db, srv.snapshot, blockNodeBatch, false /*bitcoinNodes*/, srv.eventManager); err != nil { + glog.Errorf("Server._handleHeaderBundle: Problem writing block nodes to db, error: (%v)", err) + return + } + glog.V(0).Info("Server._handleHeaderBundle: PutHeightHashToNodeInfoBatch took: ", time.Since(currTime)) // After processing all the headers this will check to see if we are fully current // and send a request to our Peer to start a Mempool sync if so. @@ -1233,11 +1352,21 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // expected height at which the snapshot should be taking place. We do this to make sure that the // snapshot we receive from the peer is up-to-date. // TODO: error handle if the hash doesn't exist for some reason. 
+ expectedSnapshotHeightBlock, expectedSnapshotHeightblockExists, err := + srv.blockchain.GetBlockFromBestChainByHeight(expectedSnapshotHeight, true) + if err != nil { + glog.Errorf("Server._handleHeaderBundle: Problem getting expected snapshot height block, error (%v)", err) + return + } + if !expectedSnapshotHeightblockExists || expectedSnapshotHeightBlock == nil { + glog.Errorf("Server._handleHeaderBundle: Expected snapshot height block doesn't exist.") + return + } srv.HyperSyncProgress.SnapshotMetadata = &SnapshotEpochMetadata{ SnapshotBlockHeight: expectedSnapshotHeight, FirstSnapshotBlockHeight: expectedSnapshotHeight, CurrentEpochChecksumBytes: []byte{}, - CurrentEpochBlockHash: srv.blockchain.bestHeaderChain[expectedSnapshotHeight].Hash, + CurrentEpochBlockHash: expectedSnapshotHeightBlock.Hash, } srv.HyperSyncProgress.PrefixProgress = []*SyncPrefixProgress{} srv.HyperSyncProgress.Completed = false @@ -1314,7 +1443,8 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // we're either not aware of or that we don't think is the best chain. // Doing things this way makes it so that when we request blocks we // are 100% positive the peer has them. - if !srv.blockchain.HasHeader(msg.TipHash) { + hasHeader := srv.blockchain.HasHeaderByHashAndHeight(msg.TipHash, uint64(msg.TipHeight)) + if !hasHeader { glog.V(1).Infof("Server._handleHeaderBundle: Peer's tip is not in our "+ "blockchain so not requesting anything else from them. Our block "+ "tip %v, their tip %v:%d, peer: %v", @@ -1352,23 +1482,17 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // even if the peer has a long fork with more work than our current header // chain. 
lastHash, _ := msg.Headers[len(msg.Headers)-1].Hash() - locator, err := srv.blockchain.HeaderLocatorWithNodeHash(lastHash) - if err != nil { - glog.Warningf("Server._handleHeaderBundle: Disconnecting peer %v because "+ - "she indicated that she has more headers but the last hash %v in "+ - "the header bundle does not correspond to a block in our index.", - pp, lastHash) - pp.Disconnect("Last hash in header bundle not in our index") - return - } pp.AddDeSoMessage(&MsgDeSoGetHeaders{ StopHash: &BlockHash{}, - BlockLocator: locator, + BlockLocator: []*BlockHash{lastHash}, }, false) headerTip := srv.blockchain.headerTip() glog.V(1).Infof("Server._handleHeaderBundle: *Syncing* headers for blocks starting at "+ "header tip %v out of %d from peer %v", headerTip.Header, msg.TipHeight, pp) + // TODO: this may be wrong? + glog.V(0).Infof("Server._handleHeaderBundle: Num Headers in header chain: (header tip height: %v) ", + srv.blockchain.blockIndex.GetHeaderTip()) } func (srv *Server) _handleGetBlocks(pp *Peer, msg *MsgDeSoGetBlocks) { @@ -1651,10 +1775,18 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { srv.snapshot.PrintChecksum("Finished hyper sync. 
Checksum is:") glog.Infof(CLog(Magenta, fmt.Sprintf("Metadata checksum: (%v)", srv.HyperSyncProgress.SnapshotMetadata.CurrentEpochChecksumBytes))) - - glog.Infof(CLog(Yellow, fmt.Sprintf("Best header chain %v best block chain %v", - srv.blockchain.bestHeaderChain[msg.SnapshotMetadata.SnapshotBlockHeight], srv.blockchain.bestChain))) - + blockNode, exists, err := srv.blockchain.GetBlockFromBestChainByHeight(msg.SnapshotMetadata.SnapshotBlockHeight, true) + if err != nil { + glog.Errorf("Server._handleSnapshot: Problem getting block node by height, error (%v)", err) + return + } + if !exists { + glog.Errorf("Server._handleSnapshot: Problem getting block node by height, block node does not exist: (%v)", msg.SnapshotMetadata.SnapshotBlockHeight) + //return + } else { + glog.Infof(CLog(Yellow, fmt.Sprintf("Best header chain %v best block chain %v", + blockNode, srv.blockchain.blockIndex.GetTip()))) + } // Verify that the state checksum matches the one in HyperSyncProgress snapshot metadata. // If the checksums don't match, it means that we've been interacting with a peer that was misbehaving. checksumBytes, err := srv.snapshot.Checksum.ToBytes() @@ -1696,18 +1828,55 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { // being too large and possibly causing an error in badger. glog.V(0).Infof("Server._handleSnapshot: Updating snapshot block nodes in the database") var blockNodeBatch []*BlockNode + flushBlockNodeStartTime := time.Now() + // Disable deadlock detection, as the process of flushing entries to file can take a long time and + // if it takes longer than the deadlock detection timeout interval, it will cause an error to be thrown. + deadlock.Opts.Disable = true + defer func() { + deadlock.Opts.Disable = false + }() // acquire the chain lock while we update the best chain and best chain map. 
srv.blockchain.ChainLock.Lock() - for ii := uint64(1); ii <= srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight; ii++ { - currentNode := srv.blockchain.bestHeaderChain[ii] + // TODO: we should iterate in reverse so we can use GetBlockFromBestChainByHashAndOptionalHeight + // by doing currentNode.Height - 1 and currentNode.Header.PrevBlockHash. + currentNode, currentNodeExists, err := srv.blockchain.GetBlockFromBestChainByHeight(srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight, true) + if err != nil { + glog.Errorf("Server._handleSnapshot: Problem getting block node by height, error: (%v)", err) + // TODO: should we return here? + } + if !currentNodeExists { + glog.Errorf("Server._handleSnapshot: Problem getting block node by height, block node does not exist") + // TODO: should we return here? + } + // Set the block tip to the snapshot height block node. + srv.blockchain.blockIndex.setTip(currentNode) + for currentNode.Height > 0 { + //for ii := uint64(1); ii <= srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight; ii++ { // Do not set the StatusBlockStored flag, because we still need to download the past blocks. 
currentNode.Status |= StatusBlockProcessed currentNode.Status |= StatusBlockValidated currentNode.Status |= StatusBlockCommitted srv.blockchain.addNewBlockNodeToBlockIndex(currentNode) - srv.blockchain.bestChainMap[*currentNode.Hash] = currentNode - srv.blockchain.bestChain = append(srv.blockchain.bestChain, currentNode) blockNodeBatch = append(blockNodeBatch, currentNode) + if (srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight-uint64(currentNode.Height))%100000 == 0 { + glog.V(0).Infof("Time to process %v of %v block nodes in %v", + srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight-uint64(currentNode.Height), + srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight, + time.Since(flushBlockNodeStartTime), + ) + } + + prevNodeHeight := uint64(currentNode.Height) - 1 + currentNode, currentNodeExists, err = srv.blockchain.GetBlockFromBestChainByHashAndOptionalHeight(currentNode.Header.PrevBlockHash, &prevNodeHeight, true) + if err != nil { + glog.Errorf("Server._handleSnapshot: Problem getting block node by height, error: (%v)", err) + break + } + if !currentNodeExists { + glog.Errorf("Server._handleSnapshot: Problem getting block node by height, block node does not exist") + break + } + // TODO: should we adjust this value for batch sizes? 
if len(blockNodeBatch) < 10000 { continue } @@ -1724,6 +1893,8 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { glog.Errorf("Server._handleSnapshot: Problem updating snapshot block nodes, error: (%v)", err) } } + glog.V(0).Infof("Time to store %v block nodes in the database: %v", + srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight, time.Since(flushBlockNodeStartTime)) err = PutBestHash(srv.blockchain.db, srv.snapshot, msg.SnapshotMetadata.CurrentEpochBlockHash, ChainTypeDeSoBlock, srv.eventManager) if err != nil { @@ -1731,7 +1902,7 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { } // We also reset the in-memory snapshot cache, because it is populated with stale records after // we've initialized the chain with seed transactions. - srv.snapshot.DatabaseCache = *lru.NewMap[string, []byte](DatabaseCacheSize) + srv.snapshot.DatabaseCache, _ = lru.New[string, []byte](int(DatabaseCacheSize)) // If we got here then we finished the snapshot sync so set appropriate flags. srv.blockchain.syncingState = false @@ -2020,7 +2191,7 @@ func (srv *Server) _relayTransactions() { // Add the transaction to the peer's known inventory. We do // it here when we enqueue the message to the peers outgoing - // message queue so that we don't have remember to do it later. + // message queue so that we don't have to remember to do it later. 
pp.knownInventory.Add(*invVect, struct{}{}) invMsg.InvList = append(invMsg.InvList, invVect) } @@ -2339,14 +2510,14 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { "Server._handleBlock: Processing block %v with FastHotStuffConsensus with SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp, ))) - blockHashesToRequest, err = srv.fastHotStuffConsensus.HandleBlock(pp, blk) + blockHashesToRequest, err = srv.fastHotStuffConsensus.HandleBlock(pp, blk, blockHash) isOrphan = len(blockHashesToRequest) > 0 } else if !verifySignatures { glog.V(0).Infof(CLog(Cyan, fmt.Sprintf( "Server._handleBlock: Processing block %v WITHOUT signature checking because SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp, ))) - _, isOrphan, blockHashesToRequest, err = srv.blockchain.ProcessBlock(blk, false) + _, isOrphan, blockHashesToRequest, err = srv.blockchain.ProcessBlock(blk, blockHash, false) } else { // TODO: Signature checking slows things down because it acquires the ChainLock. // The optimal solution is to check signatures in a way that doesn't acquire the @@ -2355,7 +2526,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { "Server._handleBlock: Processing block %v WITH signature checking because SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp, ))) - _, isOrphan, blockHashesToRequest, err = srv.blockchain.ProcessBlock(blk, true) + _, isOrphan, blockHashesToRequest, err = srv.blockchain.ProcessBlock(blk, blockHash, true) } // If we hit an error then abort mission entirely. 
We should generally never diff --git a/lib/snapshot.go b/lib/snapshot.go index 83f59fc1d..0c0c731ee 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -5,7 +5,6 @@ import ( "context" "encoding/hex" "fmt" - "github.com/deso-protocol/go-deadlock" "math" "reflect" "runtime" @@ -14,11 +13,12 @@ import ( "time" "github.com/cloudflare/circl/group" - "github.com/decred/dcrd/container/lru" + "github.com/deso-protocol/go-deadlock" "github.com/dgraph-io/badger/v3" "github.com/fatih/color" "github.com/golang/glog" "github.com/google/uuid" + "github.com/hashicorp/golang-lru/v2" "github.com/oleiade/lane" "github.com/pkg/errors" "golang.org/x/sync/semaphore" @@ -313,7 +313,7 @@ type Snapshot struct { // DatabaseCache is used to store most recent DB records that we've read/written. // This is a low-level optimization for ancestral records that // saves us read time when we're writing to the DB during UtxoView flush. - DatabaseCache lru.Map[string, []byte] + DatabaseCache *lru.Cache[string, []byte] // AncestralFlushCounter is used to offset ancestral records flush to occur only after x blocks. AncestralFlushCounter uint64 @@ -483,11 +483,14 @@ func NewSnapshot( "This may lead to unexpected behavior.") } + databaseCache, _ := lru.New[string, []byte](int(DatabaseCacheSize)) + // Set the snapshot. 
snap := &Snapshot{ - mainDb: mainDb, - SnapshotDbMutex: &snapshotDbMutex, - DatabaseCache: *lru.NewMap[string, []byte](DatabaseCacheSize), + mainDb: mainDb, + SnapshotDbMutex: &snapshotDbMutex, + DatabaseCache: databaseCache, + AncestralFlushCounter: uint64(0), snapshotBlockHeightPeriod: snapshotBlockHeightPeriod, OperationChannel: operationChannel, @@ -1406,7 +1409,7 @@ type StateChecksum struct { ctx context.Context // hashToCurveCache is a cache of computed hashToCurve mappings - hashToCurveCache lru.Map[string, group.Element] + hashToCurveCache *lru.Cache[string, group.Element] // When we want to add a database record to the state checksum, we will first have to // map the record to the Ristretto255 curve using the hash_to_curve. We will then add the @@ -1434,7 +1437,7 @@ func (sc *StateChecksum) Initialize(mainDb *badger.DB, snapshotDbMutex *sync.Mut sc.maxWorkers = int64(runtime.GOMAXPROCS(0)) // Set the hashToCurveCache - sc.hashToCurveCache = *lru.NewMap[string, group.Element](HashToCurveCache) + sc.hashToCurveCache, _ = lru.New[string, group.Element](int(HashToCurveCache)) // Set the worker pool semaphore and context. sc.semaphore = semaphore.NewWeighted(sc.maxWorkers) @@ -1505,7 +1508,7 @@ func (sc *StateChecksum) HashToCurve(bytes []byte) group.Element { // Compute the hash_to_curve primitive, mapping the bytes to an elliptic curve point. 
hashElement = sc.curve.HashToElement(bytes, sc.dst) // Also add to the hashToCurveCache - sc.hashToCurveCache.Put(bytesStr, hashElement) + sc.hashToCurveCache.Add(bytesStr, hashElement) } return hashElement diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 19a5dd664..f81bcf421 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -777,7 +777,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser return true, nil } - blockHeight := uint64(server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Height) + blockHeight := uint64(server.blockchain.blockIndex.GetTip().Height) stateChangeSyncer.MempoolFlushId = originalCommittedFlushId @@ -804,7 +804,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser mempoolUtxoView.Snapshot = nil server.blockchain.ChainLock.RLock() - mempoolUtxoView.TipHash = server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Hash + mempoolUtxoView.TipHash = server.blockchain.blockIndex.GetTip().Hash server.blockchain.ChainLock.RUnlock() // A new transaction is created so that we can simulate writes to the db without actually writing to the db. @@ -815,7 +815,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser defer txn.Discard() glog.V(2).Infof("Time since mempool sync start: %v", time.Since(startTime)) startTime = time.Now() - err = mempoolUtxoView.FlushToDbWithTxn(txn, uint64(server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Height)) + err = mempoolUtxoView.FlushToDbWithTxn(txn, uint64(server.blockchain.blockIndex.GetTip().Height)) if err != nil { mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ FlushId: originalCommittedFlushId, @@ -847,7 +847,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser // TODO: Have Z look at if we need to do some caching in the uncommitted blocks logic. 
// First connect the uncommitted blocks to the mempool view. for _, uncommittedBlock := range uncommittedBlocks { - utxoViewAndOpsAtBlockHash, err := server.blockchain.GetUtxoViewAndUtxoOpsAtBlockHash(*uncommittedBlock.Hash) + utxoViewAndOpsAtBlockHash, err := server.blockchain.GetUtxoViewAndUtxoOpsAtBlockHash(*uncommittedBlock.Hash, uint64(uncommittedBlock.Height)) if err != nil { mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ FlushId: originalCommittedFlushId, diff --git a/lib/txindex.go b/lib/txindex.go index 029f44487..55dd06b99 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -150,8 +150,8 @@ func NewTXIndex(coreChain *Blockchain, params *DeSoParams, dataDirectory string) } func (txi *TXIndex) FinishedSyncing() bool { - committedTip, idx := txi.CoreChain.GetCommittedTip() - if idx == -1 { + committedTip, exists := txi.CoreChain.GetCommittedTip() + if !exists { return false } return txi.TXIndexChain.BlockTip().Height == committedTip.Height @@ -171,7 +171,8 @@ func (txi *TXIndex) Start() { txi.updateWaitGroup.Done() return default: - if txi.CoreChain.ChainState() == SyncStateFullyCurrent { + chainState := txi.CoreChain.ChainState() + if chainState == SyncStateFullyCurrent || (chainState == SyncStateNeedBlocksss && txi.CoreChain.headerTip().Height-txi.CoreChain.blockTip().Height < 10) { if !txi.CoreChain.IsFullyStored() { glog.V(1).Infof("TXIndex: Waiting, blockchain is not fully stored") break @@ -207,8 +208,7 @@ func (txi *TXIndex) Stop() { // GetTxindexUpdateBlockNodes ... func (txi *TXIndex) GetTxindexUpdateBlockNodes() ( - _txindexTipNode *BlockNode, _blockTipNode *BlockNode, _commonAncestor *BlockNode, - _detachBlocks []*BlockNode, _attachBlocks []*BlockNode) { + _txindexTipNode *BlockNode, _blockTipNode *BlockNode, _commonAncestor *BlockNode) { // Get the current txindex tip. txindexTipHash := txi.TXIndexChain.BlockTip() @@ -218,33 +218,18 @@ func (txi *TXIndex) GetTxindexUpdateBlockNodes() ( // case. 
glog.Error("Error: TXIndexChain had nil tip; this should never " + "happen and it means the transaction index is broken.") - return nil, nil, nil, nil, nil + return nil, nil, nil } // If the tip of the txindex is no longer stored in the block index, it // means the txindex hit a fork that we are no longer keeping track of. // The only thing we can really do in this case is rebuild the entire index // from scratch. To do that, we return all the blocks in the index to detach // and all the blocks in the real chain to attach. - txindexTipNode, _ := txi.TXIndexChain.blockIndexByHash.Get(*txindexTipHash.Hash) + txindexTipNode, _ := txi.TXIndexChain.blockIndex.GetBlockNodeByHashAndHeight(txindexTipHash.Hash, uint64(txindexTipHash.Height)) // Get the committed tip. committedTip, _ := txi.CoreChain.GetCommittedTip() - if txindexTipNode == nil { - glog.Info("GetTxindexUpdateBlockNodes: Txindex tip was not found; building txindex starting at genesis block") - - newTxIndexBestChain, _ := txi.TXIndexChain.CopyBestChain() - newBlockchainBestChain, _ := txi.CoreChain.CopyBestChain() - - return txindexTipNode, committedTip, nil, newTxIndexBestChain, newBlockchainBestChain - } - - derefedTxindexTipNode := *txindexTipNode - - // At this point, we know our txindex tip is in our block index so - // there must be a common ancestor between the tip and the block tip. - commonAncestor, detachBlocks, attachBlocks := GetReorgBlocks(&derefedTxindexTipNode, committedTip) - - return txindexTipNode, committedTip, commonAncestor, detachBlocks, attachBlocks + return txindexTipNode, committedTip, txindexTipNode } // Update syncs the transaction index with the blockchain. @@ -264,7 +249,7 @@ func (txi *TXIndex) Update() error { // done with the rest of the function. 
txi.TXIndexLock.Lock() defer txi.TXIndexLock.Unlock() - txindexTipNode, blockTipNode, commonAncestor, detachBlocks, attachBlocks := txi.GetTxindexUpdateBlockNodes() + txindexTipNode, blockTipNode, commonAncestor := txi.GetTxindexUpdateBlockNodes() // Note that the blockchain's ChainLock does not need to be held at this // point because we're just reading blocks from the db, which never get @@ -293,97 +278,103 @@ func (txi *TXIndex) Update() error { // For each of the blocks we're removing, delete the transactions from // the transaction index. - for _, blockToDetach := range detachBlocks { - if txi.killed { - glog.Infof(CLog(Yellow, "TxIndex: Update: Killed while detaching blocks")) - break - } - // Go through each txn in the block and delete its mappings from our - // txindex. - glog.V(1).Infof("Update: Detaching block (height: %d, hash: %v)", - blockToDetach.Height, blockToDetach.Hash) - blockMsg, err := GetBlock(blockToDetach.Hash, txi.TXIndexChain.DB(), nil) - if err != nil { - return fmt.Errorf("Update: Problem fetching detach block "+ - "with hash %v: %v", blockToDetach.Hash, err) - } - blockHeight := uint64(txi.CoreChain.blockTip().Height) - err = txi.TXIndexChain.DB().Update(func(dbTxn *badger.Txn) error { - for _, txn := range blockMsg.Txns { - if err := DbDeleteTxindexTransactionMappingsWithTxn(dbTxn, nil, - blockHeight, txn, txi.Params, txi.CoreChain.eventManager, true); err != nil { - - return fmt.Errorf("Update: Problem deleting "+ - "transaction mappings for transaction %v: %v", txn.Hash(), err) - } - } - return nil - }) - if err != nil { - return err - } - - // Now that all the transactions have been deleted from our txindex, - // it's safe to disconnect the block from our txindex chain. 
- utxoView := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager) - utxoOps, err := GetUtxoOperationsForBlock( - txi.TXIndexChain.DB(), nil, blockToDetach.Hash) - if err != nil { - return fmt.Errorf( - "Update: Error getting UtxoOps for block %v: %v", blockToDetach, err) - } - // Compute the hashes for all the transactions. - txHashes, err := ComputeTransactionHashes(blockMsg.Txns) - if err != nil { - return fmt.Errorf( - "Update: Error computing tx hashes for block %v: %v", - blockToDetach, err) - } - if err := utxoView.DisconnectBlock(blockMsg, txHashes, utxoOps, blockHeight); err != nil { - return fmt.Errorf("Update: Error detaching block "+ - "%v from UtxoView: %v", blockToDetach, err) - } - if err := utxoView.FlushToDb(blockHeight); err != nil { - return fmt.Errorf("Update: Error flushing view to db for block "+ - "%v: %v", blockToDetach, err) - } - // We have to flush a couple of extra things that the view doesn't flush... - if err := PutBestHash(txi.TXIndexChain.DB(), nil, utxoView.TipHash, ChainTypeDeSoBlock, txi.CoreChain.eventManager); err != nil { - return fmt.Errorf("Update: Error putting best hash for block "+ - "%v: %v", blockToDetach, err) - } - err = txi.TXIndexChain.DB().Update(func(txn *badger.Txn) error { - if err := DeleteUtxoOperationsForBlockWithTxn(txn, nil, blockToDetach.Hash, txi.TXIndexChain.eventManager, true); err != nil { - return fmt.Errorf("Update: Error deleting UtxoOperations 1 for block %v, %v", blockToDetach.Hash, err) - } - if err := txn.Delete(BlockHashToBlockKey(blockToDetach.Hash)); err != nil { - return fmt.Errorf("Update: Error deleting UtxoOperations 2 for block %v %v", blockToDetach.Hash, err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Update: Error updating badgger: %v", err) - } - // Delete this block from the chain db so we don't get duplicate block errors. - - // Remove this block from our bestChain data structures. 
- newBlockIndexByHash, newBlockIndexByHeight := txi.TXIndexChain.CopyBlockIndexes() - newBestChain, newBestChainMap := txi.TXIndexChain.CopyBestChain() - newBestChain = newBestChain[:len(newBestChain)-1] - delete(newBestChainMap, *(blockToDetach.Hash)) - newBlockIndexByHash.Remove(*(blockToDetach.Hash)) - - txi.TXIndexChain.SetBestChainMap(newBestChain, newBestChainMap, newBlockIndexByHash, newBlockIndexByHeight) - - // At this point the entries for the block should have been removed - // from both our Txindex chain and our transaction index mappings. - } + // TODO: delete - we're simplifying the txindex logic to only use committed state. + //for _, blockToDetach := range detachBlocks { + // if txi.killed { + // glog.Infof(CLog(Yellow, "TxIndex: Update: Killed while detaching blocks")) + // break + // } + // // Go through each txn in the block and delete its mappings from our + // // txindex. + // glog.V(1).Infof("Update: Detaching block (height: %d, hash: %v)", + // blockToDetach.Height, blockToDetach.Hash) + // blockMsg, err := GetBlock(blockToDetach.Hash, txi.TXIndexChain.DB(), nil) + // if err != nil { + // return fmt.Errorf("Update: Problem fetching detach block "+ + // "with hash %v: %v", blockToDetach.Hash, err) + // } + // blockHeight := uint64(txi.CoreChain.blockTip().Height) + // err = txi.TXIndexChain.DB().Update(func(dbTxn *badger.Txn) error { + // for _, txn := range blockMsg.Txns { + // if err := DbDeleteTxindexTransactionMappingsWithTxn(dbTxn, nil, + // blockHeight, txn, txi.Params, txi.CoreChain.eventManager, true); err != nil { + // + // return fmt.Errorf("Update: Problem deleting "+ + // "transaction mappings for transaction %v: %v", txn.Hash(), err) + // } + // } + // return nil + // }) + // if err != nil { + // return err + // } + // + // // Now that all the transactions have been deleted from our txindex, + // // it's safe to disconnect the block from our txindex chain. 
+ // utxoView := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager) + // utxoOps, err := GetUtxoOperationsForBlock( + // txi.TXIndexChain.DB(), nil, blockToDetach.Hash) + // if err != nil { + // return fmt.Errorf( + // "Update: Error getting UtxoOps for block %v: %v", blockToDetach, err) + // } + // // Compute the hashes for all the transactions. + // txHashes, err := ComputeTransactionHashes(blockMsg.Txns) + // if err != nil { + // return fmt.Errorf( + // "Update: Error computing tx hashes for block %v: %v", + // blockToDetach, err) + // } + // if err := utxoView.DisconnectBlock(blockMsg, txHashes, utxoOps, blockHeight); err != nil { + // return fmt.Errorf("Update: Error detaching block "+ + // "%v from UtxoView: %v", blockToDetach, err) + // } + // if err := utxoView.FlushToDb(blockHeight); err != nil { + // return fmt.Errorf("Update: Error flushing view to db for block "+ + // "%v: %v", blockToDetach, err) + // } + // // We have to flush a couple of extra things that the view doesn't flush... 
+ // if err := PutBestHash(txi.TXIndexChain.DB(), nil, utxoView.TipHash, ChainTypeDeSoBlock, txi.CoreChain.eventManager); err != nil { + // return fmt.Errorf("Update: Error putting best hash for block "+ + // "%v: %v", blockToDetach, err) + // } + // err = txi.TXIndexChain.DB().Update(func(txn *badger.Txn) error { + // if err := DeleteUtxoOperationsForBlockWithTxn(txn, nil, blockToDetach.Hash, txi.TXIndexChain.eventManager, true); err != nil { + // return fmt.Errorf("Update: Error deleting UtxoOperations 1 for block %v, %v", blockToDetach.Hash, err) + // } + // if err := txn.Delete(BlockHashToBlockKey(blockToDetach.Hash)); err != nil { + // return fmt.Errorf("Update: Error deleting UtxoOperations 2 for block %v %v", blockToDetach.Hash, err) + // } + // return nil + // }) + // + // if err != nil { + // return fmt.Errorf("Update: Error updating badgger: %v", err) + // } + // // Delete this block from the chain db so we don't get duplicate block errors. + // + // // Remove this block from our bestChain data structures. + // newBlockIndex := txi.TXIndexChain.CopyBlockIndexes() + // newTip := blockToDetach.GetParent(txi.TXIndexChain.blockIndex) + // if newTip == nil { + // return fmt.Errorf("Update: Error getting parent of block %v", blockToDetach) + // } + // + // txi.TXIndexChain.SetBestChainMap(newBlockIndex, newTip) + // + // // At this point the entries for the block should have been removed + // // from both our Txindex chain and our transaction index mappings. + //} // For each of the blocks we're adding, process them on our txindex chain // and add their mappings to our txn index. Compute any metadata that might // be useful. - for _, blockToAttach := range attachBlocks { + // Get the next block after the current txindex tip hash. we know we've already processed the txindex tip hash. 
+ blockToAttach, exists, err := txi.CoreChain.GetBlockFromBestChainByHeight(uint64(txindexTipNode.Height+1), false) + if !exists || err != nil { + return fmt.Errorf("Update: Problem getting block at height %d: %v", txindexTipNode.Height+1, err) + } + for !blockToAttach.Hash.IsEqual(blockTipNode.Hash) { if txi.killed { glog.Infof(CLog(Yellow, "TxIndex: Update: Killed while attaching blocks")) break @@ -408,7 +399,7 @@ func (txi *TXIndex) Update() error { utxoView := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager) if blockToAttach.Header.PrevBlockHash != nil && !utxoView.TipHash.IsEqual(blockToAttach.Header.PrevBlockHash) { var utxoViewAndUtxoOps *BlockViewAndUtxoOps - utxoViewAndUtxoOps, err = txi.TXIndexChain.GetUtxoViewAndUtxoOpsAtBlockHash(*blockToAttach.Header.PrevBlockHash) + utxoViewAndUtxoOps, err = txi.TXIndexChain.GetUtxoViewAndUtxoOpsAtBlockHash(*blockToAttach.Header.PrevBlockHash, blockToAttach.Header.Height-1) if err != nil { return fmt.Errorf("Update: Problem getting UtxoView at block hash %v: %v", blockToAttach.Header.PrevBlockHash, err) @@ -448,11 +439,16 @@ func (txi *TXIndex) Update() error { // Now that we have added all the txns to our TxIndex db, attach the block // to update our chain. - _, _, _, err = txi.TXIndexChain.ProcessBlock(blockMsg, false /*verifySignatures*/) + _, _, _, err = txi.TXIndexChain.ProcessBlock(blockMsg, blockToAttach.Hash, false /*verifySignatures*/) if err != nil { return fmt.Errorf("Update: Problem attaching block %v: %v", blockToAttach, err) } + var exists bool + blockToAttach, exists, err = txi.CoreChain.GetBlockFromBestChainByHeight(uint64(blockToAttach.Height+1), false) + if !exists || err != nil { + return fmt.Errorf("Update: Problem getting block at height %d: %v", blockToAttach.Height+1, err) + } } glog.Infof("Update: Txindex update complete. 
New tip: (height: %d, hash: %v)", diff --git a/lib/types.go b/lib/types.go index dd7bf0518..5c486c99d 100644 --- a/lib/types.go +++ b/lib/types.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "reflect" "sort" "github.com/deso-protocol/uint256" @@ -238,7 +237,7 @@ func (bh *BlockHash) IsEqual(target *BlockHash) bool { return false } - return reflect.DeepEqual(bh[:], target[:]) + return bytes.Equal(bh[:], target[:]) } func (bh *BlockHash) NewBlockHash() *BlockHash {