- // Copyright 2015 The go-ethereum Authors
- // This file is part of the go-ethereum library.
- //
- // The go-ethereum library is free software: you can redistribute it and/or modify
- // it under the terms of the GNU Lesser General Public License as published by
- // the Free Software Foundation, either version 3 of the License, or
- // (at your option) any later version.
- //
- // The go-ethereum library is distributed in the hope that it will be useful,
- // but WITHOUT ANY WARRANTY; without even the implied warranty of
- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- // GNU Lesser General Public License for more details.
- //
- // You should have received a copy of the GNU Lesser General Public License
- // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
- // Package downloader contains the manual full chain synchronisation.
- package downloader
- import (
- "errors"
- "fmt"
- "math/big"
- "sync"
- "sync/atomic"
- "time"
- "github.com/ethereum/go-ethereum"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state/snapshot"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/eth/protocols/eth"
- "github.com/ethereum/go-ethereum/eth/protocols/snap"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/permission/core"
- "github.com/ethereum/go-ethereum/trie"
- )
- var (
- MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request
- MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request
- MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
- MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
- MaxStateFetch = 384 // Amount of node state values to allow fetching per request
- rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests
- rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests
- rttMinConfidence = 0.1 // Worst confidence factor in our estimated RTT value
- ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion
- ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts
- qosTuningPeers = 5 // Number of peers to tune based on (best peers)
- qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence
- qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value
- maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
- maxHeadersProcess = 2048 // Number of header download results to import at once into the chain
- maxResultsProcess = 2048 // Number of content download results to import at once into the chain
- fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
- lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
- reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection
- reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
- fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync
- fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected
- fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it
- fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download
- fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync
- )
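- // For intuition, the sketch below (not part of the original file) shows how the
- // RTT/TTL constants above might combine into a per-request timeout: the RTT
- // estimate is scaled by ttlScaling, widened when confidence is low, and capped
- // at ttlLimit. The downloader's own requestTTL method performs a similar
- // calculation on its atomically stored estimates; this helper is illustrative only.
- func exampleRequestTTL(rtt time.Duration, confidence float64) time.Duration {
- // Scale the round-trip estimate and inflate it by low confidence
- ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/confidence)
- if ttl > ttlLimit {
- ttl = ttlLimit // never exceed the hard timeout ceiling
- }
- return ttl
- }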
- var (
- errBusy = errors.New("busy")
- errUnknownPeer = errors.New("peer is unknown or unhealthy")
- errBadPeer = errors.New("action from bad peer ignored")
- errStallingPeer = errors.New("peer is stalling")
- errUnsyncedPeer = errors.New("unsynced peer")
- errNoPeers = errors.New("no peers to keep download active")
- errTimeout = errors.New("timeout")
- errEmptyHeaderSet = errors.New("empty header set by peer")
- errPeersUnavailable = errors.New("no peers available or all tried for download")
- errInvalidAncestor = errors.New("retrieved ancestor is invalid")
- errInvalidChain = errors.New("retrieved hash chain is invalid")
- errInvalidBody = errors.New("retrieved block body is invalid")
- errInvalidReceipt = errors.New("retrieved receipt is invalid")
- errCancelStateFetch = errors.New("state data download canceled (requested)")
- errCancelHeaderFetch = errors.New("block header download canceled (requested)")
- errCancelBlockFetch = errors.New("block download canceled (requested)")
- errCancelContentProcessing = errors.New("content processing canceled (requested)")
- errCanceled = errors.New("syncing canceled (requested)")
- errNoSyncActive = errors.New("no sync active")
- errTooOld = errors.New("peer's protocol version too old")
- errNoAncestorFound = errors.New("no common ancestor found")
- )
- type Downloader struct {
- // WARNING: The `rttEstimate` and `rttConfidence` fields are accessed atomically.
- // On 32 bit platforms, only 64-bit aligned fields can be atomic. The struct is
- // guaranteed to be so aligned, so take advantage of that. For more information,
- // see https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- rttEstimate uint64 // Round trip time to target for download requests
- rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)
- mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
- mux *event.TypeMux // Event multiplexer to announce sync operation events
- checkpoint uint64 // Checkpoint block number to enforce head against (e.g. fast sync)
- genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT)
- queue *queue // Scheduler for selecting the hashes to download
- peers *peerSet // Set of active peers from which download can proceed
- stateDB ethdb.Database // Database to state sync into (and deduplicate via)
- stateBloom *trie.SyncBloom // Bloom filter for fast trie node and contract code existence checks
- // Statistics
- syncStatsChainOrigin uint64 // Origin block number where syncing started at
- syncStatsChainHeight uint64 // Highest block number known when syncing started
- syncStatsState stateSyncStats
- syncStatsLock sync.RWMutex // Lock protecting the sync stats fields
- lightchain LightChain
- blockchain BlockChain
- // Callbacks
- dropPeer peerDropFn // Drops a peer for misbehaving
- // Status
- synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
- synchronising int32
- notified int32
- committed int32
- ancientLimit uint64 // The maximum block number which can be regarded as ancient data.
- // Channels
- headerCh chan dataPack // Channel receiving inbound block headers
- bodyCh chan dataPack // Channel receiving inbound block bodies
- receiptCh chan dataPack // Channel receiving inbound receipts
- bodyWakeCh chan bool // Channel to signal the block body fetcher of new tasks
- receiptWakeCh chan bool // Channel to signal the receipt fetcher of new tasks
- headerProcCh chan []*types.Header // Channel to feed the header processor new tasks
- // State sync
- pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
- pivotLock sync.RWMutex // Lock protecting pivot header reads from updates
- snapSync bool // Whether to run state sync over the snap protocol
- SnapSyncer *snap.Syncer // TODO(karalabe): make private! hack for now
- stateSyncStart chan *stateSync
- trackStateReq chan *stateReq
- stateCh chan dataPack // Channel receiving inbound node state data
- // Cancellation and termination
- cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop)
- cancelCh chan struct{} // Channel to cancel mid-flight syncs
- cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers
- cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited.
- quitCh chan struct{} // Quit channel to signal termination
- quitLock sync.Mutex // Lock to prevent double closes
- // Testing hooks
- syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run
- bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch
- receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
- chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
- }
- // LightChain encapsulates functions required to synchronise a light chain.
- type LightChain interface {
- // HasHeader verifies a header's presence in the local chain.
- HasHeader(common.Hash, uint64) bool
- // GetHeaderByHash retrieves a header from the local chain.
- GetHeaderByHash(common.Hash) *types.Header
- // CurrentHeader retrieves the head header from the local chain.
- CurrentHeader() *types.Header
- // GetTd returns the total difficulty of a local block.
- GetTd(common.Hash, uint64) *big.Int
- // InsertHeaderChain inserts a batch of headers into the local chain.
- InsertHeaderChain([]*types.Header, int) (int, error)
- // SetHead rewinds the local chain to a new head.
- SetHead(uint64) error
- }
- // BlockChain encapsulates functions required to sync a (full or fast) blockchain.
- type BlockChain interface {
- LightChain
- // HasBlock verifies a block's presence in the local chain.
- HasBlock(common.Hash, uint64) bool
- // HasFastBlock verifies a fast block's presence in the local chain.
- HasFastBlock(common.Hash, uint64) bool
- // GetBlockByHash retrieves a block from the local chain.
- GetBlockByHash(common.Hash) *types.Block
- // CurrentBlock retrieves the head block from the local chain.
- CurrentBlock() *types.Block
- // CurrentFastBlock retrieves the head fast block from the local chain.
- CurrentFastBlock() *types.Block
- // FastSyncCommitHead directly commits the head block to a certain entity.
- FastSyncCommitHead(common.Hash) error
- // InsertChain inserts a batch of blocks into the local chain.
- InsertChain(types.Blocks) (int, error)
- // InsertReceiptChain inserts a batch of receipts into the local chain.
- InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error)
- // Snapshots returns the blockchain snapshot tree to pause it during sync.
- Snapshots() *snapshot.Tree
- }
- // New creates a new downloader to fetch hashes and blocks from remote peers.
- func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
- // Quorum
- // reset the value of fullMaxForkAncestry for Quorum based networks
- fullMaxForkAncestry = uint64(params.GetImmutabilityThreshold())
- // End Quorum
- if lightchain == nil {
- lightchain = chain
- }
- dl := &Downloader{
- stateDB: stateDb,
- stateBloom: stateBloom,
- mux: mux,
- checkpoint: checkpoint,
- queue: newQueue(blockCacheMaxItems, blockCacheInitialItems),
- peers: newPeerSet(),
- rttEstimate: uint64(rttMaxEstimate),
- rttConfidence: uint64(1000000),
- blockchain: chain,
- lightchain: lightchain,
- dropPeer: dropPeer,
- headerCh: make(chan dataPack, 1),
- bodyCh: make(chan dataPack, 1),
- receiptCh: make(chan dataPack, 1),
- bodyWakeCh: make(chan bool, 1),
- receiptWakeCh: make(chan bool, 1),
- headerProcCh: make(chan []*types.Header, 1),
- quitCh: make(chan struct{}),
- stateCh: make(chan dataPack),
- SnapSyncer: snap.NewSyncer(stateDb),
- stateSyncStart: make(chan *stateSync),
- syncStatsState: stateSyncStats{
- processed: rawdb.ReadFastTrieProgress(stateDb),
- },
- trackStateReq: make(chan *stateReq),
- }
- go dl.qosTuner()
- go dl.stateFetcher()
- return dl
- }
- // Progress retrieves the synchronisation boundaries, specifically the origin
- // block where synchronisation started at (may have failed/suspended); the block
- // or header sync is currently at; and the latest known block which the sync targets.
- //
- // In addition, during the state download phase of fast synchronisation the number
- // of processed and the total number of known states are also returned. Otherwise
- // these are zero.
- func (d *Downloader) Progress() ethereum.SyncProgress {
- // Lock the current stats and return the progress
- d.syncStatsLock.RLock()
- defer d.syncStatsLock.RUnlock()
- current := uint64(0)
- mode := d.getMode()
- switch {
- case d.blockchain != nil && mode == FullSync:
- current = d.blockchain.CurrentBlock().NumberU64()
- case d.blockchain != nil && mode == FastSync:
- current = d.blockchain.CurrentFastBlock().NumberU64()
- case d.lightchain != nil:
- current = d.lightchain.CurrentHeader().Number.Uint64()
- default:
- log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode)
- }
- return ethereum.SyncProgress{
- StartingBlock: d.syncStatsChainOrigin,
- CurrentBlock: current,
- HighestBlock: d.syncStatsChainHeight,
- PulledStates: d.syncStatsState.processed,
- KnownStates: d.syncStatsState.processed + d.syncStatsState.pending,
- }
- }
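- // logSyncProgress is an illustrative sketch (not used elsewhere in this file) of
- // how a caller might consume Progress to report completion; it assumes a highest
- // block has already been learned from a peer and guards against the zero values
- // returned before that point.
- func logSyncProgress(d *Downloader) {
- p := d.Progress()
- if p.HighestBlock == 0 || p.HighestBlock <= p.StartingBlock || p.CurrentBlock < p.StartingBlock {
- return // nothing meaningful to report yet
- }
- done := p.CurrentBlock - p.StartingBlock
- todo := p.HighestBlock - p.StartingBlock
- log.Info("Sync progress", "current", p.CurrentBlock, "highest", p.HighestBlock,
- "pct", fmt.Sprintf("%.2f%%", 100*float64(done)/float64(todo)))
- }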
- // Synchronising returns whether the downloader is currently retrieving blocks.
- func (d *Downloader) Synchronising() bool {
- return atomic.LoadInt32(&d.synchronising) > 0
- }
- // RegisterPeer injects a new download peer into the set of block sources to be
- // used for fetching hashes and blocks from.
- func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
- var logger log.Logger
- if len(id) < 16 {
- // Tests use short IDs, don't choke on them
- logger = log.New("peer", id)
- } else {
- logger = log.New("peer", id[:8])
- }
- logger.Trace("Registering sync peer")
- if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
- logger.Error("Failed to register sync peer", "err", err)
- return err
- }
- d.qosReduceConfidence()
- return nil
- }
- // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
- func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error {
- return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
- }
- // UnregisterPeer removes a peer from the known list, preventing any action from
- // the specified peer. An effort is also made to return any pending fetches into
- // the queue.
- func (d *Downloader) UnregisterPeer(id string) error {
- // Unregister the peer from the active peer set and revoke any fetch tasks
- var logger log.Logger
- if len(id) < 16 {
- // Tests use short IDs, don't choke on them
- logger = log.New("peer", id)
- } else {
- logger = log.New("peer", id[:8])
- }
- logger.Trace("Unregistering sync peer")
- if err := d.peers.Unregister(id); err != nil {
- logger.Error("Failed to unregister sync peer", "err", err)
- return err
- }
- d.queue.Revoke(id)
- return nil
- }
- // Synchronise tries to sync up our local block chain with a remote peer, both
- // adding various sanity checks as well as wrapping it with various log entries.
- func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
- err := d.synchronise(id, head, td, mode)
- switch err {
- case nil, errBusy, errCanceled:
- return err
- }
- if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) ||
- errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) ||
- errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) {
- log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
- if d.dropPeer == nil {
- // The dropPeer method is nil when `--copydb` is used for a local copy.
- // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
- log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
- } else {
- d.dropPeer(id)
- }
- return err
- }
- log.Warn("Synchronisation failed, retrying", "err", err)
- return err
- }
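- // exampleSyncLoop is an illustrative sketch of how a caller typically drives
- // Synchronise: the peer id, head hash and total difficulty are assumed to come
- // from the caller's own peer handling, and errBusy simply means another cycle
- // is already in flight and can be retried later.
- func exampleSyncLoop(d *Downloader, id string, head common.Hash, td *big.Int) {
- if err := d.Synchronise(id, head, td, FullSync); err != nil && err != errBusy {
- log.Debug("Sync cycle failed", "peer", id, "err", err)
- }
- }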
- // synchronise will select the peer and use it for synchronising. If an empty string is given
- // it will use the best peer possible and synchronise if its TD is higher than our own. If any of the
- // checks fail an error will be returned. This method is synchronous.
- func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
- // Mock out the synchronisation if testing
- if d.synchroniseMock != nil {
- return d.synchroniseMock(id, hash)
- }
- // Make sure only one goroutine is ever allowed past this point at once
- if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
- return errBusy
- }
- // Quorum
- // permissioning change: set the sync status to indicate to the permission layer that node sync has started
- core.SetSyncStatus()
- defer atomic.StoreInt32(&d.synchronising, 0)
- // Post a user notification of the sync (only once per session)
- if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
- log.Info("Block synchronisation started")
- }
- // If we are already full syncing, but have a fast-sync bloom filter laying
- // around, make sure it doesn't use memory any more. This is a special case
- // when the user attempts to fast sync a new empty network.
- if mode == FullSync && d.stateBloom != nil {
- d.stateBloom.Close()
- }
- // If snap sync was requested, create the snap scheduler and switch to fast
- // sync mode. Long term we could drop fast sync or merge the two together,
- // but until snap becomes prevalent, we should support both. TODO(karalabe).
- if mode == SnapSync {
- if !d.snapSync {
- // Snap sync uses the snapshot namespace to store potentially flakey data until
- // sync completely heals and finishes. Pause snapshot maintenance in the mean
- // time to prevent access.
- if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
- snapshots.Disable()
- }
- log.Warn("Enabling snapshot sync prototype")
- d.snapSync = true
- }
- mode = FastSync
- }
- // Reset the queue, peer set and wake channels to clean any internal leftover state
- d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)
- d.peers.Reset()
- for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
- select {
- case <-ch:
- default:
- }
- }
- for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
- for empty := false; !empty; {
- select {
- case <-ch:
- default:
- empty = true
- }
- }
- }
- for empty := false; !empty; {
- select {
- case <-d.headerProcCh:
- default:
- empty = true
- }
- }
- // Create cancel channel for aborting mid-flight and mark the master peer
- d.cancelLock.Lock()
- d.cancelCh = make(chan struct{})
- d.cancelPeer = id
- d.cancelLock.Unlock()
- defer d.Cancel() // No matter what, we can't leave the cancel channel open
- // Atomically set the requested sync mode
- atomic.StoreUint32(&d.mode, uint32(mode))
- // Retrieve the origin peer and initiate the downloading process
- p := d.peers.Peer(id)
- if p == nil {
- return errUnknownPeer
- }
- if mode == BoundedFullSync {
- return d.syncWithPeerUntil(p, hash, td)
- }
- return d.syncWithPeer(p, hash, td)
- }
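- // drainNonBlocking is an illustrative sketch of the pattern used above to clear
- // leftover deliveries from a channel without blocking: keep receiving until a
- // receive would block, then stop. It is not invoked by the downloader itself.
- func drainNonBlocking(ch chan dataPack) {
- for {
- select {
- case <-ch:
- default:
- return
- }
- }
- }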
- func (d *Downloader) getMode() SyncMode {
- return SyncMode(atomic.LoadUint32(&d.mode))
- }
- // syncWithPeer starts a block synchronization based on the hash chain from the
- // specified peer and head hash.
- func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
- d.mux.Post(StartEvent{})
- defer func() {
- // reset on error
- if err != nil {
- d.mux.Post(FailedEvent{err})
- } else {
- latest := d.lightchain.CurrentHeader()
- d.mux.Post(DoneEvent{latest})
- }
- }()
- if p.version < eth.ETH65 {
- return fmt.Errorf("%w: advertized %d < required %d", errTooOld, p.version, eth.ETH65)
- }
- mode := d.getMode()
- log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode)
- defer func(start time.Time) {
- log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
- }(time.Now())
- // Look up the sync boundaries: the common ancestor and the target block
- latest, pivot, err := d.fetchHead(p)
- if err != nil {
- return err
- }
- if mode == FastSync && pivot == nil {
- // If no pivot block was returned, the head is below the min full block
- // threshold (i.e. new chain). In that case we won't really fast sync
- // anyway, but still need a valid pivot block to avoid some code hitting
- // nil panics on an access.
- pivot = d.blockchain.CurrentBlock().Header()
- }
- height := latest.Number.Uint64()
- origin, err := d.findAncestor(p, latest)
- if err != nil {
- return err
- }
- d.syncStatsLock.Lock()
- if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
- d.syncStatsChainOrigin = origin
- }
- d.syncStatsChainHeight = height
- d.syncStatsLock.Unlock()
- // Ensure our origin point is below any fast sync pivot point
- if mode == FastSync {
- if height <= uint64(fsMinFullBlocks) {
- origin = 0
- } else {
- pivotNumber := pivot.Number.Uint64()
- if pivotNumber <= origin {
- origin = pivotNumber - 1
- }
- // Write out the pivot into the database so a rollback beyond it will
- // reenable fast sync
- rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber)
- }
- }
- d.committed = 1
- if mode == FastSync && pivot.Number.Uint64() != 0 {
- d.committed = 0
- }
- if mode == FastSync {
- // Set the ancient data limitation.
- // If we are running fast sync, all block data older than ancientLimit will be
- // written to the ancient store. More recent data will be written to the active
- // database and will wait for the freezer to migrate.
- //
- // If there is a checkpoint available, then calculate the ancientLimit through
- // that. Otherwise calculate the ancient limit through the advertised height
- // of the remote peer.
- //
- // The reason for picking checkpoint first is that a malicious peer can give us
- // a fake (very high) height, forcing the ancient limit to also be very high.
- // The peer would start to feed us valid blocks until head, so all of the
- // blocks might end up written into the ancient store. A following mini-reorg
- // could then cause issues.
- if d.checkpoint != 0 && d.checkpoint > fullMaxForkAncestry+1 {
- d.ancientLimit = d.checkpoint
- } else if height > fullMaxForkAncestry+1 {
- d.ancientLimit = height - fullMaxForkAncestry - 1
- } else {
- d.ancientLimit = 0
- }
- frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.
- // If a part of blockchain data has already been written into active store,
- // disable the ancient style insertion explicitly.
- if origin >= frozen && frozen != 0 {
- d.ancientLimit = 0
- log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1)
- } else if d.ancientLimit > 0 {
- log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit)
- }
- // Rewind the ancient store and blockchain if reorg happens.
- if origin+1 < frozen {
- if err := d.lightchain.SetHead(origin + 1); err != nil {
- return err
- }
- }
- }
- // Initiate the sync using a concurrent header and content retrieval algorithm
- d.queue.Prepare(origin+1, mode)
- if d.syncInitHook != nil {
- d.syncInitHook(origin, height)
- }
- fetchers := []func() error{
- func() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved
- func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync
- func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync
- func() error { return d.processHeaders(origin+1, td) },
- }
- if mode == FastSync {
- d.pivotLock.Lock()
- d.pivotHeader = pivot
- d.pivotLock.Unlock()
- fetchers = append(fetchers, func() error { return d.processFastSyncContent() })
- } else if mode == FullSync {
- fetchers = append(fetchers, d.processFullSyncContent)
- }
- return d.spawnSync(fetchers)
- }
- // spawnSync runs d.process and all given fetcher functions to completion in
- // separate goroutines, returning the first error that appears.
- func (d *Downloader) spawnSync(fetchers []func() error) error {
- errc := make(chan error, len(fetchers))
- d.cancelWg.Add(len(fetchers))
- for _, fn := range fetchers {
- fn := fn
- go func() { defer d.cancelWg.Done(); errc <- fn() }()
- }
- // Wait for the first error, then terminate the others.
- var err error
- for i := 0; i < len(fetchers); i++ {
- if i == len(fetchers)-1 {
- // Close the queue when all fetchers have exited.
- // This will cause the block processor to end when
- // it has processed the queue.
- d.queue.Close()
- }
- if err = <-errc; err != nil && err != errCanceled {
- break
- }
- }
- d.queue.Close()
- d.Cancel()
- return err
- }
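- // runUntilFirstError is an illustrative sketch of the fan-out pattern used by
- // spawnSync, stripped of the queue and cancellation plumbing: every function
- // runs in its own goroutine and the first non-nil error (if any) is returned
- // once all of them have finished.
- func runUntilFirstError(fns ...func() error) error {
- errc := make(chan error, len(fns))
- for _, fn := range fns {
- fn := fn
- go func() { errc <- fn() }()
- }
- var first error
- for range fns {
- if err := <-errc; err != nil && first == nil {
- first = err
- }
- }
- return first
- }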
- // cancel aborts all of the operations and resets the queue. However, cancel does
- // not wait for the running download goroutines to finish. This method should be
- // used when cancelling the downloads from inside the downloader.
- func (d *Downloader) cancel() {
- // Close the current cancel channel
- d.cancelLock.Lock()
- defer d.cancelLock.Unlock()
- if d.cancelCh != nil {
- select {
- case <-d.cancelCh:
- // Channel was already closed
- default:
- close(d.cancelCh)
- }
- }
- }
- // Cancel aborts all of the operations and waits for all download goroutines to
- // finish before returning.
- func (d *Downloader) Cancel() {
- d.cancel()
- d.cancelWg.Wait()
- }
- // Terminate interrupts the downloader, canceling all pending operations.
- // The downloader cannot be reused after calling Terminate.
- func (d *Downloader) Terminate() {
- // Close the termination channel (make sure double close is allowed)
- d.quitLock.Lock()
- select {
- case <-d.quitCh:
- default:
- close(d.quitCh)
- }
- if d.stateBloom != nil {
- d.stateBloom.Close()
- }
- d.quitLock.Unlock()
- // Cancel any pending download requests
- d.Cancel()
- }
- // fetchHead retrieves the head header and prior pivot block (if available) from
- // a remote peer.
- func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) {
- p.log.Debug("Retrieving remote chain head")
- mode := d.getMode()
- // Request the advertised remote head block and wait for the response
- latest, _ := p.peer.Head()
- fetch := 1
- if mode == FastSync {
- fetch = 2 // head + pivot headers
- }
- go p.peer.RequestHeadersByHash(latest, fetch, fsMinFullBlocks-1, true)
- ttl := d.requestTTL()
- timeout := time.After(ttl)
- for {
- select {
- case <-d.cancelCh:
- return nil, nil, errCanceled
- case packet := <-d.headerCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
- break
- }
- // Make sure the peer gave us at least one and at most the requested headers
- headers := packet.(*headerPack).headers
- if len(headers) == 0 || len(headers) > fetch {
- return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch)
- }
- // The first header needs to be the head, validate against the checkpoint
- // and request. If only 1 header was returned, make sure there's no pivot
- // or there was not one requested.
- head := headers[0]
- if (mode == FastSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint {
- return nil, nil, fmt.Errorf("%w: remote head %d below checkpoint %d", errUnsyncedPeer, head.Number, d.checkpoint)
- }
- if len(headers) == 1 {
- if mode == FastSync && head.Number.Uint64() > uint64(fsMinFullBlocks) {
- return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer)
- }
- p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", head.Hash())
- return head, nil, nil
- }
- // At this point we have 2 headers in total and the first is the
- // validated head of the chain. Check the pivot number and return.
- pivot := headers[1]
- if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) {
- return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks))
- }
- return head, pivot, nil
- case <-timeout:
- p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
- return nil, nil, errTimeout
- case <-d.bodyCh:
- case <-d.receiptCh:
- // Out of bounds delivery, ignore
- }
- }
- }
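- // examplePivotRequest is an illustrative sketch of the request geometry used by
- // fetchHead during fast sync: asking for 2 headers from the remote head with a
- // skip of fsMinFullBlocks-1 in reverse order yields the head itself plus the
- // expected pivot exactly fsMinFullBlocks below it (assuming the head number is
- // higher than fsMinFullBlocks).
- func examplePivotRequest(headNumber uint64) (head, pivot uint64) {
- return headNumber, headNumber - uint64(fsMinFullBlocks)
- }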
- // calculateRequestSpan calculates what headers to request from a peer when trying to determine the
- // common ancestor.
- // It returns parameters to be used for peer.RequestHeadersByNumber:
- // from - starting block number
- // count - number of headers to request
- // skip - number of headers to skip
- // and also returns 'max', the last block which is expected to be returned by the remote peers,
- // given the (from,count,skip)
- func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
- var (
- from int
- count int
- MaxCount = MaxHeaderFetch / 16
- )
- // requestHead is the highest block that we will ask for. If requestHead is not offset,
- // the highest block that we will get is 16 blocks back from head, which means we
- // will fetch 14 or 15 blocks unnecessarily in the case the height difference
- // between us and the peer is 1-2 blocks, which is most common
- requestHead := int(remoteHeight) - 1
- if requestHead < 0 {
- requestHead = 0
- }
- // requestBottom is the lowest block we want included in the query
- // Ideally, we want to include the one just below our own head
- requestBottom := int(localHeight - 1)
- if requestBottom < 0 {
- requestBottom = 0
- }
- totalSpan := requestHead - requestBottom
- span := 1 + totalSpan/MaxCount
- if span < 2 {
- span = 2
- }
- if span > 16 {
- span = 16
- }
- count = 1 + totalSpan/span
- if count > MaxCount {
- count = MaxCount
- }
- if count < 2 {
- count = 2
- }
- from = requestHead - (count-1)*span
- if from < 0 {
- from = 0
- }
- max := from + (count-1)*span
- return int64(from), count, span - 1, uint64(max)
- }
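- // exampleSpanRequest is an illustrative worked example of the numbers produced
- // above: with a remote height of 1600 and a local height of 1500 the call yields
- // from=1500, count=12, skip=8, max=1599, i.e. every 9th header from 1500 through
- // 1599 is requested, the last of which sits just below the remote head.
- func exampleSpanRequest() (int64, int, int, uint64) {
- return calculateRequestSpan(1600, 1500)
- }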
- // findAncestor tries to locate the common ancestor link of the local chain and
- // a remote peer's blockchain. In the general case when our node was in sync and
- // on the correct chain, checking the top N links should already get us a match.
- // In the rare scenario when we ended up on a long reorganisation (i.e. none of
- // the head links match), we do a binary search to find the common ancestor.
- func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
- // Figure out the valid ancestor range to prevent rewrite attacks
- var (
- floor = int64(-1)
- localHeight uint64
- remoteHeight = remoteHeader.Number.Uint64()
- )
- mode := d.getMode()
- switch mode {
- case FullSync:
- localHeight = d.blockchain.CurrentBlock().NumberU64()
- case FastSync:
- localHeight = d.blockchain.CurrentFastBlock().NumberU64()
- default:
- localHeight = d.lightchain.CurrentHeader().Number.Uint64()
- }
- p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
- // Recap floor value for binary search
- maxForkAncestry := fullMaxForkAncestry
- if d.getMode() == LightSync {
- maxForkAncestry = lightMaxForkAncestry
- }
- if localHeight >= maxForkAncestry {
- // We're above the max reorg threshold, find the earliest fork point
- floor = int64(localHeight - maxForkAncestry)
- }
- // If we're doing a light sync, ensure the floor doesn't go below the CHT, as
- // all headers before that point will be missing.
- if mode == LightSync {
- // If we don't know the current CHT position, find it
- if d.genesis == 0 {
- header := d.lightchain.CurrentHeader()
- for header != nil {
- d.genesis = header.Number.Uint64()
- if floor >= int64(d.genesis)-1 {
- break
- }
- header = d.lightchain.GetHeaderByHash(header.ParentHash)
- }
- }
- // We already know the "genesis" block number, cap floor to that
- if floor < int64(d.genesis)-1 {
- floor = int64(d.genesis) - 1
- }
- }
- ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor)
- if err == nil {
- return ancestor, nil
- }
- // The returned error was not nil.
- // If the error returned does not reflect that a common ancestor was not found, return it.
- // If the error reflects that a common ancestor was not found, continue to binary search,
- // where the error value will be reassigned.
- if !errors.Is(err, errNoAncestorFound) {
- return 0, err
- }
- ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor)
- if err != nil {
- return 0, err
- }
- return ancestor, nil
- }
- func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (commonAncestor uint64, err error) {
- from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
- p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
- go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)
- // Wait for the remote response to the head fetch
- number, hash := uint64(0), common.Hash{}
- ttl := d.requestTTL()
- timeout := time.After(ttl)
- for finished := false; !finished; {
- select {
- case <-d.cancelCh:
- return 0, errCanceled
- case packet := <-d.headerCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
- break
- }
- // Make sure the peer actually gave something valid
- headers := packet.(*headerPack).headers
- if len(headers) == 0 {
- p.log.Warn("Empty head header set")
- return 0, errEmptyHeaderSet
- }
- // Make sure the peer's reply conforms to the request
- for i, header := range headers {
- expectNumber := from + int64(i)*int64(skip+1)
- if number := header.Number.Int64(); number != expectNumber {
- p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
- return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering"))
- }
- }
- // Check if a common ancestor was found
- finished = true
- for i := len(headers) - 1; i >= 0; i-- {
- // Skip any headers that underflow/overflow our requested set
- if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
- continue
- }
- // Otherwise check if we already know the header or not
- h := headers[i].Hash()
- n := headers[i].Number.Uint64()
- var known bool
- switch mode {
- case FullSync:
- known = d.blockchain.HasBlock(h, n)
- case FastSync:
- known = d.blockchain.HasFastBlock(h, n)
- default:
- known = d.lightchain.HasHeader(h, n)
- }
- if known {
- number, hash = n, h
- break
- }
- }
- case <-timeout:
- p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
- return 0, errTimeout
- case <-d.bodyCh:
- case <-d.receiptCh:
- // Out of bounds delivery, ignore
- }
- }
- // If the head fetch already found an ancestor, return
- if hash != (common.Hash{}) {
- if int64(number) <= floor {
- p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
- return 0, errInvalidAncestor
- }
- p.log.Debug("Found common ancestor", "number", number, "hash", hash)
- return number, nil
- }
- return 0, errNoAncestorFound
- }
- func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (commonAncestor uint64, err error) {
- hash := common.Hash{}
- // Ancestor not found, we need to binary search over our chain
- start, end := uint64(0), remoteHeight
- if floor > 0 {
- start = uint64(floor)
- }
- p.log.Trace("Binary searching for common ancestor", "start", start, "end", end)
- for start+1 < end {
- // Split our chain interval in two, and request the hash to cross check
- check := (start + end) / 2
- ttl := d.requestTTL()
- timeout := time.After(ttl)
- go p.peer.RequestHeadersByNumber(check, 1, 0, false)
- // Wait until a reply arrives to this request
- for arrived := false; !arrived; {
- select {
- case <-d.cancelCh:
- return 0, errCanceled
- case packet := <-d.headerCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
- break
- }
- // Make sure the peer actually gave something valid
- headers := packet.(*headerPack).headers
- if len(headers) != 1 {
- p.log.Warn("Multiple headers for single request", "headers", len(headers))
- return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers))
- }
- arrived = true
- // Modify the search interval based on the response
- h := headers[0].Hash()
- n := headers[0].Number.Uint64()
- var known bool
- switch mode {
- case FullSync:
- known = d.blockchain.HasBlock(h, n)
- case FastSync:
- known = d.blockchain.HasFastBlock(h, n)
- default:
- known = d.lightchain.HasHeader(h, n)
- }
- if !known {
- end = check
- break
- }
- header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
- if header.Number.Uint64() != check {
- p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
- return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number)
- }
- start = check
- hash = h
- case <-timeout:
- p.log.Debug("Waiting for search header timed out", "elapsed", ttl)
- return 0, errTimeout
- case <-d.bodyCh:
- case <-d.receiptCh:
- // Out of bounds delivery, ignore
- }
- }
- }
- // Ensure valid ancestry and return
- if int64(start) <= floor {
- p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
- return 0, errInvalidAncestor
- }
- p.log.Debug("Found common ancestor", "number", start, "hash", hash)
- return start, nil
- }
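- // exampleBinaryAncestor illustrates, without any peer interaction, the interval
- // halving performed above: given a predicate reporting whether a block number is
- // known locally (true for start, false for end), it narrows the window until
- // start is the highest locally known number. Purely an illustrative sketch.
- func exampleBinaryAncestor(start, end uint64, known func(uint64) bool) uint64 {
- for start+1 < end {
- check := (start + end) / 2
- if known(check) {
- start = check // the midpoint is known, the ancestor is at or above it
- } else {
- end = check // the midpoint is unknown, the ancestor is below it
- }
- }
- return start
- }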
- // fetchHeaders keeps retrieving headers concurrently from the number
- // requested, until no more are returned, potentially throttling on the way. To
- // facilitate concurrency but still protect against malicious nodes sending bad
- // headers, we construct a header chain skeleton using the "origin" peer we are
- // syncing with, and fill in the missing headers using anyone else. Headers from
- // other peers are only accepted if they map cleanly to the skeleton. If no one
- // can fill in the skeleton - not even the origin peer - it's assumed invalid and
- // the origin is dropped.
- func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error {
- p.log.Debug("Directing header downloads", "origin", from)
- defer p.log.Debug("Header download terminated")
- // Create a timeout timer, and the associated header fetcher
- skeleton := true // Skeleton assembly phase or finishing up
- pivoting := false // Whether the next request is pivot verification
- request := time.Now() // time of the last skeleton fetch request
- timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
- <-timeout.C // timeout channel should be initially empty
- defer timeout.Stop()
- var ttl time.Duration
- getHeaders := func(from uint64) {
- request = time.Now()
- ttl = d.requestTTL()
- timeout.Reset(ttl)
- if skeleton {
- p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
- go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
- } else {
- p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
- go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
- }
- }
- getNextPivot := func() {
- pivoting = true
- request = time.Now()
- ttl = d.requestTTL()
- timeout.Reset(ttl)
- d.pivotLock.RLock()
- pivot := d.pivotHeader.Number.Uint64()
- d.pivotLock.RUnlock()
- p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks))
- go p.peer.RequestHeadersByNumber(pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep
- }
- // Start pulling the header chain skeleton until all is done
- ancestor := from
- getHeaders(from)
- mode := d.getMode()
- for {
- select {
- case <-d.cancelCh:
- return errCanceled
- case packet := <-d.headerCh:
- // Make sure the active peer is giving us the skeleton headers
- if packet.PeerId() != p.id {
- log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
- break
- }
- headerReqTimer.UpdateSince(request)
- timeout.Stop()
- // If the pivot is being checked, move if it became stale and run the real retrieval
- var pivot uint64
- d.pivotLock.RLock()
- if d.pivotHeader != nil {
- pivot = d.pivotHeader.Number.Uint64()
- }
- d.pivotLock.RUnlock()
- if pivoting {
- if packet.Items() == 2 {
- // Retrieve the headers and do some sanity checks, just in case
- headers := packet.(*headerPack).headers
- if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want {
- log.Warn("Peer sent invalid next pivot", "have", have, "want", want)
- return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want)
- }
- if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want {
- log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want)
- return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want)
- }
- log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number)
- pivot = headers[0].Number.Uint64()
- d.pivotLock.Lock()
- d.pivotHeader = headers[0]
- d.pivotLock.Unlock()
- // Write out the pivot into the database so a rollback beyond
- // it will reenable fast sync and update the state root that
- // the state syncer will be downloading.
- rawdb.WriteLastPivotNumber(d.stateDB, pivot)
- }
- pivoting = false
- getHeaders(from)
- continue
- }
- // If the skeleton's finished, pull any remaining head headers directly from the origin
- if skeleton && packet.Items() == 0 {
- skeleton = false
- getHeaders(from)
- continue
- }
- // If no more headers are inbound, notify the content fetchers and return
- if packet.Items() == 0 {
- // Don't abort header fetches while the pivot is downloading
- if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
- p.log.Debug("No headers, waiting for pivot commit")
- select {
- case <-time.After(fsHeaderContCheck):
- getHeaders(from)
- continue
- case <-d.cancelCh:
- return errCanceled
- }
- }
- // Pivot done (or not in fast sync) and no more headers, terminate the process
- p.log.Debug("No more headers available")
- select {
- case d.headerProcCh <- nil:
- return nil
- case <-d.cancelCh:
- return errCanceled
- }
- }
- headers := packet.(*headerPack).headers
- // If we received a skeleton batch, resolve internals concurrently
- if skeleton {
- filled, proced, err := d.fillHeaderSkeleton(from, headers)
- if err != nil {
- p.log.Debug("Skeleton chain invalid", "err", err)
- return fmt.Errorf("%w: %v", errInvalidChain, err)
- }
- headers = filled[proced:]
- from += uint64(proced)
- } else {
- // If we're closing in on the chain head, but haven't yet reached it, delay
- // the last few headers so mini reorgs on the head don't cause invalid hash
- // chain errors.
- if n := len(headers); n > 0 {
- // Retrieve the current head we're at
- var head uint64
- if mode == LightSync {
- head = d.lightchain.CurrentHeader().Number.Uint64()
- } else {
- head = d.blockchain.CurrentFastBlock().NumberU64()
- if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
- head = full
- }
- }
- // If the head is below the common ancestor, we're actually deduplicating
- // already existing chain segments, so use the ancestor as the fake head.
- // Otherwise we might end up delaying header deliveries pointlessly.
- if head < ancestor {
- head = ancestor
- }
- // If the head is way older than this batch, delay the last few headers
- if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
- delay := reorgProtHeaderDelay
- if delay > n {
- delay = n
- }
- headers = headers[:n-delay]
- }
- }
- }
- // Insert all the new headers and fetch the next batch
- if len(headers) > 0 {
- p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
- select {
- case d.headerProcCh <- headers:
- case <-d.cancelCh:
- return errCanceled
- }
- from += uint64(len(headers))
- // If we're still skeleton filling fast sync, check pivot staleness
- // before continuing to the next skeleton filling
- if skeleton && pivot > 0 {
- getNextPivot()
- } else {
- getHeaders(from)
- }
- } else {
- // No headers delivered, or all of them being delayed, sleep a bit and retry
- p.log.Trace("All headers delayed, waiting")
- select {
- case <-time.After(fsHeaderContCheck):
- getHeaders(from)
- continue
- case <-d.cancelCh:
- return errCanceled
- }
- }
- case <-timeout.C:
- if d.dropPeer == nil {
- // The dropPeer method is nil when `--copydb` is used for a local copy.
- // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
- p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id)
- break
- }
- // Header retrieval timed out, consider the peer bad and drop
- p.log.Debug("Header request timed out", "elapsed", ttl)
- headerTimeoutMeter.Mark(1)
- d.dropPeer(p.id)
- // Finish the sync gracefully instead of dumping the gathered data though
- for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
- select {
- case ch <- false:
- case <-d.cancelCh:
- }
- }
- select {
- case d.headerProcCh <- nil:
- case <-d.cancelCh:
- }
- return fmt.Errorf("%w: header request timed out", errBadPeer)
- }
- }
- }
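- // exampleDelayHeaders is an illustrative sketch of the reorg-protection trim
- // applied above: if a batch extends well past our current head, the last few
- // headers are withheld so that a small reorg at the remote head cannot poison
- // the hash chain. It duplicates the in-line logic purely for clarity.
- func exampleDelayHeaders(headers []*types.Header, head uint64) []*types.Header {
- n := len(headers)
- if n == 0 || head+uint64(reorgProtThreshold) >= headers[n-1].Number.Uint64() {
- return headers // batch is close to our head, deliver everything
- }
- delay := reorgProtHeaderDelay
- if delay > n {
- delay = n
- }
- return headers[:n-delay]
- }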
- // fillHeaderSkeleton concurrently retrieves headers from all our available peers
- // and maps them to the provided skeleton header chain.
- //
- // Any partial results from the beginning of the skeleton are (if possible) forwarded
- // immediately to the header processor to keep the rest of the pipeline full even
- // in the case of header stalls.
- //
- // The method returns the entire filled skeleton and also the number of headers
- // already forwarded for processing.
- func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
- log.Debug("Filling up skeleton", "from", from)
- d.queue.ScheduleSkeleton(from, skeleton)
- var (
- deliver = func(packet dataPack) (int, error) {
- pack := packet.(*headerPack)
- return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)
- }
- expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
- reserve = func(p *peerConnection, count int) (*fetchRequest, bool, bool) {
- return d.queue.ReserveHeaders(p, count), false, false
- }
- fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
- capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
- setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) {
- p.SetHeadersIdle(accepted, deliveryTime)
- }
- )
- err := d.fetchParts(d.headerCh, deliver, d.queue.headerContCh, expire,
- d.queue.PendingHeaders, d.queue.InFlightHeaders, reserve,
- nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")
- log.Debug("Skeleton fill terminated", "err", err)
- filled, proced := d.queue.RetrieveHeaders()
- return filled, proced, err
- }
- // fetchBodies iteratively downloads the scheduled block bodies, taking any
- // available peers, reserving a chunk of blocks for each, waiting for delivery
- // and also periodically checking for timeouts.
- func (d *Downloader) fetchBodies(from uint64) error {
- log.Debug("Downloading block bodies", "origin", from)
- var (
- deliver = func(packet dataPack) (int, error) {
- pack := packet.(*bodyPack)
- return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles)
- }
- expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
- fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
- capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
- setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { p.SetBodiesIdle(accepted, deliveryTime) }
- )
- err := d.fetchParts(d.bodyCh, deliver, d.bodyWakeCh, expire,
- d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ReserveBodies,
- d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")
- log.Debug("Block body download terminated", "err", err)
- return err
- }
- // fetchReceipts iteratively downloads the scheduled block receipts, taking any
- // available peers, reserving a chunk of receipts for each, waiting for delivery
- // and also periodically checking for timeouts.
- func (d *Downloader) fetchReceipts(from uint64) error {
- log.Debug("Downloading transaction receipts", "origin", from)
- var (
- deliver = func(packet dataPack) (int, error) {
- pack := packet.(*receiptPack)
- return d.queue.DeliverReceipts(pack.peerID, pack.receipts)
- }
- expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
- fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
- capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
- setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) {
- p.SetReceiptsIdle(accepted, deliveryTime)
- }
- )
- err := d.fetchParts(d.receiptCh, deliver, d.receiptWakeCh, expire,
- d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ReserveReceipts,
- d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")
- log.Debug("Transaction receipt download terminated", "err", err)
- return err
- }
- // fetchParts iteratively downloads scheduled block parts, taking any available
- // peers, reserving a chunk of fetch requests for each, waiting for delivery and
- // also periodically checking for timeouts.
- //
- // As the scheduling/timeout logic mostly is the same for all downloaded data
- // types, this method is used by each for data gathering and is instrumented with
- // various callbacks to handle the slight differences between processing them.
- //
- // The instrumentation parameters:
- // - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
- // - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`)
- // - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed)
- // - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
- // - pending: task callback for the number of requests still needing download (detect completion/non-completability)
- // - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish)
- // - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions)
- // - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
- // - fetch: network callback to actually send a particular download request to a physical remote peer
- // - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
- // - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
- // - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
- // - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
- // - kind: textual label of the type being downloaded to display in log messages
- func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
- expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool),
- fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
- idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int, time.Time), kind string) error {
- // Create a ticker to detect expired retrieval tasks
- ticker := time.NewTicker(100 * time.Millisecond)
- defer ticker.Stop()
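- // Single-slot channel used to coalesce progress signals; sends use select/default so they never block.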
- update := make(chan struct{}, 1)
- // Prepare the queue and fetch block parts until the block header fetcher's done
- finished := false
- for {
- select {
- case <-d.cancelCh:
- return errCanceled
- case packet := <-deliveryCh:
- deliveryTime := time.Now()
- // If the peer was previously banned and failed to deliver its pack
- // in a reasonable time frame, ignore its message.
- if peer := d.peers.Peer(packet.PeerId()); peer != nil {
- // Deliver the received chunk of data and check chain validity
- accepted, err := deliver(packet)
- if errors.Is(err, errInvalidChain) {
- return err
- }
- // Unless a peer delivered something completely different from what was requested
- // (usually caused by a timed-out request that arrived in the end), set it to
- // idle. If the delivery is stale, the peer should have already been idled.
- if !errors.Is(err, errStaleDelivery) {
- setIdle(peer, accepted, deliveryTime)
- }
- // Issue a log to the user to see what's going on
- switch {
- case err == nil && packet.Items() == 0:
- peer.log.Trace("Requested data not delivered", "type", kind)
- case err == nil:
- peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
- default:
- peer.log.Debug("Failed to deliver retrieved data", "type", kind, "err", err)
- }
- }
- // Blocks assembled, try to update the progress
- select {
- case update <- struct{}{}:
- default:
- }
- case cont := <-wakeCh:
- // The header fetcher sent a continuation flag, check if it's done
- if !cont {
- finished = true
- }
- // Headers arrive, try to update the progress
- select {
- case update <- struct{}{}:
- default:
- }
- case <-ticker.C:
- // Sanity check to update the progress
- select {
- case update <- struct{}{}:
- default:
- }
- case <-update:
- // Short circuit if we lost all our peers
- if d.peers.Len() == 0 {
- return errNoPeers
- }
- // Check for fetch request timeouts and demote the responsible peers
- for pid, fails := range expire() {
- if peer := d.peers.Peer(pid); peer != nil {
- // If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
- // ourselves. Only reset to minimal throughput, but don't drop the peer just yet. If even the minimal
- // request times out, then sync-wise we need to get rid of the peer.
- //
- // The reason the minimum threshold is 2 is that the downloader tries to estimate the bandwidth
- // and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
- // how response times react to it, so it always requests one more than the minimum (i.e. min 2).
- if fails > 2 {
- peer.log.Trace("Data delivery timed out", "type", kind)
- setIdle(peer, 0, time.Now())
- } else {
- peer.log.Debug("Stalling delivery, dropping", "type", kind)
- if d.dropPeer == nil {
- // The dropPeer method is nil when `--copydb` is used for a local copy.
- // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
- peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid)
- } else {
- d.dropPeer(pid)
- // If this peer was the master peer, abort sync immediately
- d.cancelLock.RLock()
- master := pid == d.cancelPeer
- d.cancelLock.RUnlock()
- if master {
- d.cancel()
- return errTimeout
- }
- }
- }
- }
- }
- // If there's nothing more to fetch, wait or terminate
- if pending() == 0 {
- if !inFlight() && finished {
- log.Debug("Data fetching completed", "type", kind)
- return nil
- }
- break
- }
- // Send a download request to all idle peers, until throttled
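- // progressed/throttled track the outcome of the reservation round below, while running records
- // whether any request is still in flight; together they decide the stall check at the end.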
- progressed, throttled, running := false, false, inFlight()
- idles, total := idle()
- pendCount := pending()
- for _, peer := range idles {
- // Short circuit if throttling activated
- if throttled {
- break
- }
- // Short circuit if there are no more available tasks.
- if pendCount = pending(); pendCount == 0 {
- break
- }
- // Reserve a chunk of fetches for a peer. A nil can mean either that
- // no more headers are available, or that the peer is known not to
- // have them.
- request, progress, throttle := reserve(peer, capacity(peer))
- if progress {
- progressed = true
- }
- if throttle {
- throttled = true
- throttleCounter.Inc(1)
- }
- if request == nil {
- continue
- }
- if request.From > 0 {
- peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From)
- } else {
- peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
- }
- // Fetch the chunk and make sure any errors return the hashes to the queue
- if fetchHook != nil {
- fetchHook(request.Headers)
- }
- if err := fetch(peer, request); err != nil {
- // Although we could try to fix this, this error really
- // means that we've double allocated a fetch task to a peer. If that is the
- // case, the internal state of the downloader and the queue is very wrong so
- // better hard crash and note the error instead of silently accumulating into
- // a much bigger issue.
- panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
- }
- running = true
- }
- // Make sure that we have peers available for fetching. If all peers have been tried
- // and all failed, throw an error.
- if !progressed && !throttled && !running && len(idles) == total && pendCount > 0 {
- return errPeersUnavailable
- }
- }
- }
- }
- // processHeaders takes batches of retrieved headers from an input channel and
- // keeps processing and scheduling them into the header chain and downloader's
- // queue until the stream ends or a failure occurs.
- func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
- // Keep a count of uncertain headers to roll back
- var (
- rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis)
- rollbackErr error
- mode = d.getMode()
- )
- defer func() {
- if rollback > 0 {
- lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
- if mode != LightSync {
- lastFastBlock = d.blockchain.CurrentFastBlock().Number()
- lastBlock = d.blockchain.CurrentBlock().Number()
- }
- if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
- // We're already unwinding the stack, only print the error to make it more visible
- log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err)
- }
- curFastBlock, curBlock := common.Big0, common.Big0
- if mode != LightSync {
- curFastBlock = d.blockchain.CurrentFastBlock().Number()
- curBlock = d.blockchain.CurrentBlock().Number()
- }
- log.Warn("Rolled back chain segment",
- "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
- "fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
- "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
- }
- }()
- // Wait for batches of headers to process
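- // gotHeaders records whether the peer delivered any headers at all; it feeds the
- // stalling-peer check once the header stream terminates.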
- gotHeaders := false
- for {
- select {
- case <-d.cancelCh:
- rollbackErr = errCanceled
- return errCanceled
- case headers := <-d.headerProcCh:
- // Terminate header processing if we synced up
- if len(headers) == 0 {
- // Notify everyone that headers are fully processed
- for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
- select {
- case ch <- false:
- case <-d.cancelCh:
- }
- }
- // If no headers were retrieved at all, the peer violated its TD promise that it had a
- // better chain compared to ours. The only exception is if its promised blocks were
- // already imported by other means (e.g. fetcher):
- //
- // R <remote peer>, L <local node>: Both at block 10
- // R: Mine block 11, and propagate it to L
- // L: Queue block 11 for import
- // L: Notice that R's head and TD increased compared to ours, start sync
- // L: Import of block 11 finishes
- // L: Sync begins, and finds common ancestor at 11
- // L: Request new headers up from 11 (R's TD was higher, it must have something)
- // R: Nothing to give
- if mode != LightSync {
- head := d.blockchain.CurrentBlock()
- if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
- return errStallingPeer
- }
- }
- // If fast or light syncing, ensure promised headers are indeed delivered. This is
- // needed to detect scenarios where an attacker feeds a bad pivot and then bails out
- // of delivering the post-pivot blocks that would flag the invalid content.
- //
- // This check cannot be executed "as is" for full imports, since blocks may still be
- // queued for processing when the header download completes. However, as long as the
- // peer gave us something useful, we're already happy/progressed (above check).
- if mode == FastSync || mode == LightSync {
- head := d.lightchain.CurrentHeader()
- if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
- return errStallingPeer
- }
- }
- // Disable any rollback and return
- rollback = 0
- return nil
- }
- // Otherwise split the chunk of headers into batches and process them
- gotHeaders = true
- for len(headers) > 0 {
- // Terminate if something failed in between processing chunks
- select {
- case <-d.cancelCh:
- rollbackErr = errCanceled
- return errCanceled
- default:
- }
- // Select the next chunk of headers to import
- limit := maxHeadersProcess
- if limit > len(headers) {
- limit = len(headers)
- }
- chunk := headers[:limit]
- // In case of header only syncing, validate the chunk immediately
- if mode == FastSync || mode == LightSync {
- // If we're importing pure headers, verify based on their recentness
- var pivot uint64
- d.pivotLock.RLock()
- if d.pivotHeader != nil {
- pivot = d.pivotHeader.Number.Uint64()
- }
- d.pivotLock.RUnlock()
- frequency := fsHeaderCheckFrequency
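- // If the chunk reaches into the force-verify window around the pivot, verify every header instead of sampling.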
- if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
- frequency = 1
- }
- if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
- rollbackErr = err
- // If some headers were inserted, track them as uncertain
- if (mode == FastSync || frequency > 1) && n > 0 && rollback == 0 {
- rollback = chunk[0].Number.Uint64()
- }
- log.Warn("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
- return fmt.Errorf("%w: %v", errInvalidChain, err)
- }
- // All verifications passed, track all headers within the allotted limits
- if mode == FastSync {
- head := chunk[len(chunk)-1].Number.Uint64()
- if head-rollback > uint64(fsHeaderSafetyNet) {
- rollback = head - uint64(fsHeaderSafetyNet)
- } else {
- rollback = 1
- }
- }
- }
- // Unless we're doing light chains, schedule the headers for associated content retrieval
- if mode == FullSync || mode == FastSync || mode == BoundedFullSync {
- // If we've reached the allowed number of pending headers, stall a bit
- for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
- select {
- case <-d.cancelCh:
- rollbackErr = errCanceled
- return errCanceled
- case <-time.After(time.Second):
- }
- }
- // Otherwise insert the headers for content retrieval
- inserts := d.queue.Schedule(chunk, origin)
- if len(inserts) != len(chunk) {
- rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunk))
- return fmt.Errorf("%w: stale headers", errBadPeer)
- }
- }
- headers = headers[limit:]
- origin += uint64(limit)
- }
- // Update the highest block number we know if a higher one is found.
- d.syncStatsLock.Lock()
- if d.syncStatsChainHeight < origin {
- d.syncStatsChainHeight = origin - 1
- }
- d.syncStatsLock.Unlock()
- // Signal the content downloaders of the availability of new tasks
- for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
- select {
- case ch <- true:
- default:
- }
- }
- }
- }
- }
- // processFullSyncContent takes fetch results from the queue and imports them into the chain.
- func (d *Downloader) processFullSyncContent() error {
- for {
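- // Results(true) blocks until at least one complete result is available or the queue is closed.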
- results := d.queue.Results(true)
- if len(results) == 0 {
- return nil
- }
- if d.chainInsertHook != nil {
- d.chainInsertHook(results)
- }
- if err := d.importBlockResults(results); err != nil {
- return err
- }
- }
- }
- func (d *Downloader) importBlockResults(results []*fetchResult) error {
- // Check for any early termination requests
- if len(results) == 0 {
- return nil
- }
- select {
- case <-d.quitCh:
- return errCancelContentProcessing
- default:
- }
- // Retrieve the batch of results to import
- first, last := results[0].Header, results[len(results)-1].Header
- log.Debug("Inserting downloaded chain", "items", len(results),
- "firstnum", first.Number, "firsthash", first.Hash(),
- "lastnum", last.Number, "lasthash", last.Hash(),
- )
- blocks := make([]*types.Block, len(results))
- for i, result := range results {
- blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
- }
- if index, err := d.blockchain.InsertChain(blocks); err != nil {
- if index < len(results) {
- log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
- } else {
- // The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
- // when it needs to preprocess blocks to import a sidechain.
- // The importer will put together a new list of blocks to import, which is a superset
- // of the blocks delivered from the downloader, and the indexing will be off.
- log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
- }
- return fmt.Errorf("%w: %v", errInvalidChain, err)
- }
- return nil
- }
- // processFastSyncContent takes fetch results from the queue and writes them to the
- // database. It also controls the synchronisation of state nodes of the pivot block.
- func (d *Downloader) processFastSyncContent() error {
- // Start syncing state of the reported head block. This should get us most of
- // the state of the pivot block.
- d.pivotLock.RLock()
- sync := d.syncState(d.pivotHeader.Root)
- d.pivotLock.RUnlock()
- defer func() {
- // The `sync` object is replaced every time the pivot moves. We need to
- // defer-close the very last active one, hence the lazy evaluation here
- // instead of a direct `defer sync.Cancel()`.
- sync.Cancel()
- }()
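- // closeOnErr closes the result queue if the state sync fails for any reason other than
- // cancellation, which wakes up any pending Results call below.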
- closeOnErr := func(s *stateSync) {
- if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled {
- d.queue.Close() // wake up Results
- }
- }
- go closeOnErr(sync)
- // To cater for moving pivot points, track the pivot block and subsequently
- // accumulated download results separately.
- var (
- oldPivot *fetchResult // Locked in pivot block, might change eventually
- oldTail []*fetchResult // Downloaded content after the pivot
- )
- for {
- // Wait for the next batch of downloaded data to be available, and if the pivot
- // block became stale, move the goalpost
- results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
- if len(results) == 0 {
- // If pivot sync is done, stop
- if oldPivot == nil {
- return sync.Cancel()
- }
- // If sync failed, stop
- select {
- case <-d.cancelCh:
- sync.Cancel()
- return errCanceled
- default:
- }
- }
- if d.chainInsertHook != nil {
- d.chainInsertHook(results)
- }
- // If we haven't downloaded the pivot block yet, check pivot staleness
- // notifications from the header downloader
- d.pivotLock.RLock()
- pivot := d.pivotHeader
- d.pivotLock.RUnlock()
- if oldPivot == nil {
- if pivot.Root != sync.root {
- sync.Cancel()
- sync = d.syncState(pivot.Root)
- go closeOnErr(sync)
- }
- } else {
- results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
- }
- // Split around the pivot block and process the two sides via fast/full sync
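- // As long as the pivot block has not been committed yet (d.committed == 0), it may still move forward.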
- if atomic.LoadInt32(&d.committed) == 0 {
- latest := results[len(results)-1].Header
- // If the height is above the pivot block by 2 sets, it means the pivot
- // became stale in the network and was garbage collected, so move to a
- // new pivot.
- //
- // Note, we have `reorgProtHeaderDelay` blocks withheld; those need to be
- // taken into account, otherwise we'll detect the pivot move too late and
- // drop peers due to unavailable state!
- if height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) {
- log.Warn("Pivot became stale, moving", "old", pivot.Number.Uint64(), "new", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay))
- pivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted
- d.pivotLock.Lock()
- d.pivotHeader = pivot
- d.pivotLock.Unlock()
- // Write out the pivot into the database so a rollback beyond it will
- // reenable fast sync
- rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64())
- }
- }
- P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results)
- if err := d.commitFastSyncData(beforeP, sync); err != nil {
- return err
- }
- if P != nil {
- // If new pivot block found, cancel old state retrieval and restart
- if oldPivot != P {
- sync.Cancel()
- sync = d.syncState(P.Header.Root)
- go closeOnErr(sync)
- oldPivot = P
- }
- // Wait for completion, occasionally checking for pivot staleness
- select {
- case <-sync.done:
- if sync.err != nil {
- return sync.err
- }
- if err := d.commitPivotBlock(P); err != nil {
- return err
- }
- oldPivot = nil
- case <-time.After(time.Second):
- oldTail = afterP
- continue
- }
- }
- // Fast sync done, pivot commit done, full import
- if err := d.importBlockResults(afterP); err != nil {
- return err
- }
- }
- }
- func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
- if len(results) == 0 {
- return nil, nil, nil
- }
- if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {
- // the pivot is somewhere in the future
- return nil, results, nil
- }
- // This can also be optimized, but only happens very seldom
- for _, result := range results {
- num := result.Header.Number.Uint64()
- switch {
- case num < pivot:
- before = append(before, result)
- case num == pivot:
- p = result
- default:
- after = append(after, result)
- }
- }
- return p, before, after
- }
- func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
- // Check for any early termination requests
- if len(results) == 0 {
- return nil
- }
- select {
- case <-d.quitCh:
- return errCancelContentProcessing
- case <-stateSync.done:
- if err := stateSync.Wait(); err != nil {
- return err
- }
- default:
- }
- // Retrieve the batch of results to import
- first, last := results[0].Header, results[len(results)-1].Header
- log.Debug("Inserting fast-sync blocks", "items", len(results),
- "firstnum", first.Number, "firsthash", first.Hash(),
- "lastnumn", last.Number, "lasthash", last.Hash(),
- )
- blocks := make([]*types.Block, len(results))
- receipts := make([]types.Receipts, len(results))
- for i, result := range results {
- blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
- receipts[i] = result.Receipts
- }
- if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
- log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
- return fmt.Errorf("%w: %v", errInvalidChain, err)
- }
- return nil
- }
- func (d *Downloader) commitPivotBlock(result *fetchResult) error {
- block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
- log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
- // Commit the pivot block as the new head, will require full sync from here on
- if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {
- return err
- }
- if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
- return err
- }
- atomic.StoreInt32(&d.committed, 1)
- // If we had a bloom filter for the state sync, deallocate it now. Note, we only
- // deallocate internally, but keep the empty wrapper. This ensures that if we do
- // a rollback after committing the pivot and restarting fast sync, we don't end
- // up using a nil bloom. Empty bloom is fine, it just returns that it does not
- // have the info we need, so reach down to the database instead.
- if d.stateBloom != nil {
- d.stateBloom.Close()
- }
- return nil
- }
- // DeliverHeaders injects a new batch of block headers received from a remote
- // node into the download schedule.
- func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error {
- return d.deliver(d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
- }
- // DeliverBodies injects a new batch of block bodies received from a remote node.
- func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) error {
- return d.deliver(d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
- }
- // DeliverReceipts injects a new batch of receipts received from a remote node.
- func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) error {
- return d.deliver(d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
- }
- // DeliverNodeData injects a new batch of node state data received from a remote node.
- func (d *Downloader) DeliverNodeData(id string, data [][]byte) error {
- return d.deliver(d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
- }
- // DeliverSnapPacket is invoked from a peer's message handler when it transmits a
- // data packet for the local node to consume.
- func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error {
- switch packet := packet.(type) {
- case *snap.AccountRangePacket:
- hashes, accounts, err := packet.Unpack()
- if err != nil {
- return err
- }
- return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof)
- case *snap.StorageRangesPacket:
- hashset, slotset := packet.Unpack()
- return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof)
- case *snap.ByteCodesPacket:
- return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)
- case *snap.TrieNodesPacket:
- return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)
- default:
- return fmt.Errorf("unexpected snap packet type: %T", packet)
- }
- }
- // deliver injects a new batch of data received from a remote node.
- func (d *Downloader) deliver(destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
- // Update the delivery metrics for both good and failed deliveries
- inMeter.Mark(int64(packet.Items()))
- defer func() {
- if err != nil {
- dropMeter.Mark(int64(packet.Items()))
- }
- }()
- // Deliver or abort if the sync is canceled while queuing
- d.cancelLock.RLock()
- cancel := d.cancelCh
- d.cancelLock.RUnlock()
- if cancel == nil {
- return errNoSyncActive
- }
- select {
- case destCh <- packet:
- return nil
- case <-cancel:
- return errNoSyncActive
- }
- }
- // qosTuner is the quality of service tuning loop that occasionally gathers the
- // peer latency statistics and updates the estimated request round trip time.
- func (d *Downloader) qosTuner() {
- for {
- // Retrieve the current median RTT and integrate into the previous target RTT
- rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
- atomic.StoreUint64(&d.rttEstimate, uint64(rtt))
- // A new RTT cycle passed, increase our confidence in the estimated RTT
- conf := atomic.LoadUint64(&d.rttConfidence)
- conf = conf + (1000000-conf)/2
- atomic.StoreUint64(&d.rttConfidence, conf)
- // Log the new QoS values and sleep until the next RTT
- log.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
- select {
- case <-d.quitCh:
- return
- case <-time.After(rtt):
- }
- }
- }
- // qosReduceConfidence is meant to be called when a new peer joins the downloader's
- // peer set, needing to reduce the confidence we have in our QoS estimates.
- func (d *Downloader) qosReduceConfidence() {
- // If we have a single peer, confidence is always 1
- peers := uint64(d.peers.Len())
- if peers == 0 {
- // Ensure peer connectivity races don't catch us off guard
- return
- }
- if peers == 1 {
- atomic.StoreUint64(&d.rttConfidence, 1000000)
- return
- }
- // If we have a ton of peers, don't drop confidence
- if peers >= uint64(qosConfidenceCap) {
- return
- }
- // Otherwise drop the confidence factor
- conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
- if float64(conf)/1000000 < rttMinConfidence {
- conf = uint64(rttMinConfidence * 1000000)
- }
- atomic.StoreUint64(&d.rttConfidence, conf)
- rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
- log.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
- }
- // requestRTT returns the current target round trip time for a download request
- // to complete in.
- //
- // Note, the returned RTT is 0.9 of the actual estimated RTT. The reason is that
- // the downloader tries to adapt queries to the RTT, so multiple RTT values can
- // be adapted to, but smaller ones are preferred (more stable download stream).
- func (d *Downloader) requestRTT() time.Duration {
- return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
- }
- // requestTTL returns the current timeout allowance for a single download request
- // to finish under.
- func (d *Downloader) requestTTL() time.Duration {
- var (
- rtt = time.Duration(atomic.LoadUint64(&d.rttEstimate))
- conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
- )
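- // Scale the base RTT and divide by the confidence: low confidence inflates the timeout, capped at ttlLimit.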
- ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
- if ttl > ttlLimit {
- ttl = ttlLimit
- }
- return ttl
- }
- // Extra downloader functionality for non-proof-of-work consensus
- // Synchronizes with a peer, but only up to the provided Hash
- func (d *Downloader) syncWithPeerUntil(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
- d.mux.Post(StartEvent{})
- defer func() {
- // reset on error
- if err != nil {
- d.mux.Post(FailedEvent{err})
- } else {
- // Raft syncWithPeerUntil never uses the latest field in DoneEvent,
- // therefore post an empty DoneEvent only
- d.mux.Post(DoneEvent{})
- }
- }()
- if p.version < 62 {
- return errTooOld
- }
- log.Info("Synchronising with the network", "id", p.id, "version", p.version)
- defer func(start time.Time) {
- log.Info("Synchronisation terminated", "duration", time.Since(start))
- }(time.Now())
- frozen, _ := d.stateDB.Ancients()
- localHeight := d.blockchain.CurrentBlock().NumberU64()
- // Check if we're recovering the state db while only the ancient db is present.
- // In this case it's possible that the local head is not the latest as per the
- // raft WAL, so switch to the remote header.
- var remoteHeader *types.Header
- if localHeight == 0 && frozen > 0 {
- // The state db was removed and is being recovered now; we trust the
- // peer's height and sync up to that.
- remoteHeader, _, err = d.fetchHead(p) // #21529
- } else {
- remoteHeader, err = d.fetchHeader(p, hash)
- }
- if err != nil {
- return err
- }
- remoteHeight := remoteHeader.Number.Uint64()
- d.syncStatsLock.Lock()
- if d.syncStatsChainHeight <= localHeight || d.syncStatsChainOrigin > localHeight {
- d.syncStatsChainOrigin = localHeight
- }
- d.syncStatsChainHeight = remoteHeight
- d.syncStatsLock.Unlock()
- d.queue.Prepare(localHeight+1, d.getMode())
- if d.syncInitHook != nil {
- d.syncInitHook(localHeight, remoteHeight)
- }
- fetchers := []func() error{
- func() error { return d.fetchBoundedHeaders(p, localHeight+1, remoteHeight) },
- func() error { return d.fetchBodies(localHeight + 1) },
- func() error { return d.fetchReceipts(localHeight + 1) }, // Receipts are only retrieved during fast sync
- func() error { return d.processHeaders(localHeight+1, td) }, // #21529
- d.processFullSyncContent, // This must be added to clear the buffer of downloaded content as it's being filled
- }
- return d.spawnSync(fetchers)
- }
- // Fetches a single header from a peer
- func (d *Downloader) fetchHeader(p *peerConnection, hash common.Hash) (*types.Header, error) {
- log.Info("retrieving remote chain height", "peer", p)
- go p.peer.RequestHeadersByHash(hash, 1, 0, false)
- timeout := time.After(d.requestTTL())
- for {
- select {
- case <-d.cancelCh:
- return nil, errCancelBlockFetch
- case packet := <-d.headerCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- log.Info("Received headers from incorrect peer", "peer id", packet.PeerId())
- break
- }
- // Make sure the peer actually gave something valid
- headers := packet.(*headerPack).headers
- if len(headers) != 1 {
- log.Info("invalid number of head headers (!= 1)", "peer", p, "len(headers)", len(headers))
- return nil, errBadPeer
- }
- return headers[0], nil
- case <-timeout:
- log.Info("head header timeout", "peer", p)
- return nil, errTimeout
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Out of bounds delivery, ignore
- }
- }
- }
- // Not defined in Go's stdlib:
- func minInt(a, b int) int {
- if a < b {
- return a
- }
- return b
- }
- // Fetches headers between `from` and `to`, inclusive.
- // Assumes invariant: from <= to.
- func (d *Downloader) fetchBoundedHeaders(p *peerConnection, from uint64, to uint64) error {
- log.Info("directing header downloads", "peer", p, "from", from, "to", to)
- defer log.Info("header download terminated", "peer", p)
- // Create a timeout timer, and the associated header fetcher
- skeleton := true // Skeleton assembly phase or finishing up
- request := time.Now() // time of the last skeleton fetch request
- timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
- <-timeout.C // timeout channel should be initially empty
- defer timeout.Stop()
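- // getHeaders issues the next header request: a sparse skeleton request while enough headers
- // remain, otherwise a single contiguous request for the remaining tail.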
- getHeaders := func(from uint64) {
- request = time.Now()
- timeout.Reset(d.requestTTL())
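- // The first skeleton header sits one full batch ahead of `from`; if that already overshoots
- // `to`, fall back to plain contiguous fetching.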
- skeletonStart := from + uint64(MaxHeaderFetch) - 1
- if skeleton {
- if skeletonStart > to {
- skeleton = false
- }
- }
- if skeleton {
- numSkeletonHeaders := minInt(MaxSkeletonSize, (int(to-from)+1)/MaxHeaderFetch)
- log.Trace("fetching skeleton headers", "peer", p, "num skeleton headers", numSkeletonHeaders, "from", from)
- go p.peer.RequestHeadersByNumber(skeletonStart, numSkeletonHeaders, MaxHeaderFetch-1, false)
- } else {
- // There are not enough headers remaining to warrant a skeleton fetch.
- // Grab all of the remaining headers.
- numHeaders := int(to-from) + 1
- log.Trace("fetching full headers", "peer", p, "num headers", numHeaders, "from", from)
- go p.peer.RequestHeadersByNumber(from, numHeaders, 0, false)
- }
- }
- // Start pulling the header chain skeleton until all is done
- getHeaders(from)
- for {
- select {
- case <-d.cancelCh:
- return errCancelHeaderFetch
- case packet := <-d.headerCh:
- // Make sure the active peer is giving us the skeleton headers
- if packet.PeerId() != p.id {
- log.Info("Received headers from incorrect peer", "peer id", packet.PeerId())
- break
- }
- headerReqTimer.UpdateSince(request)
- timeout.Stop()
- headers := packet.(*headerPack).headers
- // If we received a skeleton batch, resolve internals concurrently
- if skeleton {
- filled, proced, err := d.fillHeaderSkeleton(from, headers)
- if err != nil {
- log.Debug("skeleton chain invalid", "peer", p, "err", err)
- return errInvalidChain
- }
- headers = filled[proced:]
- from += uint64(proced)
- }
- // Insert all the new headers and fetch the next batch
- if len(headers) > 0 {
- log.Trace("schedule headers", "peer", p, "num headers", len(headers), "from", from)
- select {
- case d.headerProcCh <- headers:
- case <-d.cancelCh:
- return errCancelHeaderFetch
- }
- from += uint64(len(headers))
- }
- if from <= to {
- getHeaders(from)
- } else {
- // Notify the content fetchers that no more headers are inbound and return.
- select {
- case d.headerProcCh <- nil:
- return nil
- case <-d.cancelCh:
- return errCancelHeaderFetch
- }
- }
- case <-timeout.C:
- // Header retrieval timed out, consider the peer bad and drop
- log.Info("header request timed out", "peer", p)
- headerTimeoutMeter.Mark(1)
- d.dropPeer(p.id)
- // Finish the sync gracefully instead of dumping the gathered data though
- for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
- select {
- case ch <- false:
- case <-d.cancelCh:
- }
- }
- select {
- case d.headerProcCh <- nil:
- case <-d.cancelCh:
- }
- return errBadPeer
- }
- }
- }