The previous section analyzed the Fetcher, which synchronizes newly announced blocks and transactions from network peers. If an announced block is too far ahead of the local head, the local chain data is too old and the Fetcher will not sync it. In that case the Downloader is needed to synchronize the full block data.
I. Starting the Downloader

When the ProtocolManager is initialized, it initializes the Downloader:
```go
func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
	if lightchain == nil {
		lightchain = chain
	}
	dl := &Downloader{
		mode:           mode,
		stateDB:        stateDb,
		mux:            mux,
		queue:          newQueue(),
		peers:          newPeerSet(),
		rttEstimate:    uint64(rttMaxEstimate),
		rttConfidence:  uint64(1000000),
		blockchain:     chain,
		lightchain:     lightchain,
		dropPeer:       dropPeer,
		headerCh:       make(chan dataPack, 1),
		bodyCh:         make(chan dataPack, 1),
		receiptCh:      make(chan dataPack, 1),
		bodyWakeCh:     make(chan bool, 1),
		receiptWakeCh:  make(chan bool, 1),
		headerProcCh:   make(chan []*types.Header, 1),
		quitCh:         make(chan struct{}),
		stateCh:        make(chan dataPack),
		stateSyncStart: make(chan *stateSync),
		trackStateReq:  make(chan *stateReq),
	}
	go dl.qosTuner()
	go dl.stateFetcher()
	return dl
}
```
New first initializes the Downloader's fields, then starts the dl.qosTuner() goroutine, which continuously estimates the request round-trip time (RTT), and the dl.stateFetcher() goroutine, which services state-trie sync requests during fast sync.
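As a side note on qosTuner: the tuning boils down to keeping a moving average of observed round-trip times and deriving request timeouts from it. Below is a minimal, self-contained sketch of that idea; rttEstimator and its methods are illustrative names rather than geth's internals, though the pessimistic 20-second starting point and the 3x TTL multiplier mirror geth's rttMaxEstimate and ttlScaling constants.

```go
package main

import (
	"fmt"
	"time"
)

// rttEstimator is a toy version of the Downloader's QoS tuning:
// it keeps an exponentially weighted moving average of observed
// round-trip times and derives a request timeout (TTL) from it.
type rttEstimator struct {
	estimate time.Duration // current RTT estimate
}

// observe folds a new RTT sample into the estimate (90% old, 10% new),
// mirroring the gradual adjustment qosTuner performs.
func (r *rttEstimator) observe(sample time.Duration) {
	r.estimate = time.Duration(float64(r.estimate)*0.9 + float64(sample)*0.1)
}

// ttl returns a timeout comfortably above the estimated RTT,
// so a single slow response does not immediately drop a peer.
func (r *rttEstimator) ttl() time.Duration {
	return 3 * r.estimate
}

func main() {
	est := &rttEstimator{estimate: 20 * time.Second} // pessimistic start, like rttMaxEstimate
	for _, s := range []time.Duration{800 * time.Millisecond, 650 * time.Millisecond, 700 * time.Millisecond} {
		est.observe(s)
	}
	fmt.Println("estimate:", est.estimate, "ttl:", est.ttl())
}
```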
Whenever the ProtocolManager receives a new block broadcast, or a new P2P node joins, it calls its synchronise(peer *peer) method, which in turn calls the Downloader's Synchronise(peer.id, pHead, pTd, mode).
Synchronise resets d.queue and d.peers, drains d.bodyWakeCh, d.receiptWakeCh, d.headerCh, d.bodyCh, d.receiptCh and d.headerProcCh, and then calls d.syncWithPeer(p, hash, td):
```go
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
	d.mux.Post(StartEvent{})
	defer func() {
		// reset on error
		if err != nil {
			d.mux.Post(FailedEvent{err})
		} else {
			d.mux.Post(DoneEvent{})
		}
	}()
	if p.version < 62 {
		return errTooOld
	}

	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
	defer func(start time.Time) {
		log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
	}(time.Now())

	// Look up the sync boundaries: the common ancestor and the target block
	latest, err := d.fetchHeight(p)
	if err != nil {
		return err
	}
	height := latest.Number.Uint64()

	origin, err := d.findAncestor(p, height)
	if err != nil {
		return err
	}
	d.syncStatsLock.Lock()
	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
		d.syncStatsChainOrigin = origin
	}
	d.syncStatsChainHeight = height
	d.syncStatsLock.Unlock()

	// Ensure our origin point is below any fast sync pivot point
	pivot := uint64(0)
	if d.mode == FastSync {
		if height <= uint64(fsMinFullBlocks) {
			origin = 0
		} else {
			pivot = height - uint64(fsMinFullBlocks)
			if pivot <= origin {
				origin = pivot - 1
			}
		}
	}
	d.committed = 1
	if d.mode == FastSync && pivot != 0 {
		d.committed = 0
	}
	// Initiate the sync using a concurrent header and content retrieval algorithm
	d.queue.Prepare(origin+1, d.mode)
	if d.syncInitHook != nil {
		d.syncInitHook(origin, height)
	}

	fetchers := []func() error{
		func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved
		func() error { return d.fetchBodies(origin + 1) },          // Bodies are retrieved during normal and fast sync
		func() error { return d.fetchReceipts(origin + 1) },        // Receipts are retrieved during fast sync
		func() error { return d.processHeaders(origin+1, pivot, td) },
	}
	if d.mode == FastSync {
		fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) })
	} else if d.mode == FullSync {
		fetchers = append(fetchers, d.processFullSyncContent)
	}
	return d.spawnSync(fetchers)
}
```
It first calls latest, err := d.fetchHeight(p) to obtain the peer's latest block header. This method is a little roundabout, so let's walk through it:
```go
func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
	p.log.Debug("Retrieving remote chain height")

	// Request the advertised remote head block and wait for the response
	head, _ := p.peer.Head()
	go p.peer.RequestHeadersByHash(head, 1, 0, false)

	ttl := d.requestTTL()
	timeout := time.After(ttl)
	for {
		select {
		case <-d.cancelCh:
			return nil, errCancelBlockFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) != 1 {
				p.log.Debug("Multiple headers for single request", "headers", len(headers))
				return nil, errBadPeer
			}
			head := headers[0]
			p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash())
			return head, nil

		case <-timeout:
			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
			return nil, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
}
```
The flow is:
1. Call p.peer.RequestHeadersByHash(head, 1, 0, false) to send the peer a GetBlockHeadersMsg message.
2. Block in the select loop until either d.headerCh delivers a result or the timeout fires (a pattern sketched below).
3. The remote peer replies with a BlockHeadersMsg message.
4. The local message handler calls downloader.DeliverHeaders(p.id, headers).
5. DeliverHeaders packs p.id and the headers together and sends them on d.headerCh.
6. The select receives on d.headerCh, unblocks, and returns the header.
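This "fire an asynchronous request, then block on a delivery channel or a timeout" pattern recurs throughout the Downloader. Here is a stripped-down, runnable sketch of the pattern; requestHeader and deliveryCh are illustrative names, not geth's API:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// A stripped-down version of the Downloader's request/response pattern:
// the request is sent asynchronously, and the caller blocks on a delivery
// channel until the answer arrives or a TTL expires.
func requestHeader(deliveryCh <-chan uint64, ttl time.Duration) (uint64, error) {
	timeout := time.After(ttl)
	select {
	case head := <-deliveryCh:
		return head, nil // the peer's reply was delivered onto the channel
	case <-timeout:
		return 0, errors.New("timeout waiting for head header")
	}
}

func main() {
	deliveryCh := make(chan uint64, 1)

	// Simulate the remote peer: the message handler would call a Deliver*
	// method, which feeds the response into the channel the requester waits on.
	go func() {
		time.Sleep(50 * time.Millisecond)
		deliveryCh <- 4_500_000 // pretend remote head number
	}()

	head, err := requestHeader(deliveryCh, time.Second)
	fmt.Println("remote head:", head, "err:", err)
}
```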
syncWithPeer() then calls d.findAncestor(p, height) to find the common ancestor of the local chain and the peer's chain:
```go
func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
	// Figure out the valid ancestor range to prevent rewrite attacks
	floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()

	if d.mode == FullSync {
		ceil = d.blockchain.CurrentBlock().NumberU64()
	} else if d.mode == FastSync {
		ceil = d.blockchain.CurrentFastBlock().NumberU64()
	}
	if ceil >= MaxForkAncestry {
		floor = int64(ceil - MaxForkAncestry)
	}
	p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)

	// Request the topmost blocks to short circuit binary ancestor lookup
	head := ceil
	if head > height {
		head = height
	}
	from := int64(head) - int64(MaxHeaderFetch)
	if from < 0 {
		from = 0
	}
	// Span out with 15 block gaps into the future to catch bad head reports
	limit := 2 * MaxHeaderFetch / 16
	count := 1 + int((int64(ceil)-from)/16)
	if count > limit {
		count = limit
	}
	go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false)

	// Wait for the remote response to the head fetch
	number, hash := uint64(0), common.Hash{}

	ttl := d.requestTTL()
	timeout := time.After(ttl)

	for finished := false; !finished; {
		select {
		case <-d.cancelCh:
			return 0, errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) == 0 {
				p.log.Warn("Empty head header set")
				return 0, errEmptyHeaderSet
			}
			// Make sure the peer's reply conforms to the request
			for i := 0; i < len(headers); i++ {
				if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
					p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number)
					return 0, errInvalidChain
				}
			}
			// Check if a common ancestor was found
			finished = true
			for i := len(headers) - 1; i >= 0; i-- {
				// Skip any headers that underflow/overflow our requested set
				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
					continue
				}
				// Otherwise check if we already know the header or not
				if (d.mode == FullSync && d.blockchain.HasBlock(headers[i].Hash(), headers[i].Number.Uint64())) ||
					(d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) {
					number, hash = headers[i].Number.Uint64(), headers[i].Hash()

					// If every header is known, even future ones, the peer straight out lied about its head
					if number > height && i == limit-1 {
						p.log.Warn("Lied about chain head", "reported", height, "found", number)
						return 0, errStallingPeer
					}
					break
				}
			}

		case <-timeout:
			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
			return 0, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
	// If the head fetch already found an ancestor, return
	if !common.EmptyHash(hash) {
		if int64(number) <= floor {
			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
			return 0, errInvalidAncestor
		}
		p.log.Debug("Found common ancestor", "number", number, "hash", hash)
		return number, nil
	}
	// Ancestor not found, we need to binary search over our chain
	start, end := uint64(0), head
	if floor > 0 {
		start = uint64(floor)
	}
	for start+1 < end {
		// Split our chain interval in two, and request the hash to cross check
		check := (start + end) / 2

		ttl := d.requestTTL()
		timeout := time.After(ttl)

		go p.peer.RequestHeadersByNumber(check, 1, 0, false)

		// Wait until a reply arrives to this request
		for arrived := false; !arrived; {
			select {
			case <-d.cancelCh:
				return 0, errCancelHeaderFetch

			case packer := <-d.headerCh:
				// Discard anything not from the origin peer
				if packer.PeerId() != p.id {
					log.Debug("Received headers from incorrect peer", "peer", packer.PeerId())
					break
				}
				// Make sure the peer actually gave something valid
				headers := packer.(*headerPack).headers
				if len(headers) != 1 {
					p.log.Debug("Multiple headers for single request", "headers", len(headers))
					return 0, errBadPeer
				}
				arrived = true

				// Modify the search interval based on the response
				if (d.mode == FullSync && !d.blockchain.HasBlock(headers[0].Hash(), headers[0].Number.Uint64())) ||
					(d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) {
					end = check
					break
				}
				header := d.lightchain.GetHeaderByHash(headers[0].Hash()) // Independent of sync mode, header surely exists
				if header.Number.Uint64() != check {
					p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
					return 0, errBadPeer
				}
				start = check

			case <-timeout:
				p.log.Debug("Waiting for search header timed out", "elapsed", ttl)
				return 0, errTimeout

			case <-d.bodyCh:
			case <-d.receiptCh:
				// Out of bounds delivery, ignore
			}
		}
	}
	// Ensure valid ancestry and return
	if int64(start) <= floor {
		p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
		return 0, errInvalidAncestor
	}
	p.log.Debug("Found common ancestor", "number", start, "hash", hash)
	return start, nil
}
```
Step by step:
1. Call p.peer.RequestHeadersByNumber(uint64(from), count, 15, false) to fetch headers. The skip of 15 means one header out of every 16 blocks is requested, covering roughly the last 192 (MaxHeaderFetch) blocks below the local head; the expected block numbers are used to validate the reply when it arrives on d.headerCh.
2. When the headers arrive, iterate over them from newest to oldest and check whether each one already exists locally. The first known header is the common ancestor, and its block number is returned.
3. If no common ancestor was found this way, fall back to a binary search over the local chain, bounded below by floor (at most MaxForkAncestry blocks back): repeatedly request the header at the midpoint of the interval and check whether it exists locally, narrowing the interval until the ancestor is pinned down (see the sketch after this list); if none is found, 0 is returned.
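The fallback is in essence a binary search for the highest block number both chains share. A self-contained sketch, assuming a hasLocally callback that stands in for HasBlock/HasHeader:

```go
package main

import "fmt"

// findAncestorBinary mimics the Downloader's fallback: binary search for the
// highest block number that the local chain shares with the remote peer.
// hasLocally stands in for HasBlock/HasHeader: it reports whether the header
// the remote peer has at the given height is also present locally.
func findAncestorBinary(start, end uint64, hasLocally func(number uint64) bool) uint64 {
	for start+1 < end {
		check := (start + end) / 2
		if hasLocally(check) {
			start = check // shared up to here, the ancestor is at or above check
		} else {
			end = check // diverged already, the ancestor is below check
		}
	}
	return start
}

func main() {
	// Pretend the chains agree up to block 1000 and fork afterwards.
	forkPoint := uint64(1000)
	ancestor := findAncestorBinary(0, 1536, func(n uint64) bool { return n <= forkPoint })
	fmt.Println("common ancestor:", ancestor) // prints 1000
}
```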
Back in syncWithPeer(): once the pivot for the sync has been determined, the data to sync and the sync routines are handed to d.spawnSync(fetchers), which starts each of the passed-in sync functions in its own goroutine and waits for them all to finish.
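spawnSync's structure is a common Go fan-out pattern: run every function in its own goroutine, collect the errors on a channel, and report the first failure. A minimal sketch of that pattern (not geth's exact code):

```go
package main

import (
	"errors"
	"fmt"
)

// spawnAll runs every fetcher in its own goroutine, waits for all of them,
// and returns the first non-nil error -- the same shape as d.spawnSync.
func spawnAll(fetchers []func() error) error {
	errc := make(chan error, len(fetchers))
	for _, fn := range fetchers {
		fn := fn // capture loop variable (pre-Go 1.22 idiom)
		go func() { errc <- fn() }()
	}
	var first error
	for range fetchers {
		if err := <-errc; err != nil && first == nil {
			first = err
		}
	}
	return first
}

func main() {
	err := spawnAll([]func() error{
		func() error { return nil },                        // e.g. fetchHeaders
		func() error { return errors.New("fetch failed") }, // e.g. fetchBodies
	})
	fmt.Println("sync result:", err)
}
```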
II. The Downloader's data-fetching methods: fetchHeaders(), fetchBodies(), fetchReceipts()
```go
func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64) error {
	p.log.Debug("Directing header downloads", "origin", from)
	defer p.log.Debug("Header download terminated")

	// Create a timeout timer, and the associated header fetcher
	skeleton := true            // Skeleton assembly phase or finishing up
	request := time.Now()       // time of the last skeleton fetch request
	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
	<-timeout.C                 // timeout channel should be initially empty
	defer timeout.Stop()

	var ttl time.Duration
	getHeaders := func(from uint64) {
		request = time.Now()

		ttl = d.requestTTL()
		timeout.Reset(ttl)

		if skeleton {
			p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
		} else {
			p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
		}
	}
	// Start pulling the header chain skeleton until all is done
	getHeaders(from)

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Make sure the active peer is giving us the skeleton headers
			if packet.PeerId() != p.id {
				log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
				break
			}
			headerReqTimer.UpdateSince(request)
			timeout.Stop()

			// If the skeleton's finished, pull any remaining head headers directly from the origin
			if packet.Items() == 0 && skeleton {
				skeleton = false
				getHeaders(from)
				continue
			}
			// If no more headers are inbound, notify the content fetchers and return
			if packet.Items() == 0 {
				// Don't abort header fetches while the pivot is downloading
				if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
					p.log.Debug("No headers, waiting for pivot commit")
					select {
					case <-time.After(fsHeaderContCheck):
						getHeaders(from)
						continue
					case <-d.cancelCh:
						return errCancelHeaderFetch
					}
				}
				// Pivot done (or not in fast sync) and no more headers, terminate the process
				p.log.Debug("No more headers available")
				select {
				case d.headerProcCh <- nil:
					return nil
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}
			headers := packet.(*headerPack).headers

			// If we received a skeleton batch, resolve internals concurrently
			if skeleton {
				filled, proced, err := d.fillHeaderSkeleton(from, headers)
				if err != nil {
					p.log.Debug("Skeleton chain invalid", "err", err)
					return errInvalidChain
				}
				headers = filled[proced:]
				from += uint64(proced)
			}
			// Insert all the new headers and fetch the next batch
			if len(headers) > 0 {
				p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
				select {
				case d.headerProcCh <- headers:
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
				from += uint64(len(headers))
			}
			getHeaders(from)

		case <-timeout.C:
			if d.dropPeer == nil {
				// The dropPeer method is nil when `--copydb` is used for a local copy.
				// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
				p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id)
				break
			}
			// Header retrieval timed out, consider the peer bad and drop
			p.log.Debug("Header request timed out", "elapsed", ttl)
			headerTimeoutMeter.Mark(1)
			d.dropPeer(p.id)

			// Finish the sync gracefully instead of dumping the gathered data though
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- false:
				case <-d.cancelCh:
				}
			}
			select {
			case d.headerProcCh <- nil:
			case <-d.cancelCh:
			}
			return errBadPeer
		}
	}
}
```
Step by step:
1. getHeaders() calls p.peer.RequestHeadersByNumber() to request headers from the peer.
2. There are two request modes. The skeleton mode runs first: starting 192 (MaxHeaderFetch) blocks past the common ancestor, it requests 128 (MaxSkeletonSize) headers spaced 192 blocks apart, i.e. only the "joints" of the header chain (the sketch below works out the exact block numbers such a request covers). The non-skeleton mode requests 192 consecutive headers starting at the given position.
3. If the skeleton request returns no headers, the code falls back to the non-skeleton mode; if that also returns nothing, header fetching is finished, the content fetchers are notified, and the method returns.
4. Headers obtained in skeleton mode are handed to fillHeaderSkeleton(), which fills the gaps between the skeleton joints.
5. from is then advanced and getHeaders() is called again for the next batch.
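To make the two request shapes concrete, this sketch computes which block numbers a skeleton request covers, using the real constant values (MaxHeaderFetch = 192, MaxSkeletonSize = 128):

```go
package main

import "fmt"

const (
	MaxHeaderFetch  = 192 // headers per full request
	MaxSkeletonSize = 128 // skeleton joints per request
)

// skeletonNumbers lists the header numbers a skeleton request pulls:
// RequestHeadersByNumber(from+MaxHeaderFetch-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
// asks for MaxSkeletonSize headers, starting MaxHeaderFetch-1 past `from`,
// with MaxHeaderFetch-1 blocks skipped between consecutive ones.
func skeletonNumbers(from uint64) []uint64 {
	numbers := make([]uint64, 0, MaxSkeletonSize)
	for i := uint64(0); i < MaxSkeletonSize; i++ {
		numbers = append(numbers, from+MaxHeaderFetch-1+i*MaxHeaderFetch)
	}
	return numbers
}

func main() {
	nums := skeletonNumbers(1001) // e.g. sync resuming from block 1001
	fmt.Println("first joints:", nums[:3], "... last joint:", nums[len(nums)-1])
	// first joints: [1192 1384 1576] ... last joint: 25576
}
```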
```go
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
	log.Debug("Filling up skeleton", "from", from)
	d.queue.ScheduleSkeleton(from, skeleton)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*headerPack)
			return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh)
		}
		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
		throttle = func() bool { return false }
		reserve  = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
			return d.queue.ReserveHeaders(p, count), false, nil
		}
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
		capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
	)
	err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")

	log.Debug("Skeleton fill terminated", "err", err)

	filled, proced := d.queue.RetrieveHeaders()
	return filled, proced, err
}
```
a) Schedule the skeleton headers into the queue via d.queue.ScheduleSkeleton.
b) Run d.fetchParts(), which mainly does the following:
1. delivers received headers via d.queue.DeliverHeaders();
2. when d.queue.PendingHeaders reports pending header tasks, asks d.peers.HeaderIdlePeers for idle peers;
3. reserves pending header tasks to those idle peers via d.queue.ReserveHeaders;
4. has each reserved peer fetch its batch via p.FetchHeaders(req.From, MaxHeaderFetch).
c) Finally call d.queue.RetrieveHeaders() to collect the headers that were filled in (a toy version of this assignment loop is sketched below).
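fetchParts itself is a generic scheduler driven by callbacks. Stripping away channels, timeouts and delivery, its core assignment step is "give each idle peer a capacity-sized batch of pending tasks". A toy sketch of just that step; scheduler and its fields are illustrative, not geth types:

```go
package main

import "fmt"

// A toy version of fetchParts' scheduling core: hand pending tasks out to
// idle peers in capacity-sized batches until nothing is left to assign.
type scheduler struct {
	pending []uint64        // task numbers still to download
	idle    map[string]bool // peerID -> is the peer idle
}

func (s *scheduler) assign(capacity int) map[string][]uint64 {
	assigned := make(map[string][]uint64)
	for peer, isIdle := range s.idle {
		if !isIdle || len(s.pending) == 0 {
			continue
		}
		n := capacity
		if n > len(s.pending) {
			n = len(s.pending)
		}
		// Reserve a batch for this peer (mirrors queue.ReserveHeaders)
		// and mark the peer busy until it delivers.
		assigned[peer] = s.pending[:n]
		s.pending = s.pending[n:]
		s.idle[peer] = false
	}
	return assigned
}

func main() {
	s := &scheduler{
		pending: []uint64{1, 2, 3, 4, 5, 6, 7},
		idle:    map[string]bool{"peerA": true, "peerB": true},
	}
	fmt.Println(s.assign(4)) // e.g. map[peerA:[1 2 3 4] peerB:[5 6 7]]
}
```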
The other fetch methods, d.fetchBodies() and d.fetchReceipts(), follow the same flow as fetchHeaders() and are in fact somewhat simpler.
III. The Downloader's data-processing methods: d.processHeaders(), d.processFastSyncContent(latest), d.processFullSyncContent

1. The d.processHeaders() method
```go
func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
	// Keep a count of uncertain headers to roll back
	rollback := []*types.Header{}
	defer func() {
		if len(rollback) > 0 {
			// Flatten the headers and roll them back
			hashes := make([]common.Hash, len(rollback))
			for i, header := range rollback {
				hashes[i] = header.Hash()
			}
			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
			if d.mode != LightSync {
				lastFastBlock = d.blockchain.CurrentFastBlock().Number()
				lastBlock = d.blockchain.CurrentBlock().Number()
			}
			d.lightchain.Rollback(hashes)
			curFastBlock, curBlock := common.Big0, common.Big0
			if d.mode != LightSync {
				curFastBlock = d.blockchain.CurrentFastBlock().Number()
				curBlock = d.blockchain.CurrentBlock().Number()
			}
			log.Warn("Rolled back headers", "count", len(hashes),
				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
		}
	}()

	// Wait for batches of headers to process
	gotHeaders := false

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderProcessing

		case headers := <-d.headerProcCh:
			// Terminate header processing if we synced up
			if len(headers) == 0 {
				// Notify everyone that headers are fully processed
				for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
					select {
					case ch <- false:
					case <-d.cancelCh:
					}
				}
				if d.mode != LightSync {
					head := d.blockchain.CurrentBlock()
					if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
						return errStallingPeer
					}
				}
				if d.mode == FastSync || d.mode == LightSync {
					head := d.lightchain.CurrentHeader()
					if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
						return errStallingPeer
					}
				}
				// Disable any rollback and return
				rollback = nil
				return nil
			}
			// Otherwise split the chunk of headers into batches and process them
			gotHeaders = true

			for len(headers) > 0 {
				// Terminate if something failed in between processing chunks
				select {
				case <-d.cancelCh:
					return errCancelHeaderProcessing
				default:
				}
				// Select the next chunk of headers to import
				limit := maxHeadersProcess
				if limit > len(headers) {
					limit = len(headers)
				}
				chunk := headers[:limit]

				// In case of header only syncing, validate the chunk immediately
				if d.mode == FastSync || d.mode == LightSync {
					// Collect the yet unknown headers to mark them as uncertain
					unknown := make([]*types.Header, 0, len(headers))
					for _, header := range chunk {
						if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
							unknown = append(unknown, header)
						}
					}
					// If we're importing pure headers, verify based on their recentness
					frequency := fsHeaderCheckFrequency
					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
						frequency = 1
					}
					if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
						// If some headers were inserted, add them too to the rollback list
						if n > 0 {
							rollback = append(rollback, chunk[:n]...)
						}
						log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
						return errInvalidChain
					}
					// All verifications passed, store newly found uncertain headers
					rollback = append(rollback, unknown...)
					if len(rollback) > fsHeaderSafetyNet {
						rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
					}
				}
				// Unless we're doing light chains, schedule the headers for associated content retrieval
				if d.mode == FullSync || d.mode == FastSync {
					// If we've reached the allowed number of pending headers, stall a bit
					for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
						select {
						case <-d.cancelCh:
							return errCancelHeaderProcessing
						case <-time.After(time.Second):
						}
					}
					// Otherwise insert the headers for content retrieval
					inserts := d.queue.Schedule(chunk, origin)
					if len(inserts) != len(chunk) {
						log.Debug("Stale headers")
						return errBadPeer
					}
				}
				headers = headers[limit:]
				origin += uint64(limit)
			}
			// Signal the content downloaders of the availability of new tasks
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- true:
				default:
				}
			}
		}
	}
}
```
Step by step:
1. Receive the header batches that fetchHeaders() sends over d.headerProcCh, and split each batch into chunks (the batching loop is sketched below).
2. In FastSync or LightSync mode, each chunk is validated and inserted directly into the header chain via d.lightchain.InsertHeaderChain(chunk, frequency).
3. In FullSync or FastSync mode, d.queue.Schedule(chunk, origin) schedules the chunk in the downloader's queue so the associated bodies (and receipts) can be retrieved.
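The batching logic at the heart of processHeaders is simple: peel off at most maxHeadersProcess headers per iteration and advance origin. A standalone sketch, where process is an illustrative callback and maxHeadersProcess is 2048 as in geth:

```go
package main

import "fmt"

const maxHeadersProcess = 2048 // headers imported per batch, as in geth

// processInChunks mirrors processHeaders' inner loop: peel off at most
// maxHeadersProcess headers at a time, hand them to process(), and keep
// track of the chain offset (origin) the next chunk starts at.
func processInChunks(headers []uint64, origin uint64, process func(chunk []uint64, origin uint64)) uint64 {
	for len(headers) > 0 {
		limit := maxHeadersProcess
		if limit > len(headers) {
			limit = len(headers)
		}
		chunk := headers[:limit]
		process(chunk, origin)
		headers = headers[limit:]
		origin += uint64(limit)
	}
	return origin
}

func main() {
	// Pretend we received 5000 header numbers starting at block 10001.
	headers := make([]uint64, 5000)
	for i := range headers {
		headers[i] = 10001 + uint64(i)
	}
	end := processInChunks(headers, 10001, func(chunk []uint64, origin uint64) {
		fmt.Printf("importing %d headers from %d\n", len(chunk), origin)
	})
	fmt.Println("next origin:", end) // 15001
}
```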
2. The processFastSyncContent() method
```go
func (d *Downloader) processFastSyncContent(latest *types.Header) error {
	// Start syncing state of the reported head block. This should get us most of
	// the state of the pivot block.
	stateSync := d.syncState(latest.Root)
	defer stateSync.Cancel()
	go func() {
		if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
			d.queue.Close() // wake up WaitResults
		}
	}()
	// Figure out the ideal pivot block. Note, that this goalpost may move if the
	// sync takes long enough for the chain head to move significantly.
	pivot := uint64(0)
	if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) {
		pivot = height - uint64(fsMinFullBlocks)
	}
	// To cater for moving pivot points, track the pivot block and subsequently
	// accumulated download results separately.
	var (
		oldPivot *fetchResult   // Locked in pivot block, might change eventually
		oldTail  []*fetchResult // Downloaded content after the pivot
	)
	for {
		// Wait for the next batch of downloaded data to be available, and if the pivot
		// block became stale, move the goalpost
		results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
		if len(results) == 0 {
			// If pivot sync is done, stop
			if oldPivot == nil {
				return stateSync.Cancel()
			}
			// If sync failed, stop
			select {
			case <-d.cancelCh:
				return stateSync.Cancel()
			default:
			}
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		if oldPivot != nil {
			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
		}
		// Split around the pivot block and process the two sides via fast/full sync
		if atomic.LoadInt32(&d.committed) == 0 {
			latest = results[len(results)-1].Header
			if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
				log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
				pivot = height - uint64(fsMinFullBlocks)
			}
		}
		P, beforeP, afterP := splitAroundPivot(pivot, results)
		if err := d.commitFastSyncData(beforeP, stateSync); err != nil {
			return err
		}
		if P != nil {
			// If new pivot block found, cancel old state retrieval and restart
			if oldPivot != P {
				stateSync.Cancel()

				stateSync = d.syncState(P.Header.Root)
				defer stateSync.Cancel()
				go func() {
					if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
						d.queue.Close() // wake up WaitResults
					}
				}()
				oldPivot = P
			}
			// Wait for completion, occasionally checking for pivot staleness
			select {
			case <-stateSync.done:
				if stateSync.err != nil {
					return stateSync.err
				}
				if err := d.commitPivotBlock(P); err != nil {
					return err
				}
				oldPivot = nil

			case <-time.After(time.Second):
				oldTail = afterP
				continue
			}
		}
		// Fast sync done, pivot commit done, full import
		if err := d.importBlockResults(afterP); err != nil {
			return err
		}
	}
}
```
Step by step:
1. Start syncing the state at the reported head and compute the pivot block number.
2. Repeatedly pull batches of completed results from d.queue's result cache.
3. If results is empty and no pivot is in flight (oldPivot == nil), the sync is complete and the method returns.
4. Split the results around the pivot value: the pivot's own result, the results before it, and the results after it (splitAroundPivot; sketched below).
5. Call commitFastSyncData to insert the results before the pivot into the local chain, together with their receipts and transaction data.
6. Once the state sync completes, insert the pivot's own result via commitPivotBlock, which calls FastSyncCommitHead to record the pivot's hash.
7. Insert the results after the pivot via d.importBlockResults; these go through normal block processing, so the downloaded receipt data is not written for them.
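splitAroundPivot is not shown above; its logic is a simple three-way partition. A sketch with a stripped-down result type (only the block number matters here):

```go
package main

import "fmt"

// result is a stand-in for the Downloader's fetchResult, reduced to the
// block number, which is all splitAroundPivot needs to look at.
type result struct{ Number uint64 }

// splitAroundPivot partitions downloaded results into the pivot block itself,
// everything before it (committed with receipts, fast-sync style), and
// everything after it (imported via full block processing).
func splitAroundPivot(pivot uint64, results []*result) (p *result, before, after []*result) {
	for _, r := range results {
		switch {
		case r.Number < pivot:
			before = append(before, r)
		case r.Number == pivot:
			p = r
		default:
			after = append(after, r)
		}
	}
	return p, before, after
}

func main() {
	results := []*result{{98}, {99}, {100}, {101}, {102}}
	p, before, after := splitAroundPivot(100, results)
	fmt.Println("pivot:", p.Number, "before:", len(before), "after:", len(after))
	// pivot: 100 before: 2 after: 2
}
```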
3. The processFullSyncContent() method
```go
func (d *Downloader) processFullSyncContent() error {
	for {
		results := d.queue.Results(true)
		if len(results) == 0 {
			return nil
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		if err := d.importBlockResults(results); err != nil {
			return err
		}
	}
}

func (d *Downloader) importBlockResults(results []*fetchResult) error {
	// Check for any early termination requests
	if len(results) == 0 {
		return nil
	}
	select {
	case <-d.quitCh:
		return errCancelContentProcessing
	default:
	}
	// Retrieve a batch of results to import
	first, last := results[0].Header, results[len(results)-1].Header
	log.Debug("Inserting downloaded chain", "items", len(results),
		"firstnum", first.Number, "firsthash", first.Hash(),
		"lastnum", last.Number, "lasthash", last.Hash(),
	)
	blocks := make([]*types.Block, len(results))
	for i, result := range results {
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
	}
	if index, err := d.blockchain.InsertChain(blocks); err != nil {
		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
		return errInvalidChain
	}
	return nil
}
```
processFullSyncContent is straightforward: it keeps draining cached results from the queue, reassembling the blocks, and inserting them into the local blockchain.
Summary: the Downloader looks very complex, but the logic is manageable; without light mode it would read much more easily. Light mode is not very mature and sees little use in practice. Fast mode adds the notion of a pivot on top of full mode, which makes the processing considerably more involved, but because fast mode stores receipt data locally, it greatly reduces the time spent validating block transactions. For a deeper understanding of how fast sync works, see the original fast sync pull request for go-ethereum: https://github.com/ethereum/go-ethereum/pull/1889