func (this scheduler) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
	this.lock.Lock()
	defer this.lock.Unlock()

	// Short circuit if the data was never requested
	request := this.headerPendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	headerReqTimer.UpdateSince(request.Time)
	delete(this.headerPendPool, id)

	// Ensure headers can be mapped onto the skeleton chain
	target := this.headerTaskPool[request.From].Hash()

	log.Error("DeliverHeaders", "length", len(headers))

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			log.Error("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
			accepted = false
		} else if headers[len(headers)-1].Hash() != target {
			log.Error("Last header broke skeleton structure", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
			accepted = false
		}
	}
	if accepted {
		for i, header := range headers[1:] {
			hash := header.Hash()
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash, "expected", want)
				accepted = false
				break
			}
			if headers[i].Hash() != header.ParentHash {
				log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash)
				accepted = false
				break
			}
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		log.Trace("Skeleton filling not accepted", "peer", id, "from", request.From)

		miss := this.headerPeerMiss[id]
		if miss == nil {
			this.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = this.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

		this.headerTaskQueue.Push(request.From, -float32(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(this.headerResults[request.From-this.headerOffset:], headers)
	delete(this.headerTaskPool, request.From)

	ready := 0
	for this.headerProced+ready < len(this.headerResults) && this.headerResults[this.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non blocking)
		process := make([]*types.Header, ready)
		copy(process, this.headerResults[this.headerProced:this.headerProced+ready])

		select {
		case headerProcCh <- process:
			log.Trace("Pre-scheduled new headers", "peer", id, "count", len(process), "from", process[0].Number)
			this.headerProced += len(process)
		default:
		}
	}
	// Check for termination and return
	if len(this.headerTaskPool) == 0 {
		this.headerContCh <- false
	}
	return len(headers), nil
}
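The acceptance logic above boils down to three checks on the delivered batch: it must start at the requested height, end on the skeleton target hash, and be internally linked by parent hashes. Below is a minimal, self-contained sketch of just that validation, using a hypothetical fakeHeader type with plain uint64 numbers and [32]byte hashes instead of types.Header; it is an illustration of the check, not code from the scheduler.

package main

import "fmt"

// fakeHeader is a stand-in for types.Header, used only for this illustration.
type fakeHeader struct {
	Number     uint64
	Hash       [32]byte
	ParentHash [32]byte
}

// validBatch mirrors the acceptance checks in DeliverHeaders: correct length,
// correct starting height, correct skeleton target, and unbroken ancestry.
func validBatch(headers []fakeHeader, from uint64, target [32]byte, maxFetch int) bool {
	if len(headers) != maxFetch {
		return false
	}
	if headers[0].Number != from || headers[len(headers)-1].Hash != target {
		return false
	}
	for i, h := range headers[1:] {
		// headers[i] is the predecessor of h, so numbers must increase by one
		// and each header must reference its predecessor's hash.
		if h.Number != from+1+uint64(i) || h.ParentHash != headers[i].Hash {
			return false
		}
	}
	return true
}

func main() {
	// Build a toy chain of three linked headers starting at height 10.
	headers := make([]fakeHeader, 3)
	for i := range headers {
		headers[i].Number = 10 + uint64(i)
		headers[i].Hash[0] = byte(i + 1)
		if i > 0 {
			headers[i].ParentHash = headers[i-1].Hash
		}
	}
	fmt.Println(validBatch(headers, 10, headers[2].Hash, 3)) // true
}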