diff --git a/les/api_backend.go b/les/api_backend.go
index ef5846504236..56f617a7db50 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -174,10 +174,10 @@ func (b *LesApiBackend) AccountManager() *accounts.Manager {
 }

 func (b *LesApiBackend) BloomStatus() (uint64, uint64) {
-	if b.eth.bbIndexer == nil {
+	if b.eth.bloomIndexer == nil {
 		return 0, 0
 	}
-	sections, _, _ := b.eth.bbIndexer.Sections()
+	sections, _, _ := b.eth.bloomIndexer.Sections()
 	return light.BloomTrieFrequency, sections
 }

diff --git a/les/backend.go b/les/backend.go
index 52ad495b805f..3a68d13ebe77 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -62,8 +62,8 @@ type LightEthereum struct {
 	// DB interfaces
 	chainDb ethdb.Database // Block chain database

-	bloomRequests                     chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
-	bbIndexer, chtIndexer, bltIndexer *core.ChainIndexer
+	bloomRequests                              chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
+	bloomIndexer, chtIndexer, bloomTrieIndexer *core.ChainIndexer

 	ApiBackend *LesApiBackend

@@ -92,29 +92,29 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
 	quitSync := make(chan struct{})

 	leth := &LightEthereum{
-		chainConfig:    chainConfig,
-		chainDb:        chainDb,
-		eventMux:       ctx.EventMux,
-		peers:          peers,
-		reqDist:        newRequestDistributor(peers, quitSync),
-		accountManager: ctx.AccountManager,
-		engine:         eth.CreateConsensusEngine(ctx, config, chainConfig, chainDb),
-		shutdownChan:   make(chan bool),
-		networkId:      config.NetworkId,
-		bloomRequests:  make(chan chan *bloombits.Retrieval),
-		bbIndexer:      eth.NewBloomIndexer(chainDb, light.BloomTrieFrequency),
-		chtIndexer:     light.NewChtIndexer(chainDb, true),
-		bltIndexer:     light.NewBloomTrieIndexer(chainDb, true),
+		chainConfig:      chainConfig,
+		chainDb:          chainDb,
+		eventMux:         ctx.EventMux,
+		peers:            peers,
+		reqDist:          newRequestDistributor(peers, quitSync),
+		accountManager:   ctx.AccountManager,
+		engine:           eth.CreateConsensusEngine(ctx, config, chainConfig, chainDb),
+		shutdownChan:     make(chan bool),
+		networkId:        config.NetworkId,
+		bloomRequests:    make(chan chan *bloombits.Retrieval),
+		bloomIndexer:     eth.NewBloomIndexer(chainDb, light.BloomTrieFrequency),
+		chtIndexer:       light.NewChtIndexer(chainDb, true),
+		bloomTrieIndexer: light.NewBloomTrieIndexer(chainDb, true),
 	}

 	leth.relay = NewLesTxRelay(peers, leth.reqDist)
 	leth.serverPool = newServerPool(chainDb, quitSync, &leth.wg)
 	leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool)
-	leth.odr = NewLesOdr(chainDb, leth.chtIndexer, leth.bltIndexer, leth.bbIndexer, leth.retriever)
+	leth.odr = NewLesOdr(chainDb, leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer, leth.retriever)
 	if leth.blockchain, err = light.NewLightChain(leth.odr, leth.chainConfig, leth.engine); err != nil {
 		return nil, err
 	}
-	leth.bbIndexer.Start(leth.blockchain)
+	leth.bloomIndexer.Start(leth.blockchain)
 	// Rewind the chain in case of an incompatible config upgrade.
 	if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
 		log.Warn("Rewinding chain to upgrade configuration", "err", compat)
@@ -233,14 +233,14 @@ func (s *LightEthereum) Start(srvr *p2p.Server) error {
 // Ethereum protocol.
 func (s *LightEthereum) Stop() error {
 	s.odr.Stop()
-	if s.bbIndexer != nil {
-		s.bbIndexer.Close()
+	if s.bloomIndexer != nil {
+		s.bloomIndexer.Close()
 	}
 	if s.chtIndexer != nil {
 		s.chtIndexer.Close()
 	}
-	if s.bltIndexer != nil {
-		s.bltIndexer.Close()
+	if s.bloomTrieIndexer != nil {
+		s.bloomTrieIndexer.Close()
 	}
 	s.blockchain.Stop()
 	s.protocolManager.Stop()
diff --git a/les/handler.go b/les/handler.go
index 287eca99b885..de07b7244622 100644
--- a/les/handler.go
+++ b/les/handler.go
@@ -52,14 +52,14 @@ const (
 	ethVersion = 63 // equivalent eth version for the downloader

-	MaxHeaderFetch    = 192 // Amount of block headers to be fetched per retrieval request
-	MaxBodyFetch      = 32  // Amount of block bodies to be fetched per retrieval request
-	MaxReceiptFetch   = 128 // Amount of transaction receipts to allow fetching per request
-	MaxCodeFetch      = 64  // Amount of contract codes to allow fetching per request
-	MaxProofsFetch    = 64  // Amount of merkle proofs to be fetched per retrieval request
-	MaxPPTProofsFetch = 64  // Amount of merkle proofs to be fetched per retrieval request
-	MaxTxSend         = 64  // Amount of transactions to be send per request
-	MaxTxStatus       = 256 // Amount of transactions to queried per request
+	MaxHeaderFetch           = 192 // Amount of block headers to be fetched per retrieval request
+	MaxBodyFetch             = 32  // Amount of block bodies to be fetched per retrieval request
+	MaxReceiptFetch          = 128 // Amount of transaction receipts to allow fetching per request
+	MaxCodeFetch             = 64  // Amount of contract codes to allow fetching per request
+	MaxProofsFetch           = 64  // Amount of merkle proofs to be fetched per retrieval request
+	MaxHelperTrieProofsFetch = 64  // Amount of merkle proofs to be fetched per retrieval request
+	MaxTxSend                = 64  // Amount of transactions to be send per request
+	MaxTxStatus              = 256 // Amount of transactions to queried per request

 	disableClientRemovePeer = false
 )

@@ -318,7 +318,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
 	}
 }

-var reqList = []uint64{GetBlockHeadersMsg, GetBlockBodiesMsg, GetCodeMsg, GetReceiptsMsg, GetProofsV1Msg, SendTxMsg, SendTxV2Msg, GetTxStatusMsg, GetHeaderProofsMsg, GetProofsV2Msg, GetPPTProofsMsg}
+var reqList = []uint64{GetBlockHeadersMsg, GetBlockBodiesMsg, GetCodeMsg, GetReceiptsMsg, GetProofsV1Msg, SendTxMsg, SendTxV2Msg, GetTxStatusMsg, GetHeaderProofsMsg, GetProofsV2Msg, GetHelperTrieProofsMsg}

 // handleMsg is invoked whenever an inbound message is received from a remote
 // peer. The remote connection is torn down upon returning any error.
@@ -835,7 +835,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			proofs []ChtResp
 		)
 		reqCnt := len(req.Reqs)
-		if reject(uint64(reqCnt), MaxPPTProofsFetch) {
+		if reject(uint64(reqCnt), MaxHelperTrieProofsFetch) {
 			return errResp(ErrRequestRejected, "")
 		}
 		trieDb := ethdb.NewTable(pm.chainDb, light.ChtTablePrefix)
@@ -862,12 +862,12 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
 		return p.SendHeaderProofs(req.ReqID, bv, proofs)

-	case GetPPTProofsMsg:
-		p.Log().Trace("Received PPT proof request")
+	case GetHelperTrieProofsMsg:
+		p.Log().Trace("Received helper trie proof request")
 		// Decode the retrieval message
 		var req struct {
 			ReqID uint64
-			Reqs  []PPTReq
+			Reqs  []HelperTrieReq
 		}
 		if err := msg.Decode(&req); err != nil {
 			return errResp(ErrDecode, "msg %v: %v", msg, err)
 		}
@@ -878,15 +878,15 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			auxData [][]byte
 		)
 		reqCnt := len(req.Reqs)
-		if reject(uint64(reqCnt), MaxPPTProofsFetch) {
+		if reject(uint64(reqCnt), MaxHelperTrieProofsFetch) {
 			return errResp(ErrRequestRejected, "")
 		}

 		var (
-			lastIdx   uint64
-			lastPPTId uint
-			root      common.Hash
-			tr        *trie.Trie
+			lastIdx  uint64
+			lastType uint
+			root     common.Hash
+			tr       *trie.Trie
 		)

 		nodes := light.NewNodeSet()
@@ -895,18 +895,18 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			if nodes.DataSize()+auxBytes >= softResponseLimit {
 				break
 			}
-			if tr == nil || req.PPTId != lastPPTId || req.TrieIdx != lastIdx {
+			if tr == nil || req.HelperTrieType != lastType || req.TrieIdx != lastIdx {
 				var prefix string
-				root, prefix = pm.getPPT(req.PPTId, req.TrieIdx)
+				root, prefix = pm.getHelperTrie(req.HelperTrieType, req.TrieIdx)
 				if root != (common.Hash{}) {
 					if t, err := trie.New(root, ethdb.NewTable(pm.chainDb, prefix)); err == nil {
 						tr = t
 					}
 				}
-				lastPPTId = req.PPTId
+				lastType = req.HelperTrieType
 				lastIdx = req.TrieIdx
 			}
-			if req.AuxReq == PPTAuxRoot {
+			if req.AuxReq == auxRoot {
 				var data []byte
 				if root != (common.Hash{}) {
 					data = root[:]
 				}
@@ -918,7 +918,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 				tr.Prove(req.Key, req.FromLevel, nodes)
 			}
 			if req.AuxReq != 0 {
-				data := pm.getPPTAuxData(req)
+				data := pm.getHelperTrieAuxData(req)
 				auxData = append(auxData, data)
 				auxBytes += len(data)
 			}
@@ -927,7 +927,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		proofs := nodes.NodeList()
 		bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost)
 		pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
-		return p.SendPPTProofs(req.ReqID, bv, PPTResps{Proofs: proofs, AuxData: auxData})
+		return p.SendHelperTrieProofs(req.ReqID, bv, HelperTrieResps{Proofs: proofs, AuxData: auxData})

 	case HeaderProofsMsg:
 		if pm.odr == nil {
 			return errResp(ErrUnexpectedResponse, "")
@@ -949,15 +949,15 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			Obj:     resp.Data,
 		}

-	case PPTProofsMsg:
+	case HelperTrieProofsMsg:
 		if pm.odr == nil {
 			return errResp(ErrUnexpectedResponse, "")
 		}

-		p.Log().Trace("Received PPT proof response")
+		p.Log().Trace("Received helper trie proof response")
 		var resp struct {
 			ReqID, BV uint64
-			Data      PPTResps
+			Data      HelperTrieResps
 		}
 		if err := msg.Decode(&resp); err != nil {
 			return errResp(ErrDecode, "msg %v: %v", msg, err)
@@ -965,7 +965,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {

 		p.fcServer.GotReply(resp.ReqID, resp.BV)
 		deliverMsg = &Msg{
-			MsgType: MsgPPTProofs,
+			MsgType: MsgHelperTrieProofs,
 			ReqID:   resp.ReqID,
 			Obj:     resp.Data,
 		}
@@ -1077,22 +1077,22 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 	return nil
 }

-// getPPT returns the post-processed trie root for the given trie ID and section index
-func (pm *ProtocolManager) getPPT(id uint, idx uint64) (common.Hash, string) {
+// getHelperTrie returns the post-processed trie root for the given trie ID and section index
+func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, string) {
 	switch id {
-	case PPTChain:
+	case htCanonical:
 		sectionHead := core.GetCanonicalHash(pm.chainDb, (idx+1)*light.ChtFrequency-1)
 		return light.GetChtV2Root(pm.chainDb, idx, sectionHead), light.ChtTablePrefix
-	case PPTBloomBits:
+	case htBloomBits:
 		sectionHead := core.GetCanonicalHash(pm.chainDb, (idx+1)*light.BloomTrieFrequency-1)
 		return light.GetBloomTrieRoot(pm.chainDb, idx, sectionHead), light.BloomTrieTablePrefix
 	}
 	return common.Hash{}, ""
 }

-// getPPTAuxData returns requested auxiliary data for the given PPT request
-func (pm *ProtocolManager) getPPTAuxData(req PPTReq) []byte {
-	if req.PPTId == PPTChain && req.AuxReq == PPTChainAuxHeader {
+// getHelperTrieAuxData returns requested auxiliary data for the given HelperTrie request
+func (pm *ProtocolManager) getHelperTrieAuxData(req HelperTrieReq) []byte {
+	if req.HelperTrieType == htCanonical && req.AuxReq == auxHeader {
 		if len(req.Key) != 8 {
 			return nil
 		}
diff --git a/les/odr.go b/les/odr.go
index 30c04564c469..986630dbfd78 100644
--- a/les/odr.go
+++ b/les/odr.go
@@ -28,16 +28,16 @@ import (
 // LesOdr implements light.OdrBackend
 type LesOdr struct {
 	db                                   ethdb.Database
-	chtIndexer, bltIndexer, bloomIndexer *core.ChainIndexer
+	chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer
 	retriever                            *retrieveManager
 	stop                                 chan struct{}
 }

-func NewLesOdr(db ethdb.Database, chtIndexer, bltIndexer, bloomIndexer *core.ChainIndexer, retriever *retrieveManager) *LesOdr {
+func NewLesOdr(db ethdb.Database, chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer, retriever *retrieveManager) *LesOdr {
 	return &LesOdr{
 		db:           db,
 		chtIndexer:   chtIndexer,
-		bltIndexer:   bltIndexer,
+		bloomTrieIndexer: bloomTrieIndexer,
 		bloomIndexer: bloomIndexer,
 		retriever:    retriever,
 		stop:         make(chan struct{}),
@@ -59,9 +59,9 @@ func (odr *LesOdr) ChtIndexer() *core.ChainIndexer {
 	return odr.chtIndexer
 }

-// BltIndexer returns the bloom trie chain indexer
-func (odr *LesOdr) BltIndexer() *core.ChainIndexer {
-	return odr.bltIndexer
+// BloomTrieIndexer returns the bloom trie chain indexer
+func (odr *LesOdr) BloomTrieIndexer() *core.ChainIndexer {
+	return odr.bloomTrieIndexer
 }

 // BloomIndexer returns the bloombits chain indexer
@@ -76,7 +76,7 @@ const (
 	MsgProofsV1
 	MsgProofsV2
 	MsgHeaderProofs
-	MsgPPTProofs
+	MsgHelperTrieProofs
 )

 // Msg encodes a LES message that delivers reply data for a request
diff --git a/les/odr_requests.go b/les/odr_requests.go
index aaadf12504a8..937a4f1d9d3f 100644
--- a/les/odr_requests.go
+++ b/les/odr_requests.go
@@ -310,21 +310,24 @@ func (r *CodeRequest) Validate(db ethdb.Database, msg *Msg) error {
 }

 const (
-	PPTChain = iota
-	PPTBloomBits
-
-	PPTAuxRoot        = 1
-	PPTChainAuxHeader = 2
+	// helper trie type constants
+	htCanonical = iota // Canonical hash trie
+	htBloomBits        // BloomBits trie
+
+	// applicable for all helper trie requests
+	auxRoot = 1
+	// applicable for htCanonical
+	auxHeader = 2
 )

-type PPTReq struct {
-	PPTId             uint
+type HelperTrieReq struct {
+	HelperTrieType    uint
 	TrieIdx           uint64
 	Key               []byte
 	FromLevel, AuxReq uint
 }

-type PPTResps struct { // describes all responses, not just a single one
+type HelperTrieResps struct { // describes all responses, not just a single one
 	Proofs  light.NodeList
 	AuxData [][]byte
 }
@@ -351,7 +354,7 @@ func (r *ChtRequest) GetCost(peer *peer) uint64 {
 	case lpv1:
 		return peer.GetRequestCost(GetHeaderProofsMsg, 1)
 	case lpv2:
-		return peer.GetRequestCost(GetPPTProofsMsg, 1)
+		return peer.GetRequestCost(GetHelperTrieProofsMsg, 1)
 	default:
 		panic(nil)
 	}
@@ -362,7 +365,7 @@ func (r *ChtRequest) CanSend(peer *peer) bool {
 	peer.lock.RLock()
 	defer peer.lock.RUnlock()

-	return peer.headInfo.Number >= light.PPTConfirmations && r.ChtNum <= (peer.headInfo.Number-light.PPTConfirmations)/light.ChtFrequency
+	return peer.headInfo.Number >= light.HelperTrieConfirmations && r.ChtNum <= (peer.headInfo.Number-light.HelperTrieConfirmations)/light.ChtFrequency
 }

 // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
@@ -370,13 +373,13 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
 	peer.Log().Debug("Requesting CHT", "cht", r.ChtNum, "block", r.BlockNum)
 	var encNum [8]byte
 	binary.BigEndian.PutUint64(encNum[:], r.BlockNum)
-	req := PPTReq{
-		PPTId:   PPTChain,
-		TrieIdx: r.ChtNum,
-		Key:     encNum[:],
-		AuxReq:  PPTChainAuxHeader,
+	req := HelperTrieReq{
+		HelperTrieType: htCanonical,
+		TrieIdx:        r.ChtNum,
+		Key:            encNum[:],
+		AuxReq:         auxHeader,
 	}
-	return peer.RequestPPTProofs(reqID, r.GetCost(peer), []PPTReq{req})
+	return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []HelperTrieReq{req})
 }

 // Valid processes an ODR request reply message from the LES network
@@ -412,8 +415,8 @@ func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {
 		r.Header = proof.Header
 		r.Proof = light.NodeList(proof.Proof).NodeSet()
 		r.Td = node.Td
-	case MsgPPTProofs:
-		resp := msg.Obj.(PPTResps)
+	case MsgHelperTrieProofs:
+		resp := msg.Obj.(HelperTrieResps)
 		if len(resp.AuxData) != 1 {
 			return errInvalidEntryCount
 		}
@@ -461,7 +464,7 @@ func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {
 }

 type BloomReq struct {
-	BltNum, BitIdx, SectionIdx, FromLevel uint64
+	BloomTrieNum, BitIdx, SectionIdx, FromLevel uint64
 }

 // ODR request type for requesting headers by Canonical Hash Trie, see LesOdrRequest interface
@@ -470,7 +473,7 @@ type BloomRequest light.BloomRequest
 // GetCost returns the cost of the given ODR request according to the serving
 // peer's cost table (implementation of LesOdrRequest)
 func (r *BloomRequest) GetCost(peer *peer) uint64 {
-	return peer.GetRequestCost(GetPPTProofsMsg, len(r.SectionIdxList))
+	return peer.GetRequestCost(GetHelperTrieProofsMsg, len(r.SectionIdxList))
 }

 // CanSend tells if a certain peer is suitable for serving the given request
@@ -481,39 +484,39 @@ func (r *BloomRequest) CanSend(peer *peer) bool {
 	if peer.version < lpv2 {
 		return false
 	}
-	return peer.headInfo.Number >= light.PPTConfirmations && r.BltNum <= (peer.headInfo.Number-light.PPTConfirmations)/light.BloomTrieFrequency
+	return peer.headInfo.Number >= light.HelperTrieConfirmations && r.BloomTrieNum <= (peer.headInfo.Number-light.HelperTrieConfirmations)/light.BloomTrieFrequency
 }

 // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
 func (r *BloomRequest) Request(reqID uint64, peer *peer) error {
-	peer.Log().Debug("Requesting BloomBits", "blt", r.BltNum, "bitIdx", r.BitIdx, "sections", r.SectionIdxList)
-	reqs := make([]PPTReq, len(r.SectionIdxList))
+	peer.Log().Debug("Requesting BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIdxList)
+	reqs := make([]HelperTrieReq, len(r.SectionIdxList))

 	var encNumber [10]byte
 	binary.BigEndian.PutUint16(encNumber[0:2], uint16(r.BitIdx))

 	for i, sectionIdx := range r.SectionIdxList {
 		binary.BigEndian.PutUint64(encNumber[2:10], sectionIdx)
-		reqs[i] = PPTReq{
-			PPTId:   PPTBloomBits,
-			TrieIdx: r.BltNum,
-			Key:     common.CopyBytes(encNumber[:]),
+		reqs[i] = HelperTrieReq{
+			HelperTrieType: htBloomBits,
+			TrieIdx:        r.BloomTrieNum,
+			Key:            common.CopyBytes(encNumber[:]),
 		}
 	}
-	return peer.RequestPPTProofs(reqID, r.GetCost(peer), reqs)
+	return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), reqs)
 }

 // Valid processes an ODR request reply message from the LES network
 // returns true and stores results in memory if the message was a valid reply
 // to the request (implementation of LesOdrRequest)
 func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error {
-	log.Debug("Validating BloomBits", "blt", r.BltNum, "bitIdx", r.BitIdx, "sections", r.SectionIdxList)
+	log.Debug("Validating BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIdxList)

 	// Ensure we have a correct message with a single proof element
-	if msg.MsgType != MsgPPTProofs {
+	if msg.MsgType != MsgHelperTrieProofs {
 		return errInvalidMessageType
 	}
-	resps := msg.Obj.(PPTResps)
+	resps := msg.Obj.(HelperTrieResps)
 	proofs := resps.Proofs
 	nodeSet := proofs.NodeSet()
 	reads := &readTraceDB{db: nodeSet}
@@ -526,7 +529,7 @@ func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error {

 	for i, idx := range r.SectionIdxList {
 		binary.BigEndian.PutUint64(encNumber[2:10], idx)
-		value, err, _ := trie.VerifyProof(r.BltRoot, encNumber[:], reads)
+		value, err, _ := trie.VerifyProof(r.BloomTrieRoot, encNumber[:], reads)
 		if err != nil {
 			return err
 		}
diff --git a/les/peer.go b/les/peer.go
index ee7f2adce6af..104afb6dc9ed 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -227,9 +227,9 @@ func (p *peer) SendHeaderProofs(reqID, bv uint64, proofs []ChtResp) error {
 	return sendResponse(p.rw, HeaderProofsMsg, reqID, bv, proofs)
 }

-// SendPPTProofs sends a batch of PPT proofs, corresponding to the ones requested.
-func (p *peer) SendPPTProofs(reqID, bv uint64, resp PPTResps) error {
-	return sendResponse(p.rw, PPTProofsMsg, reqID, bv, resp)
+// SendHelperTrieProofs sends a batch of HelperTrie proofs, corresponding to the ones requested.
+func (p *peer) SendHelperTrieProofs(reqID, bv uint64, resp HelperTrieResps) error {
+	return sendResponse(p.rw, HelperTrieProofsMsg, reqID, bv, resp)
 }

 // SendTxStatus sends a batch of transaction status records, corresponding to the ones requested.
@@ -285,23 +285,23 @@ func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
 }

-// RequestPPTProofs fetches a batch of PPT merkle proofs from a remote node.
-func (p *peer) RequestPPTProofs(reqID, cost uint64, reqs []PPTReq) error {
-	p.Log().Debug("Fetching batch of PPT proofs", "count", len(reqs))
+// RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
+func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, reqs []HelperTrieReq) error {
+	p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
 	switch p.version {
 	case lpv1:
 		reqsV1 := make([]ChtReq, len(reqs))
 		for i, req := range reqs {
-			if req.PPTId != PPTChain || req.AuxReq != PPTChainAuxHeader || len(req.Key) != 8 {
+			if req.HelperTrieType != htCanonical || req.AuxReq != auxHeader || len(req.Key) != 8 {
 				return fmt.Errorf("Request invalid in LES/1 mode")
 			}
 			blockNum := binary.BigEndian.Uint64(req.Key)
-			// convert PPT request to old CHT request
+			// convert HelperTrie request to old CHT request
 			reqsV1[i] = ChtReq{ChtNum: (req.TrieIdx+1)*(light.ChtFrequency/light.ChtV1Frequency) - 1, BlockNum: blockNum, FromLevel: req.FromLevel}
 		}
 		return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqsV1)
 	case lpv2:
-		return sendRequest(p.rw, GetPPTProofsMsg, reqID, cost, reqs)
+		return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
 	default:
 		panic(nil)
 	}
diff --git a/les/protocol.go b/les/protocol.go
index 19e0912982d7..146b02030e29 100644
--- a/les/protocol.go
+++ b/les/protocol.go
@@ -71,13 +71,13 @@ const (
 	GetHeaderProofsMsg = 0x0d
 	HeaderProofsMsg    = 0x0e
 	// Protocol messages belonging to LPV2
-	GetProofsV2Msg  = 0x0f
-	ProofsV2Msg     = 0x10
-	GetPPTProofsMsg = 0x11
-	PPTProofsMsg    = 0x12
-	SendTxV2Msg     = 0x13
-	GetTxStatusMsg  = 0x14
-	TxStatusMsg     = 0x15
+	GetProofsV2Msg         = 0x0f
+	ProofsV2Msg            = 0x10
+	GetHelperTrieProofsMsg = 0x11
+	HelperTrieProofsMsg    = 0x12
+	SendTxV2Msg            = 0x13
+	GetTxStatusMsg         = 0x14
+	TxStatusMsg            = 0x15
 )

 type errCode int
diff --git a/les/server.go b/les/server.go
index d3f76daae074..d8f93cd87b4d 100644
--- a/les/server.go
+++ b/les/server.go
@@ -46,7 +46,7 @@ type LesServer struct {
 	privateKey *ecdsa.PrivateKey
 	quitSync   chan struct{}

-	chtIndexer, bltIndexer *core.ChainIndexer
+	chtIndexer, bloomTrieIndexer *core.ChainIndexer
 }

 func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
@@ -62,11 +62,11 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
 	}

 	srv := &LesServer{
-		protocolManager: pm,
-		quitSync:        quitSync,
-		lesTopics:       lesTopics,
-		chtIndexer:      light.NewChtIndexer(eth.ChainDb(), false),
-		bltIndexer:      light.NewBloomTrieIndexer(eth.ChainDb(), false),
+		protocolManager:  pm,
+		quitSync:         quitSync,
+		lesTopics:        lesTopics,
+		chtIndexer:       light.NewChtIndexer(eth.ChainDb(), false),
+		bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), false),
 	}

 	logger := log.New()
@@ -82,12 +82,12 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
 		logger.Info("CHT", "section", chtLastSection, "sectionHead", fmt.Sprintf("%064x", chtSectionHead), "root", fmt.Sprintf("%064x", chtRoot))
 	}

-	bltSectionCount, _, _ := srv.bltIndexer.Sections()
-	if bltSectionCount != 0 {
-		bltLastSection := bltSectionCount - 1
-		bltSectionHead := srv.bltIndexer.SectionHead(bltLastSection)
-		bltRoot := light.GetBloomTrieRoot(pm.chainDb, bltLastSection, bltSectionHead)
-		logger.Info("BloomTrie", "section", bltLastSection, "sectionHead", fmt.Sprintf("%064x", bltSectionHead), "root", fmt.Sprintf("%064x", bltRoot))
+	bloomTrieSectionCount, _, _ := srv.bloomTrieIndexer.Sections()
+	if bloomTrieSectionCount != 0 {
+		bloomTrieLastSection := bloomTrieSectionCount - 1
+		bloomTrieSectionHead := srv.bloomTrieIndexer.SectionHead(bloomTrieLastSection)
+		bloomTrieRoot := light.GetBloomTrieRoot(pm.chainDb, bloomTrieLastSection, bloomTrieSectionHead)
+		logger.Info("BloomTrie", "section", bloomTrieLastSection, "sectionHead", fmt.Sprintf("%064x", bloomTrieSectionHead), "root", fmt.Sprintf("%064x", bloomTrieRoot))
 	}

 	srv.chtIndexer.Start(eth.BlockChain())
@@ -123,8 +123,8 @@ func (s *LesServer) Start(srvr *p2p.Server) {
 	s.protocolManager.blockLoop()
 }

-func (s *LesServer) SetBloomBitsIndexer(bbIndexer *core.ChainIndexer) {
-	bbIndexer.AddChildIndexer(s.bltIndexer)
+func (s *LesServer) SetBloomBitsIndexer(bloomIndexer *core.ChainIndexer) {
+	bloomIndexer.AddChildIndexer(s.bloomTrieIndexer)
 }

 // Stop stops the LES service
diff --git a/light/lightchain.go b/light/lightchain.go
index b3791f08493a..30baeaccb700 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -95,8 +95,8 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.
 	if bc.genesisBlock == nil {
 		return nil, core.ErrNoGenesis
 	}
-	if ppt, ok := trustedCheckpoints[bc.genesisBlock.Hash()]; ok {
-		bc.addTrustedCheckpoint(ppt)
+	if cp, ok := trustedCheckpoints[bc.genesisBlock.Hash()]; ok {
+		bc.addTrustedCheckpoint(cp)
 	}

 	if err := bc.loadLastState(); err != nil {
@@ -114,19 +114,19 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.
 }

 // addTrustedCheckpoint adds a trusted checkpoint to the blockchain
-func (self *LightChain) addTrustedCheckpoint(ppt trustedCheckpoint) {
+func (self *LightChain) addTrustedCheckpoint(cp trustedCheckpoint) {
 	if self.odr.ChtIndexer() != nil {
-		StoreChtRoot(self.chainDb, ppt.sectionIdx, ppt.sectionHead, ppt.chtRoot)
-		self.odr.ChtIndexer().AddKnownSectionHead(ppt.sectionIdx, ppt.sectionHead)
+		StoreChtRoot(self.chainDb, cp.sectionIdx, cp.sectionHead, cp.chtRoot)
+		self.odr.ChtIndexer().AddKnownSectionHead(cp.sectionIdx, cp.sectionHead)
 	}
-	if self.odr.BltIndexer() != nil {
-		StoreBloomTrieRoot(self.chainDb, ppt.sectionIdx, ppt.sectionHead, ppt.bltRoot)
-		self.odr.BltIndexer().AddKnownSectionHead(ppt.sectionIdx, ppt.sectionHead)
+	if self.odr.BloomTrieIndexer() != nil {
+		StoreBloomTrieRoot(self.chainDb, cp.sectionIdx, cp.sectionHead, cp.bloomTrieRoot)
+		self.odr.BloomTrieIndexer().AddKnownSectionHead(cp.sectionIdx, cp.sectionHead)
 	}
 	if self.odr.BloomIndexer() != nil {
-		self.odr.BloomIndexer().AddKnownSectionHead(ppt.sectionIdx, ppt.sectionHead)
+		self.odr.BloomIndexer().AddKnownSectionHead(cp.sectionIdx, cp.sectionHead)
 	}
-	log.Info("Added trusted PPT", "chain name", ppt.name)
+	log.Info("Added trusted checkpoint", "chain name", cp.name)
 }

 func (self *LightChain) getProcInterrupt() bool {
diff --git a/light/odr.go b/light/odr.go
index f5fbfc55495d..e2c3d9c5a44c 100644
--- a/light/odr.go
+++ b/light/odr.go
@@ -36,7 +36,7 @@ var NoOdr = context.Background()
 type OdrBackend interface {
 	Database() ethdb.Database
 	ChtIndexer() *core.ChainIndexer
-	BltIndexer() *core.ChainIndexer
+	BloomTrieIndexer() *core.ChainIndexer
 	BloomIndexer() *core.ChainIndexer
 	Retrieve(ctx context.Context, req OdrRequest) error
 }
@@ -150,10 +150,10 @@ func (req *ChtRequest) StoreResult(db ethdb.Database) {
 // BloomRequest is the ODR request type for retrieving bloom filters from a CHT structure
 type BloomRequest struct {
 	OdrRequest
-	BltNum         uint64
+	BloomTrieNum   uint64
 	BitIdx         uint
 	SectionIdxList []uint64
-	BltRoot        common.Hash
+	BloomTrieRoot  common.Hash
 	BloomBits      [][]byte
 	Proofs         *NodeSet
 }
diff --git a/light/odr_util.go b/light/odr_util.go
index a6ad165e21a4..a0eb6303d440 100644
--- a/light/odr_util.go
+++ b/light/odr_util.go
@@ -150,18 +150,18 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxLi
 	)

 	var (
-		bltCount, sectionHeadNum uint64
-		sectionHead              common.Hash
+		bloomTrieCount, sectionHeadNum uint64
+		sectionHead                    common.Hash
 	)
-	if odr.BltIndexer() != nil {
-		bltCount, sectionHeadNum, sectionHead = odr.BltIndexer().Sections()
+	if odr.BloomTrieIndexer() != nil {
+		bloomTrieCount, sectionHeadNum, sectionHead = odr.BloomTrieIndexer().Sections()
 		canonicalHash := core.GetCanonicalHash(db, sectionHeadNum)
 		// if the BloomTrie was injected as a trusted checkpoint, we have no canonical hash yet so we accept zero hash too
-		for bltCount > 0 && canonicalHash != sectionHead && canonicalHash != (common.Hash{}) {
-			bltCount--
-			if bltCount > 0 {
-				sectionHeadNum = bltCount*BloomTrieFrequency - 1
-				sectionHead = odr.BltIndexer().SectionHead(bltCount - 1)
+		for bloomTrieCount > 0 && canonicalHash != sectionHead && canonicalHash != (common.Hash{}) {
+			bloomTrieCount--
+			if bloomTrieCount > 0 {
+				sectionHeadNum = bloomTrieCount*BloomTrieFrequency - 1
+				sectionHead = odr.BloomTrieIndexer().SectionHead(bloomTrieCount - 1)
 				canonicalHash = core.GetCanonicalHash(db, sectionHeadNum)
 			}
 		}
@@ -176,8 +176,8 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxLi
 		if err == nil {
 			result[i] = bloomBits
 		} else {
-			if sectionIdx >= bltCount {
-				return nil, ErrNoTrustedBlt
+			if sectionIdx >= bloomTrieCount {
+				return nil, ErrNoTrustedBloomTrie
 			}
 			reqList = append(reqList, sectionIdx)
 			reqIdx = append(reqIdx, i)
@@ -187,7 +187,7 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxLi
 		return result, nil
 	}

-	r := &BloomRequest{BltRoot: GetBloomTrieRoot(db, bltCount-1, sectionHead), BltNum: bltCount - 1, BitIdx: bitIdx, SectionIdxList: reqList}
+	r := &BloomRequest{BloomTrieRoot: GetBloomTrieRoot(db, bloomTrieCount-1, sectionHead), BloomTrieNum: bloomTrieCount - 1, BitIdx: bitIdx, SectionIdxList: reqList}
 	if err := odr.Retrieve(ctx, r); err != nil {
 		return nil, err
 	} else {
diff --git a/light/postprocess.go b/light/postprocess.go
index 2e54b1d79f73..e7e513880f84 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -35,36 +35,36 @@ import (
 )

 const (
-	ChtFrequency            = 32768
-	ChtV1Frequency          = 4096 // as long as we want to retain LES/1 compatibility, servers generate CHTs with the old, higher frequency
-	PPTConfirmations        = 2048 // number of confirmations before a server is expected to have the given PPT available
-	PPTProcessConfirmations = 256  // number of confirmations before a PPT is generated
+	ChtFrequency                   = 32768
+	ChtV1Frequency                 = 4096 // as long as we want to retain LES/1 compatibility, servers generate CHTs with the old, higher frequency
+	HelperTrieConfirmations        = 2048 // number of confirmations before a server is expected to have the given HelperTrie available
+	HelperTrieProcessConfirmations = 256  // number of confirmations before a HelperTrie is generated
 )

 // trustedCheckpoint represents a set of post-processed trie roots (CHT and BloomTrie) associated with
 // the appropriate section index and head hash. It is used to start light syncing from this checkpoint
 // and avoid downloading the entire header chain while still being able to securely access old headers/logs.
 type trustedCheckpoint struct {
-	name                          string
-	sectionIdx                    uint64
-	sectionHead, chtRoot, bltRoot common.Hash
+	name                                string
+	sectionIdx                          uint64
+	sectionHead, chtRoot, bloomTrieRoot common.Hash
 }

 var (
 	mainnetCheckpoint = trustedCheckpoint{
-		name:        "ETH mainnet",
-		sectionIdx:  129,
-		sectionHead: common.HexToHash("64100587c8ec9a76870056d07cb0f58622552d16de6253a59cac4b580c899501"),
-		chtRoot:     common.HexToHash("bb4fb4076cbe6923c8a8ce8f158452bbe19564959313466989fda095a60884ca"),
-		bltRoot:     common.HexToHash("0db524b2c4a2a9520a42fd842b02d2e8fb58ff37c75cf57bd0eb82daeace6716"),
+		name:          "ETH mainnet",
+		sectionIdx:    129,
+		sectionHead:   common.HexToHash("64100587c8ec9a76870056d07cb0f58622552d16de6253a59cac4b580c899501"),
+		chtRoot:       common.HexToHash("bb4fb4076cbe6923c8a8ce8f158452bbe19564959313466989fda095a60884ca"),
+		bloomTrieRoot: common.HexToHash("0db524b2c4a2a9520a42fd842b02d2e8fb58ff37c75cf57bd0eb82daeace6716"),
 	}

 	ropstenCheckpoint = trustedCheckpoint{
-		name:        "Ropsten testnet",
-		sectionIdx:  50,
-		sectionHead: common.HexToHash("00bd65923a1aa67f85e6b4ae67835784dd54be165c37f056691723c55bf016bd"),
-		chtRoot:     common.HexToHash("6f56dc61936752cc1f8c84b4addabdbe6a1c19693de3f21cb818362df2117f03"),
-		bltRoot:     common.HexToHash("aca7d7c504d22737242effc3fdc604a762a0af9ced898036b5986c3a15220208"),
+		name:          "Ropsten testnet",
+		sectionIdx:    50,
+		sectionHead:   common.HexToHash("00bd65923a1aa67f85e6b4ae67835784dd54be165c37f056691723c55bf016bd"),
+		chtRoot:       common.HexToHash("6f56dc61936752cc1f8c84b4addabdbe6a1c19693de3f21cb818362df2117f03"),
+		bloomTrieRoot: common.HexToHash("aca7d7c504d22737242effc3fdc604a762a0af9ced898036b5986c3a15220208"),
 	}
 )

@@ -75,11 +75,11 @@ var trustedCheckpoints = map[common.Hash]trustedCheckpoint{
 }

 var (
-	ErrNoTrustedCht = errors.New("No trusted canonical hash trie")
-	ErrNoTrustedBlt = errors.New("No trusted bloom trie")
-	ErrNoHeader     = errors.New("Header not found")
-	chtPrefix       = []byte("chtRoot-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
-	ChtTablePrefix  = "cht-"
+	ErrNoTrustedCht       = errors.New("No trusted canonical hash trie")
+	ErrNoTrustedBloomTrie = errors.New("No trusted bloom trie")
+	ErrNoHeader           = errors.New("Header not found")
+	chtPrefix             = []byte("chtRoot-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
+	ChtTablePrefix        = "cht-"
 )

 // ChtNode structures are stored in the Canonical Hash Trie in an RLP encoded format
@@ -126,10 +126,10 @@ func NewChtIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer {
 	var sectionSize, confirmReq uint64
 	if clientMode {
 		sectionSize = ChtFrequency
-		confirmReq = PPTConfirmations
+		confirmReq = HelperTrieConfirmations
 	} else {
 		sectionSize = ChtV1Frequency
-		confirmReq = PPTProcessConfirmations
+		confirmReq = HelperTrieProcessConfirmations
 	}
 	return core.NewChainIndexer(db, idb, &ChtIndexerBackend{db: db, cdb: cdb, sectionSize: sectionSize}, sectionSize, confirmReq, time.Millisecond*100, "cht")
 }
@@ -219,10 +219,10 @@ func NewBloomTrieIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer
 	var confirmReq uint64
 	if clientMode {
 		backend.parentSectionSize = BloomTrieFrequency
-		confirmReq = PPTConfirmations
+		confirmReq = HelperTrieConfirmations
 	} else {
 		backend.parentSectionSize = ethBloomBitsSection
-		confirmReq = PPTProcessConfirmations
+		confirmReq = HelperTrieProcessConfirmations
 	}
 	backend.bloomTrieRatio = BloomTrieFrequency / backend.parentSectionSize
 	backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio)