diff --git a/.travis.yml b/.travis.yml
index e99711258..ced7bdc14 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,6 +1,6 @@
 language: go
 go_import_path: github.com/ethereumproject/go-ethereum
-go: 1.8
+go: 1.8.x
 os:
   - linux
   - osx
diff --git a/README.md b/README.md
index 56f1e2cda..f47f53840 100644
--- a/README.md
+++ b/README.md
@@ -54,8 +54,8 @@ $ go install -ldflags "-X main.Version="`git describe --tags` ./cmd/...
 ```
 
 #### Using release source code tarball
-Because of strict Go directory structure, tarball needs to be extracted into proper subdirectory under `$GOPATH`.
-Following commands are example of building the v4.1.1 release:
+Because of the strict Go directory structure, the tarball needs to be extracted into a proper subdirectory under `$GOPATH`.
+The following commands are an example of building the v4.1.1 release:
 ```
 $ mkdir -p $GOPATH/src/github.com/ethereumproject
 $ cd $GOPATH/src/github.com/ethereumproject
diff --git a/cmd/geth/cmd.go b/cmd/geth/cmd.go
index 8056f0d18..613ec7f04 100644
--- a/cmd/geth/cmd.go
+++ b/cmd/geth/cmd.go
@@ -864,7 +864,7 @@ func runStatusSyncLogs(e *eth.Ethereum, interval string, maxPeers int) {
 				currentBlockHex = blockchain.CurrentFastBlock().Hash().Hex()
 			}
 		}
-		if current == height && !(current == 0 && height == 0) {
+		if current >= height && !(current == 0 && height == 0) {
 			fMode = "Import " // with spaces to make same length as Discover, FastSync, FullSync
 			fOfHeight = strings.Repeat(" ", 12)
 			fHeightRatio = strings.Repeat(" ", 7)
diff --git a/core/blockchain.go b/core/blockchain.go
index 29c305626..252d7835f 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -150,7 +150,7 @@ func NewBlockChain(chainDb ethdb.Database, config *ChainConfig, pow pow.PoW, mux
 	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
 	for i := range config.BadHashes {
 		if header := bc.GetHeader(config.BadHashes[i].Hash); header != nil && header.Number.Cmp(config.BadHashes[i].Block) == 0 {
-			glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
+			glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%s]", header.Number, header.ParentHash.Hex())
 			bc.SetHead(header.Number.Uint64() - 1)
 			glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
 		}
@@ -198,7 +198,7 @@ func NewBlockChainDryrun(chainDb ethdb.Database, config *ChainConfig, pow pow.Po
 	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
 	for i := range config.BadHashes {
 		if header := bc.GetHeader(config.BadHashes[i].Hash); header != nil && header.Number.Cmp(config.BadHashes[i].Block) == 0 {
-			glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
+			glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%s]", header.Number, header.ParentHash.Hex())
 			bc.SetHead(header.Number.Uint64() - 1)
 			glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
 		}
@@ -612,7 +612,7 @@ func (self *BlockChain) LoadLastState(dryrun bool) error {
 		glog.V(logger.Info).Infof("Validating currentFastBlock: %v", self.currentFastBlock.Number())
 		if e := self.blockIsInvalid(self.currentFastBlock); e != nil {
 			if !dryrun {
-				glog.V(logger.Warn).Infof("WARNING: Found unhealthy head fast block #%d (%x): %v \nAttempting chain reset with recovery.", self.currentFastBlock.Number(), self.currentFastBlock.Hash(), e)
+				glog.V(logger.Warn).Infof("WARNING: Found unhealthy head fast block #%d [%x]: %v \nAttempting chain reset with recovery.", self.currentFastBlock.Number(), self.currentFastBlock.Hash(), e)
 				return recoverOrReset()
 			}
 			return fmt.Errorf("invalid currentFastBlock: %v", e)
@@ -1082,7 +1082,6 @@ type WriteStatus byte
 
 const (
 	NonStatTy WriteStatus = iota
 	CanonStatTy
-	SplitStatTy
 	SideStatTy
 )
@@ -1272,6 +1271,11 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
 			case SideStatTy:
 				mlogWriteStatus = "SIDE"
 			}
+			parent := self.GetBlock(block.ParentHash())
+			parentTimeDiff := new(big.Int)
+			if parent != nil {
+				parentTimeDiff = new(big.Int).Sub(block.Time(), parent.Time())
+			}
 			mlogBlockchain.Send(mlogBlockchainWriteBlock.SetDetailValues(
 				mlogWriteStatus,
 				err,
@@ -1282,6 +1286,10 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
 				block.GasUsed(),
 				block.Coinbase().Hex(),
 				block.Time(),
+				block.Difficulty(),
+				len(block.Uncles()),
+				block.ReceivedAt,
+				parentTimeDiff,
 			))
 		}()
 	}
@@ -1302,9 +1310,19 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
 	externTd := new(big.Int).Add(block.Difficulty(), ptd)
 
 	// If the total difficulty is higher than our known, add it to the canonical chain
-	// Second clause in the if statement reduces the vulnerability to selfish mining.
-	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
-	if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
+	// Compare local vs external difficulties
+	tdCompare := externTd.Cmp(localTd)
+
+	// Initialize reorg if incoming TD is greater than local.
+	reorg := tdCompare > 0
+
+	// If TDs are the same, randomize.
+	if tdCompare == 0 {
+		// Reduces the vulnerability to selfish mining.
+		// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
+		reorg = mrand.Float64() < 0.5
+	}
+	if reorg {
 		// Reorganise the chain if the parent is not the head block
 		if block.ParentHash() != self.currentBlock.Hash() {
 			if err := self.reorg(self.currentBlock, block); err != nil {
@@ -1468,7 +1486,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (chainIndex int, err err
 		switch status {
 		case CanonStatTy:
 			if glog.V(logger.Debug) {
-				glog.Infof("[%v] inserted block #%d (%d TXs %v G %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), block.GasUsed(), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
+				glog.Infof("[%v] inserted block #%d (%d TXs %v G %d UNCs) [%s]. Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), block.GasUsed(), len(block.Uncles()), block.Hash().Hex(), time.Since(bstart))
 			}
 			events = append(events, ChainEvent{block, block.Hash(), logs})
 
@@ -1486,12 +1504,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (chainIndex int, err err
 			}
 		case SideStatTy:
 			if glog.V(logger.Detail) {
-				glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
+				glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) [%s]. Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Hex(), time.Since(bstart))
 			}
 			events = append(events, ChainSideEvent{block, logs})
-
-		case SplitStatTy:
-			events = append(events, ChainSplitEvent{block, logs})
 		}
 		stats.processed++
 	}
@@ -1511,15 +1526,15 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (chainIndex int, err err
 				tend,
 			))
 		}
-		glog.V(logger.Info).Infof("imported %d block(s) (%d queued %d ignored) including %d txs in %v. #%v [%x / %x]\n",
+		glog.V(logger.Info).Infof("imported %d block(s) (%d queued %d ignored) including %d txs in %v. #%v [%s / %s]\n",
 			stats.processed,
 			stats.queued,
 			stats.ignored,
 			txcount,
 			tend,
 			end.Number(),
-			start.Hash().Bytes()[:4],
-			end.Hash().Bytes()[:4])
+			start.Hash().Hex(),
+			end.Hash().Hex())
 	}
 	go self.postChainEvents(events, coalescedLogs)
 
@@ -1596,9 +1611,17 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 		}
 	}
 
+	commonHash := commonBlock.Hash()
 	if glog.V(logger.Debug) {
-		commonHash := commonBlock.Hash()
-		glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
+		glog.Infof("Chain split detected @ [%s]. Reorganising chain from #%v %s to %s", commonHash.Hex(), numSplit, oldStart.Hash().Hex(), newStart.Hash().Hex())
+	}
+	if logger.MlogEnabled() {
+		mlogBlockchain.Send(mlogBlockchainReorgBlocks.SetDetailValues(
+			commonHash.Hex(),
+			numSplit,
+			oldStart.Hash().Hex(),
+			newStart.Hash().Hex(),
+		))
 	}
 
 	var addedTxs types.Transactions
@@ -1689,7 +1712,7 @@ func (chain *BlockChain) update() {
 			if len(blocks) > 0 {
 				types.BlockBy(types.Number).Sort(blocks)
 				if i, err := chain.InsertChain(blocks); err != nil {
-					log.Printf("periodic future chain update on block #%d (%x): %s", blocks[i].Number(), blocks[i].Hash(), err)
+					log.Printf("periodic future chain update on block #%d [%s]: %s", blocks[i].Number(), blocks[i].Hash().Hex(), err)
 				}
 			}
 		}
diff --git a/core/mlog.go b/core/mlog.go
index 11b7e9a21..bab83a359 100644
--- a/core/mlog.go
+++ b/core/mlog.go
@@ -38,6 +38,10 @@ A STATUS of NONE means it was written _without_ any abnormal chain event, such a
 		{"BLOCK", "GAS_USED", "BIGINT"},
 		{"BLOCK", "COINBASE", "STRING"},
 		{"BLOCK", "TIME", "BIGINT"},
+		{"BLOCK", "DIFFICULTY", "BIGINT"},
+		{"BLOCK", "UNCLES", "INT"},
+		{"BLOCK", "RECEIVED_AT", "BIGINT"},
+		{"BLOCK", "DIFF_PARENT_TIME", "BIGINT"},
 	},
 }
 
@@ -58,6 +62,19 @@ var mlogBlockchainInsertBlocks = logger.MLogT{
 	},
 }
 
+var mlogBlockchainReorgBlocks = logger.MLogT{
+	Description: "Called when a chain split is detected and a subset of blocks are reorganized.",
+	Receiver:    "BLOCKCHAIN",
+	Verb:        "REORG",
+	Subject:     "BLOCKS",
+	Details: []logger.MLogDetailT{
+		{"REORG", "LAST_COMMON_HASH", "STRING"},
+		{"REORG", "SPLIT_NUMBER", "BIGINT"},
+		{"BLOCKS", "OLD_START_HASH", "STRING"},
+		{"BLOCKS", "NEW_START_HASH", "STRING"},
+	},
+}
+
 var mlogTxPoolAddTx = logger.MLogT{
 	Description: `Called once when a valid transaction is added to tx pool.
$TO.NAME will be the account address hex or '[NEW_CONTRACT]' in case of a contract.`,
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 1944ab195..2106798ad 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -266,8 +266,20 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
 	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
 	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {
 
+	var err error
+	defer func() {
+		if logger.MlogEnabled() {
+			mlogDownloader.Send(mlogDownloaderRegisterPeer.SetDetailValues(
+				id,
+				version,
+				err,
+			))
+		}
+	}()
+
 	glog.V(logger.Detail).Infoln("Registering peer", id)
-	if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
+	err = d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData))
+	if err != nil {
 		glog.V(logger.Error).Infoln("Register failed:", err)
 		return err
 	}
@@ -280,9 +292,21 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
 // the specified peer. An effort is also made to return any pending fetches into
 // the queue.
 func (d *Downloader) UnregisterPeer(id string) error {
+
+	var err error
+	defer func() {
+		if logger.MlogEnabled() {
+			mlogDownloader.Send(mlogDownloaderUnregisterPeer.SetDetailValues(
+				id,
+				err,
+			))
+		}
+	}()
+
 	// Unregister the peer from the active peer set and revoke any fetch tasks
 	glog.V(logger.Detail).Infoln("Unregistering peer", id)
-	if err := d.peers.Unregister(id); err != nil {
+	err = d.peers.Unregister(id)
+	if err != nil {
 		glog.V(logger.Error).Infoln("Unregister failed:", err)
 		return err
 	}
@@ -685,10 +709,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 	// If the head fetch already found an ancestor, return
 	if !common.EmptyHash(hash) {
 		if int64(number) <= floor {
-			glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
+			glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash, floor)
 			return 0, errInvalidAncestor
 		}
-		glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
+		glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash)
 		return number, nil
 	}
 	// Ancestor not found, we need to binary search over our chain
@@ -1479,7 +1503,16 @@ func (d *Downloader) qosTuner() {
 		atomic.StoreUint64(&d.rttConfidence, conf)
 
 		// Log the new QoS values and sleep until the next RTT
-		glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL())
+		ttl := d.requestTTL()
+		if logger.MlogEnabled() {
+			mlogDownloader.Send(mlogDownloaderTuneQOS.SetDetailValues(
+				rtt,
+				float64(conf)/1000000.0,
+				ttl,
+			))
+		}
+		glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, ttl)
+
 		select {
 		case <-d.quitCh:
 			return
diff --git a/eth/downloader/mlog.go b/eth/downloader/mlog.go
new file mode 100644
index 000000000..4455815c8
--- /dev/null
+++ b/eth/downloader/mlog.go
@@ -0,0 +1,49 @@
+package downloader
+
+import "github.com/ethereumproject/go-ethereum/logger"
+
+var mlogDownloader = logger.MLogRegisterAvailable("downloader", mLogLines)
+
+var mLogLines = []logger.MLogT{
+	mlogDownloaderRegisterPeer,
+	mlogDownloaderUnregisterPeer,
+	mlogDownloaderTuneQOS,
+}
+
+var mlogDownloaderRegisterPeer = logger.MLogT{
+	Description: "Called when the downloader registers a peer.",
+	Receiver:    "DOWNLOADER",
+	Verb:        "REGISTER",
+	Subject:     "PEER",
+	Details: []logger.MLogDetailT{
+		{"PEER", "ID", "STRING"},
+		{"PEER", "VERSION", "INT"},
+		{"REGISTER", "ERROR", "STRING_OR_NULL"},
+	},
+}
+
+var mlogDownloaderUnregisterPeer = logger.MLogT{
+	Description: "Called when the downloader unregisters a peer.",
+	Receiver:    "DOWNLOADER",
+	Verb:        "UNREGISTER",
+	Subject:     "PEER",
+	Details: []logger.MLogDetailT{
+		{"PEER", "ID", "STRING"},
+		{"UNREGISTER", "ERROR", "STRING_OR_NULL"},
+	},
+}
+
+var mlogDownloaderTuneQOS = logger.MLogT{
+	Description: `Called at intervals to gather peer latency statistics. Estimates request round trip time.
+
+RTT reports the estimated Request Round Trip time, confidence is measured from 0-1 (1 is fully confident),
+and TTL reports the Timeout Allowance for a single downloader request to finish within.`,
+	Receiver: "DOWNLOADER",
+	Verb:     "TUNE",
+	Subject:  "QOS",
+	Details: []logger.MLogDetailT{
+		{"QOS", "RTT", "DURATION"},
+		{"QOS", "CONFIDENCE", "NUMBER"},
+		{"QOS", "TTL", "DURATION"},
+	},
+}
\ No newline at end of file
diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/fetcher.go
index 78bd3cb1b..27d65c8dc 100644
--- a/eth/fetcher/fetcher.go
+++ b/eth/fetcher/fetcher.go
@@ -134,7 +134,7 @@ type Fetcher struct {
 	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
 	validateBlock  blockValidatorFn   // Checks if a block's headers have a valid proof of work
 	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
-	chainHeight    chainHeightFn      // Retrieves the current chain's height
+	chainHeight    chainHeightFn      // Retrieves the current local chain's height (blockchain.currentBlock.Number)
 	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
 	dropPeer       peerDropFn         // Drops a peer for misbehaving
 
@@ -205,7 +205,7 @@ func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time
 	}
 }
 
-// Enqueue tries to fill gaps the the fetcher's future import queue.
+// Enqueue tries to fill gaps in the fetcher's future import queue.
 func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
 	op := &inject{
 		origin: peer,
@@ -334,7 +334,15 @@ func (f *Fetcher) loop() {
 			// If we have a valid block number, check that it's potentially useful
 			if notification.number > 0 {
 				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
-					glog.V(logger.Debug).Infof("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist)
+					if logger.MlogEnabled() {
+						mlogFetcher.Send(mlogFetcherDiscardAnnouncement.SetDetailValues(
+							notification.origin,
+							notification.number,
+							notification.hash.Str(),
+							dist,
+						))
+					}
+					glog.V(logger.Debug).Infof("[eth/62] Peer %s: discarded announcement #%d [%s], distance %d", notification.origin, notification.number, notification.hash.Hex(), dist)
 					metrics.FetchAnnounceDrops.Mark(1)
 					break
 				}
@@ -466,7 +474,7 @@ func (f *Fetcher) loop() {
 			if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
 				// If the delivered header does not match the promised number, drop the announcer
 				if header.Number.Uint64() != announce.number {
-					glog.V(logger.Detail).Infof("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64())
+					glog.V(logger.Detail).Infof("[eth/62] Peer %s: invalid block number for [%s]: announced %d, provided %d", announce.origin, header.Hash().Hex(), announce.number, header.Number.Uint64())
 					f.dropPeer(announce.origin)
 					f.forgetHash(hash)
 					continue
@@ -478,7 +486,7 @@ func (f *Fetcher) loop() {
 
 				// If the block is empty (header only), short circuit into the final import queue
 				if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
-					glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])
+					glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%s] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Hex())
 
 					block := types.NewBlockWithHeader(header)
 					block.ReceivedAt = task.time
@@ -490,7 +498,7 @@ func (f *Fetcher) loop() {
 					// Otherwise add to the list of blocks needing completion
 					incomplete = append(incomplete, announce)
 				} else {
-					glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4])
+					glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%s] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Hex())
 					f.forgetHash(hash)
 				}
 			} else {
@@ -621,14 +629,28 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
 	// Ensure the peer isn't DOSing us
 	count := f.queues[peer] + 1
 	if count > blockLimit {
-		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
+		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%s], exceeded allowance (%d)", peer, block.NumberU64(), hash.Hex(), blockLimit)
 		metrics.FetchBroadcastDOS.Mark(1)
 		f.forgetHash(hash)
 		return
 	}
-	// Discard any past or too distant blocks
 	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
-		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
+		if logger.MlogEnabled() {
+			mlogFetcher.Send(mlogFetcherDiscardAnnouncement.SetDetailValues(
+				peer,
+				block.NumberU64(),
+				hash.Str(),
+				dist,
+			))
+		}
+		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%s], distance %d", peer, block.NumberU64(), hash.Hex(), dist)
+		metrics.FetchBroadcastDrops.Mark(1)
+		f.forgetHash(hash)
+		return
+	}
+	// Don't queue block if we already have it.
+	if f.getBlock(hash) != nil {
+		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%s], already have", peer, block.NumberU64(), hash.Hex())
 		metrics.FetchBroadcastDrops.Mark(1)
 		f.forgetHash(hash)
 		return
@@ -646,26 +668,35 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
 			f.queueChangeHook(op.block.Hash(), true)
 		}
 		if glog.V(logger.Debug) {
-			glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
+			glog.Infof("Peer %s: queued block #%d [%s], total %v", peer, block.NumberU64(), hash.Hex(), f.queue.Size())
 		}
 	}
 }
 
 // insert spawns a new goroutine to run a block insertion into the chain. If the
-// block's number is at the same height as the current import phase, if updates
+// block's number is at the same height as the current import phase, it updates
 // the phase states accordingly.
 func (f *Fetcher) insert(peer string, block *types.Block) {
 	hash := block.Hash()
 
 	// Run the import on a new thread
-	glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4])
+	glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%s]", peer, block.NumberU64(), hash.Hex())
 	go func() {
-		defer func() { f.done <- hash }()
-		// If the parent's unknown, abort insertion
+		haveParent := true
+
+		defer func() {
+			if haveParent {
+				f.done <- hash
+			}
+		}()
+
+		// If the parent's unknown, abort insertion, and don't forget the hash and block;
+		// use queue gap fill to get unknown parent.
 		parent := f.getBlock(block.ParentHash())
 		if parent == nil {
-			glog.V(logger.Debug).Infof("Peer %s: parent []%x] of block #%d [%x…] unknown", block.ParentHash().Bytes()[:4], peer, block.NumberU64(), hash[:4])
+			glog.V(logger.Debug).Infof("Peer %s: parent [%s] of block #%d [%s] unknown", peer, block.ParentHash().Hex(), block.NumberU64(), hash.Hex())
+			haveParent = false
 			return
 		}
 		// Quickly validate the header and propagate the block if it passes
@@ -680,13 +711,13 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
 
 		default:
 			// Something went very wrong, drop the peer
-			glog.V(logger.Debug).Infof("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
+			glog.V(logger.Debug).Infof("Peer %s: block #%d [%s] verification failed: %v", peer, block.NumberU64(), hash.Hex(), err)
 			f.dropPeer(peer)
 			return
 		}
 		// Run the actual import and log any issues
 		if _, err := f.insertChain(types.Blocks{block}); err != nil {
-			glog.V(logger.Warn).Infof("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err)
+			glog.V(logger.Warn).Infof("Peer %s: block #%d [%s] import failed: %v", peer, block.NumberU64(), hash.Hex(), err)
 			return
 		}
 		// If import succeeded, broadcast the block
diff --git a/eth/fetcher/mlog.go b/eth/fetcher/mlog.go
new file mode 100644
index 000000000..a3e72fd22
--- /dev/null
+++ b/eth/fetcher/mlog.go
@@ -0,0 +1,22 @@
+package fetcher
+
+import "github.com/ethereumproject/go-ethereum/logger"
+
+var mlogFetcher = logger.MLogRegisterAvailable("fetcher", mLogLines)
+
+var mLogLines = []logger.MLogT{
+	mlogFetcherDiscardAnnouncement,
+}
+
+var mlogFetcherDiscardAnnouncement = logger.MLogT{
+	Description: "Called when a block announcement is discarded.",
+	Receiver:    "FETCHER",
+	Verb:        "DISCARD",
+	Subject:     "ANNOUNCEMENT",
+	Details: []logger.MLogDetailT{
+		{"ANNOUNCEMENT", "ORIGIN", "STRING"},
+		{"ANNOUNCEMENT", "NUMBER", "INT"},
+		{"ANNOUNCEMENT", "HASH", "STRING"},
+		{"ANNOUNCEMENT", "DISTANCE", "INT"},
+	},
+}
diff --git a/eth/handler.go b/eth/handler.go
index 0a8f32e88..50e010d6c 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -632,26 +632,31 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 
 		// Mark the peer as owning the block and schedule it for import
 		p.MarkBlock(request.Block.Hash())
-		p.SetHead(request.Block.ParentHash(), request.TD)
 		pm.fetcher.Enqueue(p.id, request.Block)
 
 		// Assuming the block is importable by the peer, but possibly not yet done so,
 		// calculate the head hash and TD that the peer truly must have.
 		var (
 			trueHead = request.Block.ParentHash()
-			trueTD   = request.TD
+			trueTD   = new(big.Int).Sub(request.TD, request.Block.Difficulty())
 		)
 		// Update the peers total difficulty if better than the previous
 		if _, td := p.Head(); trueTD.Cmp(td) > 0 {
+			glog.V(logger.Debug).Infof("Peer %s: setting head: tdWas=%v trueTD=%v", p.id, td, trueTD)
 			p.SetHead(trueHead, trueTD)
-
+
 			// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
 			// a singe block (as the true TD is below the propagated block), however this
 			// scenario should easily be covered by the fetcher.
 			currentBlock := pm.blockchain.CurrentBlock()
-			if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash())) > 0 {
+			if localTd := pm.blockchain.GetTd(currentBlock.Hash()); trueTD.Cmp(localTd) > 0 {
+				glog.V(logger.Info).Infof("Peer %s: localTD=%v (<) peerTrueTD=%v, synchronising", p.id, localTd, trueTD)
 				go pm.synchronise(p)
+			} else {
+				glog.V(logger.Detail).Infof("Peer %s: localTD=%v (>=) peerTrueTD=%v, NOT synchronising", p.id, localTd, trueTD)
 			}
+		} else {
+			glog.V(logger.Detail).Infof("Peer %s: NOT setting head: tdWas=%v trueTD=%v", p.id, td, trueTD)
 		}
 
 	case msg.Code == TxMsg:
@@ -720,7 +725,7 @@ func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction)
 	for _, peer := range peers {
 		peer.SendTransactions(types.Transactions{tx})
 	}
-	glog.V(logger.Detail).Infoln("broadcast tx to", len(peers), "peers")
+	glog.V(logger.Detail).Infof("broadcast tx [%s] to %d peers", tx.Hash().Hex(), len(peers))
 }
 
 // Mined broadcast loop
diff --git a/eth/peer.go b/eth/peer.go
index e26c05037..83b6b7e84 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -281,7 +281,7 @@ func (p *peer) readStatus(network int, status *statusData, genesis common.Hash)
 		return errResp(ErrDecode, "msg %v: %v", msg, err)
 	}
 	if status.GenesisBlock != genesis {
-		return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock, genesis)
+		return errResp(ErrGenesisBlockMismatch, "%x (!= %x…)", status.GenesisBlock, genesis.Bytes()[:8])
 	}
 	if int(status.NetworkId) != network {
 		return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, network)
diff --git a/eth/protocol_test.go b/eth/protocol_test.go
index 22436c514..9419e6207 100644
--- a/eth/protocol_test.go
+++ b/eth/protocol_test.go
@@ -65,7 +65,7 @@ func testStatusMsgErrors(t *testing.T, protocol int) {
 		},
 		{
 			code: StatusMsg, data: statusData{uint32(protocol), NetworkId, td, currentBlock, common.Hash{3}},
-			wantError: errResp(ErrGenesisBlockMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x)", genesis),
+			wantError: errResp(ErrGenesisBlockMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x…)", genesis.Bytes()[:8]),
 		},
 	}
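The canonical-chain decision restructured in `WriteBlock` above can be read in isolation. Below is a minimal standalone sketch of that rule with plain `*big.Int` inputs in place of the real `BlockChain` state; the `shouldReorg` helper and the literal TD values are illustrative only and are not part of the patch:

```go
package main

import (
	"fmt"
	"math/big"
	mrand "math/rand"
)

// shouldReorg mirrors the decision in BlockChain.WriteBlock: adopt the
// incoming chain when its total difficulty is strictly higher, and flip a
// coin when the totals are equal.
func shouldReorg(localTd, externTd *big.Int) bool {
	tdCompare := externTd.Cmp(localTd)
	reorg := tdCompare > 0
	if tdCompare == 0 {
		// Randomizing the tie reduces the vulnerability to selfish mining.
		reorg = mrand.Float64() < 0.5
	}
	return reorg
}

func main() {
	local := big.NewInt(1000)
	fmt.Println(shouldReorg(local, big.NewInt(1001))) // true: higher TD always wins
	fmt.Println(shouldReorg(local, big.NewInt(999)))  // false: lower TD never wins
	fmt.Println(shouldReorg(local, big.NewInt(1000))) // roughly 50/50
}
```

Randomizing the equal-TD case, rather than always keeping the incumbent chain, is the selfish-mining mitigation referenced in the comment and the linked paper.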
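The "true TD" bookkeeping changed in `handleMsg` follows the same reasoning as its comment: the TD announced alongside a propagated block includes the block itself, which the peer may not have imported yet, so the TD the peer is certain to have is the announced TD minus the new block's difficulty (with the peer's head taken as the block's parent). A hedged, self-contained sketch; the helper name and numbers are hypothetical:

```go
package main

import (
	"fmt"
	"math/big"
)

// peerTrueTD computes the total difficulty a peer must already have when it
// propagates a block with the given announced TD, per the NewBlockMsg handling.
func peerTrueTD(announcedTD, blockDifficulty *big.Int) *big.Int {
	return new(big.Int).Sub(announcedTD, blockDifficulty)
}

func main() {
	announced := big.NewInt(1000000) // hypothetical TD sent with the block
	difficulty := big.NewInt(2000)   // hypothetical difficulty of that block
	fmt.Println(peerTrueTD(announced, difficulty)) // 998000
}
```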
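The distance check that the new fetcher mlog discard events wrap is a simple window around the local head: announcements or blocks too far behind or ahead are dropped. A small sketch, assuming the upstream fetcher bounds `maxUncleDist = 7` and `maxQueueDist = 32`; treat the exact constants as illustrative:

```go
package main

import "fmt"

// Assumed bounds, matching the upstream eth/fetcher package at the time.
const (
	maxUncleDist = 7  // blocks older than this relative to the head are useless
	maxQueueDist = 32 // blocks further ahead than this are not queued yet
)

// discardAnnouncement reports whether a block at the given number should be
// dropped relative to the current chain height, as in Fetcher.loop/enqueue.
func discardAnnouncement(number, chainHeight uint64) bool {
	dist := int64(number) - int64(chainHeight)
	return dist < -maxUncleDist || dist > maxQueueDist
}

func main() {
	head := uint64(1000)
	fmt.Println(discardAnnouncement(990, head))  // true: 10 behind, past the uncle window
	fmt.Println(discardAnnouncement(1020, head)) // false: 20 ahead, still queueable
	fmt.Println(discardAnnouncement(1040, head)) // true: 40 ahead, too far in the future
}
```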