cmd.go
// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils
import (
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rlp"
	"gopkg.in/urfave/cli.v1"
)
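
// importBatchSize is the number of blocks decoded from the RLP stream and
// inserted per batch in ImportChain.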
const (
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}

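// StartNode boots up the given protocol stack and spawns a signal handler
// that shuts the node down cleanly on SIGINT/SIGTERM. If low-disk-space
// monitoring is enabled, the node is also shut down once free disk space in
// the instance directory drops below the configured threshold.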
func StartNode(ctx *cli.Context, stack *node.Node) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)

		minFreeDiskSpace := ethconfig.Defaults.TrieDirtyCache
		if ctx.GlobalIsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.GlobalInt(MinFreeDiskSpaceFlag.Name)
		} else if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

		<-sigc
		log.Info("Got interrupt, shutting down...")
		go stack.Close()
		for i := 10; i > 0; i-- {
			<-sigc
			if i > 1 {
				log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
			}
		}
		debug.Exit() // ensure trace and CPU profile data is flushed.
		debug.LoudPanic("boom")
	}()
}

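// monitorFreeDiskSpace periodically checks the free disk space at path and
// signals a shutdown on sigc once it drops below freeDiskSpaceCritical.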
func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace))
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. Geth will shutdown if disk space runs below critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical))
		}
		time.Sleep(60 * time.Second)
	}
}

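// ImportChain imports an RLP-encoded chain of blocks from the given file
// (optionally gzip-compressed) into the blockchain, decoding and inserting
// the stream in batches of importBatchSize blocks.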
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// don't import the first block (genesis)
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if _, err := chain.InsertChain(missing); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}

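// missingBlocks returns the suffix of blocks, starting at the first block not
// yet fully present in the local chain. Blocks below the current head only
// need the block data itself, blocks above it also require the state to be
// available.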
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check block presence; state is available at head
		if head.NumberU64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)
	return nil
}

// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain to", "file", fn)
	return nil
}

// ImportPreimages imports a batch of exported hash preimages into the database.
func ImportPreimages(db ethdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte
		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough were gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
func ExportPreimages(db ethdb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}