difflayer_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"math/rand"
	"testing"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)
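
// The tests and benchmarks below can be run with the standard Go tooling, for
// example (assuming this file lives in go-ethereum's core/state/snapshot package):
//
//	go test -run 'TestMerge|TestInsertAndMerge' ./core/state/snapshot
//	go test -run NONE -bench 'BenchmarkSearch|BenchmarkFlatten|BenchmarkJournal' ./core/state/snapshot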

// copyDestructs returns a copy of the given destructed-account set, so that the
// maps the tests keep around for later comparison are not shared with (and
// potentially modified by) the layers they are handed to.
func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} {
	copy := make(map[common.Hash]struct{})
	for hash := range destructs {
		copy[hash] = struct{}{}
	}
	return copy
}

// copyAccounts returns a copy of the given account map.
func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte {
	copy := make(map[common.Hash][]byte)
	for hash, blob := range accounts {
		copy[hash] = blob
	}
	return copy
}

// copyStorage returns a deep copy of the given storage map
// (account hash -> slot hash -> slot value).
func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
	copy := make(map[common.Hash]map[common.Hash][]byte)
	for accHash, slots := range storage {
		copy[accHash] = make(map[common.Hash][]byte)
		for slotHash, blob := range slots {
			copy[accHash][slotHash] = blob
		}
	}
	return copy
}

// TestMergeBasics tests some simple merges
func TestMergeBasics(t *testing.T) {
	var (
		destructs = make(map[common.Hash]struct{})
		accounts  = make(map[common.Hash][]byte)
		storage   = make(map[common.Hash]map[common.Hash][]byte)
	)
	// Fill up a parent
	for i := 0; i < 100; i++ {
		h := randomHash()
		data := randomAccount()

		accounts[h] = data
		if rand.Intn(4) == 0 {
			destructs[h] = struct{}{}
		}
		if rand.Intn(2) == 0 {
			accStorage := make(map[common.Hash][]byte)
			value := make([]byte, 32)
			rand.Read(value)
			accStorage[randomHash()] = value
			storage[h] = accStorage
		}
	}
	// Add some (identical) layers on top
	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	// And flatten
	merged := (child.flatten()).(*diffLayer)

	{ // Check account lists
		if have, want := len(merged.accountList), 0; have != want {
			t.Errorf("accountList wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.AccountList()), len(accounts); have != want {
			t.Errorf("AccountList() wrong: have %v, want %v", have, want)
		}
		if have, want := len(merged.accountList), len(accounts); have != want {
			t.Errorf("accountList [2] wrong: have %v, want %v", have, want)
		}
	}
	{ // Check account drops
		if have, want := len(merged.destructSet), len(destructs); have != want {
			t.Errorf("accountDrop wrong: have %v, want %v", have, want)
		}
	}
	{ // Check storage lists
		i := 0
		for aHash, sMap := range storage {
			if have, want := len(merged.storageList), i; have != want {
				t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
			}
			list, _ := merged.StorageList(aHash)
			if have, want := len(list), len(sMap); have != want {
				t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
			}
			if have, want := len(merged.storageList[aHash]), len(sMap); have != want {
				t.Errorf("storageList wrong: have %v, want %v", have, want)
			}
			i++
		}
	}
}

// TestMergeDelete tests some deletion
func TestMergeDelete(t *testing.T) {
	var (
		storage = make(map[common.Hash]map[common.Hash][]byte)
	)
	// Fill up a parent
	h1 := common.HexToHash("0x01")
	h2 := common.HexToHash("0x02")

	flipDrops := func() map[common.Hash]struct{} {
		return map[common.Hash]struct{}{
			h2: {},
		}
	}
	flipAccs := func() map[common.Hash][]byte {
		return map[common.Hash][]byte{
			h1: randomAccount(),
		}
	}
	flopDrops := func() map[common.Hash]struct{} {
		return map[common.Hash]struct{}{
			h1: {},
		}
	}
	flopAccs := func() map[common.Hash][]byte {
		return map[common.Hash][]byte{
			h2: randomAccount(),
		}
	}
	// Add some flip-flopping layers on top
	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)

	if data, _ := child.Account(h1); data == nil {
		t.Errorf("last diff layer: expected %x account to be non-nil", h1)
	}
	if data, _ := child.Account(h2); data != nil {
		t.Errorf("last diff layer: expected %x account to be nil", h2)
	}
	if _, ok := child.destructSet[h1]; ok {
		t.Errorf("last diff layer: expected %x drop to be missing", h1)
	}
	if _, ok := child.destructSet[h2]; !ok {
		t.Errorf("last diff layer: expected %x drop to be present", h2)
	}
	// And flatten
	merged := (child.flatten()).(*diffLayer)

	if data, _ := merged.Account(h1); data == nil {
		t.Errorf("merged layer: expected %x account to be non-nil", h1)
	}
	if data, _ := merged.Account(h2); data != nil {
		t.Errorf("merged layer: expected %x account to be nil", h2)
	}
	if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
		t.Errorf("merged diff layer: expected %x drop to be present", h1)
	}
	if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
		t.Errorf("merged diff layer: expected %x drop to be present", h2)
	}
	// If we add more granular metering of memory, we can enable this again,
	// but it's not implemented for now
	//if have, want := merged.memory, child.memory; have != want {
	//	t.Errorf("mem wrong: have %d, want %d", have, want)
	//}
}

// This tests that if we create a new account, and set a slot, and then merge
// it, the lists will be correct.
func TestInsertAndMerge(t *testing.T) {
	// Fill up a parent
	var (
		acc    = common.HexToHash("0x01")
		slot   = common.HexToHash("0x02")
		parent *diffLayer
		child  *diffLayer
	)
	{
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
	}
	{
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		accounts[acc] = randomAccount()
		storage[acc] = make(map[common.Hash][]byte)
		storage[acc][slot] = []byte{0x01}
		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	// And flatten
	merged := (child.flatten()).(*diffLayer)

	{ // Check that slot value is present
		have, _ := merged.Storage(acc, slot)
		if want := []byte{0x01}; !bytes.Equal(have, want) {
			t.Errorf("merged slot value wrong: have %x, want %x", have, want)
		}
	}
}

// emptyLayer returns a disk layer backed by a fresh in-memory database and a
// small cache, used as the bottom of the layer stacks built in these tests.
func emptyLayer() *diskLayer {
	return &diskLayer{
		diskdb: memorydb.New(),
		cache:  fastcache.New(500 * 1024),
	}
}
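
// The benchmarks below all follow the same pattern: a fill helper stacks a
// number of diff layers on top of an empty disk layer, and the benchmark loop
// then exercises lookups (or flattening/journalling) against the top of the
// stack. A minimal sketch of that pattern, reusing the helper names from this
// file, looks roughly like:
//
//	var layer snapshot = emptyLayer()
//	for i := 0; i < 128; i++ {
//		layer = fill(layer) // wrap the stack in one more diffLayer
//	}
//	layer.AccountRLP(key) // resolved by walking the layers top-down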

// BenchmarkSearch checks how long it takes to find a non-existing key
// BenchmarkSearch-6   200000   10481 ns/op (1K per layer)
// BenchmarkSearch-6   200000   10760 ns/op (10K per layer)
// BenchmarkSearch-6   100000   17866 ns/op
//
// BenchmarkSearch-6   500000    3723 ns/op (10K per layer, only top-level RLock())
func BenchmarkSearch(b *testing.B) {
	// First, we set up 128 diff layers, with 10K items each
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		for i := 0; i < 10000; i++ {
			accounts[randomHash()] = randomAccount()
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	var layer snapshot
	layer = emptyLayer()
	for i := 0; i < 128; i++ {
		layer = fill(layer)
	}
	key := crypto.Keccak256Hash([]byte{0x13, 0x38})
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		layer.AccountRLP(key)
	}
}

// BenchmarkSearchSlot checks how long it takes to find a non-existing key
// - Number of layers: 128
// - Each layer contains the account, with a couple of storage slots
// BenchmarkSearchSlot-6   100000   14554 ns/op
// BenchmarkSearchSlot-6   100000   22254 ns/op (when checking parent root using mutex)
// BenchmarkSearchSlot-6   100000   14551 ns/op (when checking parent number using atomic)
// With bloom filter:
// BenchmarkSearchSlot-6   3467835   351 ns/op
func BenchmarkSearchSlot(b *testing.B) {
	// First, we set up 128 diff layers, each containing the account plus a few storage slots
	accountKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
	storageKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
	accountRLP := randomAccount()
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		accounts[accountKey] = accountRLP

		accStorage := make(map[common.Hash][]byte)
		for i := 0; i < 5; i++ {
			value := make([]byte, 32)
			rand.Read(value)
			accStorage[randomHash()] = value
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	var layer snapshot
	layer = emptyLayer()
	for i := 0; i < 128; i++ {
		layer = fill(layer)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		layer.Storage(accountKey, storageKey)
	}
}

// With accountList and sorting
// BenchmarkFlatten-6   50   29890856 ns/op
//
// Without sorting and tracking accountList
// BenchmarkFlatten-6   300   5511511 ns/op
func BenchmarkFlatten(b *testing.B) {
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		for i := 0; i < 100; i++ {
			accountKey := randomHash()
			accounts[accountKey] = randomAccount()

			accStorage := make(map[common.Hash][]byte)
			for i := 0; i < 20; i++ {
				value := make([]byte, 32)
				rand.Read(value)
				accStorage[randomHash()] = value
			}
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		var layer snapshot
		layer = emptyLayer()
		for i := 1; i < 128; i++ {
			layer = fill(layer)
		}
		b.StartTimer()

		for i := 1; i < 128; i++ {
			dl, ok := layer.(*diffLayer)
			if !ok {
				break
			}
			layer = dl.flatten()
		}
		b.StopTimer()
	}
}

// This test writes ~324M of diff layers to disk, spread over
// - 128 individual layers,
// - each with 200 accounts
// - containing 200 slots
//
// BenchmarkJournal-6   1   1471373923 ns/op
// BenchmarkJournal-6   1   1208083335 ns/op // bufio writer
func BenchmarkJournal(b *testing.B) {
	fill := func(parent snapshot) *diffLayer {
		var (
			destructs = make(map[common.Hash]struct{})
			accounts  = make(map[common.Hash][]byte)
			storage   = make(map[common.Hash]map[common.Hash][]byte)
		)
		for i := 0; i < 200; i++ {
			accountKey := randomHash()
			accounts[accountKey] = randomAccount()

			accStorage := make(map[common.Hash][]byte)
			for i := 0; i < 200; i++ {
				value := make([]byte, 32)
				rand.Read(value)
				accStorage[randomHash()] = value
			}
			storage[accountKey] = accStorage
		}
		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
	}
	layer := snapshot(new(diskLayer))
	for i := 1; i < 128; i++ {
		layer = fill(layer)
	}
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		layer.Journal(new(bytes.Buffer))
	}
}