table_test.go 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428
  1. // Copyright 2015 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package discover
  17. import (
  18. "crypto/ecdsa"
  19. "fmt"
  20. "math/rand"
  21. "net"
  22. "reflect"
  23. "testing"
  24. "testing/quick"
  25. "time"
  26. "github.com/ethereum/go-ethereum/crypto"
  27. "github.com/ethereum/go-ethereum/p2p/enode"
  28. "github.com/ethereum/go-ethereum/p2p/enr"
  29. "github.com/ethereum/go-ethereum/p2p/netutil"
  30. )
  31. func TestTable_pingReplace(t *testing.T) {
  32. run := func(newNodeResponding, lastInBucketResponding bool) {
  33. name := fmt.Sprintf("newNodeResponding=%t/lastInBucketResponding=%t", newNodeResponding, lastInBucketResponding)
  34. t.Run(name, func(t *testing.T) {
  35. t.Parallel()
  36. testPingReplace(t, newNodeResponding, lastInBucketResponding)
  37. })
  38. }
  39. run(true, true)
  40. run(false, true)
  41. run(true, false)
  42. run(false, false)
  43. }
// testPingReplace fills the bucket of a known sender node, then offers that
// sender to the table and runs revalidation, checking the replacement policy:
//   - the oldest node in the bucket must be pinged;
//   - if it does not respond it loses its slot, which the sender may take;
//   - if it responds it keeps its slot and the sender stays out.
func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) {
	transport := newPingRecorder()
	tab, db := newTestTable(transport)
	defer db.Close()
	defer tab.close()
	<-tab.initDone

	// Fill up the sender's bucket.
	pingKey, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8")
	pingSender := wrapNode(enode.NewV4(&pingKey.PublicKey, net.IP{127, 0, 0, 1}, 99, 99))
	last := fillBucket(tab, pingSender)

	// Add the sender as if it just pinged us. Revalidate should replace the last node in
	// its bucket if it is unresponsive. Revalidate again to ensure that
	// the second candidate's liveness is exercised as well.
	// NOTE(review): the original comment was cut off mid-sentence; the purpose
	// of the second doRevalidate call should be confirmed against doRevalidate.
	transport.dead[last.ID()] = !lastInBucketIsResponding
	transport.dead[pingSender.ID()] = !newNodeIsResponding
	tab.addSeenNode(pingSender)
	tab.doRevalidate(make(chan struct{}, 1))
	tab.doRevalidate(make(chan struct{}, 1))

	if !transport.pinged[last.ID()] {
		// Oldest node in bucket is pinged to see whether it is still alive.
		t.Error("table did not ping last node in bucket")
	}

	// Inspect the bucket contents under the table lock.
	tab.mutex.Lock()
	defer tab.mutex.Unlock()
	wantSize := bucketSize
	if !lastInBucketIsResponding && !newNodeIsResponding {
		// Both candidates are dead: the freed slot stays empty.
		wantSize--
	}
	if l := len(tab.bucket(pingSender.ID()).entries); l != wantSize {
		t.Errorf("wrong bucket size after bond: got %d, want %d", l, wantSize)
	}
	if found := contains(tab.bucket(pingSender.ID()).entries, last.ID()); found != lastInBucketIsResponding {
		t.Errorf("last entry found: %t, want: %t", found, lastInBucketIsResponding)
	}
	// The sender may only enter the bucket if it responded AND the old node did not.
	wantNewEntry := newNodeIsResponding && !lastInBucketIsResponding
	if found := contains(tab.bucket(pingSender.ID()).entries, pingSender.ID()); found != wantNewEntry {
		t.Errorf("new entry found: %t, want: %t", found, wantNewEntry)
	}
}
// TestBucket_bumpNoDuplicates property-tests bumpInBucket: no matter how many
// times randomly chosen entries are bumped, the bucket must never end up
// containing the same node twice.
func TestBucket_bumpNoDuplicates(t *testing.T) {
	t.Parallel()
	cfg := &quick.Config{
		MaxCount: 1000,
		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
		Values: func(args []reflect.Value, rand *rand.Rand) {
			// generate a random list of nodes. this will be the content of the bucket.
			n := rand.Intn(bucketSize-1) + 1 // at least one node, at most a full bucket
			nodes := make([]*node, n)
			for i := range nodes {
				nodes[i] = nodeAtDistance(enode.ID{}, 200, intIP(200))
			}
			args[0] = reflect.ValueOf(nodes)
			// generate random bump positions.
			bumps := make([]int, rand.Intn(100))
			for i := range bumps {
				bumps[i] = rand.Intn(len(nodes))
			}
			args[1] = reflect.ValueOf(bumps)
		},
	}
	// prop is the property checked by quick.Check: after every single bump the
	// bucket is still duplicate-free.
	prop := func(nodes []*node, bumps []int) (ok bool) {
		tab, db := newTestTable(newPingRecorder())
		defer db.Close()
		defer tab.close()
		b := &bucket{entries: make([]*node, len(nodes))}
		copy(b.entries, nodes)
		for i, pos := range bumps {
			tab.bumpInBucket(b, b.entries[pos])
			if hasDuplicates(b.entries) {
				t.Logf("bucket has duplicates after %d/%d bumps:", i+1, len(bumps))
				for _, n := range b.entries {
					t.Logf(" %p", n)
				}
				return false
			}
		}
		checkIPLimitInvariant(t, tab)
		return true
	}
	if err := quick.Check(prop, cfg); err != nil {
		t.Error(err)
	}
}
  126. // This checks that the table-wide IP limit is applied correctly.
  127. func TestTable_IPLimit(t *testing.T) {
  128. transport := newPingRecorder()
  129. tab, db := newTestTable(transport)
  130. defer db.Close()
  131. defer tab.close()
  132. for i := 0; i < tableIPLimit+1; i++ {
  133. n := nodeAtDistance(tab.self().ID(), i, net.IP{172, 0, 1, byte(i)})
  134. tab.addSeenNode(n)
  135. }
  136. if tab.len() > tableIPLimit {
  137. t.Errorf("too many nodes in table")
  138. }
  139. checkIPLimitInvariant(t, tab)
  140. }
  141. // This checks that the per-bucket IP limit is applied correctly.
  142. func TestTable_BucketIPLimit(t *testing.T) {
  143. transport := newPingRecorder()
  144. tab, db := newTestTable(transport)
  145. defer db.Close()
  146. defer tab.close()
  147. d := 3
  148. for i := 0; i < bucketIPLimit+1; i++ {
  149. n := nodeAtDistance(tab.self().ID(), d, net.IP{172, 0, 1, byte(i)})
  150. tab.addSeenNode(n)
  151. }
  152. if tab.len() > bucketIPLimit {
  153. t.Errorf("too many nodes in table")
  154. }
  155. checkIPLimitInvariant(t, tab)
  156. }
  157. // checkIPLimitInvariant checks that ip limit sets contain an entry for every
  158. // node in the table and no extra entries.
  159. func checkIPLimitInvariant(t *testing.T, tab *Table) {
  160. t.Helper()
  161. tabset := netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit}
  162. for _, b := range tab.buckets {
  163. for _, n := range b.entries {
  164. tabset.Add(n.IP())
  165. }
  166. }
  167. if tabset.String() != tab.ips.String() {
  168. t.Errorf("table IP set is incorrect:\nhave: %v\nwant: %v", tab.ips, tabset)
  169. }
  170. }
// TestTable_findnodeByID property-tests findnodeByID: for any randomly
// populated table, the result must be duplicate-free, sorted by distance to
// the target, contain min(N, table size) entries, and include the nodes
// closest to the target.
func TestTable_findnodeByID(t *testing.T) {
	t.Parallel()
	test := func(test *closeTest) bool {
		// for any node table, Target and N
		transport := newPingRecorder()
		tab, db := newTestTable(transport)
		defer db.Close()
		defer tab.close()
		fillTable(tab, test.All)

		// check that closest(Target, N) returns nodes
		result := tab.findnodeByID(test.Target, test.N, false).entries
		if hasDuplicates(result) {
			t.Errorf("result contains duplicates")
			return false
		}
		if !sortedByDistanceTo(test.Target, result) {
			t.Errorf("result is not sorted by distance to target")
			return false
		}

		// check that the number of results is min(N, tablen)
		wantN := test.N
		if tlen := tab.len(); tlen < test.N {
			wantN = tlen
		}
		if len(result) != wantN {
			t.Errorf("wrong number of nodes: got %d, want %d", len(result), wantN)
			return false
		} else if len(result) == 0 {
			return true // no need to check distance
		}

		// check that the result nodes have minimum distance to target.
		// Every table node that is NOT in the result must be at least as far
		// from the target as the farthest node that was returned.
		for _, b := range tab.buckets {
			for _, n := range b.entries {
				if contains(result, n.ID()) {
					continue // don't run the check below for nodes in result
				}
				farthestResult := result[len(result)-1].ID()
				if enode.DistCmp(test.Target, n.ID(), farthestResult) < 0 {
					t.Errorf("table contains node that is closer to target but it's not in result")
					t.Logf(" Target: %v", test.Target)
					t.Logf(" Farthest Result: %v", farthestResult)
					t.Logf(" ID: %v", n.ID())
					return false
				}
			}
		}
		return true
	}
	if err := quick.Check(test, quickcfg()); err != nil {
		t.Error(err)
	}
}
  223. func TestTable_ReadRandomNodesGetAll(t *testing.T) {
  224. cfg := &quick.Config{
  225. MaxCount: 200,
  226. Rand: rand.New(rand.NewSource(time.Now().Unix())),
  227. Values: func(args []reflect.Value, rand *rand.Rand) {
  228. args[0] = reflect.ValueOf(make([]*enode.Node, rand.Intn(1000)))
  229. },
  230. }
  231. test := func(buf []*enode.Node) bool {
  232. transport := newPingRecorder()
  233. tab, db := newTestTable(transport)
  234. defer db.Close()
  235. defer tab.close()
  236. <-tab.initDone
  237. for i := 0; i < len(buf); i++ {
  238. ld := cfg.Rand.Intn(len(tab.buckets))
  239. fillTable(tab, []*node{nodeAtDistance(tab.self().ID(), ld, intIP(ld))})
  240. }
  241. gotN := tab.ReadRandomNodes(buf)
  242. if gotN != tab.len() {
  243. t.Errorf("wrong number of nodes, got %d, want %d", gotN, tab.len())
  244. return false
  245. }
  246. if hasDuplicates(wrapNodes(buf[:gotN])) {
  247. t.Errorf("result contains duplicates")
  248. return false
  249. }
  250. return true
  251. }
  252. if err := quick.Check(test, cfg); err != nil {
  253. t.Error(err)
  254. }
  255. }
// closeTest is the randomized quick.Check input for TestTable_findnodeByID:
// a table population (All) plus a lookup target and requested result count.
type closeTest struct {
	Self   enode.ID // ID of the table's own node (NOTE(review): not read by the visible test code — confirm)
	Target enode.ID // lookup target
	All    []*node  // nodes inserted into the table before the lookup
	N      int      // number of results requested from findnodeByID
}
  262. func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value {
  263. t := &closeTest{
  264. Self: gen(enode.ID{}, rand).(enode.ID),
  265. Target: gen(enode.ID{}, rand).(enode.ID),
  266. N: rand.Intn(bucketSize),
  267. }
  268. for _, id := range gen([]enode.ID{}, rand).([]enode.ID) {
  269. r := new(enr.Record)
  270. r.Set(enr.IP(genIP(rand)))
  271. n := wrapNode(enode.SignNull(r, id))
  272. n.livenessChecks = 1
  273. t.All = append(t.All, n)
  274. }
  275. return reflect.ValueOf(t)
  276. }
  277. func TestTable_addVerifiedNode(t *testing.T) {
  278. tab, db := newTestTable(newPingRecorder())
  279. <-tab.initDone
  280. defer db.Close()
  281. defer tab.close()
  282. // Insert two nodes.
  283. n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1})
  284. n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2})
  285. tab.addSeenNode(n1)
  286. tab.addSeenNode(n2)
  287. // Verify bucket content:
  288. bcontent := []*node{n1, n2}
  289. if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) {
  290. t.Fatalf("wrong bucket content: %v", tab.bucket(n1.ID()).entries)
  291. }
  292. // Add a changed version of n2.
  293. newrec := n2.Record()
  294. newrec.Set(enr.IP{99, 99, 99, 99})
  295. newn2 := wrapNode(enode.SignNull(newrec, n2.ID()))
  296. tab.addVerifiedNode(newn2)
  297. // Check that bucket is updated correctly.
  298. newBcontent := []*node{newn2, n1}
  299. if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, newBcontent) {
  300. t.Fatalf("wrong bucket content after update: %v", tab.bucket(n1.ID()).entries)
  301. }
  302. checkIPLimitInvariant(t, tab)
  303. }
  304. func TestTable_addSeenNode(t *testing.T) {
  305. tab, db := newTestTable(newPingRecorder())
  306. <-tab.initDone
  307. defer db.Close()
  308. defer tab.close()
  309. // Insert two nodes.
  310. n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1})
  311. n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2})
  312. tab.addSeenNode(n1)
  313. tab.addSeenNode(n2)
  314. // Verify bucket content:
  315. bcontent := []*node{n1, n2}
  316. if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) {
  317. t.Fatalf("wrong bucket content: %v", tab.bucket(n1.ID()).entries)
  318. }
  319. // Add a changed version of n2.
  320. newrec := n2.Record()
  321. newrec.Set(enr.IP{99, 99, 99, 99})
  322. newn2 := wrapNode(enode.SignNull(newrec, n2.ID()))
  323. tab.addSeenNode(newn2)
  324. // Check that bucket content is unchanged.
  325. if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) {
  326. t.Fatalf("wrong bucket content after update: %v", tab.bucket(n1.ID()).entries)
  327. }
  328. checkIPLimitInvariant(t, tab)
  329. }
  330. // This test checks that ENR updates happen during revalidation. If a node in the table
  331. // announces a new sequence number, the new record should be pulled.
  332. func TestTable_revalidateSyncRecord(t *testing.T) {
  333. transport := newPingRecorder()
  334. tab, db := newTestTable(transport)
  335. <-tab.initDone
  336. defer db.Close()
  337. defer tab.close()
  338. // Insert a node.
  339. var r enr.Record
  340. r.Set(enr.IP(net.IP{127, 0, 0, 1}))
  341. id := enode.ID{1}
  342. n1 := wrapNode(enode.SignNull(&r, id))
  343. tab.addSeenNode(n1)
  344. // Update the node record.
  345. r.Set(enr.WithEntry("foo", "bar"))
  346. n2 := enode.SignNull(&r, id)
  347. transport.updateRecord(n2)
  348. tab.doRevalidate(make(chan struct{}, 1))
  349. intable := tab.getNode(id)
  350. if !reflect.DeepEqual(intable, n2) {
  351. t.Fatalf("table contains old record with seq %d, want seq %d", intable.Seq(), n2.Seq())
  352. }
  353. }
  354. // gen wraps quick.Value so it's easier to use.
  355. // it generates a random value of the given value's type.
  356. func gen(typ interface{}, rand *rand.Rand) interface{} {
  357. v, ok := quick.Value(reflect.TypeOf(typ), rand)
  358. if !ok {
  359. panic(fmt.Sprintf("couldn't generate random value of type %T", typ))
  360. }
  361. return v.Interface()
  362. }
  363. func genIP(rand *rand.Rand) net.IP {
  364. ip := make(net.IP, 4)
  365. rand.Read(ip)
  366. return ip
  367. }
  368. func quickcfg() *quick.Config {
  369. return &quick.Config{
  370. MaxCount: 5000,
  371. Rand: rand.New(rand.NewSource(time.Now().Unix())),
  372. }
  373. }
  374. func newkey() *ecdsa.PrivateKey {
  375. key, err := crypto.GenerateKey()
  376. if err != nil {
  377. panic("couldn't generate key: " + err.Error())
  378. }
  379. return key
  380. }