clientpool_test.go 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607
  1. // Copyright 2019 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package server
  17. import (
  18. "fmt"
  19. "math/rand"
  20. "testing"
  21. "time"
  22. "github.com/ethereum/go-ethereum/common/mclock"
  23. "github.com/ethereum/go-ethereum/core/rawdb"
  24. "github.com/ethereum/go-ethereum/p2p/enode"
  25. "github.com/ethereum/go-ethereum/p2p/enr"
  26. "github.com/ethereum/go-ethereum/p2p/nodestate"
  27. )
// defaultConnectedBias is the connection bias used throughout these tests;
// it is passed to NewClientPool and to SetCapacity calls.
const defaultConnectedBias = time.Minute * 3
// TestClientPoolL10C100Free: 10 active slots, 100 clients, no paid clients, random disconnects.
func TestClientPoolL10C100Free(t *testing.T) {
	testClientPool(t, 10, 100, 0, true)
}
// TestClientPoolL40C200Free: 40 active slots, 200 clients, no paid clients, random disconnects.
func TestClientPoolL40C200Free(t *testing.T) {
	testClientPool(t, 40, 200, 0, true)
}
// TestClientPoolL100C300Free: 100 active slots, 300 clients, no paid clients, random disconnects.
func TestClientPoolL100C300Free(t *testing.T) {
	testClientPool(t, 100, 300, 0, true)
}
// TestClientPoolL10C100P4: 10 active slots, 100 clients, 4 paid clients, no random disconnects.
func TestClientPoolL10C100P4(t *testing.T) {
	testClientPool(t, 10, 100, 4, false)
}
// TestClientPoolL40C200P30: 40 active slots, 200 clients, 30 paid clients, no random disconnects.
func TestClientPoolL40C200P30(t *testing.T) {
	testClientPool(t, 40, 200, 30, false)
}
// TestClientPoolL100C300P20: 100 active slots, 300 clients, 20 paid clients, no random disconnects.
func TestClientPoolL100C300P20(t *testing.T) {
	testClientPool(t, 100, 300, 20, false)
}
// testClientPoolTicks is the number of simulated one-second ticks run by testClientPool.
const testClientPoolTicks = 100000
// poolTestPeer is a minimal test peer implementation used to drive the client pool.
type poolTestPeer struct {
	node            *enode.Node
	index           int      // test index; also encoded into the first two bytes of the node ID
	disconnCh       chan int // receives the peer's index when Disconnect is called; may be nil
	cap             uint64   // last capacity reported through UpdateCapacity
	inactiveAllowed bool     // when set, InactiveAllowance grants a 10s inactive grace period
}
  55. func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
  56. return &poolTestPeer{
  57. index: i,
  58. disconnCh: disconnCh,
  59. node: enode.SignNull(&enr.Record{}, enode.ID{byte(i % 256), byte(i >> 8)}),
  60. }
  61. }
// Node returns the peer's enode.
func (i *poolTestPeer) Node() *enode.Node {
	return i.node
}
// FreeClientId returns a unique free-client identifier derived from the test index.
func (i *poolTestPeer) FreeClientId() string {
	return fmt.Sprintf("addr #%d", i.index)
}
  68. func (i *poolTestPeer) InactiveAllowance() time.Duration {
  69. if i.inactiveAllowed {
  70. return time.Second * 10
  71. }
  72. return 0
  73. }
// UpdateCapacity records the capacity assigned by the pool.
// The requested flag is ignored by the test peer.
func (i *poolTestPeer) UpdateCapacity(capacity uint64, requested bool) {
	i.cap = capacity
}
  77. func (i *poolTestPeer) Disconnect() {
  78. if i.disconnCh == nil {
  79. return
  80. }
  81. id := i.node.ID()
  82. i.disconnCh <- int(id[0]) + int(id[1])<<8
  83. }
// getBalance atomically reads the positive and negative balance of the given
// peer via the pool's balance operation.
func getBalance(pool *ClientPool, p *poolTestPeer) (pos, neg uint64) {
	pool.BalanceOperation(p.node.ID(), p.FreeClientId(), func(nb AtomicBalanceOperator) {
		pos, neg = nb.GetBalance()
	})
	return
}
// addBalance atomically adds amount (which may be negative) to the positive
// balance of the node with the given ID.
func addBalance(pool *ClientPool, id enode.ID, amount int64) {
	pool.BalanceOperation(id, "", func(nb AtomicBalanceOperator) {
		nb.AddBalance(amount)
	})
}
  95. func checkDiff(a, b uint64) bool {
  96. maxDiff := (a + b) / 2000
  97. if maxDiff < 1 {
  98. maxDiff = 1
  99. }
  100. return a > b+maxDiff || b > a+maxDiff
  101. }
// connect registers the peer with the pool and returns the capacity it was
// granted; the tests treat a zero return as rejection.
func connect(pool *ClientPool, peer *poolTestPeer) uint64 {
	pool.Register(peer)
	return peer.cap
}
// disconnect removes the peer from the pool.
func disconnect(pool *ClientPool, peer *poolTestPeer) {
	pool.Unregister(peer)
}
// alwaysTrueFn is passed to NewClientPool wherever a boolean predicate is
// required; it always reports true.
func alwaysTrueFn() bool {
	return true
}
// testClientPool runs a long randomized simulation against a ClientPool with
// activeLimit active slots shared by clientCount peers. A quarter of the way
// in, the first paidCount peers receive a positive balance large enough to
// last the rest of the run. At the end it checks that each peer's total
// connected time falls within the statistically expected range.
func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
	rand.Seed(time.Now().UnixNano())
	var (
		clock     mclock.Simulated
		db        = rawdb.NewMemoryDatabase()
		connected = make([]bool, clientCount) // current connection state per peer
		connTicks = make([]int, clientCount)  // accumulated connected ticks per peer
		disconnCh = make(chan int, clientCount)
		pool      = NewClientPool(db, 1, 0, &clock, alwaysTrueFn)
	)
	pool.Start()
	pool.SetExpirationTCs(0, 1000)
	pool.SetLimits(uint64(activeLimit), uint64(activeLimit))
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	// pool should accept new peers up to its connected limit
	for i := 0; i < activeLimit; i++ {
		if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 {
			connected[i] = true
		} else {
			t.Fatalf("Test peer #%d rejected", i)
		}
	}
	// randomly connect and disconnect peers, expect to have a similar total connection time at the end
	for tickCounter := 0; tickCounter < testClientPoolTicks; tickCounter++ {
		clock.Run(1 * time.Second)
		if tickCounter == testClientPoolTicks/4 {
			// give a positive balance to some of the peers
			amount := testClientPoolTicks / 2 * int64(time.Second) // enough for half of the simulation period
			for i := 0; i < paidCount; i++ {
				addBalance(pool, newPoolTestPeer(i, disconnCh).node.ID(), amount)
			}
		}
		i := rand.Intn(clientCount)
		if connected[i] {
			if randomDisconnect {
				disconnect(pool, newPoolTestPeer(i, disconnCh))
				connected[i] = false
				connTicks[i] += tickCounter
			}
		} else {
			if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 {
				connected[i] = true
				// subtract the connect tick now; adding the disconnect tick
				// later leaves the connected duration in connTicks[i]
				connTicks[i] -= tickCounter
			} else {
				disconnect(pool, newPoolTestPeer(i, disconnCh))
			}
		}
		// drain disconnections initiated by the pool itself
	pollDisconnects:
		for {
			select {
			case i := <-disconnCh:
				disconnect(pool, newPoolTestPeer(i, disconnCh))
				if connected[i] {
					connTicks[i] += tickCounter
					connected[i] = false
				}
			default:
				break pollDisconnects
			}
		}
	}
	// expected ticks for a free client: a fair share of all slots in the first
	// half, then a share of the slots not held by paid clients in the second
	expTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2*(activeLimit-paidCount)/(clientCount-paidCount)
	expMin := expTicks - expTicks/5
	expMax := expTicks + expTicks/5
	// paid clients are expected to stay connected for the entire second half
	paidTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2
	paidMin := paidTicks - paidTicks/5
	paidMax := paidTicks + paidTicks/5
	// check if the total connected time of peers are all in the expected range
	for i, c := range connected {
		if c {
			connTicks[i] += testClientPoolTicks
		}
		min, max := expMin, expMax
		if i < paidCount {
			// expect a higher amount for clients with a positive balance
			min, max = paidMin, paidMax
		}
		if connTicks[i] < min || connTicks[i] > max {
			t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max)
		}
	}
	pool.Stop()
}
  195. func testPriorityConnect(t *testing.T, pool *ClientPool, p *poolTestPeer, cap uint64, expSuccess bool) {
  196. if cap := connect(pool, p); cap == 0 {
  197. if expSuccess {
  198. t.Fatalf("Failed to connect paid client")
  199. } else {
  200. return
  201. }
  202. }
  203. if newCap, _ := pool.SetCapacity(p.node, cap, defaultConnectedBias, true); newCap != cap {
  204. if expSuccess {
  205. t.Fatalf("Failed to raise capacity of paid client")
  206. } else {
  207. return
  208. }
  209. }
  210. if !expSuccess {
  211. t.Fatalf("Should reject high capacity paid client")
  212. }
  213. }
  214. func TestConnectPaidClient(t *testing.T) {
  215. var (
  216. clock mclock.Simulated
  217. db = rawdb.NewMemoryDatabase()
  218. )
  219. pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
  220. pool.Start()
  221. defer pool.Stop()
  222. pool.SetLimits(10, uint64(10))
  223. pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  224. // Add balance for an external client and mark it as paid client
  225. addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
  226. testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)
  227. }
  228. func TestConnectPaidClientToSmallPool(t *testing.T) {
  229. var (
  230. clock mclock.Simulated
  231. db = rawdb.NewMemoryDatabase()
  232. )
  233. pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
  234. pool.Start()
  235. defer pool.Stop()
  236. pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
  237. pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  238. // Add balance for an external client and mark it as paid client
  239. addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
  240. // connect a fat paid client to pool, should reject it.
  241. testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false)
  242. }
// TestConnectPaidClientToFullPool checks that when the pool is full of paid
// clients, a newcomer is only accepted if its balance is high enough to
// outbid an existing client.
func TestConnectPaidClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	// fill all ten slots with paid clients holding a 20s balance each
	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))
		connect(pool, newPoolTestPeer(i, nil))
	}
	addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client
	if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("Low balance paid client should be rejected")
	}
	clock.Run(time.Second)
	addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client
	if cap := connect(pool, newPoolTestPeer(12, nil)); cap == 0 {
		t.Fatalf("High balance paid client should be accepted")
	}
}
// TestPaidClientKickedOut checks that once the paid clients' balances and the
// connection bias are exhausted, a new free client displaces the oldest
// connected peer (index 0).
func TestPaidClientKickedOut(t *testing.T) {
	var (
		clock    mclock.Simulated
		db       = rawdb.NewMemoryDatabase()
		kickedCh = make(chan int, 100)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	pool.SetExpirationTCs(0, 0)
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance
		connect(pool, newPoolTestPeer(i, kickedCh))
		clock.Run(time.Millisecond) // stagger connect times so peer #0 is the oldest
	}
	// run past the bias plus every peer's allowance so all balances are spent
	clock.Run(defaultConnectedBias + time.Second*11)
	if cap := connect(pool, newPoolTestPeer(11, kickedCh)); cap == 0 {
		t.Fatalf("Free client should be accepted")
	}
	clock.Run(0) // let pending simulated-clock events fire
	select {
	case id := <-kickedCh:
		if id != 0 {
			t.Fatalf("Kicked client mismatch, want %v, got %v", 0, id)
		}
	default:
		t.Fatalf("timeout")
	}
}
  298. func TestConnectFreeClient(t *testing.T) {
  299. var (
  300. clock mclock.Simulated
  301. db = rawdb.NewMemoryDatabase()
  302. )
  303. pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
  304. pool.Start()
  305. defer pool.Stop()
  306. pool.SetLimits(10, uint64(10))
  307. pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  308. if cap := connect(pool, newPoolTestPeer(0, nil)); cap == 0 {
  309. t.Fatalf("Failed to connect free client")
  310. }
  311. testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false)
  312. }
// TestConnectFreeClientToFullPool checks that a full pool of free clients
// rejects newcomers until the existing clients have been connected long
// enough (more than 5 minutes here) to be displaced.
func TestConnectFreeClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, nil))
	}
	if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(time.Minute)
	// one minute of connection time is still not enough to displace anyone
	if cap := connect(pool, newPoolTestPeer(12, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(time.Millisecond)
	clock.Run(4 * time.Minute)
	// after more than five minutes the oldest client can be replaced
	if cap := connect(pool, newPoolTestPeer(13, nil)); cap == 0 {
		t.Fatalf("Old client connects more than 5min should be kicked")
	}
}
// TestFreeClientKickedOut checks that when new free clients arrive at a full
// pool, the clients kicked out are always the older ones (index < 10).
func TestFreeClientKickedOut(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 100)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, kicked))
		clock.Run(time.Millisecond) // stagger connect times
	}
	// pool is full; a brand-new free client must be rejected
	if cap := connect(pool, newPoolTestPeer(10, kicked)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(0) // flush pending simulated-clock events
	select {
	case <-kicked:
	default:
		t.Fatalf("timeout")
	}
	disconnect(pool, newPoolTestPeer(10, kicked))
	clock.Run(5 * time.Minute) // age the original ten clients past the kick threshold
	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i+10, kicked))
	}
	clock.Run(0)
	// each kicked peer must come from the original (old) set
	for i := 0; i < 10; i++ {
		select {
		case id := <-kicked:
			if id >= 10 {
				t.Fatalf("Old client should be kicked, now got: %d", id)
			}
		default:
			t.Fatalf("timeout")
		}
	}
}
// TestPositiveBalanceCalculation checks that one minute of priority connection
// time is charged against a three-minute positive balance, leaving roughly
// two minutes after disconnect.
func TestPositiveBalanceCalculation(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 10)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))
	testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)
	clock.Run(time.Minute)
	disconnect(pool, newPoolTestPeer(0, kicked))
	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
	if checkDiff(pb, uint64(time.Minute*2)) {
		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb)
	}
}
  400. func TestDowngradePriorityClient(t *testing.T) {
  401. var (
  402. clock mclock.Simulated
  403. db = rawdb.NewMemoryDatabase()
  404. kicked = make(chan int, 10)
  405. )
  406. pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
  407. pool.Start()
  408. defer pool.Stop()
  409. pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
  410. pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  411. p := newPoolTestPeer(0, kicked)
  412. addBalance(pool, p.node.ID(), int64(time.Minute))
  413. testPriorityConnect(t, pool, p, 10, true)
  414. if p.cap != 10 {
  415. t.Fatalf("The capacity of priority peer hasn't been updated, got: %d", p.cap)
  416. }
  417. clock.Run(time.Minute) // All positive balance should be used up.
  418. time.Sleep(300 * time.Millisecond) // Ensure the callback is called
  419. if p.cap != 1 {
  420. t.Fatalf("The capcacity of peer should be downgraded, got: %d", p.cap)
  421. }
  422. pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
  423. if pb != 0 {
  424. t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb)
  425. }
  426. addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute))
  427. pb, _ = getBalance(pool, newPoolTestPeer(0, kicked))
  428. if checkDiff(pb, uint64(time.Minute)) {
  429. t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb)
  430. }
  431. }
// TestNegativeBalanceCalculation checks that free clients accrue negative
// balance proportional to connection time (scaled by TimeFactor), that very
// short connections are not recorded, and that the accrued balance is
// corrected for the configured 3600s expiration time constant.
func TestNegativeBalanceCalculation(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetExpirationTCs(0, 3600) // negative balances expire with a 3600s time constant
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})
	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, nil))
	}
	clock.Run(time.Second)
	for i := 0; i < 10; i++ {
		disconnect(pool, newPoolTestPeer(i, nil))
		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
		if nb != 0 {
			t.Fatalf("Short connection shouldn't be recorded")
		}
	}
	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, nil))
	}
	clock.Run(time.Minute)
	for i := 0; i < 10; i++ {
		disconnect(pool, newPoolTestPeer(i, nil))
		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
		// one minute at TimeFactor 1e-3 yields a nominal charge of time.Minute/1000
		exp := uint64(time.Minute) / 1000
		exp -= exp / 120 // correct for negative balance expiration
		if checkDiff(nb, exp) {
			t.Fatalf("Negative balance mismatch, want %v, got %v", exp, nb)
		}
	}
}
// TestInactiveClient exercises activation/deactivation ordering in a pool with
// only two active slots: peers with higher positive balance displace lower
// ones, deactivated peers stay registered (inactiveAllowed grants a grace
// period), and balance changes re-trigger the priority comparison.
func TestInactiveClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(2, uint64(2)) // only two active slots
	p1 := newPoolTestPeer(1, nil)
	p1.inactiveAllowed = true
	p2 := newPoolTestPeer(2, nil)
	p2.inactiveAllowed = true
	p3 := newPoolTestPeer(3, nil)
	p3.inactiveAllowed = true
	addBalance(pool, p1.node.ID(), 1000*int64(time.Second))
	addBalance(pool, p3.node.ID(), 2000*int64(time.Second))
	// p1: 1000  p2: 0  p3: 2000
	p1.cap = connect(pool, p1)
	if p1.cap != 1 {
		t.Fatalf("Failed to connect peer #1")
	}
	p2.cap = connect(pool, p2)
	if p2.cap != 1 {
		t.Fatalf("Failed to connect peer #2")
	}
	// p3 outranks p2, the lowest-balance active peer, and takes its slot
	p3.cap = connect(pool, p3)
	if p3.cap != 1 {
		t.Fatalf("Failed to connect peer #3")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	addBalance(pool, p2.node.ID(), 3000*int64(time.Second))
	// p1: 1000  p2: 3000  p3: 2000 -> p2 now outranks p1
	if p2.cap != 1 {
		t.Fatalf("Failed to activate peer #2")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	addBalance(pool, p2.node.ID(), -2500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000 -> p1 outranks p2 again
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	// from here on, active peers burn positive balance over time (TimeFactor 1)
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
	p4 := newPoolTestPeer(4, nil)
	addBalance(pool, p4.node.ID(), 1500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000  p4: 1500
	p4.cap = connect(pool, p4)
	if p4.cap != 1 {
		t.Fatalf("Failed to activate peer #4")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	clock.Run(time.Second * 600)
	// manually trigger a check to avoid a long real-time wait
	pool.ns.SetState(p1.node, pool.setup.updateFlag, nodestate.Flags{}, 0)
	pool.ns.SetState(p1.node, nodestate.Flags{}, pool.setup.updateFlag, 0)
	// p1: 1000 p2: 500 p3: 2000 p4: 900 -- p4 drained 600s of balance while
	// active, so p1 outranks it again
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p4.cap != 0 {
		t.Fatalf("Failed to deactivate peer #4")
	}
	disconnect(pool, p2)
	disconnect(pool, p4)
	addBalance(pool, p1.node.ID(), -1000*int64(time.Second))
	if p1.cap != 1 {
		t.Fatalf("Should not deactivate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Should not activate peer #2")
	}
}