#!/usr/bin/env python3
import os
import shutil
import signal
import time
from TestHarness import Cluster, TestHelper, Utils, WalletMgr
from TestHarness.Node import BlockType

###############################################################
# nodeos_late_block_test
#
# Set up a cluster of 4 producer nodes so that 3 can reach consensus.
# Node_00 - defproducera,b,c
# Node_01 - defproducerd,e,f
# Node_02 - defproducerg,h,i
# Node_04 - bridge between 2 & 3
# Node_03 - defproducerj,k,l
#
# While Node_02 is producing, shut down Node_04 and bring it back up when Node_03 is producing.
# Verify that Node_03 realizes it should switch over to the fork the other nodes have chosen.
###############################################################
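# The bridge topology described above is defined in tests/nodeos_late_block_test_shape.json,
# which is passed to cluster.launch() below.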

Print=Utils.Print
errorExit=Utils.errorExit

args=TestHelper.parse_args({"-d","--keep-logs","--dump-error-details","-v","--leave-running","--unshared"})
pnodes=4
total_nodes=pnodes + 1
delay=args.d
debug=args.v
dumpErrorDetails=args.dump_error_details

Utils.Debug=debug
testSuccessful=False

cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs)
walletMgr=WalletMgr(True, keepRunning=args.leave_running, keepLogs=args.keep_logs)

try:
    TestHelper.printSystemInfo("BEGIN")
    cluster.setWalletMgr(walletMgr)

    Print(f'producing nodes: {pnodes}, delay between nodes launch: {delay} second{"s" if delay != 1 else ""}')

    Print("Stand up cluster")
    # do not allow pause production to interfere with late block test
    extraNodeosArgs=" --production-pause-vote-timeout-ms 0 "

    if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, extraNodeosArgs=extraNodeosArgs,
                      topo="./tests/nodeos_late_block_test_shape.json", delay=delay, loadSystemContract=False,
                      activateIF=True, signatureProviderForNonProducer=True) is False:
        errorExit("Failed to stand up eos cluster.")

    assert cluster.biosNode.getInfo(exitOnError=True)["head_block_producer"] != "eosio", "launch should have waited for production to change"
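    # The bios node is only needed for bootstrapping; shut it down and verify the
    # remaining producer nodes keep advancing the chain on their own.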
    cluster.biosNode.kill(signal.SIGTERM)
    cluster.waitOnClusterSync(blockAdvancing=5)

    node3 = cluster.getNode(3)
    node4 = cluster.getNode(4) # bridge between 2 & 3

    Print("Wait for producer before j")
    node3.waitForAnyProducer("defproducerh", exitOnError=True)
    node3.waitForAnyProducer("defproduceri", exitOnError=True)
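    # Record the head block number while defproduceri is producing; it is used later to
    # verify that defproduceri's blocks remained in the canonical chain after the fork switch.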
    iProdBlockNum = node3.getHeadBlockNum()

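    # Killing the bridge node partitions Node_03 from the other producers, so it will
    # build its own branch until the bridge is relaunched.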
    node4.kill(signal.SIGTERM)
    assert not node4.verifyAlive(), "Node4 did not shutdown"

    Print("Wait until Node_03 starts to produce its second round")
    node3.waitForProducer("defproducerk", exitOnError=True)

| 72 | + Print("Relaunch bridge to connection Node_02 and Node_03") |
| 73 | + node4.relaunch() |

    Print("Verify Node_03 fork switches even though it is producing")
    node3.waitForProducer("defproduceri", exitOnError=True)
    Print("Verify fork switch")
    assert node3.findInLog("switching forks .* defproducerk"), "Expected to find 'switching forks' in node_03 log"

| 80 | + Print("Wait until Node_00 to produce") |
| 81 | + node3.waitForProducer("defproducera") |

    # verify the LIB blocks of defproduceri made it into the canonical chain
    # defproducerk has produced at least one block, but possibly more by the time of relaunch, so verify only some of the round
    for i in range(9):
        defprod=node3.getBlockProducerByNum(iProdBlockNum + i)
        assert defprod == "defproduceri", f"expected defproduceri for block {iProdBlockNum + i}, instead: {defprod}"

    # verify that defproducerk blocks made it into the canonical chain as well
    # It can take a while to resolve the fork, but should have at least one defproducerk block unless defproducerl
    # wins the fork in which case there will be another fork switch
    expectedProd = "defproducerk"
    if node3.findInLog("switching forks .* defproducerl"):
        expectedProd = "defproducera"
        iProdBlockNum += 12 # into the next set of blocks
    found_defproducer = False
    for i in range(12):
        defprod=node3.getBlockProducerByNum(iProdBlockNum + i)
        if defprod == expectedProd:
            found_defproducer = True

    assert found_defproducer, f"expected {expectedProd} in blocks {iProdBlockNum}-{iProdBlockNum+11}"

    testSuccessful=True
finally:
    TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails)

exitCode = 0 if testSuccessful else 1
exit(exitCode)