Refactor predecessors and successors to hold RemoteNodeStates
- neighbour nodes need to be contacted reliably
- holding only NodeIDs requires a cache lookup to obtain hostname and port; this is brittle, as the entry could already have been purged from the cache
- refactored the ASN.1 scheme, the types, and the add/sort/remove implementations

closes #46
parent 67cba1b69b
commit f15d83baff

6 changed files with 61 additions and 57 deletions
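The gist of the refactoring described in the commit message, as a minimal sketch: only NodeID, nid, setPredecessors and the other identifiers visible in the diff below come from the project; all remaining type and field names here are assumptions for illustration, not the actual Hash2Pub definitions.

-- Minimal sketch of the data-shape change; not the project's actual
-- definitions. Hostname/port field names are assumed for illustration.
import Data.Word (Word16)

newtype NodeID = NodeID Integer
    deriving (Eq, Ord, Show)

-- A neighbour as stored after the refactoring: everything needed to
-- contact it travels with the entry, so no cache lookup is required.
data RemoteNodeState = RemoteNodeState
    { rNid    :: NodeID  -- position on the ring
    , rDomain :: String  -- hostname to contact (assumed field name)
    , rPort   :: Word16  -- port to contact (assumed field name)
    } deriving (Eq, Show)

-- Before: neighbours were bare IDs, and hostname/port had to be looked up
-- in the node cache, whose entry may already have been purged.
data LocalNodeBefore = LocalNodeBefore
    { predecessorIDs :: [NodeID]
    , successorIDs   :: [NodeID]
    }

-- After: predecessors and successors hold full RemoteNodeStates.
data LocalNodeAfter = LocalNodeAfter
    { predecessors :: [RemoteNodeState]
    , successors   :: [RemoteNodeState]
    }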
@@ -119,7 +119,7 @@ spec = do
         let
           emptyCache = initCache
           nid1 = toNodeID 2^(23::Integer)+1
-          node1 = setPredecessors [nid4] . setNid nid1 <$> exampleLocalNode
+          node1 = setPredecessors [node4] . setNid nid1 <$> exampleLocalNode
           nid2 = toNodeID 2^(230::Integer)+12
           node2 = exampleNodeState { nid = nid2}
           nid3 = toNodeID 2^(25::Integer)+10
@@ -152,15 +152,15 @@ spec = do
     describe "Messages can be encoded to and decoded from ASN.1" $ do
         -- define test messages
         let
-            someNodeIDs = fmap fromInteger [3..12]
+            someNodes = fmap (flip setNid exampleNodeState . fromInteger) [3..12]
             qidReqPayload = QueryIDRequestPayload {
                 queryTargetID = nid exampleNodeState
               , queryLBestNodes = 3
               }
             jReqPayload = JoinRequestPayload
             lReqPayload = LeaveRequestPayload {
-                leaveSuccessors = someNodeIDs
-              , leavePredecessors = someNodeIDs
+                leaveSuccessors = someNodes
+              , leavePredecessors = someNodes
               }
             stabReqPayload = StabiliseRequestPayload
             pingReqPayload = PingRequestPayload
@@ -174,8 +174,8 @@ spec = do
                 ]
               }
             jResPayload = JoinResponsePayload {
-                joinSuccessors = someNodeIDs
-              , joinPredecessors = someNodeIDs
+                joinSuccessors = someNodes
+              , joinPredecessors = someNodes
              , joinCache = [
                  RemoteCacheEntry exampleNodeState (toEnum 23420001)
                , RemoteCacheEntry (exampleNodeState {nid = fromInteger (-5)}) (toEnum 0)
@@ -183,7 +183,7 @@ spec = do
               }
             lResPayload = LeaveResponsePayload
             stabResPayload = StabiliseResponsePayload {
-                stabiliseSuccessors = someNodeIDs
+                stabiliseSuccessors = someNodes
              , stabilisePredecessors = []
               }
             pingResPayload = PingResponsePayload {
@@ -213,8 +213,8 @@ spec = do

             encodeDecodeAndCheck msg = runParseASN1 parseMessage (encodeMessage msg) `shouldBe` pure msg
             largeMessage = responseWith Join $ JoinResponsePayload {
-                joinSuccessors = fromInteger <$> [-20..150]
-              , joinPredecessors = fromInteger <$> [5..11]
+                joinSuccessors = flip setNid exampleNodeState . fromInteger <$> [-20..150]
+              , joinPredecessors = flip setNid exampleNodeState . fromInteger <$> [5..11]
              , joinCache = [ RemoteCacheEntry (exampleNodeState {nid = node}) 290001 | node <- [50602,506011..60000]]
              }

@@ -235,7 +235,7 @@ spec = do
         it "messages too large for a single packet can (often) be split into multiple parts" $ do
             -- TODO: once splitting works more efficient, test for exact number or payload, see #18
             length (serialiseMessage 600 largeMessage) > 1 `shouldBe` True
             length (serialiseMessage 6000 largeMessage) `shouldBe` 1
             length (serialiseMessage 60000 largeMessage) `shouldBe` 1
         it "message part numbering starts at the submitted part number" $ do
             isJust (Map.lookup 1 (serialiseMessage 600 largeMessage)) `shouldBe` True
             let startAt5 = serialiseMessage 600 (largeMessage {part = 5})
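The serialisation tests above all lean on a plain encode-then-parse round trip (the encodeDecodeAndCheck helper in the hunk at -213). Below is a self-contained illustration of that pattern with no dependence on the project's ASN.1 types; the roundTrips helper and the show/read example are purely illustrative, only the hspec API is assumed.

-- Illustration of the round-trip test pattern used above; only hspec is
-- assumed, none of the project's ASN.1 encoder/parser functions.
import Test.Hspec

-- Decoding an encoded value must give the original value back, which is
-- the same property encodeDecodeAndCheck asserts for encodeMessage/parseMessage.
roundTrips :: (Eq a, Show a) => (a -> b) -> (b -> Maybe a) -> a -> Expectation
roundTrips encode decode x = decode (encode x) `shouldBe` Just x

main :: IO ()
main = hspec $
    it "show/read round-trips for Int" $
        roundTrips show (Just . read) (42 :: Int)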