diff --git a/FediChord.asn1 b/FediChord.asn1
index f278f8f..41f9650 100644
--- a/FediChord.asn1
+++ b/FediChord.asn1
@@ -62,8 +62,8 @@ NodeCache ::= SEQUENCE OF CacheEntry
 JoinRequestPayload ::= NULL
 
 JoinResponsePayload ::= SEQUENCE {
-    successors      SEQUENCE OF NodeState,
-    predecessors    SEQUENCE OF NodeState,
+    successors      SEQUENCE OF NodeID,
+    predecessors    SEQUENCE OF NodeID,
     cache           NodeCache
     }
 
@@ -82,14 +82,14 @@ QueryIDResponsePayload ::= SEQUENCE {
 StabiliseRequestPayload ::= NULL
 
 StabiliseResponsePayload ::= SEQUENCE {
-    successors      SEQUENCE OF NodeState,
-    predecessors    SEQUENCE OF NodeState
+    successors      SEQUENCE OF NodeID,
+    predecessors    SEQUENCE OF NodeID
     -- ToDo: transfer of handled key data, if newly responsible for it
     }
 
 LeaveRequestPayload ::= SEQUENCE {
-    successors      SEQUENCE OF NodeState,
-    predecessors    SEQUENCE OF NodeState
+    successors      SEQUENCE OF NodeID,
+    predecessors    SEQUENCE OF NodeID
     -- ToDo: transfer of own data to newly responsible node
     }
diff --git a/src/Hash2Pub/ASN1Coding.hs b/src/Hash2Pub/ASN1Coding.hs
index 456dac6..d476809 100644
--- a/src/Hash2Pub/ASN1Coding.hs
+++ b/src/Hash2Pub/ASN1Coding.hs
@@ -130,20 +130,20 @@ encodePayload LeaveResponsePayload = [Null]
 encodePayload payload'@LeaveRequestPayload{} =
     Start Sequence
     : Start Sequence
-    : concatMap encodeNodeState (leaveSuccessors payload')
+    : fmap (IntVal . getNodeID) (leaveSuccessors payload')
     <> [End Sequence
     , Start Sequence]
-    <> concatMap encodeNodeState (leavePredecessors payload')
+    <> fmap (IntVal . getNodeID) (leavePredecessors payload')
     <> [End Sequence
     , End Sequence]
 -- currently StabiliseResponsePayload and LeaveRequestPayload are equal
 encodePayload payload'@StabiliseResponsePayload{} =
     Start Sequence
     : Start Sequence
-    : concatMap encodeNodeState (stabiliseSuccessors payload')
+    : fmap (IntVal . getNodeID) (stabiliseSuccessors payload')
     <> [End Sequence
     , Start Sequence]
-    <> concatMap encodeNodeState (stabilisePredecessors payload')
+    <> fmap (IntVal . getNodeID) (stabilisePredecessors payload')
     <> [End Sequence
     , End Sequence]
 encodePayload payload'@StabiliseRequestPayload = [Null]
@@ -170,10 +170,10 @@ encodePayload payload'@QueryIDRequestPayload{} = [
 encodePayload payload'@JoinResponsePayload{} =
     Start Sequence
     : Start Sequence
-    : concatMap encodeNodeState (joinSuccessors payload')
+    : fmap (IntVal . getNodeID) (joinSuccessors payload')
     <> [End Sequence
     , Start Sequence]
-    <> concatMap encodeNodeState (joinPredecessors payload')
+    <> fmap (IntVal . getNodeID) (joinPredecessors payload')
     <> [End Sequence
     , Start Sequence]
     <> concatMap encodeCacheEntry (joinCache payload')
@@ -368,8 +368,8 @@ parseJoinRequest = do
 parseJoinResponse :: ParseASN1 ActionPayload
 parseJoinResponse = onNextContainer Sequence $ do
-    succ' <- onNextContainer Sequence (getMany parseNodeState)
-    pred' <- onNextContainer Sequence (getMany parseNodeState)
+    succ' <- fmap fromInteger <$> onNextContainer Sequence (getMany parseInteger)
+    pred' <- fmap fromInteger <$> onNextContainer Sequence (getMany parseInteger)
     cache <- parseNodeCache
     pure $ JoinResponsePayload {
         joinSuccessors = succ'
@@ -404,8 +404,8 @@ parseStabiliseRequest = do
 parseStabiliseResponse :: ParseASN1 ActionPayload
 parseStabiliseResponse = onNextContainer Sequence $ do
-    succ' <- onNextContainer Sequence (getMany parseNodeState)
-    pred' <- onNextContainer Sequence (getMany parseNodeState)
+    succ' <- fmap fromInteger <$> onNextContainer Sequence (getMany parseInteger)
+    pred' <- fmap fromInteger <$> onNextContainer Sequence (getMany parseInteger)
     pure $ StabiliseResponsePayload {
         stabiliseSuccessors = succ'
       , stabilisePredecessors = pred'
@@ -413,8 +413,8 @@
 parseLeaveRequest :: ParseASN1 ActionPayload
 parseLeaveRequest = onNextContainer Sequence $ do
-    succ' <- onNextContainer Sequence (getMany parseNodeState)
-    pred' <- onNextContainer Sequence (getMany parseNodeState)
+    succ' <- fmap fromInteger <$> onNextContainer Sequence (getMany parseInteger)
+    pred' <- fmap fromInteger <$> onNextContainer Sequence (getMany parseInteger)
     pure $ LeaveRequestPayload {
         leaveSuccessors = succ'
      , leavePredecessors = pred'
diff --git a/src/Hash2Pub/DHTProtocol.hs b/src/Hash2Pub/DHTProtocol.hs
index f1eda71..64e4602 100644
--- a/src/Hash2Pub/DHTProtocol.hs
+++ b/src/Hash2Pub/DHTProtocol.hs
@@ -73,7 +73,7 @@ import Debug.Trace (trace)
 queryLocalCache :: LocalNodeState -> NodeCache -> Int -> NodeID -> QueryResponse
 queryLocalCache ownState nCache lBestNodes targetID
     -- as target ID falls between own ID and first predecessor, it is handled by this node
-    | (targetID `localCompare` ownID) `elem` [LT, EQ] && maybe False (\p -> targetID `localCompare` p == GT) (getNid <$> headMay preds) = FOUND . toRemoteNodeState $ ownState
+    | (targetID `localCompare` ownID) `elem` [LT, EQ] && maybe False (\p -> targetID `localCompare` p == GT) (headMay preds) = FOUND . toRemoteNodeState $ ownState
     -- my interpretation: the "l best next hops" are the l-1 closest preceding nodes and
     -- the closest succeeding node (like with the p initiated parallel queries
     | otherwise = FORWARD $ closestSuccessor `Set.union` closestPredecessors
@@ -264,8 +264,8 @@ respondLeave nsSTM msgSet = do
             writeTQueue (cacheWriteQueue nsSnap) $ deleteCacheEntry senderID
             writeTVar nsSTM $
                 -- add predecessors and successors of leaving node to own lists
-                setPredecessors (filter ((/=) senderID . getNid) $ requestPreds <> predecessors nsSnap)
-                . setSuccessors (filter ((/=) senderID . getNid) $ requestSuccs <> successors nsSnap) $ nsSnap
+                setPredecessors (delete senderID $ requestPreds <> predecessors nsSnap)
+                . setSuccessors (delete senderID $ requestSuccs <> successors nsSnap) $ nsSnap
     -- TODO: handle handover of key data
     let leaveResponse = Response
           { requestID = requestID aRequestPart
@@ -337,7 +337,7 @@ respondJoin nsSTM msgSet = do
         then do
             -- if yes, adjust own predecessors/ successors and return those in a response
             let
-                newPreds = senderNS:predecessors nsSnap
+                newPreds = getNid senderNS:predecessors nsSnap
                 joinedNS = setPredecessors newPreds nsSnap
                 responsePayload = JoinResponsePayload {
                     joinSuccessors = successors joinedNS
@@ -381,28 +381,28 @@ requestJoin toJoinOn ownStateSTM =
         (cacheInsertQ, joinedState) <- atomically $ do
             stateSnap <- readTVar ownStateSTM
             let
-                (cacheInsertQ, predAccSet, succAccSet) = foldl'
-                    (\(insertQ, predAccSet', succAccSet') msg ->
+                (cacheInsertQ, joinedStateUnsorted) = foldl'
+                    (\(insertQ, nsAcc) msg ->
                         let
                             insertQ' = maybe insertQ (\msgPl ->
                                 -- collect list of insertion statements into global cache
                                 queueAddEntries (joinCache msgPl) : insertQ
                                 ) $ payload msg
-                            -- collect received predecessors and successors
-                            predAccSet'' = maybe predAccSet' (
-                                foldr' Set.insert predAccSet' . joinPredecessors
-                                ) $ payload msg
-                            succAccSet'' = maybe succAccSet' (
-                                foldr' Set.insert succAccSet' . joinSuccessors
-                                ) $ payload msg
+                            -- add received predecessors and successors
+                            addPreds ns' = maybe ns' (\msgPl ->
+                                setPredecessors (foldr' (:) (predecessors ns') (joinPredecessors msgPl)) ns'
+                                ) $ payload msg
+                            addSuccs ns' = maybe ns' (\msgPl ->
+                                setSuccessors (foldr' (:) (successors ns') (joinSuccessors msgPl)) ns'
+                                ) $ payload msg
                         in
-                            (insertQ', predAccSet'', succAccSet'')
+                            (insertQ', addSuccs . addPreds $ nsAcc)
                         )
                     -- reset predecessors and successors
-                    ([], Set.empty, Set.empty)
+                    ([], setPredecessors [] . setSuccessors [] $ ownState)
                     responses
-                -- sort, slice and set the accumulated successors and predecessors
-                newState = setSuccessors (Set.elems succAccSet) . setPredecessors (Set.elems predAccSet) $ stateSnap
+                -- sort successors and predecessors
+                newState = setSuccessors (take (kNeighbours joinedStateUnsorted) . sortBy localCompare $ successors joinedStateUnsorted) . setPredecessors (take (kNeighbours joinedStateUnsorted) . sortBy (flip localCompare) $ predecessors joinedStateUnsorted) $ joinedStateUnsorted
             writeTVar ownStateSTM newState
             pure (cacheInsertQ, newState)
         -- execute the cache insertions
diff --git a/src/Hash2Pub/FediChordTypes.hs b/src/Hash2Pub/FediChordTypes.hs
index 410cbe9..d5ea900 100644
--- a/src/Hash2Pub/FediChordTypes.hs
+++ b/src/Hash2Pub/FediChordTypes.hs
@@ -32,7 +32,6 @@ module Hash2Pub.FediChordTypes (
 ) where
 
 import Control.Exception
-import Data.Function (on)
 import Data.List (delete, nub, sortBy)
 import qualified Data.Map.Strict as Map
 import Data.Maybe (fromMaybe, isJust, mapMaybe)
@@ -124,9 +123,6 @@ data RemoteNodeState = RemoteNodeState
     }
     deriving (Show, Eq)
 
-instance Ord RemoteNodeState where
-    a `compare` b = nid a `compare` nid b
-
 -- | represents a node and encapsulates all data and parameters that are not present for remote nodes
 data LocalNodeState = LocalNodeState
     { nodeState :: RemoteNodeState
@@ -135,9 +131,9 @@ data LocalNodeState = LocalNodeState
     -- ^ EpiChord node cache with expiry times for nodes
     , cacheWriteQueue :: TQueue (NodeCache -> NodeCache)
     -- ^ cache updates are not written directly to the 'nodeCache' but queued and
-    , successors :: [RemoteNodeState] -- could be a set instead as these are ordered as well
+    , successors :: [NodeID] -- could be a set instead as these are ordered as well
     -- ^ successor nodes in ascending order by distance
-    , predecessors :: [RemoteNodeState]
+    , predecessors :: [NodeID]
     -- ^ predecessor nodes in ascending order by distance
     , kNeighbours :: Int
     -- ^ desired length of predecessor and successor list
@@ -217,12 +213,12 @@ instance Typeable a => Show (TQueue a) where
     show x = show (typeOf x)
 
 -- | convenience function that updates the successors of a 'LocalNodeState'
-setSuccessors :: [RemoteNodeState] -> LocalNodeState -> LocalNodeState
-setSuccessors succ' ns = ns {successors = take (kNeighbours ns) . nub . sortBy (localCompare `on` getNid) $ succ'}
+setSuccessors :: [NodeID] -> LocalNodeState -> LocalNodeState
+setSuccessors succ' ns = ns {successors = take (kNeighbours ns) . nub . sortBy localCompare $ succ'}
 
 -- | convenience function that updates the predecessors of a 'LocalNodeState'
-setPredecessors :: [RemoteNodeState] -> LocalNodeState -> LocalNodeState
-setPredecessors pred' ns = ns {predecessors = take (kNeighbours ns) . nub . sortBy (flip (localCompare `on` getNid)) $ pred'}
+setPredecessors :: [NodeID] -> LocalNodeState -> LocalNodeState
+setPredecessors pred' ns = ns {predecessors = take (kNeighbours ns) . nub . sortBy (flip localCompare) $ pred'}
 
 type NodeCache = Map.Map NodeID CacheEntry
diff --git a/src/Hash2Pub/ProtocolTypes.hs b/src/Hash2Pub/ProtocolTypes.hs
index afb72d2..d56a257 100644
--- a/src/Hash2Pub/ProtocolTypes.hs
+++ b/src/Hash2Pub/ProtocolTypes.hs
@@ -30,7 +30,7 @@ data FediChordMessage = Request
     , payload :: Maybe ActionPayload
     }
     | Response
-    { requestID :: Integer
+    { requestID :: Integer
     , senderID :: NodeID
     , part :: Integer
     , isFinalPart :: Bool
@@ -53,8 +53,8 @@ data ActionPayload = QueryIDRequestPayload
     }
     | JoinRequestPayload
     | LeaveRequestPayload
-    { leaveSuccessors :: [RemoteNodeState]
-    , leavePredecessors :: [RemoteNodeState]
+    { leaveSuccessors :: [NodeID]
+    , leavePredecessors :: [NodeID]
     }
     | StabiliseRequestPayload
    | PingRequestPayload
@@ -62,14 +62,14 @@
     { queryResult :: QueryResponse
     }
     | JoinResponsePayload
-    { joinSuccessors :: [RemoteNodeState]
-    , joinPredecessors :: [RemoteNodeState]
+    { joinSuccessors :: [NodeID]
+    , joinPredecessors :: [NodeID]
     , joinCache :: [RemoteCacheEntry]
     }
     | LeaveResponsePayload
     | StabiliseResponsePayload
-    { stabiliseSuccessors :: [RemoteNodeState]
-    , stabilisePredecessors :: [RemoteNodeState]
+    { stabiliseSuccessors :: [NodeID]
+    , stabilisePredecessors :: [NodeID]
     }
     | PingResponsePayload
     { pingNodeStates :: [RemoteNodeState]
diff --git a/test/FediChordSpec.hs b/test/FediChordSpec.hs
index b289c33..146afcd 100644
--- a/test/FediChordSpec.hs
+++ b/test/FediChordSpec.hs
@@ -119,7 +119,7 @@ spec = do
         let
             emptyCache = initCache
            nid1 = toNodeID 2^(23::Integer)+1
-            node1 = setPredecessors [node4] . setNid nid1 <$> exampleLocalNode
+            node1 = setPredecessors [nid4] . setNid nid1 <$> exampleLocalNode
            nid2 = toNodeID 2^(230::Integer)+12
            node2 = exampleNodeState { nid = nid2}
            nid3 = toNodeID 2^(25::Integer)+10
@@ -152,15 +152,15 @@
     describe "Messages can be encoded to and decoded from ASN.1" $ do
        -- define test messages
        let
-            someNodes = fmap (flip setNid exampleNodeState . fromInteger) [3..12]
+            someNodeIDs = fmap fromInteger [3..12]
            qidReqPayload = QueryIDRequestPayload {
                queryTargetID = nid exampleNodeState
              , queryLBestNodes = 3
             }
            jReqPayload = JoinRequestPayload
            lReqPayload = LeaveRequestPayload {
-                leaveSuccessors = someNodes
-              , leavePredecessors = someNodes
+                leaveSuccessors = someNodeIDs
+              , leavePredecessors = someNodeIDs
             }
            stabReqPayload = StabiliseRequestPayload
            pingReqPayload = PingRequestPayload
@@ -174,8 +174,8 @@
                ]
             }
            jResPayload = JoinResponsePayload {
-                joinSuccessors = someNodes
-              , joinPredecessors = someNodes
+                joinSuccessors = someNodeIDs
+              , joinPredecessors = someNodeIDs
              , joinCache = [
                  RemoteCacheEntry exampleNodeState (toEnum 23420001)
                , RemoteCacheEntry (exampleNodeState {nid = fromInteger (-5)}) (toEnum 0)
@@ -183,7 +183,7 @@
             }
            lResPayload = LeaveResponsePayload
            stabResPayload = StabiliseResponsePayload {
-                stabiliseSuccessors = someNodes
+                stabiliseSuccessors = someNodeIDs
              , stabilisePredecessors = []
             }
            pingResPayload = PingResponsePayload {
@@ -213,8 +213,8 @@
            encodeDecodeAndCheck msg = runParseASN1 parseMessage (encodeMessage msg) `shouldBe` pure msg
            largeMessage = responseWith Join $ JoinResponsePayload {
-                joinSuccessors = flip setNid exampleNodeState . fromInteger <$> [-20..150]
-              , joinPredecessors = flip setNid exampleNodeState . fromInteger <$> [5..11]
+                joinSuccessors = fromInteger <$> [-20..150]
+              , joinPredecessors = fromInteger <$> [5..11]
              , joinCache = [ RemoteCacheEntry (exampleNodeState {nid = node}) 290001 | node <- [50602,506011..60000]]
             }
@@ -235,7 +235,7 @@
        it "messages too large for a single packet can (often) be split into multiple parts" $ do
            -- TODO: once splitting works more efficient, test for exact number or payload, see #18
            length (serialiseMessage 600 largeMessage) > 1 `shouldBe` True
-            length (serialiseMessage 60000 largeMessage) `shouldBe` 1
+            length (serialiseMessage 6000 largeMessage) `shouldBe` 1
        it "message part numbering starts at the submitted part number" $ do
            isJust (Map.lookup 1 (serialiseMessage 600 largeMessage)) `shouldBe` True
            let startAt5 = serialiseMessage 600 (largeMessage {part = 5})
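
The core of the change is that neighbour lists now carry bare NodeIDs, so setSuccessors/setPredecessors deduplicate, sort by ring-local distance and truncate plain IDs instead of full RemoteNodeState records. Below is a minimal, self-contained Haskell sketch of that list maintenance for illustration only: the NodeID newtype, the toy ring modulus 256 and localCompareFrom are stand-ins and not the project's real types (the actual NodeID arithmetic is modular over the full ID space and localCompare lives in Hash2Pub.FediChordTypes).

module NeighbourListSketch where

import Data.List (nub, sortBy)

-- Stand-in for Hash2Pub's NodeID; the real type wraps an Integer with
-- modular arithmetic over the whole ID space.
newtype NodeID = NodeID Integer
    deriving (Show, Eq, Ord)

-- Hypothetical ring-local comparison relative to an own ID: order IDs by
-- their clockwise distance from ownID on a toy ring of size 256.
localCompareFrom :: NodeID -> NodeID -> NodeID -> Ordering
localCompareFrom (NodeID ownID) (NodeID a) (NodeID b) =
    compare ((a - ownID) `mod` 256) ((b - ownID) `mod` 256)

-- Mirrors the shape of setSuccessors after this change: deduplicate the
-- received IDs, sort them by ring distance and keep only the k nearest ones.
takeSuccessors :: NodeID -> Int -> [NodeID] -> [NodeID]
takeSuccessors ownID k = take k . sortBy (localCompareFrom ownID) . nub

main :: IO ()
main = print $ takeSuccessors (NodeID 10) 3 (map NodeID [250, 12, 40, 12, 11])
-- prints the three IDs that follow 10 most closely on the toy ring:
-- [NodeID 11,NodeID 12,NodeID 40]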