Compare commits
No commits in common. "mainline" and "refactorRingMap" have entirely different histories.
mainline...refactorRingMap
FediChord.asn1

@@ -89,8 +89,8 @@ StabiliseResponsePayload ::= SEQUENCE {
 
 LeaveRequestPayload ::= SEQUENCE {
     successors SEQUENCE OF NodeState,
-    predecessors SEQUENCE OF NodeState,
-    doMigration BOOLEAN
+    predecessors SEQUENCE OF NodeState
+    -- ToDo: transfer of own data to newly responsible node
 }
 
 LeaveResponsePayload ::= NULL -- just a confirmation
Hash2Pub.cabal

@@ -46,8 +46,8 @@ category: Network
 extra-source-files: CHANGELOG.md
 
 common deps
-    build-depends: base >=4, containers ^>=0.6.0.1, bytestring, utf8-string ^>=1.0.1.1, network ^>=3.1, time, cmdargs ^>= 0.10, cryptonite, memory, async, stm, asn1-encoding, asn1-types, asn1-parse, publicsuffix, network-byte-order, safe, iproute, mtl, random, servant, servant-server, servant-client, warp, text, unordered-containers, hashable, unicode-transforms, http-client, http-types, unbounded-delays, dlist, formatting
-    ghc-options: -Wall -Wpartial-fields -O2
+    build-depends: base ^>=4.12.0.0, containers ^>=0.6.0.1, bytestring, utf8-string ^>=1.0.1.1, network ^>=2.8.0.1, time ^>=1.8.0.2, cmdargs ^>= 0.10, cryptonite ^>= 0.25, memory, async, stm, asn1-encoding, asn1-types, asn1-parse, publicsuffix, network-byte-order, safe, iproute, mtl, random, servant, servant-server, servant-client, warp, text, unordered-containers
+    ghc-options: -Wall
 
@@ -55,10 +55,10 @@ library
     import: deps
 
     -- Modules exported by the library.
-    exposed-modules: Hash2Pub.FediChord, Hash2Pub.FediChordTypes, Hash2Pub.DHTProtocol, Hash2Pub.ASN1Coding, Hash2Pub.ProtocolTypes, Hash2Pub.PostService, Hash2Pub.RingMap
+    exposed-modules: Hash2Pub.FediChord, Hash2Pub.FediChordTypes, Hash2Pub.DHTProtocol, Hash2Pub.ASN1Coding, Hash2Pub.ProtocolTypes, Hash2Pub.PostService, Hash2Pub.ServiceTypes, Hash2Pub.RingMap
 
     -- Modules included in this library but not exported.
-    other-modules: Hash2Pub.Utils, Hash2Pub.PostService.API
+    other-modules: Hash2Pub.Utils
 
     -- LANGUAGE extensions used by modules in this package.
     other-extensions: GeneralizedNewtypeDeriving, DataKinds, OverloadedStrings
@@ -91,20 +91,6 @@ executable Hash2Pub
     -- Base language which the package is written in.
     default-language: Haskell2010
 
     ghc-options: -threaded -rtsopts -with-rtsopts=-N
 
-executable Experiment
-    -- experiment runner
-    import: deps
-
-    build-depends: Hash2Pub
-
-    main-is: Experiment.hs
-
-    hs-source-dirs: app
-
-    default-language: Haskell2010
-
-    ghc-options: -threaded
-
README.md

@@ -1,7 +1,7 @@
 # Hash2Pub
 
 ***This is heavily WIP and does not provide any useful functionality yet***.
-I aim for always having the `mainline` branch in a state where it builds and tests pass.
+I aim for always having the master branch at a state where it builds and tests pass.
 
 A fully-decentralised relay for global hashtag federation in [ActivityPub](https://activitypub.rocks) based on a distributed hash table.
 It allows querying and subscribing to all posts of a certain hashtag and is implemented in Haskell.
@@ -10,13 +10,8 @@ This is the practical implementation of the concept presented in the paper [Dece
 
 The ASN.1 module schema used for DHT messages can be found in `FediChord.asn1`.
 
-For further questions and discussions, please refer to the **Hash2Pub topic in [SocialHub](https://socialhub.activitypub.rocks/c/software/hash2pub/48)**.
-
 ## Building
 
 The project and its development environment are built with [Nix](https://nixos.org/nix/).
 
-The development environment can be entered with `nix-shell shell-minimal.nix`. Then the project can be built with `cabal build` from within the environment, or using `nix-shell --command "cabal build" shell-minimal.nix` to do both steps at once.
-
-While the `shell-minimal.nix` environment contains everything necessary for building and testing this project, the `shell.nix` additionally contains the Haskell IDE engine *hie* and the documentation for all used Haskell packages for more convenient development.
-Be aware that these need to be built from source and can take a very long time to build.
+The development environment can be entered with `nix-shell`. Then the project can be built with `cabal build` from within the environment, or using `nix-shell --command "cabal build"` to do both steps at once.
app/Experiment.hs

@@ -1,51 +0,0 @@
-{-# LANGUAGE OverloadedStrings #-}
-
-module Main where
-
-import Control.Concurrent
-import Control.Monad (forM_)
-import qualified Data.Text.Lazy as Txt
-import qualified Data.Text.Lazy.IO as TxtI
-import qualified Network.HTTP.Client as HTTP
-import System.Environment (getArgs)
-
-import Hash2Pub.PostService (Hashtag, clientPublishPost)
-
--- configuration constants
-timelineFile = "../simulationData/inputs/generated/timeline_sample.csv"
-
-main :: IO ()
-main = do
-    -- read CLI parameters
-    speedupStr : _ <- getArgs
-    -- read and parse timeline schedule
-    -- relying on laziness of Haskell IO, hoping it does not introduce too strong delays
-    postEvents <- parseSchedule <$> TxtI.readFile timelineFile
-    -- actually schedule and send the post events
-    executeSchedule (read speedupStr) postEvents
-    pure ()
-
-
-parseSchedule :: Txt.Text
-              -> [(Int, Hashtag, (String, Int))] -- ^ [(delay in microseconds, hashtag, (hostname, port))]
-parseSchedule = fmap (parseEntry . Txt.split (== ';')) . Txt.lines
-  where
-    parseEntry [delayT, contactT, tag] =
-        (read $ Txt.unpack delayT, tag, read $ Txt.unpack contactT)
-    parseEntry entry = error $ "invalid schedule input format: " <> show entry
-
-executeSchedule :: Int -- ^ speedup factor
-                -> [(Int, Hashtag, (String, Int))] -- ^ [(delay in microseconds, hashtag, (hostname, port))]
-                -> IO ()
-executeSchedule speedup events = do
-    -- initialise HTTP manager
-    httpMan <- HTTP.newManager $ HTTP.defaultManagerSettings { HTTP.managerResponseTimeout = HTTP.responseTimeoutMicro 60000000 }
-    forM_ events $ \(delay, tag, (pubHost, pubPort)) -> do
-        _ <- forkIO $
-            clientPublishPost httpMan pubHost pubPort ("foobar #" <> tag)
-                >>= either putStrLn (const $ pure ())
-        -- while threadDelay gives only minimum delay guarantees, let's hope the
-        -- additional delays are negligible
-        -- otherwise: evaluate usage of https://hackage.haskell.org/package/schedule-0.3.0.0/docs/Data-Schedule.html
-        threadDelay $ delay `div` speedup
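For reference, the timeline CSV consumed by this deleted runner is semicolon-separated. A minimal standalone sketch (sample file contents and values invented) of what a single line parses to:

{-# LANGUAGE OverloadedStrings #-}

import qualified Data.Text.Lazy as Txt

-- One hypothetical schedule line: delay in microseconds, a (hostname, port)
-- tuple in Haskell Read syntax, and the hashtag, separated by semicolons.
sampleLine :: Txt.Text
sampleLine = "200000;(\"host1.example\",3000);catpics"

-- yields (200000, "catpics", ("host1.example", 3000))
parsedEntry :: (Int, Txt.Text, (String, Int))
parsedEntry = case Txt.split (== ';') sampleLine of
    [delayT, contactT, tag] -> (read $ Txt.unpack delayT, tag, read $ Txt.unpack contactT)
    _                       -> error "invalid schedule input format"

main :: IO ()
main = print parsedEntry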
app/Main.hs
@@ -10,17 +10,15 @@ import Data.IP (IPv6, toHostAddress6)
 import System.Environment
 
 import Hash2Pub.FediChord
 import Hash2Pub.FediChordTypes
-import Hash2Pub.PostService (PostService (..))
 
 main :: IO ()
 main = do
     -- ToDo: parse and pass config
     -- probably use `tomland` for that
-    (fConf, sConf) <- readConfig
-    -- TODO: first initialise 'RealNode', then the vservers
+    conf <- readConfig
     -- ToDo: load persisted caches, bootstrapping nodes …
-    (serverSock, thisNode) <- fediChordInit fConf (runService sConf :: DHT d => d -> IO (PostService d))
+    (serverSock, thisNode) <- fediChordInit conf
     -- currently no masking is necessary, as there is nothing to clean up
     nodeCacheWriterThread <- forkIO $ nodeCacheWriter thisNode
     -- try joining the DHT using one of the provided bootstrapping nodes
@@ -43,38 +41,15 @@ main = do
     pure ()
 
 
-readConfig :: IO (FediChordConf, ServiceConf)
+readConfig :: IO FediChordConf
 readConfig = do
-    confDomainString : ipString : portString : servicePortString : speedupString : remainingArgs <- getArgs
-    -- allow starting the initial node without bootstrapping info to avoid
-    -- waiting for timeout
-    let
-        speedup = read speedupString
-        confBootstrapNodes' = case remainingArgs of
-            bootstrapHost : bootstrapPortString : _ ->
-                [(bootstrapHost, read bootstrapPortString)]
-            _ -> []
-        fConf = FediChordConf
-            { confDomain = confDomainString
+    confDomainString : ipString : portString : bootstrapHost : bootstrapPortString : _ <- getArgs
+    pure $ FediChordConf {
+          confDomain = confDomainString
         , confIP = toHostAddress6 . read $ ipString
         , confDhtPort = read portString
-        , confBootstrapNodes = confBootstrapNodes'
-        , confStabiliseInterval = 80 * 10^6
-        , confBootstrapSamplingInterval = 180 * 10^6 `div` speedup
-        , confMaxLookupCacheAge = 300 / fromIntegral speedup
-        , confJoinAttemptsInterval = 60 * 10^6 `div` speedup
-        , confMaxNodeCacheAge = 600 / fromIntegral speedup
-        , confResponsePurgeAge = 60 / fromIntegral speedup
-        , confRequestTimeout = 5 * 10^6 `div` speedup
-        , confRequestRetries = 3
+        , confBootstrapNodes = [(bootstrapHost, read bootstrapPortString)]
+        --, confStabiliseInterval = 60
+        , confBootstrapSamplingInterval = 180
+        , confMaxLookupCacheAge = 300
         }
-        sConf = ServiceConf
-            { confSubscriptionExpiryTime = 24*3600 / fromIntegral speedup
-            , confServicePort = read servicePortString
-            , confServiceHost = confDomainString
-            , confLogfilePath = "../simulationData/logs/" <> confDomainString <> ".log"
-            , confSpeedupFactor = speedup
-            , confStatsEvalDelay = 120 * 10^6 `div` speedup
-            }
-    pure (fConf, sConf)
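The mainline side of readConfig scales all timing constants by a speedup factor for simulation runs. A small sketch (helper names invented) of the two arithmetic styles it uses: integral division for Int microsecond intervals, fractional division for NominalDiffTime cache ages:

import Data.Time.Clock (NominalDiffTime)

-- Int intervals in microseconds are divided integrally.
scaleInterval :: Int -> Int -> Int
scaleInterval speedup micros = micros `div` speedup

-- Fractional ages are divided as reals.
scaleAge :: Int -> NominalDiffTime -> NominalDiffTime
scaleAge speedup age = age / fromIntegral speedup

-- e.g. scaleInterval 10 (180 * 10^6) == 18 * 10^6
--      scaleAge      10 600          == 60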
default.nix
@@ -1,19 +1,14 @@
-{
-  compiler ? "ghc884"
-}:
-
-let
-  pkgs = import (
+{ pkgs ? import (
     builtins.fetchGit {
       name = "nixpkgs-pinned";
      url = https://github.com/NixOS/nixpkgs/;
-      ref = "refs/heads/release-20.09";
-      rev = "e065200fc90175a8f6e50e76ef10a48786126e1c";
-    }) {
-      # Pass no config for purity
-      config = {};
-      overlays = [];
-    };
+      ref = "refs/heads/release-20.03";
+      rev = "da7ddd822e32aeebea00e97ab5aeca9758250a40";
+    }) {},
+  compiler ? "ghc865"
+}:
+
+let
   hp = pkgs.haskell.packages."${compiler}";
   src = pkgs.nix-gitignore.gitignoreSource [] ./.;
   drv = hp.callCabal2nix "Hash2Pub" "${src}/Hash2Pub.cabal" {};
shell-minimal.nix

@@ -1 +0,0 @@
-(import ./default.nix {withHIE = false;}).shell
src/Hash2Pub/ASN1Coding.hs

@@ -38,7 +38,6 @@ splitPayload numParts pl@LeaveRequestPayload{} = [
         LeaveRequestPayload {
             leaveSuccessors = atDef [] (listInto numParts $ leaveSuccessors pl) (thisPart-1)
           , leavePredecessors = atDef [] (listInto numParts $ leavePredecessors pl) (thisPart-1)
-          , leaveDoMigration = leaveDoMigration pl
         } | thisPart <- [1..numParts] ]
 splitPayload numParts pl@StabiliseResponsePayload{} = [
     StabiliseResponsePayload {
@@ -135,8 +134,9 @@ encodePayload payload'@LeaveRequestPayload{} =
     <> [End Sequence
        , Start Sequence]
     <> concatMap encodeNodeState (leavePredecessors payload')
-    <> [End Sequence]
-    <> [Boolean (leaveDoMigration payload'), End Sequence]
+    <> [End Sequence
+       , End Sequence]
+-- currently StabiliseResponsePayload and LeaveRequestPayload are equal
 encodePayload payload'@StabiliseResponsePayload{} =
     Start Sequence
     : Start Sequence
@@ -144,7 +144,8 @@ encodePayload payload'@StabiliseResponsePayload{} =
     <> [End Sequence
        , Start Sequence]
     <> concatMap encodeNodeState (stabilisePredecessors payload')
-    <> [End Sequence, End Sequence]
+    <> [End Sequence
+       , End Sequence]
 encodePayload payload'@StabiliseRequestPayload = [Null]
 encodePayload payload'@QueryIDResponsePayload{} =
     let
@@ -414,11 +415,9 @@ parseLeaveRequest :: ParseASN1 ActionPayload
 parseLeaveRequest = onNextContainer Sequence $ do
     succ' <- onNextContainer Sequence (getMany parseNodeState)
     pred' <- onNextContainer Sequence (getMany parseNodeState)
-    doMigration <- parseBool
     pure $ LeaveRequestPayload {
           leaveSuccessors = succ'
         , leavePredecessors = pred'
-        , leaveDoMigration = doMigration
         }
 
 parseLeaveResponse :: ParseASN1 ActionPayload
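To make the wire-format change concrete: with the doMigration field, mainline's LeaveRequestPayload encodes as two nested node-state lists followed by a boolean. A minimal sketch of that token stream using the asn1-types constructors seen above (node-state encodings elided):

import Data.ASN1.Types (ASN1 (..), ASN1ConstructionType (..))

-- Shape of the mainline encoding; the Boolean is the field the
-- right-hand side of this diff does not yet have.
leaveRequestShape :: Bool -> [ASN1]
leaveRequestShape doMigration =
    [ Start Sequence
    , Start Sequence       -- successor NodeStates would go here
    , End Sequence
    , Start Sequence       -- predecessor NodeStates would go here
    , End Sequence
    , Boolean doMigration  -- migration flag
    , End Sequence
    ]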
src/Hash2Pub/DHTProtocol.hs

@@ -19,7 +19,6 @@ module Hash2Pub.DHTProtocol
     , sendQueryIdMessages
     , requestQueryID
     , requestJoin
-    , requestLeave
     , requestPing
     , requestStabilise
     , lookupMessage
@@ -35,25 +34,21 @@ module Hash2Pub.DHTProtocol
     , ackRequest
     , isPossibleSuccessor
     , isPossiblePredecessor
     , isInOwnResponsibilitySlice
     , isJoined
     , closestCachePredecessors
     )
     where
 
-import Control.Concurrent
-import Control.Concurrent.Async
 import Control.Concurrent.STM
 import Control.Concurrent.STM.TBQueue
 import Control.Concurrent.STM.TQueue
 import Control.Concurrent.STM.TVar
 import Control.Exception
-import Control.Monad (foldM, forM, forM_, void, when)
-import Control.Monad.Except (MonadError (..), runExceptT)
-import Control.Monad.IO.Class (MonadIO (..))
+import Control.Monad (foldM, forM, forM_)
 import qualified Data.ByteString as BS
 import Data.Either (rights)
-import Data.Foldable (foldl', foldr', foldrM)
+import Data.Foldable (foldl', foldr')
 import Data.Functor.Identity
 import Data.IP (IPv6, fromHostAddress6,
                 toHostAddress6)
@@ -78,11 +73,10 @@ import Hash2Pub.FediChordTypes (CacheEntry (..),
                                 LocalNodeState (..),
                                 LocalNodeStateSTM, NodeCache,
                                 NodeID, NodeState (..),
-                                RealNode (..), RealNodeSTM,
+                                RealNode (..),
                                 RemoteNodeState (..),
                                 RingEntry (..), RingMap (..),
-                                Service (..), addRMapEntry,
-                                addRMapEntryWith,
+                                addRMapEntry, addRMapEntryWith,
                                 cacheGetNodeStateUnvalidated,
                                 cacheLookup, cacheLookupPred,
                                 cacheLookupSucc, genNodeID,
@@ -98,7 +92,7 @@ import Debug.Trace (trace)
 
 -- TODO: evaluate more fine-grained argument passing to allow granular locking
 -- | look up an ID to either claim responsibility for it or return the closest l nodes from the local cache
-queryLocalCache :: LocalNodeState s -> NodeCache -> Int -> NodeID -> QueryResponse
+queryLocalCache :: LocalNodeState -> NodeCache -> Int -> NodeID -> QueryResponse
 queryLocalCache ownState nCache lBestNodes targetID
     -- as target ID falls between own ID and first predecessor, it is handled by this node
     -- This only makes sense if the node is part of the DHT by having joined.
@@ -108,6 +102,9 @@ queryLocalCache ownState nCache lBestNodes targetID
     -- the closest succeeding node (like with the p initiated parallel queries
     | otherwise = FORWARD $ closestSuccessor `Set.union` closestCachePredecessors (lBestNodes-1) targetID nCache
     where
+        ownID = getNid ownState
+        preds = predecessors ownState
+
         closestSuccessor :: Set.Set RemoteCacheEntry
         closestSuccessor = maybe Set.empty (Set.singleton . toRemoteCacheEntry) $ cacheLookupSucc targetID nCache
 
@@ -133,25 +130,23 @@ closestCachePredecessors remainingLookups lastID nCache
 -- Looks up the successor of the lookup key on a 'RingMap' representation of the
 -- predecessor list with the node itself added. If the result is the same as the node
 -- itself then it falls into the responsibility interval.
-isInOwnResponsibilitySlice :: HasKeyID NodeID a => a -> LocalNodeState s -> Bool
-isInOwnResponsibilitySlice lookupTarget ownNs = (fst <$> rMapLookupSucc (getKeyID lookupTarget :: NodeID) predecessorRMap) == pure (getNid ownNs)
+isInOwnResponsibilitySlice :: HasKeyID a NodeID => a -> LocalNodeState -> Bool
+isInOwnResponsibilitySlice lookupTarget ownNs = (getKeyID <$> rMapLookupSucc (getKeyID lookupTarget :: NodeID) predecessorRMap) == pure (getNid ownNs)
     where
         predecessorList = predecessors ownNs
         -- add node itself to RingMap representation, to distinguish between
         -- responsibility of own node and predecessor
-        predecessorRMap = addRMapEntry (getKeyID ownRemote) ownRemote $ rMapFromList (keyValuePair <$> predecessorList) :: RingMap NodeID RemoteNodeState
-        ownRemote = toRemoteNodeState ownNs
+        predecessorRMap = addRMapEntry (toRemoteNodeState ownNs) $ rMapFromList predecessorList
        closestPredecessor = headMay predecessorList
 
-isPossiblePredecessor :: HasKeyID NodeID a => a -> LocalNodeState s -> Bool
+isPossiblePredecessor :: HasKeyID a NodeID => a -> LocalNodeState -> Bool
 isPossiblePredecessor = isInOwnResponsibilitySlice
 
-isPossibleSuccessor :: HasKeyID NodeID a => a -> LocalNodeState s -> Bool
-isPossibleSuccessor lookupTarget ownNs = (fst <$> rMapLookupPred (getKeyID lookupTarget :: NodeID) successorRMap) == pure (getNid ownNs)
+isPossibleSuccessor :: HasKeyID a NodeID => a -> LocalNodeState -> Bool
+isPossibleSuccessor lookupTarget ownNs = (getKeyID <$> rMapLookupPred (getKeyID lookupTarget :: NodeID) successorRMap) == pure (getNid ownNs)
     where
         successorList = successors ownNs
-        successorRMap = addRMapEntry (getKeyID ownRemote) ownRemote $ rMapFromList (keyValuePair <$> successorList)
-        ownRemote = toRemoteNodeState ownNs
+        successorRMap = addRMapEntry (toRemoteNodeState ownNs) $ rMapFromList successorList
        closestSuccessor = headMay successorList
 
 -- cache operations
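All of these predicates reduce to a wrap-around lookup on a ring of keys. A minimal sketch of that idea using a plain Data.Map in place of the project's RingMap (the exact boundary behaviour of rMapLookupSucc is assumed):

import           Control.Applicative ((<|>))
import qualified Data.Map.Strict     as Map

-- Take the first entry at or after the lookup key; if none exists,
-- wrap around to the smallest key of the ring.
lookupSucc :: Ord k => k -> Map.Map k v -> Maybe (k, v)
lookupSucc key ring = Map.lookupGE key ring <|> (fst <$> Map.minViewWithKey ring)

If the successor of a target on the predecessor ring (with the node itself inserted) is the node's own ID, the target lies in the node's responsibility slice.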
@@ -174,8 +169,7 @@ addCacheEntryPure now (RemoteCacheEntry ns ts) cache =
     let
         -- TODO: limit diffSeconds to some maximum value to prevent malicious nodes from inserting entries valid nearly until eternity
         timestamp' = if ts <= now then ts else now
-        newEntry = CacheEntry False ns timestamp'
-        newCache = addRMapEntryWith insertCombineFunction (getKeyID newEntry) newEntry cache
+        newCache = addRMapEntryWith insertCombineFunction (CacheEntry False ns timestamp') cache
         insertCombineFunction newVal@(KeyEntry (CacheEntry newValidationState newNode newTimestamp)) oldVal =
             case oldVal of
                 ProxyEntry n _ -> ProxyEntry n (Just newVal)
@@ -208,7 +202,7 @@ addNodeAsVerifiedPure :: POSIXTime
                       -> RemoteNodeState
                       -> NodeCache
                       -> NodeCache
-addNodeAsVerifiedPure now node = addRMapEntry (getKeyID node) (CacheEntry True node now)
+addNodeAsVerifiedPure now node = addRMapEntry (CacheEntry True node now)
 
 
 
@@ -227,7 +221,7 @@ markCacheEntryAsVerified timestamp nid = RingMap . Map.adjust adjustFunc nid . g
 
 -- | uses the successor and predecessor list of a node as an indicator for whether a
 -- node has properly joined the DHT
-isJoined :: LocalNodeState s -> Bool
+isJoined :: LocalNodeState -> Bool
 isJoined ns = not . all null $ [successors ns, predecessors ns]
 
 -- | the size limit to be used when serialising messages for sending
@@ -251,20 +245,19 @@ ackRequest _ _ = Map.empty
 
 -- | Dispatch incoming requests to the dedicated handling and response function, and enqueue
 -- the response to be sent.
-handleIncomingRequest :: Service s (RealNodeSTM s)
-                      => LocalNodeStateSTM s -- ^ the handling node
+handleIncomingRequest :: LocalNodeStateSTM -- ^ the handling node
                       -> TQueue (BS.ByteString, SockAddr) -- ^ send queue
                       -> Set.Set FediChordMessage -- ^ all parts of the request to handle
                       -> SockAddr -- ^ source address of the request
                       -> IO ()
 handleIncomingRequest nsSTM sendQ msgSet sourceAddr = do
+    putStrLn $ "handling incoming request: " <> show msgSet
     ns <- readTVarIO nsSTM
     -- add nodestate to cache
     now <- getPOSIXTime
     case headMay . Set.elems $ msgSet of
         Nothing -> pure ()
         Just aPart -> do
-            let (SockAddrInet6 _ _ sourceIP _) = sourceAddr
             queueAddEntries (Identity $ RemoteCacheEntry (sender aPart) now) ns
             -- distinguish on whether and how to respond. If responding, pass message to response generating function and write responses to send queue
             maybe (pure ()) (
@@ -272,36 +265,17 @@ handleIncomingRequest nsSTM sendQ msgSet sourceAddr = do
                 )
                 =<< (case action aPart of
                     Ping -> Just <$> respondPing nsSTM msgSet
-                    Join -> dropSpoofedIDs sourceIP nsSTM msgSet respondJoin
+                    Join -> Just <$> respondJoin nsSTM msgSet
                     -- ToDo: figure out what happens if not joined
                     QueryID -> Just <$> respondQueryID nsSTM msgSet
                     -- only when joined
-                    Leave -> if isJoined ns then dropSpoofedIDs sourceIP nsSTM msgSet respondLeave else pure Nothing
-                    Stabilise -> if isJoined ns then dropSpoofedIDs sourceIP nsSTM msgSet respondStabilise else pure Nothing
+                    Leave -> if isJoined ns then Just <$> respondLeave nsSTM msgSet else pure Nothing
+                    Stabilise -> if isJoined ns then Just <$> respondStabilise nsSTM msgSet else pure Nothing
                     )
             -- for single part request, response starts with part number 1. For multipart requests, response starts with part number n+1.
 
             -- TODO: determine request type only from first part, but catch RecSelError on each record access when folding, because otherwise different request type parts can make this crash
             -- TODO: test case: mixed message types of parts
-    where
-        -- | Filter out requests with spoofed node IDs by recomputing the ID using
-        -- the sender IP.
-        -- For valid (non-spoofed) sender IDs, the passed responder function is invoked.
-        dropSpoofedIDs :: HostAddress6 -- msg source address
-                       -> LocalNodeStateSTM s
-                       -> Set.Set FediChordMessage -- message parts of the request
-                       -> (LocalNodeStateSTM s -> Set.Set FediChordMessage -> IO (Map.Map Integer BS.ByteString)) -- responder function to be invoked for valid requests
-                       -> IO (Maybe (Map.Map Integer BS.ByteString))
-        dropSpoofedIDs addr nsSTM' msgSet' responder =
-            let
-                aRequestPart = Set.elemAt 0 msgSet
-                senderNs = sender aRequestPart
-                givenSenderID = getNid senderNs
-                recomputedID = genNodeID addr (getDomain senderNs) (fromInteger $ getVServerID senderNs)
-            in
-                if recomputedID == givenSenderID
-                    then Just <$> responder nsSTM' msgSet'
-                    else pure Nothing
 
 
 -- ....... response sending .......
@@ -310,8 +284,9 @@ handleIncomingRequest nsSTM sendQ msgSet sourceAddr = do
 
 
 -- | execute a key ID lookup on local cache and respond with the result
-respondQueryID :: LocalNodeStateSTM s -> Set.Set FediChordMessage -> IO (Map.Map Integer BS.ByteString)
+respondQueryID :: LocalNodeStateSTM -> Set.Set FediChordMessage -> IO (Map.Map Integer BS.ByteString)
 respondQueryID nsSTM msgSet = do
+    putStrLn "responding to a QueryID request"
     -- this message cannot be split reasonably, so just
     -- consider the first payload
     let
@@ -349,7 +324,8 @@ respondQueryID nsSTM msgSet = do
 
 -- | Respond to a Leave request by removing the leaving node from local data structures
 -- and confirming with response.
-respondLeave :: Service s (RealNodeSTM s) => LocalNodeStateSTM s -> Set.Set FediChordMessage -> IO (Map.Map Integer BS.ByteString)
+-- TODO: copy over key data from leaver and confirm
+respondLeave :: LocalNodeStateSTM -> Set.Set FediChordMessage -> IO (Map.Map Integer BS.ByteString)
 respondLeave nsSTM msgSet = do
     -- combine payload of all parts
     let (requestPreds, requestSuccs) = foldr' (\msg (predAcc, succAcc) ->
@@ -358,15 +334,16 @@ respondLeave nsSTM msgSet = do
             )
             ([],[]) msgSet
         aRequestPart = Set.elemAt 0 msgSet
-        leaveSenderID = getNid . sender $ aRequestPart
+        senderID = getNid . sender $ aRequestPart
     responseMsg <- atomically $ do
         nsSnap <- readTVar nsSTM
         -- remove leaving node from successors, predecessors and NodeCache
-        writeTQueue (cacheWriteQueue nsSnap) $ deleteCacheEntry leaveSenderID
+        writeTQueue (cacheWriteQueue nsSnap) $ deleteCacheEntry senderID
         writeTVar nsSTM $
             -- add predecessors and successors of leaving node to own lists
-            setPredecessors (filter ((/=) leaveSenderID . getNid) $ requestPreds <> predecessors nsSnap)
-            . setSuccessors (filter ((/=) leaveSenderID . getNid) $ requestSuccs <> successors nsSnap) $ nsSnap
+            setPredecessors (filter ((/=) senderID . getNid) $ requestPreds <> predecessors nsSnap)
+            . setSuccessors (filter ((/=) senderID . getNid) $ requestSuccs <> successors nsSnap) $ nsSnap
+        -- TODO: handle handover of key data
         let leaveResponse = Response {
               requestID = requestID aRequestPart
             , senderID = getNid nsSnap
@@ -376,14 +353,10 @@ respondLeave nsSTM msgSet = do
             , payload = Just LeaveResponsePayload
             }
         pure leaveResponse
-    -- if awaiting an incoming service data migration, collect the lock without blocking this thread
-    when (maybe False leaveDoMigration (payload aRequestPart)) $ do
-        ownService <- atomically $ nodeService <$> (readTVar nsSTM >>= (readTVar . parentRealNode))
-        void (forkIO $ waitForMigrationFrom ownService leaveSenderID)
     pure $ serialiseMessage sendMessageSize responseMsg
 
 -- | respond to stabilise requests by returning successor and predecessor list
-respondStabilise :: LocalNodeStateSTM s -> Set.Set FediChordMessage -> IO (Map.Map Integer BS.ByteString)
+respondStabilise :: LocalNodeStateSTM -> Set.Set FediChordMessage -> IO (Map.Map Integer BS.ByteString)
 respondStabilise nsSTM msgSet = do
     nsSnap <- readTVarIO nsSTM
     let
@@ -405,7 +378,7 @@ respondStabilise nsSTM msgSet = do
 
 
 -- | respond to Ping request by returning all active vserver NodeStates
-respondPing :: LocalNodeStateSTM s -> Set.Set FediChordMessage -> IO (Map.Map Integer BS.ByteString)
+respondPing :: LocalNodeStateSTM -> Set.Set FediChordMessage -> IO (Map.Map Integer BS.ByteString)
 respondPing nsSTM msgSet = do
     -- TODO: respond with all active VS when implementing k-choices
     nsSnap <- readTVarIO nsSTM
@@ -422,19 +395,18 @@ respondPing nsSTM msgSet = do
         }
     pure $ serialiseMessage sendMessageSize pingResponse
 
 
-respondJoin :: Service s (RealNodeSTM s) => LocalNodeStateSTM s -> Set.Set FediChordMessage -> IO (Map.Map Integer BS.ByteString)
+-- this modifies node state, so locking and IO seems to be necessary.
+-- Still try to keep as much code as possible pure
+respondJoin :: LocalNodeStateSTM -> Set.Set FediChordMessage -> IO (Map.Map Integer BS.ByteString)
 respondJoin nsSTM msgSet = do
-    -- atomically read and modify the node state according to the parsed request
-    (dataMigration, responseMsg) <- atomically $ do
+    responseMsg <- atomically $ do
         nsSnap <- readTVar nsSTM
         cache <- readTVar $ nodeCacheSTM nsSnap
         let
             aRequestPart = Set.elemAt 0 msgSet
             senderNS = sender aRequestPart
-            -- if not joined yet, attract responsibility for
-            -- all keys to make bootstrapping possible
-            responsibilityLookup = if isJoined nsSnap then queryLocalCache nsSnap cache 1 (getNid senderNS) else FOUND (toRemoteNodeState nsSnap)
+            responsibilityLookup = queryLocalCache nsSnap cache 1 (getNid senderNS)
             thisNodeResponsible (FOUND _) = True
             thisNodeResponsible (FORWARD _) = False
         -- check whether the joining node falls into our responsibility
@@ -458,41 +430,33 @@ respondJoin nsSTM msgSet = do
                     , payload = Just responsePayload
                     }
                 writeTVar nsSTM joinedNS
-                ownService <- nodeService <$> readTVar (parentRealNode nsSnap)
-                let
-                    serviceDataMigrator = migrateData ownService (getNid nsSnap) lowerKeyBound (getNid senderNS) (getDomain senderNS, fromIntegral $ getServicePort senderNS)
-                    lowerKeyBound = maybe (getNid nsSnap) getNid $ headMay (predecessors nsSnap)
-                pure (Just serviceDataMigrator, joinResponse)
+                pure joinResponse
             -- otherwise respond with empty payload
-            else pure (Nothing, Response {
+            else pure Response {
                   requestID = requestID aRequestPart
                 , senderID = getNid nsSnap
                 , part = if Set.size msgSet == 1 then 1 else fromIntegral $ Set.size msgSet + 1
                 , isFinalPart = False
                 , action = Join
                 , payload = Nothing
-                })
+                }
 
-    -- as DHT response is required immediately, fork the service data migration push
-    -- into a new thread. That's kind of ugly but the best I can think of so far
-    when (isJust dataMigration) (forkIO (fromJust dataMigration >> pure ()) >> pure ())
     pure $ serialiseMessage sendMessageSize responseMsg
+    -- TODO: notify service layer to copy over data now handled by the new joined node
 
 -- ....... request sending .......
 
 -- | send a join request and return the joined 'LocalNodeState' including neighbours
-requestJoin :: (NodeState a, Service s (RealNodeSTM s)) => a -- ^ currently responsible node to be contacted
-            -> LocalNodeStateSTM s -- ^ joining NodeState
-            -> IO (Either String (LocalNodeStateSTM s)) -- ^ node after join with all its new information
+requestJoin :: NodeState a => a -- ^ currently responsible node to be contacted
+            -> LocalNodeStateSTM -- ^ joining NodeState
+            -> IO (Either String LocalNodeStateSTM) -- ^ node after join with all its new information
 requestJoin toJoinOn ownStateSTM = do
     ownState <- readTVarIO ownStateSTM
-    prn <- readTVarIO $ parentRealNode ownState
-    nodeConf <- nodeConfig <$> readTVarIO (parentRealNode ownState)
-    let srcAddr = confIP nodeConf
+    srcAddr <- confIP . nodeConfig <$> readTVarIO (parentRealNode ownState)
     bracket (mkSendSocket srcAddr (getDomain toJoinOn) (getDhtPort toJoinOn)) close (\sock -> do
         -- extract own state for getting request information
-        responses <- sendRequestTo (confRequestTimeout nodeConf) (confRequestRetries nodeConf) (\rid -> Request rid (toRemoteNodeState ownState) 1 True Join (Just JoinRequestPayload)) sock
+        responses <- sendRequestTo 5000 3 (\rid -> Request rid (toRemoteNodeState ownState) 1 True Join (Just JoinRequestPayload)) sock
         (cacheInsertQ, joinedState) <- atomically $ do
             stateSnap <- readTVar ownStateSTM
             let
@@ -517,28 +481,25 @@ requestJoin toJoinOn ownStateSTM = do
                     ([], Set.empty, Set.empty)
                     responses
                 -- sort, slice and set the accumulated successors and predecessors
-                -- the contacted node itself is a successor as well and, with few
-                -- nodes, can be a predecessor as well
-                newState = setSuccessors (toRemoteNodeState toJoinOn:Set.elems succAccSet) . setPredecessors (toRemoteNodeState toJoinOn:Set.elems predAccSet) $ stateSnap
+                newState = setSuccessors (Set.elems succAccSet) . setPredecessors (Set.elems predAccSet) $ stateSnap
             writeTVar ownStateSTM newState
             pure (cacheInsertQ, newState)
         -- execute the cache insertions
         mapM_ (\f -> f joinedState) cacheInsertQ
-        if responses == Set.empty
-            then pure . Left $ "join error: got no response from " <> show (getNid toJoinOn)
-            else do
-                -- wait for migration data to be completely received
-                waitForMigrationFrom (nodeService prn) (getNid toJoinOn)
-                pure $ Right ownStateSTM
+        pure $ if responses == Set.empty
+            then Left $ "join error: got no response from " <> show (getNid toJoinOn)
+            else if null (predecessors joinedState) && null (successors joinedState)
+                then Left "join error: no predecessors or successors"
+                -- successful join
+                else Right ownStateSTM
         )
         `catch` (\e -> pure . Left $ displayException (e :: IOException))
 
 
 -- | Send a 'QueryID' 'Request' for getting the node that handles a certain key ID.
-requestQueryID :: (MonadIO m, MonadError String m)
-               => LocalNodeState s -- ^ NodeState of the querying node
+requestQueryID :: LocalNodeState -- ^ NodeState of the querying node
               -> NodeID -- ^ target key ID to look up
-               -> m RemoteNodeState -- ^ the node responsible for handling that key
+               -> IO RemoteNodeState -- ^ the node responsible for handling that key
 -- 1. do a local lookup for the l closest nodes
 -- 2. create l sockets
 -- 3. send a message async concurrently to all l nodes
@@ -546,23 +507,23 @@ requestQueryID :: (MonadIO m, MonadError String m)
 -- 5. repeat until FOUND (problem: new entries not necessarily already in cache, explicitly compare with closer results)
 -- TODO: deal with lookup failures
 requestQueryID ns targetID = do
-    firstCacheSnapshot <- liftIO . readTVarIO . nodeCacheSTM $ ns
+    firstCacheSnapshot <- readTVarIO . nodeCacheSTM $ ns
     -- TODO: make maxAttempts configurable
     queryIdLookupLoop firstCacheSnapshot ns 50 targetID
 
 
 -- | like 'requestQueryID, but allows passing of a custom cache, e.g. for joining
-queryIdLookupLoop :: (MonadIO m, MonadError String m) => NodeCache -> LocalNodeState s -> Int -> NodeID -> m RemoteNodeState
-queryIdLookupLoop _ ns 0 _ = throwError "exhausted maximum lookup attempts"
+queryIdLookupLoop :: NodeCache -> LocalNodeState -> Int -> NodeID -> IO RemoteNodeState
+-- return node itself as default fallback value against infinite recursion.
+-- TODO: consider using an Either instead of a default value
+queryIdLookupLoop _ ns 0 _ = pure $ toRemoteNodeState ns
 queryIdLookupLoop cacheSnapshot ns maxAttempts targetID = do
     let localResult = queryLocalCache ns cacheSnapshot (lNumBestNodes ns) targetID
     -- FOUND can only be returned if targetID is owned by local node
     case localResult of
         FOUND thisNode -> pure thisNode
         FORWARD nodeSet -> do
-            responseEntries <- liftIO $ sendQueryIdMessages targetID ns Nothing (remoteNode <$> Set.elems nodeSet)
-            now <- liftIO getPOSIXTime
+            responseEntries <- sendQueryIdMessages targetID ns Nothing (remoteNode <$> Set.elems nodeSet)
+            now <- getPOSIXTime
             -- check for a FOUND and return it
             case responseEntries of
                 FOUND foundNode -> pure foundNode
@@ -577,7 +538,7 @@ queryIdLookupLoop cacheSnapshot ns maxAttempts targetID = do
 
 sendQueryIdMessages :: (Integral i)
                     => NodeID -- ^ target key ID to look up
-                    -> LocalNodeState s -- ^ node state of the node doing the query
+                    -> LocalNodeState -- ^ node state of the node doing the query
                     -> Maybe i -- ^ optionally provide an explicit @l@ parameter of number of nodes to be returned
                     -> [RemoteNodeState] -- ^ nodes to query
                     -> IO QueryResponse -- ^ accumulated response
@@ -585,10 +546,10 @@ sendQueryIdMessages targetID ns lParam targets = do
 
     -- create connected sockets to all query targets and use them for request handling
 
-    nodeConf <- nodeConfig <$> readTVarIO (parentRealNode ns)
-    let srcAddr = confIP nodeConf
+    srcAddr <- confIP . nodeConfig <$> readTVarIO (parentRealNode ns)
+    -- ToDo: make attempts and timeout configurable
     queryThreads <- mapM (\resultNode -> async $ bracket (mkSendSocket srcAddr (getDomain resultNode) (getDhtPort resultNode)) close (
-        sendRequestTo (confRequestTimeout nodeConf) (confRequestRetries nodeConf) (lookupMessage targetID ns Nothing)
+        sendRequestTo 5000 3 (lookupMessage targetID ns Nothing)
         )) targets
     -- ToDo: process results immediately instead of waiting for the last one to finish, see https://stackoverflow.com/a/38815224/9198613
     -- ToDo: exception handling, maybe log them
@@ -596,10 +557,8 @@ sendQueryIdMessages targetID ns lParam targets = do
     -- insert new cache entries both into global cache as well as return accumulated QueryResponses for further processing
     now <- getPOSIXTime
     -- collect cache entries from all responses
-    foldrM (\resp acc -> do
-        let
-            responseResult = queryResult <$> payload resp
-            entrySet = case responseResult of
+    foldM (\acc resp -> do
+        let entrySet = case queryResult <$> payload resp of
                 Just (FOUND result1) -> Set.singleton (RemoteCacheEntry result1 now)
                 Just (FORWARD resultset) -> resultset
                 _ -> Set.empty
@@ -609,20 +568,15 @@ sendQueryIdMessages targetID ns lParam targets = do
             -- return accumulated QueryResult
             pure $ case acc of
                 -- once a FOUND has been encountered, return this as a result
-                FOUND{} -> acc
-                FORWARD accSet
-                    | maybe False isFound responseResult -> fromJust responseResult
-                    | otherwise -> FORWARD $ entrySet `Set.union` accSet
+                isFound@FOUND{} -> isFound
+                FORWARD accSet -> FORWARD $ entrySet `Set.union` accSet
 
         ) (FORWARD Set.empty) responses
-    where
-        isFound FOUND{} = True
-        isFound _ = False
 
 -- | Create a QueryID message to be supplied to 'sendRequestTo'
 lookupMessage :: Integral i
               => NodeID -- ^ target ID
-              -> LocalNodeState s -- ^ sender node state
+              -> LocalNodeState -- ^ sender node state
              -> Maybe i -- ^ optionally provide a different l parameter
              -> (Integer -> FediChordMessage)
 lookupMessage targetID ns lParam = \rID -> Request rID (toRemoteNodeState ns) 1 True QueryID (Just $ pl ns targetID)
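Both sides of this hunk fold per-node responses into one accumulated result. A toy model (simplified types and invented names) of the shared combination rule: a FOUND wins outright, FORWARD candidate sets are unioned across responses:

import qualified Data.Set as Set

data QueryAcc a = Found a | Forward (Set.Set a)

combine :: Ord a => QueryAcc a -> QueryAcc a -> QueryAcc a
combine acc@(Found _) _              = acc
combine _             r@(Found _)    = r
combine (Forward xs)  (Forward ys)   = Forward (xs `Set.union` ys)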
@@ -632,13 +586,12 @@ lookupMessage targetID ns lParam = \rID -> Request rID (toRemoteNodeState ns) 1
 
 -- | Send a stabilise request to provided 'RemoteNode' and, if successful,
 -- return parsed neighbour lists
-requestStabilise :: LocalNodeState s -- ^ sending node
+requestStabilise :: LocalNodeState -- ^ sending node
                  -> RemoteNodeState -- ^ neighbour node to send to
                  -> IO (Either String ([RemoteNodeState], [RemoteNodeState])) -- ^ (predecessors, successors) of responding node
 requestStabilise ns neighbour = do
-    nodeConf <- nodeConfig <$> readTVarIO (parentRealNode ns)
-    let srcAddr = confIP nodeConf
-    responses <- bracket (mkSendSocket srcAddr (getDomain neighbour) (getDhtPort neighbour)) close (fmap Right . sendRequestTo (confRequestTimeout nodeConf) (confRequestRetries nodeConf) (\rid ->
+    srcAddr <- confIP . nodeConfig <$> readTVarIO (parentRealNode ns)
+    responses <- bracket (mkSendSocket srcAddr (getDomain neighbour) (getDhtPort neighbour)) close (fmap Right . sendRequestTo 5000 3 (\rid ->
         Request {
               requestID = rid
             , sender = toRemoteNodeState ns
@@ -660,55 +613,22 @@ requestStabilise ns neighbour = do
                     )
                     ([],[]) respSet
             -- update successfully responded neighbour in cache
-            maybe (pure ()) (\p -> queueUpdateVerifieds (Identity $ senderID p) ns) $ headMay (Set.elems respSet)
+            now <- getPOSIXTime
+            maybe (pure ()) (\p -> queueAddEntries (Identity $ RemoteCacheEntry (sender p) now) ns) $ headMay (Set.elems respSet)
             pure $ if null responsePreds && null responseSuccs
                 then Left "no neighbours returned"
                 else Right (responsePreds, responseSuccs)
         ) responses
 
 
-
--- | Send a Leave request to the specified node.
--- Service data transfer needs to be done separately, as not all neighbours
--- that need to know about the leaving handle the new service data.
-requestLeave :: LocalNodeState s
-             -> Bool -- whether to migrate service data
-             -> RemoteNodeState -- target node
-             -> IO (Either String ()) -- error or success
-requestLeave ns doMigration target = do
-    nodeConf <- nodeConfig <$> readTVarIO (parentRealNode ns)
-    let srcAddr = confIP nodeConf
-        leavePayload = LeaveRequestPayload {
-              leaveSuccessors = successors ns
-            , leavePredecessors = predecessors ns
-            , leaveDoMigration = doMigration
-            }
-    responses <- bracket (mkSendSocket srcAddr (getDomain target) (getDhtPort target)) close (fmap Right . sendRequestTo (confRequestTimeout nodeConf) (confRequestRetries nodeConf) (\rid ->
-        Request {
-              requestID = rid
-            , sender = toRemoteNodeState ns
-            , part = 1
-            , isFinalPart = False
-            , action = Leave
-            , payload = Just leavePayload
-            }
-        )
-        ) `catch` (\e -> pure . Left $ displayException (e :: IOException))
-    either
-        -- forward IO error messages
-        (pure . Left)
-        -- empty payload, so no processing required
-        (const . pure . Right $ ())
-        responses
-
-requestPing :: LocalNodeState s -- ^ sending node
+requestPing :: LocalNodeState -- ^ sending node
             -> RemoteNodeState -- ^ node to be PINGed
             -> IO (Either String [RemoteNodeState]) -- ^ all active vServers of the pinged node
 requestPing ns target = do
-    nodeConf <- nodeConfig <$> readTVarIO (parentRealNode ns)
-    let srcAddr = confIP nodeConf
+    srcAddr <- confIP . nodeConfig <$> readTVarIO (parentRealNode ns)
     responses <- bracket (mkSendSocket srcAddr (getDomain target) (getDhtPort target)) close
         (\sock -> do
-            resp <- sendRequestTo (confRequestTimeout nodeConf) (confRequestRetries nodeConf) (\rid ->
+            resp <- sendRequestTo 5000 3 (\rid ->
                 Request {
                       requestID = rid
                     , sender = toRemoteNodeState ns
@@ -744,9 +664,10 @@ requestPing ns target = do
         ) responses
 
 
 
 -- | Generic function for sending a request over a connected socket and collecting the response.
 -- Serialises the message and tries to deliver its parts for a number of attempts within a specified timeout.
-sendRequestTo :: Int -- ^ timeout in milliseconds
+sendRequestTo :: Int -- ^ timeout in seconds
               -> Int -- ^ number of retries
               -> (Integer -> FediChordMessage) -- ^ the message to be sent, still needing a requestID
              -> Socket -- ^ connected socket to use for sending
@@ -757,10 +678,11 @@ sendRequestTo timeoutMillis numAttempts msgIncomplete sock = do
     let
         msgComplete = msgIncomplete randomID
         requests = serialiseMessage sendMessageSize msgComplete
+    putStrLn $ "sending request message " <> show msgComplete
     -- create a queue for passing received response messages back, even after a timeout
     responseQ <- newTBQueueIO $ 2*maximumParts -- keep room for duplicate packets
     -- start sendAndAck with timeout
-    _ <- attempts numAttempts . timeout (timeoutMillis*1000) $ sendAndAck responseQ sock requests
+    attempts numAttempts . timeout timeoutMillis $ sendAndAck responseQ sock requests
     -- after timeout, check received responses, delete them from unacked message set/ map and rerun sendAndAck with that if necessary.
     recvdParts <- atomically $ flushTBQueue responseQ
     pure $ Set.fromList recvdParts
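Note the unit fix visible here: System.Timeout.timeout takes microseconds, so the mainline side multiplies its millisecond parameter by 1000, while the right-hand side passes the value (documented as seconds) through unconverted. A sketch of the retry-under-timeout pattern, with the attempts implementation assumed from the signature exported further below:

import System.Timeout (timeout)

-- Retry an IO action at most i times until it yields a result.
attempts :: Int -> IO (Maybe a) -> IO (Maybe a)
attempts i _ | i <= 0 = pure Nothing
attempts i action = do
    result <- action
    case result of
        Nothing -> attempts (i - 1) action
        Just _  -> pure result

-- timeout expects microseconds, hence mainline's (timeoutMillis * 1000).
sendWithRetries :: Int -> Int -> IO () -> IO (Maybe ())
sendWithRetries timeoutMillis numAttempts send =
    attempts numAttempts $ timeout (timeoutMillis * 1000) send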
@@ -769,20 +691,19 @@ sendRequestTo timeoutMillis numAttempts msgIncomplete sock = do
                   -> Socket -- ^ the socket used for sending and receiving for this particular remote node
                   -> Map.Map Integer BS.ByteString -- ^ the remaining unacked request parts
                   -> IO ()
-        sendAndAck responseQueue sock' remainingSends = do
-            sendMany sock' $ Map.elems remainingSends
+        sendAndAck responseQueue sock remainingSends = do
+            sendMany sock $ Map.elems remainingSends
             -- if all requests have been acked/ responded to, return prematurely
-            recvLoop sock' responseQueue remainingSends Set.empty Nothing
-        recvLoop :: Socket
-                 -> TBQueue FediChordMessage -- ^ the queue for putting in the received responses
+            recvLoop responseQueue remainingSends Set.empty Nothing
+        recvLoop :: TBQueue FediChordMessage -- ^ the queue for putting in the received responses
                  -> Map.Map Integer BS.ByteString -- ^ the remaining unacked request parts
                 -> Set.Set Integer -- ^ already received response part numbers
                 -> Maybe Integer -- ^ total number of response parts if already known
                 -> IO ()
-        recvLoop sock' responseQueue remainingSends' receivedPartNums totalParts = do
+        recvLoop responseQueue remainingSends' receivedPartNums totalParts = do
            -- 65535 is maximum length of UDP packets, as long as
            -- no IPv6 jumbograms are used
-            response <- deserialiseMessage <$> recv sock' 65535
+            response <- deserialiseMessage <$> recv sock 65535
             case response of
                 Right msg@Response{} -> do
                     atomically $ writeTBQueue responseQueue msg
@@ -790,17 +711,16 @@ sendRequestTo timeoutMillis numAttempts msgIncomplete sock = do
                         newTotalParts = if isFinalPart msg then Just (part msg) else totalParts
                         newRemaining = Map.delete (part msg) remainingSends'
-                        newReceivedParts = Set.insert (part msg) receivedPartNums
-                    if Map.null newRemaining && maybe False (\p -> Set.size newReceivedParts == fromIntegral p) newTotalParts
+                    if Map.null newRemaining && maybe False (\p -> Set.size receivedPartNums == fromIntegral p) newTotalParts
                         then pure ()
-                        else recvLoop sock' responseQueue newRemaining newReceivedParts newTotalParts
+                        else recvLoop responseQueue newRemaining receivedPartNums newTotalParts
                 -- drop errors and invalid messages
                 Right Request{} -> pure () -- expecting a response, not a request
-                Left _ -> recvLoop sock' responseQueue remainingSends' receivedPartNums totalParts
+                Left _ -> recvLoop responseQueue remainingSends' receivedPartNums totalParts
 
 
 -- | enqueue a list of RemoteCacheEntries to be added to the global NodeCache
 queueAddEntries :: Foldable c => c RemoteCacheEntry
-                -> LocalNodeState s
+                -> LocalNodeState
                 -> IO ()
 queueAddEntries entries ns = do
     now <- getPOSIXTime
@@ -810,29 +730,17 @@ queueAddEntries entries ns = do
 -- | enqueue a list of node IDs to be deleted from the global NodeCache
 queueDeleteEntries :: Foldable c
                    => c NodeID
-                   -> LocalNodeState s
+                   -> LocalNodeState
                    -> IO ()
 queueDeleteEntries ids ns = forM_ ids $ atomically . writeTQueue (cacheWriteQueue ns) . deleteCacheEntry
 
 
 -- | enqueue a single node ID to be deleted from the global NodeCache
 queueDeleteEntry :: NodeID
-                 -> LocalNodeState s
+                 -> LocalNodeState
                  -> IO ()
 queueDeleteEntry toDelete = queueDeleteEntries $ Identity toDelete
 
-
--- | enqueue the timestamp update and verification marking of an entry in the
--- global 'NodeCache'.
-queueUpdateVerifieds :: Foldable c
-                     => c NodeID
-                     -> LocalNodeState s
-                     -> IO ()
-queueUpdateVerifieds nIds ns = do
-    now <- getPOSIXTime
-    forM_ nIds $ \nid' -> atomically $ writeTQueue (cacheWriteQueue ns) $
-        markCacheEntryAsVerified (Just now) nid'
 
 -- | retry an IO action at most *i* times until it delivers a result
 attempts :: Int -- ^ number of retries *i*
          -> IO (Maybe a) -- ^ action to retry
@@ -865,7 +773,7 @@ mkServerSocket ip port = do
     sockAddr <- addrAddress <$> resolve (Just $ show . fromHostAddress6 $ ip) (Just port)
     sock <- socket AF_INET6 Datagram defaultProtocol
     setSocketOption sock IPv6Only 1
-    bind sock sockAddr `catch` (\e -> putStrLn $ "Caught exception while bind " <> show sock <> " " <> show sockAddr <> ": " <> show (e :: SomeException))
+    bind sock sockAddr
     pure sock
 
 -- | create a UDP datagram socket, connected to a destination.
@@ -881,6 +789,6 @@ mkSendSocket srcIp dest destPort = do
     setSocketOption sendSock IPv6Only 1
     -- bind to the configured local IP to make sure that outgoing packets are sent from
     -- this source address
-    bind sendSock srcAddr `catch` (\e -> putStrLn $ "Caught exception while mkSendSocket bind " <> show sendSock <> " " <> show srcAddr <> ": " <> show (e :: SomeException))
+    bind sendSock srcAddr
     connect sendSock destAddr
     pure sendSock
src/Hash2Pub/FediChord.hs

@@ -3,6 +3,7 @@
 {-# LANGUAGE FlexibleContexts #-}
 {-# LANGUAGE FlexibleInstances #-}
 {-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE TypeSynonymInstances #-}
 {- |
 Module      : FediChord
 Description : An opinionated implementation of the EpiChord DHT by Leong et al.
@@ -39,7 +40,7 @@ module Hash2Pub.FediChord (
     , bsAsIpAddr
     , FediChordConf(..)
     , fediChordInit
-    , fediChordVserverJoin
+    , fediChordJoin
     , fediChordBootstrapJoin
     , tryBootstrapJoining
     , fediMainThreads
@@ -77,6 +78,7 @@ import Data.Maybe (catMaybes, fromJust, fromMaybe,
                    isJust, isNothing, mapMaybe)
 import qualified Data.Set as Set
 import Data.Time.Clock.POSIX
+import Data.Typeable (Typeable (..), typeOf)
 import Data.Word
 import qualified Network.ByteOrder as NetworkBytes
 import Network.Socket hiding (recv, recvFrom, send,
@@ -93,34 +95,24 @@ import Debug.Trace (trace)
 
 -- | initialise data structures, compute own IDs and bind to listening socket
 -- ToDo: load persisted state, thus this function already operates in IO
-fediChordInit :: (Service s (RealNodeSTM s))
-              => FediChordConf
-              -> (RealNodeSTM s -> IO (s (RealNodeSTM s))) -- ^ runner function for service
-              -> IO (Socket, LocalNodeStateSTM s)
-fediChordInit initConf serviceRunner = do
+fediChordInit :: FediChordConf -> IO (Socket, LocalNodeStateSTM)
+fediChordInit initConf = do
     emptyLookupCache <- newTVarIO Map.empty
     let realNode = RealNode {
           vservers = []
        , nodeConfig = initConf
        , bootstrapNodes = confBootstrapNodes initConf
        , lookupCacheSTM = emptyLookupCache
-       , nodeService = undefined
        }
     realNodeSTM <- newTVarIO realNode
-    -- launch service and set the reference in the RealNode
-    serv <- serviceRunner realNodeSTM
-    atomically . modifyTVar' realNodeSTM $ \rn -> rn { nodeService = serv }
-    -- initialise a single vserver
     initialState <- nodeStateInit realNodeSTM
     initialStateSTM <- newTVarIO initialState
-    -- add vserver to list at RealNode
-    atomically . modifyTVar' realNodeSTM $ \rn -> rn { vservers = initialStateSTM:vservers rn }
     serverSock <- mkServerSocket (getIpAddr initialState) (getDhtPort initialState)
     pure (serverSock, initialStateSTM)
 
 -- | initialises the 'NodeState' for this local node.
 -- Separated from 'fediChordInit' to be usable in tests.
-nodeStateInit :: Service s (RealNodeSTM s) => RealNodeSTM s -> IO (LocalNodeState s)
+nodeStateInit :: RealNodeSTM -> IO LocalNodeState
 nodeStateInit realNodeSTM = do
     realNode <- readTVarIO realNodeSTM
     cacheSTM <- newTVarIO initCache
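Mainline's fediChordInit injects the service through a runner function so node and service can hold references to each other. A toy model of that wiring (simplified record, and Maybe in place of the undefined placeholder used above):

import Control.Concurrent.STM

data ToyNode s = ToyNode { toyService :: Maybe s }

-- Create the node first, start the service with a back-reference to it,
-- then store the service handle in the node.
initWithService :: (TVar (ToyNode s) -> IO s) -> IO (TVar (ToyNode s))
initWithService serviceRunner = do
    nodeSTM <- newTVarIO (ToyNode Nothing)
    serv <- serviceRunner nodeSTM
    atomically . modifyTVar' nodeSTM $ \n -> n { toyService = Just serv }
    pure nodeSTM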
@@ -133,7 +125,7 @@ nodeStateInit realNodeSTM = do
         , ipAddr = confIP conf
         , nid = genNodeID (confIP conf) (confDomain conf) $ fromInteger vsID
         , dhtPort = toEnum $ confDhtPort conf
-        , servicePort = getListeningPortFromService $ nodeService realNode
+        , servicePort = 0
         , vServerID = vsID
         }
     initialState = LocalNodeState {
@@ -152,10 +144,9 @@ nodeStateInit realNodeSTM = do
 
 -- | Join a new node into the DHT, using a provided bootstrap node as initial cache seed
 -- for resolving the new node's position.
-fediChordBootstrapJoin :: Service s (RealNodeSTM s)
-                       => LocalNodeStateSTM s -- ^ the local 'NodeState'
+fediChordBootstrapJoin :: LocalNodeStateSTM -- ^ the local 'NodeState'
                        -> (String, PortNumber) -- ^ domain and port of a bootstrapping node
-                       -> IO (Either String (LocalNodeStateSTM s)) -- ^ the joined 'NodeState' after a
+                       -> IO (Either String LocalNodeStateSTM) -- ^ the joined 'NodeState' after a
                                                                    -- successful join, otherwise an error message
 fediChordBootstrapJoin nsSTM bootstrapNode = do
     -- can be invoked multiple times with all known bootstrapping nodes until successfully joined
@@ -166,13 +157,12 @@ fediChordBootstrapJoin nsSTM bootstrapNode = do
         currentlyResponsible <- liftEither lookupResp
         liftIO . putStrLn $ "Trying to join on " <> show (getNid currentlyResponsible)
         -- 2. then send a join to the currently responsible node
-        liftIO $ putStrLn "send a bootstrap Join"
         joinResult <- liftIO $ requestJoin currentlyResponsible nsSTM
         liftEither joinResult
 
 -- Periodically lookup own ID through a random bootstrapping node to discover and merge separated DHT clusters.
 -- Unjoined try joining instead.
-convergenceSampleThread :: Service s (RealNodeSTM s) => LocalNodeStateSTM s -> IO ()
+convergenceSampleThread :: LocalNodeStateSTM -> IO ()
 convergenceSampleThread nsSTM = forever $ do
     nsSnap <- readTVarIO nsSTM
     parentNode <- readTVarIO $ parentRealNode nsSnap
@@ -199,11 +189,11 @@ convergenceSampleThread nsSTM = forever $ do
             -- unjoined node: try joining through all bootstrapping nodes
             else tryBootstrapJoining nsSTM >> pure ()
     let delaySecs = confBootstrapSamplingInterval . nodeConfig $ parentNode
-    threadDelay delaySecs
+    threadDelay $ delaySecs * 10^6
 
 
 -- | Try joining the DHT through any of the bootstrapping nodes until it succeeds.
-tryBootstrapJoining :: Service s (RealNodeSTM s) => LocalNodeStateSTM s -> IO (Either String (LocalNodeStateSTM s))
+tryBootstrapJoining :: LocalNodeStateSTM -> IO (Either String LocalNodeStateSTM)
 tryBootstrapJoining nsSTM = do
     bss <- atomically $ do
         nsSnap <- readTVar nsSTM
@@ -220,14 +210,13 @@ tryBootstrapJoining nsSTM = do
 
 
 -- | Look up a key just based on the responses of a single bootstrapping node.
-bootstrapQueryId :: LocalNodeStateSTM s -> (String, PortNumber) -> NodeID -> IO (Either String RemoteNodeState)
+bootstrapQueryId :: LocalNodeStateSTM -> (String, PortNumber) -> NodeID -> IO (Either String RemoteNodeState)
 bootstrapQueryId nsSTM (bootstrapHost, bootstrapPort) targetID = do
     ns <- readTVarIO nsSTM
-    nodeConf <- nodeConfig <$> readTVarIO (parentRealNode ns)
-    let srcAddr = confIP nodeConf
+    srcAddr <- confIP . nodeConfig <$> readTVarIO (parentRealNode ns)
     bootstrapResponse <- bracket (mkSendSocket srcAddr bootstrapHost bootstrapPort) close (
         -- Initialise an empty cache only with the responses from a bootstrapping node
-        fmap Right . sendRequestTo (confRequestTimeout nodeConf) (confRequestRetries nodeConf) (lookupMessage targetID ns Nothing)
+        fmap Right . sendRequestTo 5000 3 (lookupMessage targetID ns Nothing)
         )
         `catch` (\e -> pure . Left $ "Error at bootstrap QueryId: " <> displayException (e :: IOException))
@ -246,95 +235,60 @@ bootstrapQueryId nsSTM (bootstrapHost, bootstrapPort) targetID = do
|
|||
Just (FORWARD resultset) -> foldr' (addCacheEntryPure now) cacheAcc resultset
|
||||
)
|
||||
initCache resp
|
||||
currentlyResponsible <- runExceptT $ queryIdLookupLoop bootstrapCache ns 50 $ getNid ns
|
||||
pure currentlyResponsible
|
||||
currentlyResponsible <- queryIdLookupLoop bootstrapCache ns 50 $ getNid ns
|
||||
pure $ Right currentlyResponsible
|
||||
|
||||
|
||||
-- | join a node to the DHT using the global node cache
-- node's position.
fediChordVserverJoin :: (MonadError String m, MonadIO m, Service s (RealNodeSTM s))
=> LocalNodeStateSTM s -- ^ the local 'NodeState'
-> m (LocalNodeStateSTM s) -- ^ the joined 'NodeState' after a
fediChordJoin :: LocalNodeStateSTM -- ^ the local 'NodeState'
-> IO (Either String LocalNodeStateSTM) -- ^ the joined 'NodeState' after a
-- successful join, otherwise an error message
fediChordVserverJoin nsSTM = do
ns <- liftIO $ readTVarIO nsSTM
fediChordJoin nsSTM = do
ns <- readTVarIO nsSTM
-- 1. get routed to the currently responsible node
currentlyResponsible <- requestQueryID ns $ getNid ns
liftIO . putStrLn $ "Trying to join on " <> show (getNid currentlyResponsible)
putStrLn $ "Trying to join on " <> show (getNid currentlyResponsible)
-- 2. then send a join to the currently responsible node
joinResult <- liftIO $ requestJoin currentlyResponsible nsSTM
liftEither joinResult

fediChordVserverLeave :: (MonadError String m, MonadIO m, Service s (RealNodeSTM s)) => LocalNodeState s -> m ()
fediChordVserverLeave ns = do
-- TODO: deal with failure of all successors, e.g. by invoking a stabilise
-- and looking up further successors. So far we just fail here.
_ <- migrateSuccessor
-- then send leave messages to all other neighbours
-- TODO: distinguish between sending error causes on our side and on the
-- network/ target side. The latter cannot be fixed anyway, while the
-- former could be worked around

-- send a leave message to all neighbours
forM_ (predecessors ns <> successors ns) $ liftIO . requestLeave ns False
where
sendUntilSuccess i = maybe
(pure $ Left "Exhausted all successors")
(\neighb -> do
leaveResponse <- requestLeave ns True neighb
case leaveResponse of
Left _ -> sendUntilSuccess (i+1)
-- return the first successfully contacted neighbour,
-- so it can be contacted by the service layer for migration
Right _ -> pure $ Right neighb
)
$ atMay (successors ns) i
migrateSuccessor :: (MonadError String m, MonadIO m) => m ()
migrateSuccessor = do
-- send a leave message to the first responding successor
successorLeave <- liftIO $ sendUntilSuccess 0
-- trigger service data transfer for the abandoned key space
migrateToNode <- liftEither successorLeave
let lowerKeyBound = maybe (getNid ns) getNid $ headMay (predecessors ns)
ownService <- nodeService <$> (liftIO . readTVarIO $ parentRealNode ns)
-- previously held data is the data between the immediate predecessor and
-- the own ID
migrationResult <- liftIO $ migrateData ownService (getNid ns) lowerKeyBound (getNid ns) (getDomain migrateToNode, fromIntegral $ getServicePort migrateToNode)
liftEither migrationResult

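-- 'sendUntilSuccess' above walks the successor list until one neighbour
-- acknowledges the leave. A self-contained sketch of this first-success
-- iteration (illustrative types, not part of either branch):
firstSuccess :: (a -> IO (Either e b)) -> [a] -> IO (Either String b)
firstSuccess _ [] = pure $ Left "Exhausted all candidates"
firstSuccess act (x:xs) = do
    res <- act x
    case res of
        Left _  -> firstSuccess act xs
        Right r -> pure $ Right r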
joinResult <- requestJoin currentlyResponsible nsSTM
case joinResult of
Left err -> pure . Left $ "Error joining on " <> err
Right joinedNS -> pure . Right $ joinedNS

-- | Wait for new cache entries to appear and then try joining on them.
-- Exits after successful joining.
joinOnNewEntriesThread :: Service s (RealNodeSTM s) => LocalNodeStateSTM s -> IO ()
joinOnNewEntriesThread :: LocalNodeStateSTM -> IO ()
joinOnNewEntriesThread nsSTM = loop
where
loop = do
nsSnap <- readTVarIO nsSTM
(lookupResult, parentNode) <- atomically $ do
(lookupResult, cache) <- atomically $ do
cache <- readTVar $ nodeCacheSTM nsSnap
parentNode <- readTVar $ parentRealNode nsSnap
case queryLocalCache nsSnap cache 1 (getNid nsSnap) of
-- empty cache, block until cache changes and then retry
(FORWARD s) | Set.null s -> retry
result -> pure (result, parentNode)
result -> pure (result, cache)
case lookupResult of
-- already joined
FOUND _ ->
FOUND _ -> do
print =<< readTVarIO nsSTM
pure ()
-- otherwise try joining
FORWARD _ -> do
joinResult <- runExceptT $ fediChordVserverJoin nsSTM
joinResult <- fediChordJoin nsSTM
either
-- on join failure, sleep and retry
(const $ threadDelay (confJoinAttemptsInterval . nodeConfig $ parentNode) >> loop)
-- TODO: make delay configurable
(const $ threadDelay (30 * 10^6) >> loop)
(const $ pure ())
joinResult
emptyset = Set.empty -- because pattern matches don't accept qualified names

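-- The 'retry' above blocks the STM transaction until the node cache TVar
-- changes, so the thread sleeps until new cache entries arrive. A
-- self-contained sketch of blocking on a TVar until a predicate holds
-- (illustrative only; needs Control.Concurrent.STM):
awaitNonEmpty :: TVar [a] -> IO [a]
awaitNonEmpty var = atomically $ do
    xs <- readTVar var
    if null xs then retry else pure xs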

-- | cache updater thread that waits for incoming NodeCache update instructions on
-- the node's cacheWriteQueue and then modifies the NodeCache as the single writer.
nodeCacheWriter :: LocalNodeStateSTM s -> IO ()
nodeCacheWriter :: LocalNodeStateSTM -> IO ()
nodeCacheWriter nsSTM =
forever $ atomically $ do
ns <- readTVar nsSTM

@ -342,15 +296,20 @@ nodeCacheWriter nsSTM =
modifyTVar' (nodeCacheSTM ns) cacheModifier

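-- The single-writer design above funnels every cache mutation through one
-- thread that reads modifier functions from a queue. A self-contained sketch
-- of the pattern (illustrative names; needs Control.Concurrent.STM and
-- Control.Monad):
singleWriter :: TQueue (s -> s) -> TVar s -> IO ()
singleWriter queue stateVar = forever $ atomically $ do
    modifier <- readTQueue queue
    modifyTVar' stateVar modifier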
-- TODO: make max entry age configurable
maxEntryAge :: POSIXTime
maxEntryAge = 600

-- | Periodically iterate through cache, clean up expired entries and verify unverified ones
nodeCacheVerifyThread :: LocalNodeStateSTM s -> IO ()
nodeCacheVerifyThread :: LocalNodeStateSTM -> IO ()
nodeCacheVerifyThread nsSTM = forever $ do
putStrLn "cache verify run: begin"
-- get cache
(ns, cache, maxEntryAge) <- atomically $ do
(ns, cache) <- atomically $ do
ns <- readTVar nsSTM
cache <- readTVar $ nodeCacheSTM ns
maxEntryAge <- confMaxNodeCacheAge . nodeConfig <$> readTVar (parentRealNode ns)
pure (ns, cache, maxEntryAge)
pure (ns, cache)
-- iterate entries:
-- to avoid too many time syscalls, get the current time once before iterating.
now <- getPOSIXTime

@ -397,13 +356,14 @@ nodeCacheVerifyThread nsSTM = forever $ do
forkIO $ sendQueryIdMessages targetID latestNs (Just (1 + jEntriesPerSlice latestNs)) (nodesToQuery targetID) >> pure () -- ask for 1 entry more than j because of querying the middle
)

threadDelay $ fromEnum (maxEntryAge / 20) `div` 10^6 -- convert from picoseconds to microseconds
putStrLn "cache verify run: end"
threadDelay $ 10^6 * round maxEntryAge `div` 20

-- | Checks the invariant of at least @jEntries@ per cache slice.
-- If this invariant does not hold, the middle of the slice is returned for
-- making lookups to that ID
checkCacheSliceInvariants :: LocalNodeState s
checkCacheSliceInvariants :: LocalNodeState
-> NodeCache
-> [NodeID] -- ^ list of middle IDs of slices not
-- fulfilling the invariant
@ -459,10 +419,12 @@ checkCacheSliceInvariants ns
-- | Periodically send @StabiliseRequest@s to the closest neighbour nodes, until
-- one responds, and get their neighbours for maintaining the own neighbour lists.
-- If necessary, request new neighbours.
stabiliseThread :: Service s (RealNodeSTM s) => LocalNodeStateSTM s -> IO ()
stabiliseThread :: LocalNodeStateSTM -> IO ()
stabiliseThread nsSTM = forever $ do
oldNs <- readTVarIO nsSTM
ns <- readTVarIO nsSTM

putStrLn "stabilise run: begin"
print ns

-- iterate through the same snapshot, collect potential new neighbours
-- and nodes to be deleted, and modify these changes only at the end of

@ -471,8 +433,8 @@ stabiliseThread nsSTM = forever $ do

-- don't contact all neighbours unless the previous one failed (returned a Left)

predStabilise <- stabiliseClosestResponder oldNs predecessors 1 []
succStabilise <- stabiliseClosestResponder oldNs successors 1 []
predStabilise <- stabiliseClosestResponder ns predecessors 1 []
succStabilise <- stabiliseClosestResponder ns successors 1 []

let
(predDeletes, predNeighbours) = either (const ([], [])) id predStabilise
@ -497,60 +459,31 @@ stabiliseThread nsSTM = forever $ do
-- try looking up additional neighbours if list too short
forM_ [(length $ predecessors updatedNs)..(kNeighbours updatedNs)] (\_ -> do
ns' <- readTVarIO nsSTM
nextEntry <- runExceptT . requestQueryID ns' $ pred . getNid $ lastDef (toRemoteNodeState ns') (predecessors ns')
either
(const $ pure ())
(\entry -> atomically $ do
nextEntry <- requestQueryID ns' $ pred . getNid $ lastDef (toRemoteNodeState ns') (predecessors ns')
atomically $ do
latestNs <- readTVar nsSTM
writeTVar nsSTM $ addPredecessors [entry] latestNs
)
nextEntry
writeTVar nsSTM $ addPredecessors [nextEntry] latestNs
)

forM_ [(length $ successors updatedNs)..(kNeighbours updatedNs)] (\_ -> do
ns' <- readTVarIO nsSTM
nextEntry <- runExceptT . requestQueryID ns' $ succ . getNid $ lastDef (toRemoteNodeState ns') (successors ns')
either
(const $ pure ())
(\entry -> atomically $ do
nextEntry <- requestQueryID ns' $ succ . getNid $ lastDef (toRemoteNodeState ns') (successors ns')
atomically $ do
latestNs <- readTVar nsSTM
writeTVar nsSTM $ addSuccessors [entry] latestNs
)
nextEntry
writeTVar nsSTM $ addSuccessors [nextEntry] latestNs
)

newNs <- readTVarIO nsSTM

let
oldPredecessor = headDef (toRemoteNodeState oldNs) $ predecessors oldNs
newPredecessor = headMay $ predecessors newNs
-- manage need for service data migration:
maybe (pure ()) (\newPredecessor' ->
when (
isJust newPredecessor
&& oldPredecessor /= newPredecessor'
-- case: predecessor has changed in some way => own responsibility has changed in some way
-- case 1: new predecessor is further away => broader responsibility, but the new pred needs to push the data
-- If this is due to a node leaving without transferring its data, try getting it from a redundant copy
-- case 2: new predecessor is closer, it takes some of our data but somehow didn't join on us => push data to it
&& isInOwnResponsibilitySlice newPredecessor' oldNs) $ do
ownService <- nodeService <$> (liftIO . readTVarIO $ parentRealNode newNs)
migrationResult <- migrateData ownService (getNid newNs) (getNid oldPredecessor) (getNid newPredecessor') (getDomain newPredecessor', fromIntegral $ getServicePort newPredecessor')
-- TODO: deal with migration failure, e.g. retry
pure ()
)
newPredecessor

stabiliseDelay <- confStabiliseInterval . nodeConfig <$> readTVarIO (parentRealNode newNs)
threadDelay stabiliseDelay
putStrLn "stabilise run: end"
-- TODO: make delay configurable
threadDelay (60 * 10^6)
where
-- | send a stabilise request to the n-th neighbour
-- (specified by the provided getter function) and on failure retry
-- with the n+1-th neighbour.
-- On success, return 2 lists: the failed nodes and the potential neighbours
-- returned by the queried node.
stabiliseClosestResponder :: LocalNodeState s -- ^ own node
-> (LocalNodeState s -> [RemoteNodeState]) -- ^ getter function for either predecessors or successors
stabiliseClosestResponder :: LocalNodeState -- ^ own node
-> (LocalNodeState -> [RemoteNodeState]) -- ^ getter function for either predecessors or successors
-> Int -- ^ index of neighbour to query
-> [RemoteNodeState] -- ^ delete accumulator
-> IO (Either String ([RemoteNodeState], [RemoteNodeState])) -- ^ (nodes to be deleted, successfully pinged potential neighbours)
@ -574,7 +507,7 @@ stabiliseThread nsSTM = forever $ do

currentNeighbour ns neighbourGetter = atMay $ neighbourGetter ns

checkReachability :: LocalNodeState s -- ^ this node
checkReachability :: LocalNodeState -- ^ this node
-> RemoteNodeState -- ^ node to ping for reachability
-> IO (Maybe RemoteNodeState) -- ^ if the pinged node handles the requested node state, that one is returned
checkReachability ns toCheck = do
@ -603,10 +536,10 @@ sendThread sock sendQ = forever $ do
sendAllTo sock packet addr

-- | Sets up and manages the main server threads of FediChord
fediMainThreads :: Service s (RealNodeSTM s) => Socket -> LocalNodeStateSTM s -> IO ()
fediMainThreads :: Socket -> LocalNodeStateSTM -> IO ()
fediMainThreads sock nsSTM = do
ns <- readTVarIO nsSTM
putStrLn "launching threads"
putStrLn $ "launching threads, ns: " <> show ns
sendQ <- newTQueueIO
recvQ <- newTQueueIO
-- concurrently launch all handler threads; if one of them throws an exception
@ -629,36 +562,38 @@ type RequestMap = Map.Map (SockAddr, Integer) RequestMapEntry
data RequestMapEntry = RequestMapEntry (Set.Set FediChordMessage) (Maybe Integer)
POSIXTime

-- TODO: make purge age configurable
-- | periodically clean up old request parts
responsePurgeAge :: POSIXTime
responsePurgeAge = 60 -- seconds

requestMapPurge :: POSIXTime -> MVar RequestMap -> IO ()
requestMapPurge purgeAge mapVar = forever $ do
requestMapPurge :: MVar RequestMap -> IO ()
requestMapPurge mapVar = forever $ do
rMapState <- takeMVar mapVar
now <- getPOSIXTime
putMVar mapVar $ Map.filter (\(RequestMapEntry _ _ ts) ->
now - ts < purgeAge
putMVar mapVar $ Map.filter (\entry@(RequestMapEntry _ _ ts) ->
now - ts < responsePurgeAge
) rMapState
threadDelay $ (fromEnum purgeAge * 2) `div` 10^6
threadDelay $ round responsePurgeAge * 2 * 10^6

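-- One variant above threads the purge age through as a parameter instead of
-- reading a top-level constant. A self-contained sketch of an age-based
-- purge over a map of timestamps (illustrative, not project code; needs
-- Data.Map.Strict and Data.Time.Clock.POSIX):
purgeOlderThan :: POSIXTime -> Map.Map k POSIXTime -> IO (Map.Map k POSIXTime)
purgeOlderThan maxAge m = do
    now <- getPOSIXTime
    pure $ Map.filter (\ts -> now - ts < maxAge) m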
-- | Wait for messages, deserialise them, manage parts and acknowledgement status,
-- and pass them to their specific handling function.
fediMessageHandler :: Service s (RealNodeSTM s)
=> TQueue (BS.ByteString, SockAddr) -- ^ send queue
fediMessageHandler :: TQueue (BS.ByteString, SockAddr) -- ^ send queue
-> TQueue (BS.ByteString, SockAddr) -- ^ receive queue
-> LocalNodeStateSTM s -- ^ acting NodeState
-> LocalNodeStateSTM -- ^ acting NodeState
-> IO ()
fediMessageHandler sendQ recvQ nsSTM = do
-- Read node state just once, assuming that all relevant data for this function does
-- not change.
-- Other functions are passed the nsSTM reference and thus can get the latest state.
nsSnap <- readTVarIO nsSTM
nodeConf <- nodeConfig <$> readTVarIO (parentRealNode nsSnap)
-- handling multipart messages:
-- Request parts can be inserted into a map (key: (sender IP against spoofing, request ID), value: timestamp + set of message parts; handle all of them when size of set == parts) before being handled. This map needs to be purged periodically by a separate thread and can be protected by an MVar for fairness.
requestMap <- newMVar (Map.empty :: RequestMap)
-- run receive loop and requestMapPurge concurrently, so that an exception makes
-- both of them fail
concurrently_ (requestMapPurge (confResponsePurgeAge nodeConf) requestMap) $ forever $ do
concurrently_ (requestMapPurge requestMap) $ forever $ do
-- wait for incoming messages
(rawMsg, sourceAddr) <- atomically $ readTQueue recvQ
let aMsg = deserialiseMessage rawMsg
@ -711,33 +646,14 @@ fediMessageHandler sendQ recvQ nsSTM = do

-- ==== interface to service layer ====

instance DHT (RealNodeSTM s) where
instance DHT RealNodeSTM where
lookupKey nodeSTM keystring = getKeyResponsibility nodeSTM $ genKeyID keystring
forceLookupKey nodeSTM keystring = (putStrLn $ "forced responsibility lookup of #" <> keystring) >> (updateLookupCache nodeSTM $ genKeyID keystring)
-- potential better implementation: put all neighbours of all vservers and the vservers themselves on a RingMap, look the key up and see whether it results in a LocalNodeState
isResponsibleFor nodeSTM key = do
node <- readTVarIO nodeSTM
foldM (\responsible vsSTM -> do
vs <- readTVarIO vsSTM
pure $ responsible || isInOwnResponsibilitySlice key vs
)
False
$ vservers node
isResponsibleForSTM nodeSTM key = do
node <- readTVar nodeSTM
foldM (\responsible vsSTM -> do
vs <- readTVar vsSTM
pure $ responsible || isInOwnResponsibilitySlice key vs
)
False
$ vservers node

forceLookupKey nodeSTM keystring = updateLookupCache nodeSTM $ genKeyID keystring

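-- The 'foldM' above visits every vserver even once responsibility has been
-- established. A self-contained sketch of a short-circuiting alternative
-- (an assumed helper, not part of either branch):
anyM :: Monad m => (a -> m Bool) -> [a] -> m Bool
anyM _ [] = pure False
anyM p (x:xs) = do
    r <- p x
    if r then pure True else anyM p xs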
-- | Returns the hostname and port of the host responsible for a key.
-- Information is provided from a cache; only on a cache miss
-- is a new DHT lookup triggered.
getKeyResponsibility :: RealNodeSTM s -> NodeID -> IO (Maybe (String, PortNumber))
getKeyResponsibility :: RealNodeSTM -> NodeID -> IO (Maybe (String, PortNumber))
getKeyResponsibility nodeSTM lookupKey = do
node <- readTVarIO nodeSTM
cache <- readTVarIO $ lookupCacheSTM node
@ -753,8 +669,8 @@ getKeyResponsibility nodeSTM lookupKey = do
-- | Triggers a new DHT lookup for a key, updates the lookup cache and returns the
-- new entry.
-- If no vserver is active in the DHT, 'Nothing' is returned.
updateLookupCache :: RealNodeSTM s -> NodeID -> IO (Maybe (String, PortNumber))
updateLookupCache nodeSTM keyToLookup = do
updateLookupCache :: RealNodeSTM -> NodeID -> IO (Maybe (String, PortNumber))
updateLookupCache nodeSTM lookupKey = do
(node, lookupSource) <- atomically $ do
node <- readTVar nodeSTM
let firstVs = headMay (vservers node)

@ -764,30 +680,23 @@ updateLookupCache nodeSTM keyToLookup = do
pure (node, lookupSource)
maybe (do
-- if no local node available, delete cache entry and return Nothing
atomically $ modifyTVar' (lookupCacheSTM node) $ Map.delete keyToLookup
atomically $ modifyTVar' (lookupCacheSTM node) $ Map.delete lookupKey
pure Nothing
)
(\n -> do
-- start a lookup from the node, update the cache with the lookup result and return it
-- TODO: better retry management, because having no vserver joined yet should
-- be treated differently than other reasons for not getting a result.
newResponsible <- runExceptT $ requestQueryID n keyToLookup
either
(const $ pure Nothing)
(\result -> do
let newEntry = (getDomain result, getServicePort result)
newResponsible <- requestQueryID n lookupKey
let newEntry = (getDomain newResponsible, getServicePort newResponsible)
now <- getPOSIXTime
-- atomic update against lost updates
atomically $ modifyTVar' (lookupCacheSTM node) $
Map.insert keyToLookup (CacheEntry False newEntry now)
Map.insert lookupKey (CacheEntry False newEntry now)
pure $ Just newEntry
)
newResponsible
) lookupSource

-- | Periodically clean the lookup cache from expired entries.
lookupCacheCleanup :: RealNodeSTM s -> IO ()
lookupCacheCleanup :: RealNodeSTM -> IO ()
lookupCacheCleanup nodeSTM = do
node <- readTVarIO nodeSTM
forever $ do

@ -797,4 +706,4 @@ lookupCacheCleanup nodeSTM = do
now - ts < confMaxLookupCacheAge (nodeConfig node)
)
)
threadDelay $ fromEnum (2 * confMaxLookupCacheAge (nodeConfig node)) `div` 10^6
threadDelay $ round (confMaxLookupCacheAge $ nodeConfig node) * (10^5)

@ -1,6 +1,5 @@
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DerivingStrategies #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE MultiParamTypeClasses #-}
@ -27,7 +26,8 @@ module Hash2Pub.FediChordTypes (
, CacheEntry(..)
, RingEntry(..)
, RingMap(..)
, HasKeyID(..)
, HasKeyID
, getKeyID
, rMapSize
, rMapLookup
, rMapLookupPred
@ -58,14 +58,11 @@ module Hash2Pub.FediChordTypes (
, bsAsIpAddr
, FediChordConf(..)
, DHT(..)
, Service(..)
, ServiceConf(..)
) where

import Control.Exception
import Data.Foldable (foldr')
import Data.Function (on)
import qualified Data.Hashable as Hashable
import Data.List (delete, nub, sortBy)
import qualified Data.Map.Strict as Map
import Data.Maybe (fromJust, fromMaybe, isJust,
@ -147,8 +144,8 @@ a `localCompare` b
-- | Data for managing the virtual server nodes of this real node.
-- Also contains shared data and config values.
-- TODO: more data structures for k-choices bookkeeping
data RealNode s = RealNode
{ vservers :: [LocalNodeStateSTM s]
data RealNode = RealNode
{ vservers :: [LocalNodeStateSTM]
-- ^ references to all active vservers
, nodeConfig :: FediChordConf
-- ^ holds the initial configuration read at program start
@ -156,10 +153,9 @@ data RealNode s = RealNode
-- ^ nodes to be used as bootstrapping points, new ones learned during operation
, lookupCacheSTM :: TVar LookupCache
-- ^ a global cache of looked up keys and their associated nodes
, nodeService :: s (RealNodeSTM s)
}

type RealNodeSTM s = TVar (RealNode s)
type RealNodeSTM = TVar RealNode

-- | represents a node and all its important state
data RemoteNodeState = RemoteNodeState

@ -181,7 +177,7 @@ instance Ord RemoteNodeState where
a `compare` b = nid a `compare` nid b

-- | represents a node and encapsulates all data and parameters that are not present for remote nodes
data LocalNodeState s = LocalNodeState
data LocalNodeState = LocalNodeState
{ nodeState :: RemoteNodeState
-- ^ represents common data present both in remote and local node representations
, nodeCacheSTM :: TVar NodeCache

@ -200,13 +196,13 @@ data LocalNodeState s = LocalNodeState
-- ^ number of parallel sent queries
, jEntriesPerSlice :: Int
-- ^ number of desired entries per cache slice
, parentRealNode :: RealNodeSTM s
, parentRealNode :: RealNodeSTM
-- ^ the parent node managing this vserver instance
}
deriving (Show, Eq)

-- | for concurrent access, LocalNodeState is wrapped in a TVar
type LocalNodeStateSTM s = TVar (LocalNodeState s)
type LocalNodeStateSTM = TVar LocalNodeState

-- | class for various NodeState representations, providing
-- getters and setters for common values
@ -243,14 +239,14 @@ instance NodeState RemoteNodeState where
toRemoteNodeState = id

-- | helper function for setting values on the 'RemoteNodeState' contained in the 'LocalNodeState'
propagateNodeStateSet_ :: (RemoteNodeState -> RemoteNodeState) -> LocalNodeState s -> LocalNodeState s
propagateNodeStateSet_ :: (RemoteNodeState -> RemoteNodeState) -> LocalNodeState -> LocalNodeState
propagateNodeStateSet_ func ns = let
newNs = func $ nodeState ns
in
ns {nodeState = newNs}

instance NodeState (LocalNodeState s) where
instance NodeState LocalNodeState where
getNid = getNid . nodeState
getDomain = getDomain . nodeState
getIpAddr = getIpAddr . nodeState
@ -272,37 +268,34 @@ instance Typeable a => Show (TVar a) where
instance Typeable a => Show (TQueue a) where
show x = show (typeOf x)

instance Typeable a => Show (TChan a) where
show x = show (typeOf x)

-- | convenience function that replaces the predecessors of a 'LocalNodeState' with the k closest nodes from the provided list
setPredecessors :: [RemoteNodeState] -> LocalNodeState s -> LocalNodeState s
setPredecessors preds ns = ns {predecessors = takeRMapPredecessors (getNid ns) (kNeighbours ns) . rMapFromList . fmap keyValuePair . filter ((/=) (getNid ns) . getNid) $ preds}
setPredecessors :: [RemoteNodeState] -> LocalNodeState -> LocalNodeState
setPredecessors preds ns = ns {predecessors = takeRMapPredecessors (getNid ns) (kNeighbours ns) . rMapFromList . filter ((/=) (getNid ns) . getNid) $ preds}

-- | convenience function that replaces the successors of a 'LocalNodeState' with the k closest nodes from the provided list
setSuccessors :: [RemoteNodeState] -> LocalNodeState s -> LocalNodeState s
setSuccessors succs ns = ns {successors = takeRMapSuccessors (getNid ns) (kNeighbours ns) . rMapFromList . fmap keyValuePair . filter ((/=) (getNid ns) . getNid) $ succs}
setSuccessors :: [RemoteNodeState] -> LocalNodeState -> LocalNodeState
setSuccessors succs ns = ns {successors = takeRMapSuccessors (getNid ns) (kNeighbours ns) . rMapFromList . filter ((/=) (getNid ns) . getNid) $ succs}

-- | sets the predecessors of a 'LocalNodeState' to the closest k nodes of the current predecessors and the provided list, combined
addPredecessors :: [RemoteNodeState] -> LocalNodeState s -> LocalNodeState s
addPredecessors preds ns = ns {predecessors = takeRMapPredecessors (getNid ns) (kNeighbours ns) . addRMapEntries (keyValuePair <$> filter ((/=) (getNid ns) . getNid) preds) . rMapFromList . fmap keyValuePair $ predecessors ns}
addPredecessors :: [RemoteNodeState] -> LocalNodeState -> LocalNodeState
addPredecessors preds ns = ns {predecessors = takeRMapPredecessors (getNid ns) (kNeighbours ns) . addRMapEntries (filter ((/=) (getNid ns) . getNid) preds) . rMapFromList $ predecessors ns}

-- | sets the successors of a 'LocalNodeState' to the closest k nodes of the current successors and the provided list, combined
addSuccessors :: [RemoteNodeState] -> LocalNodeState s -> LocalNodeState s
addSuccessors succs ns = ns {successors = takeRMapSuccessors (getNid ns) (kNeighbours ns) . addRMapEntries (keyValuePair <$> filter ((/=) (getNid ns) . getNid) succs) . rMapFromList . fmap keyValuePair $ successors ns}
addSuccessors :: [RemoteNodeState] -> LocalNodeState -> LocalNodeState
addSuccessors succs ns = ns {successors = takeRMapSuccessors (getNid ns) (kNeighbours ns) . addRMapEntries (filter ((/=) (getNid ns) . getNid) succs) . rMapFromList $ successors ns}

instance HasKeyID NodeID RemoteNodeState where
instance HasKeyID RemoteNodeState NodeID where
getKeyID = getNid

instance HasKeyID k a => HasKeyID k (CacheEntry a) where
instance HasKeyID a k => HasKeyID (CacheEntry a) k where
getKeyID (CacheEntry _ obj _) = getKeyID obj

instance HasKeyID NodeID NodeID where
getKeyID = id

type NodeCacheEntry = CacheEntry RemoteNodeState
type NodeCache = RingMap NodeID NodeCacheEntry
type NodeCache = RingMap NodeCacheEntry NodeID

type LookupCacheEntry = CacheEntry (String, PortNumber)
type LookupCache = Map.Map NodeID LookupCacheEntry
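
-- The branches disagree only on the parameter order of the class:
-- 'HasKeyID k a' versus 'HasKeyID a k'. Both encode "a value of type @a@
-- from which a key of type @k@ can be derived". A minimal usage sketch,
-- following the first variant's order (illustrative only):
largestKey :: (Ord k, HasKeyID k a) => [a] -> Maybe k
largestKey [] = Nothing
largestKey xs = Just . maximum $ fmap getKeyID xs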
@ -326,15 +319,12 @@ cacheLookup = rMapLookup
cacheLookupSucc :: NodeID -- ^ lookup key
-> NodeCache -- ^ ring cache
-> Maybe NodeCacheEntry
cacheLookupSucc key cache = snd <$> rMapLookupSucc key cache
cacheLookupSucc = rMapLookupSucc

cacheLookupPred :: NodeID -- ^ lookup key
-> NodeCache -- ^ ring cache
-> Maybe NodeCacheEntry
cacheLookupPred key cache = snd <$> rMapLookupPred key cache

-- clean up cache entries: once now - entry > maxAge
-- transfer difference now - entry to other node
cacheLookupPred = rMapLookupPred

-- | return the @NodeState@ data from a cache entry without checking its validation status
cacheGetNodeStateUnvalidated :: CacheEntry RemoteNodeState -> RemoteNodeState
@ -411,67 +401,18 @@ data FediChordConf = FediChordConf
-- ^ listening port for the FediChord DHT
, confBootstrapNodes :: [(String, PortNumber)]
-- ^ list of potential bootstrapping nodes
, confStabiliseInterval :: Int
-- ^ pause between stabilise runs, in microseconds
, confBootstrapSamplingInterval :: Int
-- ^ pause between sampling the own ID through bootstrap nodes, in microseconds
-- ^ pause between sampling the own ID through bootstrap nodes, in seconds
, confMaxLookupCacheAge :: POSIXTime
-- ^ maximum age of key lookup cache entries in seconds
, confJoinAttemptsInterval :: Int
-- ^ interval between join attempts on newly learned nodes, in microseconds
, confMaxNodeCacheAge :: POSIXTime
-- ^ maximum age of entries in the node cache, in seconds
, confResponsePurgeAge :: POSIXTime
-- ^ maximum age of message parts in the response part cache, in seconds
, confRequestTimeout :: Int
-- ^ how long to wait until a response has arrived, in milliseconds
, confRequestRetries :: Int
-- ^ how often re-sending a timed-out request can be retried
-- ^ maximum age of lookup cache entries in seconds
}
deriving (Show, Eq)

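-- Illustrative values for the timing fields above (assumptions for clarity,
-- not project defaults): fields passed to 'threadDelay' directly are
-- microseconds, the POSIXTime ages are seconds, the request timeout is
-- milliseconds.
--
--   confStabiliseInterval         = 60 * 10^6  -- one stabilise run per minute
--   confBootstrapSamplingInterval = 180 * 10^6 -- sample own ID every 3 minutes
--   confJoinAttemptsInterval      = 30 * 10^6  -- retry joining every 30 seconds
--   confMaxNodeCacheAge           = 600        -- node cache entries live 10 minutes
--   confMaxLookupCacheAge         = 300        -- lookup cache entries live 5 minutes
--   confResponsePurgeAge          = 60         -- response parts live 1 minute
--   confRequestTimeout            = 5000       -- wait 5 seconds for a response
--   confRequestRetries            = 3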
-- ====== Service Types ============

class Service s d where
-- | run the service
runService :: ServiceConf -> d -> IO (s d)
getListeningPortFromService :: (Integral i) => s d -> i
-- | trigger a service data migration of data between the two given keys
migrateData :: s d
-> NodeID -- ^ source/ sender node ID
-> NodeID -- ^ start key
-> NodeID -- ^ end key
-> (String, Int) -- ^ hostname and port of target service
-> IO (Either String ()) -- ^ success or failure
-- | Wait for an incoming migration from a given node to succeed, may block forever
waitForMigrationFrom :: s d -> NodeID -> IO ()

instance Hashable.Hashable NodeID where
hashWithSalt salt = Hashable.hashWithSalt salt . getNodeID
hash = Hashable.hash . getNodeID

data ServiceConf = ServiceConf
{ confSubscriptionExpiryTime :: POSIXTime
-- ^ subscription lease expiration in seconds
, confServicePort :: Int
-- ^ listening port for the service
, confServiceHost :: String
-- ^ hostname of the service
, confLogfilePath :: String
-- ^ where to store the (measurement) log file
, confStatsEvalDelay :: Int
-- ^ delay between statistic rate measurement samplings, in microseconds
, confSpeedupFactor :: Int
-- While the speedup factor needs to be already included in all
}

class DHT d where
-- | lookup the responsible host handling a given key string,
-- possibly from a lookup cache
lookupKey :: d -> String -> IO (Maybe (String, PortNumber))
-- | lookup the responsible host handling a given key string,
-- but force the DHT to do a fresh lookup instead of returning a cached result.
-- Also invalidates old cache entries.
forceLookupKey :: d -> String -> IO (Maybe (String, PortNumber))
isResponsibleFor :: d -> NodeID -> IO Bool
isResponsibleForSTM :: d -> NodeID -> STM Bool

@ -1,884 +1,117 @@
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE InstanceSigs #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE InstanceSigs #-}

module Hash2Pub.PostService where

import Control.Concurrent
import Control.Concurrent.Async
import Control.Concurrent.STM
import Control.Exception (Exception (..), try)
import Control.Monad (foldM, forM, forM_, forever, unless,
void, when)
import Control.Monad.IO.Class (liftIO)
import Data.Bifunctor
import qualified Data.ByteString.Lazy.UTF8 as BSUL
import qualified Data.ByteString.UTF8 as BSU
import qualified Data.DList as D
import Data.Either (lefts, rights)
import qualified Data.HashMap.Strict as HMap
import qualified Data.HashSet as HSet
import Data.Maybe (fromJust, isJust)
import qualified Data.ByteString.Lazy.UTF8 as BSU
import Data.Maybe (fromMaybe)
import Data.String (fromString)
import Data.Text.Lazy (Text)
import qualified Data.Text.Lazy as Txt
import qualified Data.Text.Lazy.IO as TxtI
import Data.Text.Normalize (NormalizationMode (NFC), normalize)
import Data.Time.Clock.POSIX
import Data.Typeable (Typeable)
import qualified Network.HTTP.Client as HTTP
import qualified Network.HTTP.Types as HTTPT
import System.IO
import System.Random
import Text.Read (readEither)
import qualified Data.Text as Txt

import Formatting (fixed, format, int, (%))
import qualified Network.Wai.Handler.Warp as Warp
import Servant
import Servant.Client

import Hash2Pub.FediChordTypes
import Hash2Pub.PostService.API
import Hash2Pub.RingMap
import Hash2Pub.Utils
import Hash2Pub.FediChord
import Hash2Pub.ServiceTypes

import Debug.Trace

data PostService d = PostService
{ serviceConf :: ServiceConf
{ psPort :: Warp.Port
, psHost :: String
-- queues, other data structures
, baseDHT :: (DHT d) => d
, serviceThread :: TVar ThreadId
, subscribers :: TVar RelayTags
-- ^ for each tag store the subscribers + their queue
, ownSubscriptions :: TVar (HMap.HashMap NodeID POSIXTime)
-- ^ tags subscribed by the own node have an assigned lease time
, relayInQueue :: TQueue (Hashtag, PostID, PostContent)
-- ^ queue for processing incoming posts of the own instance asynchronously
, postFetchQueue :: TQueue PostID
-- ^ queue of posts to be fetched
, migrationsInProgress :: TVar (HMap.HashMap NodeID (MVar ()))
, httpMan :: HTTP.Manager
, statsQueue :: TQueue StatsEvent
, loadStats :: TVar RelayStats
-- ^ current load stats, replaced periodically
, logFileHandle :: Handle
, serviceThread :: ThreadId
}
deriving (Typeable)

type Hashtag = Text
type PostID = Text
type PostContent = Text
-- | For each handled tag, store its subscribers and provide a
-- broadcast 'TChan' for enqueuing posts
type RelayTags = RingMap NodeID (TagSubscribersSTM, TChan PostID, Hashtag)
type TagSubscribersSTM = TVar TagSubscribers
-- | each subscriber is identified by its contact data "hostname" "port"
-- and holds a TChan duplicated from the broadcast TChan of the tag
-- + an expiration timestamp
type TagSubscribers = (HMap.HashMap (String, Int) (TChan PostID, POSIXTime))

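-- A minimal sketch of how a hashtag resolves inside 'RelayTags': the ring key
-- is the tag's hashed position, so lookups go through 'hashtagToId' first
-- ('lookupTagEntry' is an assumed name, not part of either branch):
lookupTagEntry :: RelayTags -> Hashtag -> Maybe (TagSubscribersSTM, TChan PostID, Hashtag)
lookupTagEntry tagMap tag = rMapLookup (hashtagToId tag) tagMap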
instance DHT d => Service PostService d where
-- | initialise 'PostService' data structures and run server
runService conf dht = do
-- create necessary TVars
threadVar <- newTVarIO =<< myThreadId -- own thread ID as placeholder
subscriberVar <- newTVarIO emptyRMap
ownSubsVar <- newTVarIO HMap.empty
--ownPostVar <- newTVarIO HSet.empty
relayInQueue' <- newTQueueIO
postFetchQueue' <- newTQueueIO
migrationsInProgress' <- newTVarIO HMap.empty
httpMan' <- HTTP.newManager HTTP.defaultManagerSettings
statsQueue' <- newTQueueIO
loadStats' <- newTVarIO emptyStats
loggingFile <- openFile (confLogfilePath conf) WriteMode
hSetBuffering loggingFile LineBuffering
runService dht host port = do
let
thisService = PostService
{ serviceConf = conf
port' = fromIntegral port
warpSettings = Warp.setPort port' . Warp.setHost (fromString host) $ Warp.defaultSettings
servThread <- forkIO $ Warp.runSettings warpSettings postServiceApplication
pure $ PostService {
psPort = port'
, psHost = host
, baseDHT = dht
, serviceThread = threadVar
, subscribers = subscriberVar
, ownSubscriptions = ownSubsVar
--, ownPosts = ownPostVar
, relayInQueue = relayInQueue'
, postFetchQueue = postFetchQueue'
, migrationsInProgress = migrationsInProgress'
, httpMan = httpMan'
, statsQueue = statsQueue'
, loadStats = loadStats'
, logFileHandle = loggingFile
, serviceThread = servThread
}
port' = fromIntegral (confServicePort conf)
warpSettings = Warp.setPort port' . Warp.setHost (fromString . confServiceHost $ conf) $ Warp.defaultSettings
-- log a start message; this also truncates existing files
TxtI.hPutStrLn loggingFile $ Txt.unlines
[ "# Starting mock relay implementation"
, "#time stamp ; relay receive rate ;relay delivery rate ;instance publish rate ;instance fetch rate ;total subscriptions"
]
-- Run 'concurrently_' from another thread to be able to return the
-- 'PostService'.
-- Terminating that parent thread will make all child threads terminate as well.
servThreadID <- forkIO $
concurrently_
-- web server
(Warp.runSettings warpSettings $ postServiceApplication thisService)
$ concurrently
-- background processing workers
(launchWorkerThreads thisService)
-- statistics/ measurements
(launchStatsThreads thisService)
-- update thread ID after fork
atomically $ writeTVar threadVar servThreadID
pure thisService

getListeningPortFromService = fromIntegral . confServicePort . serviceConf

migrateData = clientDeliverSubscriptions

waitForMigrationFrom serv fromID = do
migrationSynchroniser <- atomically $ do
syncPoint <- HMap.lookup fromID <$> readTVar (migrationsInProgress serv)
maybe
-- decision: this function blocks until it gets an incoming migration from the given ID
retry
pure
syncPoint
-- block until migration finished
takeMVar migrationSynchroniser
getServicePort s = fromIntegral $ psPort s

-- | return a WAI application
postServiceApplication :: DHT d => PostService d -> Application
postServiceApplication serv = serve exposedPostServiceAPI $ postServer serv
postServiceApplication :: Application
postServiceApplication = serve exposedPostServiceAPI postServer

servicePort = 8081

-- | needed for guiding type inference
exposedPostServiceAPI :: Proxy PostServiceAPI
exposedPostServiceAPI = Proxy

-- ========= constants ===========

placeholderPost :: Text
placeholderPost = Txt.take 5120 . Txt.repeat $ 'O' -- size 5KiB

-- ========= HTTP API and handlers =============

postServer :: DHT d => PostService d -> Server PostServiceAPI
postServer service = relayInbox service
:<|> subscriptionDelivery service
:<|> postFetch service
:<|> postMultiFetch service
:<|> postInbox service
:<|> tagDelivery service
:<|> tagSubscribe service
:<|> tagUnsubscribe service
type PostServiceAPI = "relay" :> "inbox" :> ReqBody '[PlainText] Txt.Text :> PostCreated '[PlainText] Txt.Text
-- ^ delivery endpoint of newly published posts of the relay's instance
:<|> "relay" :> "subscribers" :> ReqBody '[PlainText] Txt.Text :> Post '[PlainText] Txt.Text
-- ^ endpoint for delivering the subscriptions and outstanding queue
:<|> "post" :> Capture "postid" Txt.Text :> Get '[PlainText] Txt.Text
-- ^ fetch endpoint for posts, full post ID is http://$domain/post/$postid
:<|> "posts" :> ReqBody '[PlainText] Txt.Text :> Post '[PlainText] Txt.Text
-- ^ endpoint for fetching multiple posts at once
:<|> "tags" :> Capture "hashtag" Txt.Text :> ReqBody '[PlainText] Txt.Text :> PostCreated '[PlainText] Txt.Text
-- ^ delivery endpoint for posts of $tag at subscribing instance
:<|> "tags" :> Capture "hashtag" Txt.Text :> "subscribe" :> Header "Origin" Txt.Text :> Get '[PlainText] Integer
-- ^ endpoint for subscribing the instance specified in
-- the Origin header to $hashtag.
-- Returns subscription lease time in seconds.
:<|> "tags" :> Capture "hashtag" Txt.Text :> "unsubscribe" :> Header "Origin" Txt.Text :> Get '[PlainText] Txt.Text
-- ^ endpoint for unsubscribing the instance specified in
-- the Origin header from $hashtag

-- | delivery endpoint: receive posts of a handled tag and enqueue them for relaying
relayInbox :: DHT d => PostService d -> Hashtag -> Text -> Handler NoContent
relayInbox serv tag posts = do
let
-- skip checking whether the post actually contains the tag, just drop the full post
postIDs = head . Txt.splitOn "," <$> Txt.lines posts
-- if the tag is not in own responsibility, return a 410 Gone
responsible <- liftIO $ isResponsibleFor (baseDHT serv) (hashtagToId tag)
if responsible
then pure ()
else
throwError $ err410 { errBody = "Relay is not responsible for this tag"}
broadcastChan <- liftIO $ atomically $ getTagBroadcastChannel serv tag
maybe
-- if no one subscribed to the tag, nothing needs to be done
(pure ())
-- otherwise enqueue posts into the broadcast queue of the tag
(\queue -> do
liftIO $ forM_ postIDs (atomically . writeTChan queue)
-- report the received post for statistic purposes
liftIO . atomically . writeTQueue (statsQueue serv) $ StatsEvent RelayReceiveEvent (length postIDs) (hashtagToId tag)
)
broadcastChan
pure NoContent

-- exception to be thrown when a tag is not in the responsibility of a relay
newtype UnhandledTagException = UnhandledTagException String
deriving (Show, Typeable)

instance Exception UnhandledTagException

-- | delivery endpoint: receives a list of subscribers of tags and their outstanding queues for migration
subscriptionDelivery :: DHT d => PostService d -> Integer -> Text -> Handler Text
subscriptionDelivery serv senderID subList = do
let
tagSubs = Txt.lines subList
-- signal that the migration is in progress
syncMVar <- liftIO newEmptyMVar
liftIO . atomically $ modifyTVar' (migrationsInProgress serv) $
HMap.insert (fromInteger senderID) syncMVar
-- In favour of having the convenience of rolling back the transaction once a
-- not-handled tag occurs, this results in a single large transaction.
-- Hopefully the performance isn't too bad.
res <- liftIO . atomically $ (foldM (\_ tag' -> do
responsible <- isResponsibleForSTM (baseDHT serv) (hashtagToId tag')
if responsible
then processTag (subscribers serv) tag'
else throwSTM $ UnhandledTagException (Txt.unpack tag' <> " not handled by this relay")
pure $ Right ()
) (pure ()) tagSubs
`catchSTM` (\e -> pure . Left $ show (e :: UnhandledTagException))
-- TODO: potentially log this
:: STM (Either String ()))
-- TODO: should this always signal migration finished, to avoid deadlocks?
liftIO $ putMVar syncMVar () -- wakes up waiting thread
-- allow response to be completed independently from waiting thread
_ <- liftIO . forkIO $ do
putMVar syncMVar () -- blocks until waiting thread has resumed
-- delete this migration from ongoing ones
liftIO . atomically $ modifyTVar' (migrationsInProgress serv) $
HMap.delete (fromInteger senderID)
case res of
Left err -> throwError err410 {errBody = BSUL.fromString err}
Right _ -> pure ""
-- TODO: check and only accept tags in own (future?) responsibility
where
processTag :: TVar RelayTags -> Text -> STM ()
processTag subscriberSTM tagData = do
let
tag:subText:lease:posts:_ = Txt.splitOn "," tagData
-- ignore checking of lease time
leaseTime = fromIntegral (read . Txt.unpack $ lease :: Integer)
sub = read . Txt.unpack $ subText :: (String, Int)
postList = Txt.words posts
enqueueSubscription subscriberSTM (normaliseTag tag) sub postList leaseTime

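-- The irrefutable pattern in 'processTag' above aborts the whole STM
-- transaction on malformed input lines. A self-contained sketch of a total
-- variant ('parseTagData' is an assumed helper, not part of either branch):
parseTagData :: Text -> Maybe (Text, Text, Text, Text)
parseTagData tagData = case Txt.splitOn "," tagData of
    tag : subText : lease : posts : _ -> Just (tag, subText, lease, posts)
    _                                 -> Nothing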
postServer :: Server PostServiceAPI
postServer = relayInbox
:<|> subscriptionDelivery
:<|> postFetch
:<|> postMultiFetch
:<|> tagDelivery
:<|> tagSubscribe
:<|> tagUnsubscribe

-- | endpoint for fetching a post by its ID
postFetch :: PostService d -> Text -> Handler Text
postFetch serv _ = do
-- decision: for saving memory do not store published posts, just
-- pretend there is a post for each requested ID
liftIO . atomically . writeTQueue (statsQueue serv) $ StatsEvent IncomingPostFetchEvent 1 0 -- tag fetched for is irrelevant
pure placeholderPost
relayInbox :: Txt.Text -> Handler Txt.Text
relayInbox post = pure $ "Here be InboxDragons with " <> post

subscriptionDelivery :: Txt.Text -> Handler Txt.Text
subscriptionDelivery subList = pure $ "Here be Subscription List dragons: " <> subList

-- | endpoint for fetching multiple posts of this instance by their IDs
postMultiFetch :: PostService d -> Text -> Handler Text
postMultiFetch serv postIDs = do
let
idList = Txt.lines postIDs
-- decision: for saving memory do not store published posts, just
-- pretend there is a post for each requested ID
response = foldl (\response' _ ->
placeholderPost <> "\n" <> response'
) "" idList
liftIO . atomically . writeTQueue (statsQueue serv) $ StatsEvent IncomingPostFetchEvent (length idList) 0 -- tag fetched for is irrelevant
pure response
postFetch :: Txt.Text -> Handler Txt.Text
postFetch postID = pure $ "Here be a post with dragon ID " <> postID

postMultiFetch :: Txt.Text -> Handler Txt.Text
postMultiFetch postIDs = pure $ "Here be multiple post dragons: "
<> (Txt.unwords . Txt.lines $ postIDs)

-- | delivery endpoint: inbox for initially publishing a post at an instance
postInbox :: PostService d -> Text -> Handler NoContent
postInbox serv post = do
-- extract contained hashtags
let
containedTags = fmap (normaliseTag . Txt.tail) . filter ((==) '#' . Txt.head) . Txt.words $ post
-- generate post ID
postId <- liftIO $ Txt.pack . show <$> (randomRIO (0, 2^(128::Integer)-1) :: IO Integer)
-- decision: for saving memory do not store published post IDs, just deliver a post for any requested ID
-- enqueue a relay job for each tag
liftIO $ forM_ (containedTags :: [Text]) (\tag ->
atomically $ writeTQueue (relayInQueue serv) (tag, postId, post)
)
pure NoContent
tagDelivery :: Txt.Text -> Txt.Text -> Handler Txt.Text
tagDelivery hashtag posts = pure $ "Here be #" <> hashtag <> " dragons with " <> posts

tagSubscribe :: Txt.Text -> Maybe Txt.Text -> Handler Integer
tagSubscribe hashtag origin = pure 42

-- | delivery endpoint: receive postIDs of a certain subscribed hashtag
tagDelivery :: PostService d -> Text -> Text -> Handler Text
tagDelivery serv hashtag posts = do
let postIDs = Txt.lines posts
subscriptions <- liftIO . readTVarIO . ownSubscriptions $ serv
if isJust (HMap.lookup (hashtagToId hashtag) subscriptions)
then -- TODO: increase a counter/ statistics for received posts of this tag
liftIO $ forM_ postIDs $ atomically . writeTQueue (postFetchQueue serv)
else -- silently drop posts from unsubscribed tags
pure ()
pure $ "Received a postID for tag " <> hashtag
tagUnsubscribe :: Txt.Text -> Maybe Txt.Text -> Handler Txt.Text
tagUnsubscribe hashtag origin = pure $ "Here be a dragon unsubscription from " <> fromMaybe "Nothing" origin <> " to " <> hashtag

-- | receive subscription requests to a handled hashtag
tagSubscribe :: DHT d => PostService d -> Text -> Maybe Text -> Handler Integer
tagSubscribe serv hashtag origin = do
responsible <- liftIO $ isResponsibleFor (baseDHT serv) (hashtagToId hashtag)
if not responsible
-- GONE if not responsible
then throwError err410 { errBody = "not responsible for this tag" }
else pure ()
originURL <- maybe
(throwError $ err400 { errBody = "Missing Origin header" })
pure
origin
req <- HTTP.parseUrlThrow (Txt.unpack originURL)
now <- liftIO getPOSIXTime
let leaseTime = now + confSubscriptionExpiryTime (serviceConf serv)
-- set up the subscription entry
_ <- liftIO . atomically $ setupSubscriberChannel (subscribers serv) hashtag (BSU.toString $ HTTP.host req, HTTP.port req) leaseTime
--liftIO . putStrLn $ "just got a subscription to " <> Txt.unpack hashtag
pure $ round leaseTime

-- | receive and handle unsubscription requests regarding a handled tag
tagUnsubscribe :: DHT d => PostService d -> Text -> Maybe Text -> Handler Text
tagUnsubscribe serv hashtag origin = do
responsible <- liftIO $ isResponsibleFor (baseDHT serv) (hashtagToId hashtag)
if not responsible
-- GONE if not responsible
then throwError err410 { errBody = "not responsible for this tag" }
else pure ()
originURL <- maybe
(throwError $ err400 { errBody = "Missing Origin header" })
pure
origin
req <- HTTP.parseUrlThrow (Txt.unpack originURL)
liftIO . atomically $ deleteSubscription (subscribers serv) hashtag (BSU.toString $ HTTP.host req, HTTP.port req)
pure "bye bye"

-- client/ request functions

clientAPI :: Proxy PostServiceAPI
clientAPI = Proxy

relayInboxClient
:<|> subscriptionDeliveryClient
:<|> postFetchClient
:<|> postMultiFetchClient
:<|> postInboxClient
:<|> tagDeliveryClient
:<|> tagSubscribeClient
:<|> tagUnsubscribeClient
= client clientAPI

-- | Deliver the subscriber list of all hashtags in the interval [fromTag, toTag]
-- and their outstanding delivery queue to another instance.
-- If the transfer succeeds, the transferred subscribers are removed from the local list.
clientDeliverSubscriptions :: PostService d
-> NodeID -- ^ sender node ID
-> NodeID -- ^ fromTag
-> NodeID -- ^ toTag
-> (String, Int) -- ^ hostname and port of the instance to deliver to
-> IO (Either String ()) -- Either signals success or failure
clientDeliverSubscriptions serv fromNode fromKey toKey (toHost, toPort) = do
-- collect tag interval
intervalTags <- takeRMapSuccessorsFromTo fromKey toKey <$> readTVarIO (subscribers serv)
-- returns a [ (TagSubscribersSTM, TChan PostID, Hashtag) ]
-- extract subscribers and posts
-- no need for extracting as a single atomic operation, as newly incoming posts are supposed to be rejected because the tags have already been re-positioned on the DHT
subscriberData <- foldM (\response (subSTM, _, tag) -> do
subMap <- readTVarIO subSTM
thisTagsData <- foldM (\tagResponse (subscriber, (subChan, lease)) -> do
-- duplicate the pending queue to work on a copy, in case of a delivery error
pending <- atomically $ do
queueCopy <- cloneTChan subChan
channelGetAll queueCopy
if null pending
then pure tagResponse
else pure $ tag <> "," <> Txt.pack (show subscriber) <> "," <> Txt.pack (show lease) <> "," <> Txt.unwords pending <> "\n"
)
""
(HMap.toList subMap)
pure $ thisTagsData <> response
)
""
intervalTags
-- send subscribers
resp <- runClientM (subscriptionDeliveryClient (getNodeID fromNode) subscriberData) (mkClientEnv (httpMan serv) (BaseUrl Http toHost (fromIntegral toPort) ""))
-- on failure return a Left, otherwise delete the subscription entry
case resp of
Left err -> pure . Left . show $ err
Right _ -> do
atomically $
modifyTVar' (subscribers serv) $ \tagMap ->
foldr deleteRMapEntry tagMap ((\(_, _, t) -> hashtagToId t) <$> intervalTags)
pure . Right $ ()
where
channelGetAll :: TChan a -> STM [a]
channelGetAll chan = channelGetAll' chan []
channelGetAll' :: TChan a -> [a] -> STM [a]
channelGetAll' chan acc = do
haveRead <- tryReadTChan chan
maybe (pure acc) (\x -> channelGetAll' chan (x:acc)) haveRead

-- | Subscribe the client to the given hashtag. On success it returns the given lease time,
-- but also records the subscription in its own data structure.
clientSubscribeTo :: DHT d => PostService d -> Hashtag -> IO (Either String Integer)
clientSubscribeTo serv tag = do
lookupRes <- lookupKey (baseDHT serv) (Txt.unpack tag)
doSubscribe lookupRes True
where
doSubscribe lookupResponse allowRetry = maybe
(pure . Left $ "No node found")
(\(foundHost, foundPort) -> do
let origin = "http://" <> Txt.pack (confServiceHost $ serviceConf serv) <> ":" <> Txt.pack (show (getListeningPortFromService serv :: Integer))
resp <- runClientM (tagSubscribeClient tag (Just origin)) (mkClientEnv (httpMan serv) (BaseUrl Http foundHost (fromIntegral foundPort) ""))
case resp of
Left (FailureResponse _ fresp)
| (HTTPT.statusCode . responseStatusCode $ fresp) == 410 && allowRetry -> do -- responsibility gone, force new lookup
newRes <- forceLookupKey (baseDHT serv) (Txt.unpack tag)
--putStrLn $ "failed subscribing to " <> Txt.unpack tag <> " on " <> foundHost
doSubscribe newRes False
Left err -> pure . Left . show $ err
Right lease -> do
atomically . modifyTVar' (ownSubscriptions serv) $ HMap.insert (hashtagToId tag) (fromInteger lease)
--putStrLn $ "just subscribed to " <> Txt.unpack tag <> " on " <> foundHost
pure . Right $ lease
)
lookupResponse

-- | Unsubscribe the client from the given hashtag.
clientUnsubscribeFrom :: DHT d => PostService d -> Hashtag -> IO (Either String ())
clientUnsubscribeFrom serv tag = do
lookupRes <- lookupKey (baseDHT serv) (Txt.unpack tag)
doUnsubscribe lookupRes True
where
doUnsubscribe lookupResponse allowRetry = maybe
(pure . Left $ "No node found")
(\(foundHost, foundPort) -> do
let origin = "http://" <> Txt.pack (confServiceHost $ serviceConf serv) <> ":" <> Txt.pack (show (getListeningPortFromService serv :: Integer))
resp <- runClientM (tagUnsubscribeClient tag (Just origin)) (mkClientEnv (httpMan serv) (BaseUrl Http foundHost (fromIntegral foundPort) ""))
case resp of
Left (FailureResponse _ fresp)
| (HTTPT.statusCode . responseStatusCode $ fresp) == 410 && allowRetry -> do -- responsibility gone, force new lookup
newRes <- forceLookupKey (baseDHT serv) (Txt.unpack tag)
doUnsubscribe newRes False
Left err -> pure . Left . show $ err
Right _ -> do
atomically . modifyTVar' (ownSubscriptions serv) $ HMap.delete (hashtagToId tag)
pure . Right $ ()
)
lookupResponse

-- | publish a new post to the inbox of a specified relay instance. This
-- instance will then be the originating instance of the post and will forward
-- the post to the responsible relays.
-- As the initial publishing isn't done by a specific relay (but *to* a specific relay
-- instead), the function does *not* take a PostService as argument.
clientPublishPost :: HTTP.Manager -- ^ for better performance, a shared HTTP manager has to be provided
                  -> String -- ^ hostname
                  -> Int -- ^ port
                  -> PostContent -- ^ post content
                  -> IO (Either String ()) -- ^ error or success
clientPublishPost httpman hostname port postC = do
    resp <- runClientM (postInboxClient postC) (mkClientEnv httpman (BaseUrl Http hostname port ""))
    pure . bimap show (const ()) $ resp

-- currently this is unused code
getClients :: String -> Int -> HTTP.Manager -> Client IO PostServiceAPI
getClients hostname' port' httpMan = hoistClient clientAPI
    (fmap (either (error . show) id)
    . flip runClientM clientEnv
    )
    (client clientAPI)
    where
        clientEnv = mkClientEnv httpMan (BaseUrl Http hostname' port' "")

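-- Illustrative only: publishing a post through a relay assumed to listen on
-- localhost:3000; host, port and post text are invented for this sketch.
demoPublish :: IO ()
demoPublish = do
    httpMan <- HTTP.newManager HTTP.defaultManagerSettings
    res <- clientPublishPost httpMan "localhost" 3000 "example post content #hash2pub"
    either (putStrLn . ("publish failed: " <>)) (const $ putStrLn "published") res
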
-- ======= data structure manipulations =========

-- | Write all pending posts of a subscriber-tag-combination to its queue.
-- Sets up all necessary data structures if they are still missing.
enqueueSubscription :: TVar RelayTags -- tag-subscriber map
                    -> Hashtag -- hashtag of pending posts
                    -> (String, Int) -- subscriber's connection information
                    -> [PostID] -- pending posts
                    -> POSIXTime -- lease expiry time
                    -> STM ()
enqueueSubscription tagMapSTM tag subscriber posts leaseTime = do
    -- get the tag output queue and, if necessary, create it
    subChan <- setupSubscriberChannel tagMapSTM tag subscriber leaseTime
    forM_ posts (writeTChan subChan)

-- | STM operation to return the outgoing post queue of a tag to a specified subscriber.
-- If the queue doesn't exist yet, all necessary data structures are set up accordingly.
setupSubscriberChannel :: TVar RelayTags -> Hashtag -> (String, Int) -> POSIXTime -> STM (TChan PostID)
setupSubscriberChannel tagMapSTM tag subscriber leaseTime = do
    tagMap <- readTVar tagMapSTM
    case lookupTagSubscriptions tag tagMap of
        Nothing -> do
            -- if the tag doesn't exist yet (no key collision), just initialise
            -- a new subscriber map
            broadcastChan <- newBroadcastTChan
            tagOutChan <- dupTChan broadcastChan
            newSubMapSTM <- newTVar $ HMap.singleton subscriber (tagOutChan, leaseTime)
            writeTVar tagMapSTM $ addRMapEntry (hashtagToId tag) (newSubMapSTM, broadcastChan, tag) tagMap
            pure tagOutChan
        Just (foundSubMapSTM, broadcastChan, _) -> do
            -- otherwise use the existing subscriber map
            foundSubMap <- readTVar foundSubMapSTM
            case HMap.lookup subscriber foundSubMap of
                Nothing -> do
                    -- for new subscribers, create new output channel
                    tagOutChan <- dupTChan broadcastChan
                    writeTVar foundSubMapSTM $ HMap.insert subscriber (tagOutChan, leaseTime) foundSubMap
                    pure tagOutChan
                -- existing subscriber's channels are just returned
                Just (tagOutChan, _) -> pure tagOutChan

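-- A minimal sketch (illustrative, not part of the service) of the
-- broadcast-channel semantics relied upon above: items written to the
-- broadcast end are only readable through duplicated channels, so every
-- subscriber gets its own cursor over the same post stream.
broadcastDemo :: STM (Maybe PostID, Maybe PostID)
broadcastDemo = do
    bcast <- newBroadcastTChan
    subA  <- dupTChan bcast
    subB  <- dupTChan bcast
    writeTChan bcast "somePostID"
    -- both duplicated channels receive the item independently
    (,) <$> tryReadTChan subA <*> tryReadTChan subB
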
-- | deletes a subscription from the passed subscriber map
deleteSubscription :: TVar RelayTags -> Hashtag -> (String, Int) -> STM ()
deleteSubscription tagMapSTM tag subscriber = do
    tagMap <- readTVar tagMapSTM
    case lookupTagSubscriptions tag tagMap of
        -- no subscribers to that tag, just return
        Nothing -> pure ()
        Just (foundSubMapSTM, _, _) -> do
            foundSubMap <- readTVar foundSubMapSTM
            let newSubMap = HMap.delete subscriber foundSubMap
            -- if there are no subscriptions for the tag anymore, remove its
            -- data structure altogether
            if HMap.null newSubMap
                then writeTVar tagMapSTM $ deleteRMapEntry (hashtagToId tag) tagMap
                -- otherwise just remove the subscription of that node
                else writeTVar foundSubMapSTM newSubMap

-- | returns the broadcast channel of a hashtag if there are any subscribers to it
getTagBroadcastChannel :: PostService d -> Hashtag -> STM (Maybe (TChan PostID))
getTagBroadcastChannel serv tag = do
    tagMap <- readTVar $ subscribers serv
    case lookupTagSubscriptions tag tagMap of
        Nothing -> pure Nothing
        Just (subscriberSTM, broadcastChan, _) -> do
            subscriberMap <- readTVar subscriberSTM
            if HMap.null subscriberMap
                then pure Nothing
                else pure (Just broadcastChan)

-- | look up the subscription data of a tag
lookupTagSubscriptions :: Hashtag -> RingMap NodeID a -> Maybe a
lookupTagSubscriptions tag = rMapLookup (hashtagToId tag)

-- | normalise the unicode representation of a string to NFC and convert to lower case
normaliseTag :: Text -> Text
normaliseTag = Txt.toLower . Txt.fromStrict . normalize NFC . Txt.toStrict

-- | convert a hashtag to its representation on the DHT
hashtagToId :: Hashtag -> NodeID
hashtagToId = genKeyID . Txt.unpack

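-- Because tags are normalised before hashing, the derived DHT key is
-- insensitive to case and Unicode normal form; a small example check
-- (invented tag value):
tagIdExample :: Bool
tagIdExample = hashtagToId (normaliseTag "Haskell") == hashtagToId (normaliseTag "haskell")
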
-- | read up to @n@ elements from a 'TChan' without blocking
readUpToTChan :: Int -> TChan a -> STM [a]
readUpToTChan 0 _ = pure []
readUpToTChan n chan = do
    readFromChan <- tryReadTChan chan
    case readFromChan of
        Nothing -> pure []
        Just val -> do
            moreReads <- readUpToTChan (pred n) chan
            pure (val:moreReads)

-- | read up to @n@ elements from a 'TQueue' without blocking
readUpToTQueue :: Int -> TQueue a -> STM [a]
readUpToTQueue 0 _ = pure []
readUpToTQueue n q = do
    readFromQueue <- tryReadTQueue q
    case readFromQueue of
        Nothing -> pure []
        Just val -> do
            moreReads <- readUpToTQueue (pred n) q
            pure (val:moreReads)

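-- These helpers return an empty list when nothing is queued; to block until
-- at least one element arrives (the pattern used by processIncomingPosts
-- below), combine them with retry. A minimal sketch:
drainBatch :: TQueue a -> STM [a]
drainBatch q = do
    batch <- readUpToTQueue 10 q
    if null batch then retry else pure batch
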
-- | define how to convert all showable types to PlainText
-- No idea what I'm doing with these overlappable instances though ¯\_(ツ)_/¯
-- TODO: figure out how this overlapping stuff actually works https://downloads.haskell.org/~ghc/latest/docs/html/users_guide/glasgow_exts.html#instance-overlap
instance {-# OVERLAPPABLE #-} Show a => MimeRender PlainText a where
    mimeRender _ = BSUL.fromString . show

instance {-# OVERLAPPABLE #-} Read a => MimeUnrender PlainText a where
    mimeUnrender _ = readEither . BSUL.toString

-- ====== worker threads ======

-- TODO: make configurable
numParallelDeliveries :: Int
numParallelDeliveries = 10

launchWorkerThreads :: DHT d => PostService d -> IO ()
launchWorkerThreads serv = concurrently_
    (processIncomingPosts serv)
    $ concurrently_
        (purgeSubscriptionsThread serv)
        $ concurrently_
            (fetchTagPosts serv)
            (relayWorker serv)

-- | periodically remove expired subscription entries from relay subscribers
purgeSubscriptionsThread :: PostService d -> IO ()
purgeSubscriptionsThread serv = forever $ do
    now <- getPOSIXTime
    let
        -- derive the purge interval from the configured expiry time
        purgeInterval = confSubscriptionExpiryTime (serviceConf serv) / 10
    -- no need to atomically lock this, as newly incoming subscriptions do not
    -- need to be purged
    tagMap <- readTVarIO $ subscribers serv
    forM_ tagMap $ \(subscriberMapSTM, _, _) ->
        -- but each subscriberMap needs to be modified atomically
        atomically . modifyTVar' subscriberMapSTM $ HMap.filter (\(_, ts) -> ts > now)
    threadDelay $ fromEnum purgeInterval `div` 10^6

-- | process the pending relay inbox of incoming posts from the internal queue:
-- Look up responsible relay node for given hashtag and forward post to it
processIncomingPosts :: DHT d => PostService d -> IO ()
processIncomingPosts serv = forever $ do
    -- blocks until available
    deliveriesToProcess <- atomically $ do
        readResult <- readUpToTQueue numParallelDeliveries $ relayInQueue serv
        if null readResult
            then retry
            else pure readResult
    runningJobs <- forM deliveriesToProcess $ \(tag, pID, pContent) -> async $ do
        let pIdUri = "http://" <> (Txt.pack . confServiceHost . serviceConf $ serv) <> ":" <> (fromString . show . confServicePort . serviceConf $ serv) <> "/post/" <> pID
        lookupRes <- lookupKey (baseDHT serv) (Txt.unpack tag)
        case lookupRes of
            -- no vserver active => wait and retry
            Nothing -> threadDelay (10 * 10^6) >> pure (Left "no vserver active")
            Just (responsibleHost, responsiblePort) -> do
                resp <- runClientM (relayInboxClient tag $ pIdUri <> "," <> pContent) (mkClientEnv (httpMan serv) (BaseUrl Http responsibleHost (fromIntegral responsiblePort) ""))
                case resp of
                    Left err -> do
                        -- 410 error indicates outdated responsibility mapping
                        -- Simplification: just invalidate the mapping entry on all errors, force a re-lookup and re-queue the post
                        -- TODO: keep track of maximum retries
                        _ <- forceLookupKey (baseDHT serv) (Txt.unpack tag)
                        atomically . writeTQueue (relayInQueue serv) $ (tag, pID, pContent)
                        pure . Left $ "Error: " <> show err
                    Right _ -> do
                        -- idea for the experiment: each post publication makes the initial posting instance subscribe to all contained tags
                        now <- getPOSIXTime
                        subscriptionStatus <- HMap.lookup (hashtagToId tag) <$> readTVarIO (ownSubscriptions serv)
                        -- if not yet subscribed or subscription expires within 5 minutes, (re)subscribe to tag
                        when (maybe True (\subLease -> now - subLease < 300) subscriptionStatus) $
                            void $ clientSubscribeTo serv tag

                        -- for evaluation, return the tag of the successfully forwarded post
                        pure $ Right tag

    -- collect async results
    results <- mapM waitCatch runningJobs
    -- report the count of published posts for statistics
    atomically . writeTQueue (statsQueue serv) $ StatsEvent PostPublishEvent (length . rights $ results) 0 -- hashtag published to doesn't matter
    pure ()

-- | process the pending fetch jobs of delivered post IDs: delivered posts are
-- fetched from their URI-IDs
fetchTagPosts :: DHT d => PostService d -> IO ()
fetchTagPosts serv = forever $ do
    -- blocks until available
    -- TODO: batching, retry
    -- TODO: process multiple in parallel
    pIdUri <- atomically . readTQueue $ postFetchQueue serv
    fetchReq <- HTTP.parseRequest . Txt.unpack $ pIdUri
    resp <- try $ HTTP.httpLbs fetchReq (httpMan serv) :: IO (Either HTTP.HttpException (HTTP.Response BSUL.ByteString))
    case resp of
        Right response ->
            -- TODO error handling, retry
            --if HTTPT.statusCode (HTTP.responseStatus response) == 200
            --  then
            --      -- success, TODO: statistics
            --  else
            pure ()
        Left _ ->
            -- TODO error handling, retry
            pure ()

relayWorker :: PostService d -> IO ()
relayWorker serv = forever $ do
    -- atomically (to be able to retry) fold a list of due delivery actions
    jobsToProcess <- atomically $ do
        subscriptionMap <- readTVar $ subscribers serv
        jobList <- D.toList <$> foldM (\jobAcc (subscriberMapSTM, _, tag) -> do
            subscriberMap <- readTVar subscriberMapSTM
            foldM (\jobAcc' ((subHost, subPort), (postChan, _)) -> do
                postsToDeliver <- readUpToTChan 500 postChan
                let postDeliveryAction = runClientM (tagDeliveryClient tag (Txt.unlines postsToDeliver)) (mkClientEnv (httpMan serv) (BaseUrl Http subHost (fromIntegral subPort) ""))
                -- append relay push job to job list
                pure $ if not (null postsToDeliver)
                    then jobAcc' `D.snoc` (do
                        deliveryResult <- postDeliveryAction
                        either
                            (const $ pure ())
                            -- on successful push, record that event for statistics
                            (const . atomically . writeTQueue (statsQueue serv) $ StatsEvent RelayDeliveryEvent (length postsToDeliver) (hashtagToId tag))
                            deliveryResult
                        pure deliveryResult
                        )
                    else jobAcc'
                ) jobAcc $ HMap.toList subscriberMap
            ) D.empty subscriptionMap
        -- if no relay jobs, then retry
        if null jobList
            then retry
            else pure jobList

    -- when processing the list, send several deliveries in parallel
    forM_ (chunksOf numParallelDeliveries jobsToProcess) $ \jobset -> do
        runningJobs <- mapM async jobset
        -- so far just dropping failed attempts, TODO: retry mechanism
        results <- mapM waitCatch runningJobs
        let
            successfulResults = rights results
            unsuccessfulResults = lefts results
        unless (null unsuccessfulResults) $ putStrLn ("ERR: " <> show (length unsuccessfulResults) <> " failed deliveries!")
        putStrLn $ "successfully relayed " <> show (length successfulResults)
        pure ()

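-- relayWorker above batches its delivery jobs with chunksOf, assumed here to
-- behave like the usual list-splitting helper, so that at most
-- numParallelDeliveries requests run concurrently per batch:
chunksOfExample :: Bool
chunksOfExample = chunksOf 2 [1,2,3,4,5] == [[1,2],[3,4],[5 :: Int]]
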
-- ======= statistics/measurement and logging =======

data StatsEventType = PostPublishEvent
                    | RelayReceiveEvent
                    | RelayDeliveryEvent
                    | IncomingPostFetchEvent
    deriving (Enum, Show, Eq)

-- | Represents a measurement event of a 'StatsEventType' with a count relevant for a certain key
data StatsEvent = StatsEvent StatsEventType Int NodeID
    deriving (Show, Eq)

-- | measured rates of relay performance
-- TODO: maybe include other metrics in here as well, like number of subscribers?
data RelayStats = RelayStats
    { relayReceiveRates :: RingMap NodeID Double
    -- ^ rate of incoming posts in the responsibility of this relay
    , relayDeliveryRates :: RingMap NodeID Double
    -- ^ rate of relayed outgoing posts
    , postFetchRate :: Double -- no need to differentiate between tags
    -- ^ rate of post fetches served
    , postPublishRate :: Double
    -- ^ rate of initially publishing posts through this instance
    }
    deriving (Show, Eq)

launchStatsThreads :: PostService d -> IO ()
launchStatsThreads serv = do
    -- create shared accumulator
    sharedAccum <- newTVarIO emptyStats
    concurrently_
        (accumulateStatsThread sharedAccum $ statsQueue serv)
        (evaluateStatsThread serv sharedAccum)

-- | Read stats events from the queue and add them to a shared accumulator.
-- Instead of letting events pile up in the queue and allocate linear memory,
-- they are folded into the accumulator immediately.
accumulateStatsThread :: TVar RelayStats -> TQueue StatsEvent -> IO ()
accumulateStatsThread statsAccumulator statsQ = forever $ do
    -- blocks until a stats event arrives
    event <- atomically $ readTQueue statsQ
    -- add the event number to the current accumulator
    atomically $ modifyTVar' statsAccumulator $ statsAdder event

-- | add incoming stats events to accumulator value
statsAdder :: StatsEvent -> RelayStats -> RelayStats
statsAdder event stats = case event of
    StatsEvent PostPublishEvent num _ ->
        stats {postPublishRate = fromIntegral num + postPublishRate stats}
    StatsEvent RelayReceiveEvent num key ->
        stats {relayReceiveRates = sumIfEntryExists key (fromIntegral num) (relayReceiveRates stats)}
    StatsEvent RelayDeliveryEvent num key ->
        stats {relayDeliveryRates = sumIfEntryExists key (fromIntegral num) (relayDeliveryRates stats)}
    StatsEvent IncomingPostFetchEvent num _ ->
        stats {postFetchRate = fromIntegral num + postFetchRate stats}
    where
        sumIfEntryExists = addRMapEntryWith (\newVal oldVal ->
            let toInsert = fromJust $ extractRingEntry newVal
            in
                case oldVal of
                    KeyEntry n -> KeyEntry (n + toInsert)
                    ProxyEntry pointer (Just (KeyEntry n)) -> ProxyEntry pointer (Just (KeyEntry $ n + toInsert))
                    ProxyEntry pointer Nothing -> ProxyEntry pointer (Just newVal)
                    _ -> error "RingMap nested too deeply"
            )

-- | Periodically exchange the accumulated statistics with empty ones, evaluate them
-- and make them the current statistics of the service.
evaluateStatsThread :: PostService d -> TVar RelayStats -> IO ()
evaluateStatsThread serv statsAcc = getPOSIXTime >>= loop
    where
        loop previousTs = do
            threadDelay $ confStatsEvalDelay (serviceConf serv)
            -- get and reset the stats accumulator
            summedStats <- atomically $ do
                stats <- readTVar statsAcc
                writeTVar statsAcc emptyStats
                pure stats
            -- as the transaction might retry several times, current time needs to
            -- be read afterwards
            now <- getPOSIXTime
            -- evaluate stats rate and replace server stats
            -- persistently store in a TVar so it can be retrieved later by the DHT
            let timePassed = (now - previousTs) * fromIntegral (confSpeedupFactor $ serviceConf serv)
                rateStats = evaluateStats timePassed summedStats
            atomically $ writeTVar (loadStats serv) rateStats
            -- and now what? write a log to file
            -- format: timestamp;total relayReceiveRates;total relayDeliveryRates;postPublishRate;postFetchRate;subscriberSum
            -- later: current (reported) load, target load
            subscriberSum <- sumSubscribers
            TxtI.hPutStrLn (logFileHandle serv) $
                format (fixed 9 % ";" % fixed 20 % ";" % fixed 20 % ";" % fixed 20 % ";" % fixed 20 % ";" % int )
                (realToFrac now :: Double)
                (sum . relayReceiveRates $ rateStats)
                (sum . relayDeliveryRates $ rateStats)
                (postPublishRate rateStats)
                (postFetchRate rateStats)
                subscriberSum
            loop now

        sumSubscribers = do
            tagMap <- readTVarIO $ subscribers serv
            foldM (\subscriberSum (subscriberMapSTM, _, _) -> do
                subscriberMap <- readTVarIO subscriberMapSTM
                pure $ subscriberSum + HMap.size subscriberMap
                )
                0 tagMap

-- | Evaluate the accumulated statistic events: currently mostly calculates the event
-- rates by dividing by the collection time frame
evaluateStats :: POSIXTime -> RelayStats -> RelayStats
evaluateStats timeInterval summedStats =
    -- first sum all event numbers, then divide by the number of seconds passed to
    -- get the rate per second
    RelayStats
        { relayReceiveRates = (/ intervalSeconds) <$> relayReceiveRates summedStats
        , relayDeliveryRates = (/ intervalSeconds) <$> relayDeliveryRates summedStats
        , postPublishRate = postPublishRate summedStats / intervalSeconds
        , postFetchRate = postFetchRate summedStats / intervalSeconds
        }
    where
        intervalSeconds = realToFrac timeInterval

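-- A worked example with invented numbers: 120 publish events accumulated
-- over a 60 second window evaluate to a rate of 2.0 posts per second.
publishRateExample :: Double
publishRateExample = postPublishRate $ evaluateStats 60 (emptyStats { postPublishRate = 120 }) -- == 2.0
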
emptyStats :: RelayStats
emptyStats = RelayStats
    { relayReceiveRates = emptyRMap
    , relayDeliveryRates = emptyRMap
    , postFetchRate = 0
    , postPublishRate = 0
    }
    mimeRender _ = BSU.fromString . show

@ -1,37 +0,0 @@
{-# LANGUAGE DataKinds             #-}
{-# LANGUAGE FlexibleInstances     #-}
{-# LANGUAGE InstanceSigs          #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings     #-}
{-# LANGUAGE RankNTypes            #-}
{-# LANGUAGE TypeOperators         #-}
module Hash2Pub.PostService.API where

import Data.Text.Lazy (Text)

import Servant

type PostServiceAPI = "relay" :> "inbox" :> Capture "hashtag" Text :> ReqBody '[PlainText] Text :> PutCreated '[PlainText] NoContent
                    -- delivery endpoint at responsible relay for delivering posts of $tag for distribution
                :<|> "relay" :> "subscribers" :> Capture "senderID" Integer :> ReqBody '[PlainText] Text :> PostNoContent '[PlainText] Text
                    -- endpoint for delivering the subscriptions and outstanding queue
                :<|> "post" :> Capture "postid" Text :> Get '[PlainText] Text
                    -- fetch endpoint for posts, full post ID is http://$domain/post/$postid
                :<|> "posts" :> ReqBody '[PlainText] Text :> Post '[PlainText] Text
                    -- endpoint for fetching multiple posts at once
                :<|> "posts" :> "inbox" :> ReqBody '[PlainText] Text :> PutCreated '[PlainText] NoContent
                    -- delivery endpoint for newly published posts of the relay's instance
                :<|> "tags" :> Capture "hashtag" Text :> ReqBody '[PlainText] Text :> PostCreated '[PlainText] Text
                    -- delivery endpoint for posts of $tag at subscribing instance
                :<|> "tags" :> Capture "hashtag" Text :> "subscribe" :> Header "Origin" Text :> Get '[PlainText] Integer
                    -- endpoint for subscribing the instance specified in
                    -- the Origin header to $hashtag.
                    -- Returns subscription lease time in seconds.
                :<|> "tags" :> Capture "hashtag" Text :> "unsubscribe" :> Header "Origin" Text :> Get '[PlainText] Text
                    -- endpoint for unsubscribing the instance specified in
                    -- the Origin header from $hashtag

-- | needed for guiding type inference
exposedPostServiceAPI :: Proxy PostServiceAPI
exposedPostServiceAPI = Proxy

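-- Given this API type, servant-client can derive one client function per
-- endpoint. A sketch of how the client functions used elsewhere in this diff
-- (relayInboxClient, tagSubscribeClient, ...) would be obtained; the binding
-- names not appearing in the diff are assumptions:
relayInboxClient :<|> subscriptionDeliveryClient :<|> postFetchClient
    :<|> postMultiFetchClient :<|> postInboxClient :<|> tagDeliveryClient
    :<|> tagSubscribeClient :<|> tagUnsubscribeClient
    = client exposedPostServiceAPI
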
@ -1,5 +1,7 @@
module Hash2Pub.ProtocolTypes where

import qualified Data.Map as Map
import Data.Maybe (mapMaybe)
import qualified Data.Set as Set
import Data.Time.Clock.POSIX (POSIXTime)

@ -53,7 +55,6 @@ data ActionPayload = QueryIDRequestPayload
    | LeaveRequestPayload
        { leaveSuccessors :: [RemoteNodeState]
        , leavePredecessors :: [RemoteNodeState]
        , leaveDoMigration :: Bool
        }
    | StabiliseRequestPayload
    | PingRequestPayload

@ -5,61 +5,36 @@ module Hash2Pub.RingMap where

import Data.Foldable (foldr')
import qualified Data.Map.Strict as Map
import Data.Maybe (isJust, isNothing, mapMaybe)
import Data.Maybe (fromJust, isJust, isNothing, mapMaybe)


-- | Class for all types that can be identified via an EpiChord key.
-- Used for restricting the types a 'RingMap' can store
class (Eq a, Show a, Bounded k, Ord k) => HasKeyID k a where
class (Eq a, Show a, Bounded k, Ord k) => HasKeyID a k where
    getKeyID :: a -> k
    keyValuePair :: a -> (k, a)
    keyValuePair val = (getKeyID val, val)

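-- For orientation in the parameter swap below (mainline: HasKeyID k a,
-- refactorRingMap: HasKeyID a k), an instance sketch under the refactored
-- order; the accessor name getNid is an assumption:
-- instance HasKeyID RemoteNodeState NodeID where
--     getKeyID = getNid
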
-- | generic data structure for holding elements with a key and modular lookup
newtype RingMap k a = RingMap { getRingMap :: (Bounded k, Ord k) => Map.Map k (RingEntry k a) }
newtype RingMap a k = RingMap { getRingMap :: (HasKeyID a k, Bounded k, Ord k) => Map.Map k (RingEntry a k) }

instance (Bounded k, Ord k, Eq a) => Eq (RingMap k a) where
instance (HasKeyID a k, Bounded k, Ord k) => Eq (RingMap a k) where
    a == b = getRingMap a == getRingMap b

instance (Bounded k, Ord k, Show k, Show a) => Show (RingMap k a) where
    show rmap = shows ("RingMap " :: String) (show $ getRingMap rmap)


instance (Bounded k, Ord k) => Functor (RingMap k) where
    -- | map a function over all payload values of a 'RingMap'
    fmap f = RingMap . Map.map traversingF . getRingMap
        where
            traversingF (KeyEntry a) = KeyEntry (f a)
            traversingF (ProxyEntry pointer (Just entry)) = ProxyEntry pointer (Just $ traversingF entry)
            traversingF (ProxyEntry pointer Nothing) = ProxyEntry pointer Nothing


instance (Bounded k, Ord k) => Foldable (RingMap k) where
    foldr f initVal = Map.foldr traversingFR initVal . getRingMap
        where
            traversingFR (KeyEntry a) acc = f a acc
            traversingFR (ProxyEntry _ Nothing) acc = acc
            traversingFR (ProxyEntry _ (Just entry)) acc = traversingFR entry acc
    foldl f initVal = Map.foldl traversingFL initVal . getRingMap
        where
            traversingFL acc (KeyEntry a) = f acc a
            traversingFL acc (ProxyEntry _ Nothing) = acc
            traversingFL acc (ProxyEntry _ (Just entry)) = traversingFL acc entry

instance (HasKeyID a k, Bounded k, Ord k, Show k) => Show (RingMap a k) where
    show rmap = shows "RingMap " (show $ getRingMap rmap)

-- | entry of a 'RingMap' that holds a value and can also
-- wrap around the lookup direction at the edges of the name space.
data RingEntry k a = KeyEntry a
                   | ProxyEntry (k, ProxyDirection) (Maybe (RingEntry k a))
data RingEntry a k = KeyEntry a
                   | ProxyEntry (k, ProxyDirection) (Maybe (RingEntry a k))
    deriving (Show, Eq)

-- | as a compromise, only KeyEntry components are ordered by their key
-- while ProxyEntry components must never be ordered.
instance (HasKeyID k a, Eq k, Ord a, Bounded k, Ord k) => Ord (RingEntry k a) where
instance (HasKeyID a k, Eq k, Ord a, Bounded k, Ord k) => Ord (RingEntry a k) where
    a `compare` b = compare (extractID a) (extractID b)
        where
            extractID :: (HasKeyID k a, Ord a, Bounded k, Ord k) => RingEntry k a -> k
            extractID :: (HasKeyID a k, Ord a, Bounded k, Ord k) => RingEntry a k -> k
            extractID (KeyEntry e) = getKeyID e
            extractID ProxyEntry{} = error "proxy entries should never appear outside of the RingMap"

@ -74,51 +49,51 @@ instance Enum ProxyDirection where
    fromEnum Backwards = - 1
    fromEnum Forwards = 1

-- | helper function for getting the a from a RingEntry k a
extractRingEntry :: (Bounded k, Ord k) => RingEntry k a -> Maybe a
-- | helper function for getting the a from a RingEntry a k
extractRingEntry :: (HasKeyID a k, Bounded k, Ord k) => RingEntry a k -> Maybe a
extractRingEntry (KeyEntry entry) = Just entry
extractRingEntry (ProxyEntry _ (Just (KeyEntry entry))) = Just entry
extractRingEntry _ = Nothing

-- | An empty 'RingMap' needs to be initialised with 2 proxy entries,
-- linking the modular name space together by connecting @minBound@ and @maxBound@
emptyRMap :: (Bounded k, Ord k) => RingMap k a
emptyRMap :: (HasKeyID a k, Bounded k, Ord k) => RingMap a k
emptyRMap = RingMap . Map.fromList $ proxyEntry <$> [(maxBound, (minBound, Forwards)), (minBound, (maxBound, Backwards))]
    where
        proxyEntry (from,to) = (from, ProxyEntry to Nothing)

-- | Maybe returns the entry stored at given key
rMapLookup :: (Bounded k, Ord k)
rMapLookup :: (HasKeyID a k, Bounded k, Ord k)
           => k -- ^lookup key
           -> RingMap k a -- ^lookup cache
           -> RingMap a k -- ^lookup cache
           -> Maybe a
rMapLookup key rmap = extractRingEntry =<< Map.lookup key (getRingMap rmap)

-- | returns number of present 'KeyEntry' in a properly initialised 'RingMap'
rMapSize :: (Integral i, Bounded k, Ord k)
         => RingMap k a
rMapSize :: (HasKeyID a k, Integral i, Bounded k, Ord k)
         => RingMap a k
         -> i
rMapSize rmap = fromIntegral $ Map.size innerMap - oneIfEntry rmap minBound - oneIfEntry rmap maxBound
    where
        innerMap = getRingMap rmap
        oneIfEntry :: (Integral i, Bounded k, Ord k) => RingMap k a -> k -> i
        oneIfEntry :: (HasKeyID a k, Integral i, Bounded k, Ord k) => RingMap a k -> k -> i
        oneIfEntry rmap' nid
            | isNothing (rMapLookup nid rmap') = 1
            | otherwise = 0

-- | a wrapper around lookup functions, making the lookup redirectable by a @ProxyEntry@
-- to simulate a modular ring
lookupWrapper :: (Bounded k, Ord k, Num k)
              => (k -> Map.Map k (RingEntry k a) -> Maybe (k, RingEntry k a))
              -> (k -> Map.Map k (RingEntry k a) -> Maybe (k, RingEntry k a))
lookupWrapper :: (HasKeyID a k, Bounded k, Ord k, Num k)
              => (k -> Map.Map k (RingEntry a k) -> Maybe (k, RingEntry a k))
              -> (k -> Map.Map k (RingEntry a k) -> Maybe (k, RingEntry a k))
              -> ProxyDirection
              -> k
              -> RingMap k a
              -> Maybe (k, a)
              -> RingMap a k
              -> Maybe a
lookupWrapper f fRepeat direction key rmap =
    case f key $ getRingMap rmap of
        -- the found proxy entry holds a value
        Just (foundKey, ProxyEntry _ (Just (KeyEntry entry))) -> Just (foundKey, entry)
        Just (_, ProxyEntry _ (Just (KeyEntry entry))) -> Just entry
        -- proxy entry holds another proxy entry, this should not happen
        Just (_, ProxyEntry _ (Just (ProxyEntry _ _))) -> Nothing
        -- proxy entry without own entry is a pointer on where to continue

@ -131,10 +106,10 @@ lookupWrapper f fRepeat direction key rmap =
            then lookupWrapper fRepeat fRepeat direction newKey rmap
            else Nothing
        -- normal entries are returned
        Just (foundKey, KeyEntry entry) -> Just (foundKey, entry)
        Just (_, KeyEntry entry) -> Just entry
        Nothing -> Nothing
    where
        rMapNotEmpty :: (Bounded k, Ord k) => RingMap k a -> Bool
        rMapNotEmpty :: (HasKeyID a k, Bounded k, Ord k) => RingMap a k -> Bool
        rMapNotEmpty rmap' = (Map.size (getRingMap rmap') > 2) -- there are more than the 2 ProxyEntries
                           || isJust (rMapLookup minBound rmap') -- or one of the ProxyEntries holds a node
                           || isJust (rMapLookup maxBound rmap')

@ -142,34 +117,32 @@ lookupWrapper f fRepeat direction key rmap =
-- | find the successor node to a given key on a modular EpiChord ring.
-- Note: The EpiChord definition of "successor" includes the node at the key itself,
-- if existing.
rMapLookupSucc :: (Bounded k, Ord k, Num k)
rMapLookupSucc :: (HasKeyID a k, Bounded k, Ord k, Num k)
               => k -- ^lookup key
               -> RingMap k a -- ^ring cache
               -> Maybe (k, a)
               -> RingMap a k -- ^ring cache
               -> Maybe a
rMapLookupSucc = lookupWrapper Map.lookupGE Map.lookupGE Forwards

-- | find the predecessor node to a given key on a modular EpiChord ring.
rMapLookupPred :: (Bounded k, Ord k, Num k)
rMapLookupPred :: (HasKeyID a k, Bounded k, Ord k, Num k)
               => k -- ^lookup key
               -> RingMap k a -- ^ring cache
               -> Maybe (k, a)
               -> RingMap a k -- ^ring cache
               -> Maybe a
rMapLookupPred = lookupWrapper Map.lookupLT Map.lookupLE Backwards

addRMapEntryWith :: (Bounded k, Ord k)
                 => (RingEntry k a -> RingEntry k a -> RingEntry k a) -- ^ f new_value old_value
                 -> k -- ^ key
                 -> a -- ^ value
                 -> RingMap k a
                 -> RingMap k a
addRMapEntryWith combineFunc key entry = RingMap
    . Map.insertWith combineFunc key (KeyEntry entry)
addRMapEntryWith :: (HasKeyID a k, Bounded k, Ord k)
                 => (RingEntry a k -> RingEntry a k -> RingEntry a k)
                 -> a
                 -> RingMap a k
                 -> RingMap a k
addRMapEntryWith combineFunc entry = RingMap
    . Map.insertWith combineFunc (getKeyID entry) (KeyEntry entry)
    . getRingMap

addRMapEntry :: (Bounded k, Ord k)
             => k -- ^ key
             -> a -- ^ value
             -> RingMap k a
             -> RingMap k a
addRMapEntry :: (HasKeyID a k, Bounded k, Ord k)
             => a
             -> RingMap a k
             -> RingMap a k
addRMapEntry = addRMapEntryWith insertCombineFunction
    where
        insertCombineFunction newVal oldVal =

@ -178,30 +151,30 @@ addRMapEntry = addRMapEntryWith insertCombineFunction
                KeyEntry _ -> newVal


addRMapEntries :: (Foldable t, Bounded k, Ord k)
               => t (k, a)
               -> RingMap k a
               -> RingMap k a
addRMapEntries entries rmap = foldr' (\(k, v) rmap' -> addRMapEntry k v rmap') rmap entries
addRMapEntries :: (Foldable t, HasKeyID a k, Bounded k, Ord k)
               => t a
               -> RingMap a k
               -> RingMap a k
addRMapEntries entries rmap = foldr' addRMapEntry rmap entries

setRMapEntries :: (Foldable t, Bounded k, Ord k)
               => t (k, a)
               -> RingMap k a
setRMapEntries :: (Foldable t, HasKeyID a k, Bounded k, Ord k)
               => t a
               -> RingMap a k
setRMapEntries entries = addRMapEntries entries emptyRMap

deleteRMapEntry :: (Bounded k, Ord k)
deleteRMapEntry :: (HasKeyID a k, Bounded k, Ord k)
                => k
                -> RingMap k a
                -> RingMap k a
                -> RingMap a k
                -> RingMap a k
deleteRMapEntry nid = RingMap . Map.update modifier nid . getRingMap
    where
        modifier (ProxyEntry idPointer _) = Just (ProxyEntry idPointer Nothing)
        modifier KeyEntry {} = Nothing

rMapToList :: (Bounded k, Ord k) => RingMap k a -> [a]
rMapToList :: (HasKeyID a k, Bounded k, Ord k) => RingMap a k -> [a]
rMapToList = mapMaybe extractRingEntry . Map.elems . getRingMap

rMapFromList :: (Bounded k, Ord k) => [(k, a)] -> RingMap k a
rMapFromList :: (HasKeyID a k, Bounded k, Ord k) => [a] -> RingMap a k
rMapFromList = setRMapEntries

-- | takes up to i entries from a 'RingMap' by calling a getter function on a
@ -209,64 +182,49 @@ rMapFromList = setRMapEntries
-- Stops once i entries have been taken or an entry has been encountered twice
-- (meaning the ring has been traversed completely).
-- Forms the basis for 'takeRMapSuccessors' and 'takeRMapPredecessors'.
takeRMapEntries_ :: (Integral i, Bounded k, Ord k)
                 => (k -> RingMap k a -> Maybe (k, a)) -- ^ parameterisable getter function to determine lookup direction
                 -> k -- ^ starting key
                 -> i -- ^ number of maximum values to take
                 -> RingMap k a
                 -> [a] -- ^ values taken
takeRMapEntries_ :: (HasKeyID a k, Integral i, Bounded k, Ord k)
                 => (k -> RingMap a k -> Maybe a)
                 -> k
                 -> i
                 -> RingMap a k
                 -> [a]
-- TODO: might be more efficient with dlists
takeRMapEntries_ getterFunc startAt num rmap = reverse $
    case getterFunc startAt rmap of
        Nothing -> []
        Just (foundKey, anEntry) -> takeEntriesUntil_ rmap getterFunc foundKey foundKey (Just $ num-1) [anEntry]
        Just anEntry -> takeEntriesUntil rmap getterFunc (getKeyID anEntry) (getKeyID anEntry) (num-1) [anEntry]

takeEntriesUntil_ :: (Integral i, Bounded k, Ord k)
                  => RingMap k a
                  -> (k -> RingMap k a -> Maybe (k, a)) -- getter function
                  -> k -- limit value
                  -> k -- start value
                  -> Maybe i -- possible number limit
                  -> [a]
                  -> [a]
takeEntriesUntil_ _rmap' _getterFunc' _havingReached _previousEntry (Just remaining) takeAcc
    -- length limit reached
    | remaining <= 0 = takeAcc
takeEntriesUntil_ rmap' getterFunc' havingReached previousEntry numLimit takeAcc =
    case nextEntry of
        Just (fKey, gotEntry)
            | fKey == havingReached -> takeAcc
            | otherwise -> takeEntriesUntil_ rmap' getterFunc' havingReached fKey (fmap pred numLimit) (gotEntry:takeAcc)
        Nothing -> takeAcc
    where
        nextEntry = getterFunc' previousEntry rmap'

-- for some reason, just reusing the already-bound @rmap@ and @getterFunc@
-- variables leads to a type error; these need to be passed explicitly
takeEntriesUntil :: (HasKeyID a k, Integral i, Bounded k, Ord k)
                 => RingMap a k
                 -> (k -> RingMap a k -> Maybe a) -- getter function
                 -> k
                 -> k
                 -> i
                 -> [a]
                 -> [a]
takeEntriesUntil rmap' getterFunc' havingReached previousEntry remaining takeAcc
    | remaining <= 0 = takeAcc
    | getKeyID (fromJust $ getterFunc' previousEntry rmap') == havingReached = takeAcc
    | otherwise = let (Just gotEntry) = getterFunc' previousEntry rmap'
                  in takeEntriesUntil rmap' getterFunc' havingReached (getKeyID gotEntry) (remaining-1) (gotEntry:takeAcc)

takeRMapPredecessors :: (Integral i, Bounded k, Ord k, Num k)
takeRMapPredecessors :: (HasKeyID a k, Integral i, Bounded k, Ord k, Num k)
                     => k
                     -> i
                     -> RingMap k a
                     -> RingMap a k
                     -> [a]
takeRMapPredecessors = takeRMapEntries_ rMapLookupPred

takeRMapSuccessors :: (Integral i, Bounded k, Ord k, Num k)
takeRMapSuccessors :: (HasKeyID a k, Integral i, Bounded k, Ord k, Num k)
                   => k
                   -> i
                   -> RingMap k a
                   -> RingMap a k
                   -> [a]
takeRMapSuccessors = takeRMapEntries_ rMapLookupSucc

takeRMapPredecessorsFromTo :: (Bounded k, Ord k, Num k)
                           => k -- start value for taking
                           -> k -- stop value for taking
                           -> RingMap k a
                           -> [a]
takeRMapPredecessorsFromTo fromVal toVal rmap = takeEntriesUntil_ rmap rMapLookupPred toVal fromVal Nothing []
-- clean up cache entries: once now - entry > maxAge
-- transfer difference now - entry to other node

takeRMapSuccessorsFromTo :: (Bounded k, Ord k, Num k)
                         => k -- start value for taking
                         -> k -- stop value for taking
                         -> RingMap k a
                         -> [a]
takeRMapSuccessorsFromTo fromVal toVal rmap = takeEntriesUntil_ rmap rMapLookupSucc toVal fromVal Nothing []

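-- Behaviour sketch for the take functions, assuming a ring holding KeyEntry
-- values at keys 10, 20 and 30:
-- takeRMapSuccessors 15 2 ring   -- yields the entries at keys 20 and 30
-- takeRMapPredecessors 15 2 ring -- yields the entries at keys 10 and 30 (wrapping around)
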
src/Hash2Pub/ServiceTypes.hs (new file, 9 lines)

@ -0,0 +1,9 @@
{-# LANGUAGE MultiParamTypeClasses #-}
module Hash2Pub.ServiceTypes where

import Hash2Pub.FediChord (DHT (..))

class Service s d where
    -- | run the service
    runService :: (Integral i) => d -> String -> i -> IO (s d)
    getServicePort :: (Integral i) => s d -> i

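-- A minimal sketch of an instance of this class; the NoOpService type is
-- invented here (compare the MockService instance in the FediChordSpec hunk
-- further down):
data NoOpService d = NoOpService

instance Service NoOpService d where
    runService _ _ _ = pure NoOpService
    getServicePort = const 0
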
@ -1,5 +1,3 @@
{-# LANGUAGE FlexibleInstances     #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings     #-}
module FediChordSpec where

@ -189,7 +187,6 @@ spec = do
            lReqPayload = LeaveRequestPayload {
                  leaveSuccessors = someNodes
                , leavePredecessors = someNodes
                , leaveDoMigration = True
                }
            stabReqPayload = StabiliseRequestPayload
            pingReqPayload = PingRequestPayload

@ -295,15 +292,12 @@ exampleNodeState = RemoteNodeState {
  , vServerID = 0
  }

exampleLocalNode :: IO (LocalNodeState MockService)
exampleLocalNode = do
    realNode <- newTVarIO $ RealNode {
exampleLocalNode :: IO LocalNodeState
exampleLocalNode = nodeStateInit =<< (newTVarIO $ RealNode {
        vservers = []
      , nodeConfig = exampleFediConf
      , bootstrapNodes = confBootstrapNodes exampleFediConf
      , nodeService = MockService
      }
    nodeStateInit realNode
      })

exampleFediConf :: FediChordConf

@ -319,9 +313,3 @@ exampleVs :: (Integral i) => i
exampleVs = 4

exampleIp :: HostAddress6
exampleIp = tupleToHostAddress6 (0x2001, 0x16b8, 0x755a, 0xb110, 0x7d6a, 0x12ab, 0xf0c5, 0x386e)

data MockService d = MockService

instance DHT d => Service MockService d where
    runService _ _ = pure MockService
    getListeningPortFromService = const 1337