reduce logging verbosity
parent f330ff1070
commit 5511026c8d

@@ -259,7 +259,6 @@ handleIncomingRequest :: Service s (RealNodeSTM s)
                       -> SockAddr                 -- ^ source address of the request
                       -> IO ()
 handleIncomingRequest nsSTM sendQ msgSet sourceAddr = do
-    putStrLn $ "handling incoming request: " <> show msgSet
     ns <- readTVarIO nsSTM
     -- add nodestate to cache
     now <- getPOSIXTime
@@ -314,7 +313,6 @@ handleIncomingRequest nsSTM sendQ msgSet sourceAddr = do
 -- | execute a key ID lookup on local cache and respond with the result
 respondQueryID :: LocalNodeStateSTM s -> Set.Set FediChordMessage -> IO (Map.Map Integer BS.ByteString)
 respondQueryID nsSTM msgSet = do
-    putStrLn "responding to a QueryID request"
     -- this message cannot be split reasonably, so just
     -- consider the first payload
     let
@@ -749,7 +747,6 @@ sendRequestTo timeoutMillis numAttempts msgIncomplete sock = do
     let
         msgComplete = msgIncomplete randomID
         requests = serialiseMessage sendMessageSize msgComplete
-    putStrLn $ "sending request message " <> show msgComplete
     -- create a queue for passing received response messages back, even after a timeout
     responseQ <- newTBQueueIO $ 2*maximumParts -- keep room for duplicate packets
     -- start sendAndAck with timeout
@@ -716,7 +716,7 @@ fediMessageHandler sendQ recvQ nsSTM = do
 
 instance DHT (RealNodeSTM s) where
     lookupKey nodeSTM keystring = getKeyResponsibility nodeSTM $ genKeyID keystring
-    forceLookupKey nodeSTM keystring = updateLookupCache nodeSTM $ genKeyID keystring
+    forceLookupKey nodeSTM keystring = (putStrLn $ "forced responsibility lookup of #" <> keystring) >> (updateLookupCache nodeSTM $ genKeyID keystring)
     -- potential better implementation: put all neighbours of all vservers and the vservers on a ringMap, look the key up and see whether it results in a LocalNodeState
     isResponsibleFor nodeSTM key = do
         node <- readTVarIO nodeSTM
@@ -585,8 +585,8 @@ processIncomingPosts serv = forever $ do
             -- TODO: keep track of maximum retries
             _ <- forceLookupKey (baseDHT serv) (Txt.unpack tag)
             atomically . writeTQueue (relayInQueue serv) $ (tag, pID, pContent)
-        Right yay -> do
-            putStrLn $ "Yay! " <> show yay
+        Right _ -> do
+            -- TODO: stats
             -- idea for the experiment: each post publication makes the initial posting instance subscribe to all contained tags
             now <- getPOSIXTime
             subscriptionStatus <- HMap.lookup (genKeyID . Txt.unpack $ tag) <$> readTVarIO (ownSubscriptions serv)