forked from schmittlauch/Hash2Pub
parent 66be1cc2b6
commit c1f8ec2c56
@@ -6,6 +6,7 @@ import Control.Exception
 import Network.Socket
 import qualified Data.Map.Strict as Map
 import qualified Data.ByteString as BS
+import qualified Data.Set as Set
 
 import Hash2Pub.FediChord
 import Hash2Pub.DHTProtocol
@@ -108,30 +109,33 @@ spec = do
             nid . cacheGetNodeStateUnvalidated <$> cacheLookupSucc (anotherID + 2) cacheWithProxyNodeEntry `shouldBe` Just maxBound
 
 
-    describe "NodeCache query lookups" $ do
+    describe "NodeCache query lookup" $ do
         let
-            emptyCache = Map.empty :: NodeCache
-            node1 = exampleNodeState { nid = toNodeID 2^(23::Integer)+1}
-            node2 = exampleNodeState { nid = toNodeID 2^(230::Integer)+12}
-            node3 = exampleNodeState { nid = toNodeID 2^(25::Integer)+10}
-            node4 = exampleNodeState { nid = toNodeID 2^(9::Integer)+100}
-            --cacheWith2Entries <- mapM (\n -> addCacheEntry n 0) [node1, node2] $ emptyCache
+            emptyCache = nodeCache exampleNodeState
+            nid1 = toNodeID 2^(23::Integer)+1
+            node1 = exampleNodeState { nid = nid1}
+            nid2 = toNodeID 2^(230::Integer)+12
+            node2 = exampleNodeState { nid = nid2}
+            nid3 = toNodeID 2^(25::Integer)+10
+            node3 = exampleNodeState { nid = nid3}
+            nid4 = toNodeID 2^(9::Integer)+100
+            node4 = exampleNodeState { nid = nid4}
             cacheWith2Entries = addCacheEntry node1 120 =<< addCacheEntry node2 0 emptyCache
             cacheWith4Entries = addCacheEntry node3 110 =<< addCacheEntry node4 0 =<< cacheWith2Entries
-        it "work on an empty cache" $ do
-            incomingQuery exampleNodeState emptyCache 3 (toNodeID 2^(25::Integer)) `shouldBe` FORWARD []
-            incomingQuery exampleNodeState emptyCache 1 (toNodeID 2342) `shouldBe` FORWARD []
-        it "work on a cache with less entries than needed" $ do
+        it "works on an empty cache" $ do
+            incomingQuery exampleNodeState emptyCache 3 (toNodeID 2^(9::Integer)+5) `shouldBe` FORWARD Set.empty
+            incomingQuery exampleNodeState emptyCache 1 (toNodeID 2342) `shouldBe` FORWARD Set.empty
+        it "works on a cache with less entries than needed" $ do
             c2 <- cacheWith2Entries
-            let (FORWARD nodelist) = incomingQuery exampleNodeState emptyCache 3 (toNodeID 2^(25::Integer))
-            map (nid . cacheGetNodeStateUnvalidated) nodelist `shouldBe` []
-        it "work on a cache with sufficient entries" $ do
+            let (FORWARD nodeset) = incomingQuery exampleNodeState c2 4 (toNodeID 2^(9::Integer)+5)
+            Set.map (nid . cacheGetNodeStateUnvalidated) nodeset `shouldBe` Set.fromList [ nid1, nid2 ]
+        it "works on a cache with sufficient entries" $ do
            c4 <- cacheWith4Entries
            let
-                (FORWARD nodelist1) = incomingQuery exampleNodeState c4 3 (toNodeID 2342)
-                (FORWARD nodelist2) = incomingQuery exampleNodeState c4 1 (toNodeID 2342)
-            map (nid . cacheGetNodeStateUnvalidated) nodelist1 `shouldBe` []
-            map (nid . cacheGetNodeStateUnvalidated) nodelist2 `shouldBe` []
+                (FORWARD nodeset1) = incomingQuery exampleNodeState c4 3 (toNodeID 2^(9::Integer)+5)
+                (FORWARD nodeset2) = incomingQuery exampleNodeState c4 1 (toNodeID 2^(9::Integer)+5)
+            Set.map (nid . cacheGetNodeStateUnvalidated) nodeset1 `shouldBe` Set.fromList [nid4, nid2, nid3]
+            Set.map (nid . cacheGetNodeStateUnvalidated) nodeset2 `shouldBe` Set.fromList [nid4]
 
 
 -- some example data
 
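The updated tests compare the FORWARD payload of a query response as a Data.Set instead of a list, using Set.map and Set.fromList rather than map and list literals. The following standalone sketch only illustrates that pattern: QueryResponse, NodeID, FOUND and forwardCandidates here are toy stand-ins and assumptions, not Hash2Pub's actual definitions; the real tests exercise incomingQuery, nid, cacheGetNodeStateUnvalidated and exampleNodeState from the imported modules.

-- Standalone sketch (toy types, not Hash2Pub's API) of a FORWARD response
-- carrying a Set instead of a list, as the updated expectations assume.
module ForwardSetSketch where

import           Data.List (sortOn)
import           Data.Set  (Set)
import qualified Data.Set  as Set

type NodeID = Integer

-- Toy response type: FORWARD wraps a Set, so duplicate entries collapse
-- and the order of candidates no longer affects equality checks.
data QueryResponse = FOUND NodeID
                   | FORWARD (Set NodeID)
                   deriving (Eq, Show)

-- Hypothetical helper: return at most n cached IDs closest to the target,
-- wrapped in FORWARD.
forwardCandidates :: Int -> NodeID -> Set NodeID -> QueryResponse
forwardCandidates n target =
    FORWARD . Set.fromList . take n . sortOn (\i -> abs (i - target)) . Set.toList

-- Example: forwardCandidates 2 5 (Set.fromList [1, 4, 9, 100])
--          == FORWARD (Set.fromList [1, 4])

Comparing sets rather than lists makes the expected results independent of iteration order, which is presumably why the expectations above use Set.fromList.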