From 133ddfefd7196570a01f8868603588027e9db3dd Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Wed, 28 Sep 2022 12:01:00 +1000 Subject: [PATCH 1/7] Run `cargo clippy --workspace --all-features --all-targets --fix` Previously, we were running clippy without --workspace which left out examples and tests of all sub-crates. --- core/benches/peer_id.rs | 2 +- core/src/identity/rsa.rs | 6 +- core/src/peer_id.rs | 10 +- misc/multistream-select/src/protocol.rs | 2 +- muxers/mplex/benches/split_send_size.rs | 2 +- muxers/mplex/src/io.rs | 2 +- protocols/autonat/tests/test_client.rs | 2 +- protocols/dcutr/tests/lib.rs | 11 +- protocols/gossipsub/src/behaviour/tests.rs | 320 +++++++----------- protocols/gossipsub/src/mcache.rs | 20 +- protocols/gossipsub/src/peer_score/tests.rs | 75 ++-- protocols/gossipsub/src/protocol.rs | 12 +- protocols/gossipsub/src/rpc_proto.rs | 2 +- .../gossipsub/src/subscription_filter.rs | 20 +- protocols/gossipsub/tests/smoke.rs | 4 +- protocols/identify/src/identify.rs | 2 +- protocols/kad/src/behaviour/test.rs | 40 +-- protocols/kad/src/jobs.rs | 8 +- protocols/kad/src/kbucket/bucket.rs | 2 +- protocols/kad/src/query/peers/closest.rs | 4 +- .../kad/src/query/peers/closest/disjoint.rs | 41 +-- protocols/kad/src/record/store/memory.rs | 6 +- protocols/mdns/src/behaviour/iface/query.rs | 6 +- protocols/ping/tests/ping.rs | 20 +- protocols/relay/src/v2/copy_future.rs | 2 +- protocols/relay/tests/v2.rs | 12 +- protocols/rendezvous/examples/discover.rs | 2 +- protocols/rendezvous/tests/harness.rs | 7 +- protocols/rendezvous/tests/rendezvous.rs | 4 +- swarm/src/connection.rs | 2 +- swarm/src/lib.rs | 14 +- swarm/src/registry.rs | 2 +- swarm/src/test.rs | 14 +- transports/noise/src/protocol/x25519.rs | 2 +- transports/uds/src/lib.rs | 1 - 35 files changed, 290 insertions(+), 391 deletions(-) diff --git a/core/benches/peer_id.rs b/core/benches/peer_id.rs index 9a6935113ec..e9fec2f18d3 100644 --- a/core/benches/peer_id.rs +++ b/core/benches/peer_id.rs @@ -39,7 +39,7 @@ fn clone(c: &mut Criterion) { c.bench_function("clone", |b| { b.iter(|| { - black_box(peer_id.clone()); + black_box(peer_id); }) }); } diff --git a/core/src/identity/rsa.rs b/core/src/identity/rsa.rs index 497dca40de5..54dbe47f697 100644 --- a/core/src/identity/rsa.rs +++ b/core/src/identity/rsa.rs @@ -307,9 +307,9 @@ mod tests { use super::*; use quickcheck::*; - const KEY1: &'static [u8] = include_bytes!("test/rsa-2048.pk8"); - const KEY2: &'static [u8] = include_bytes!("test/rsa-3072.pk8"); - const KEY3: &'static [u8] = include_bytes!("test/rsa-4096.pk8"); + const KEY1: &[u8] = include_bytes!("test/rsa-2048.pk8"); + const KEY2: &[u8] = include_bytes!("test/rsa-3072.pk8"); + const KEY3: &[u8] = include_bytes!("test/rsa-4096.pk8"); #[derive(Clone, Debug)] struct SomeKeypair(Keypair); diff --git a/core/src/peer_id.rs b/core/src/peer_id.rs index cbe0a13395c..9e7a1f238cf 100644 --- a/core/src/peer_id.rs +++ b/core/src/peer_id.rs @@ -286,10 +286,10 @@ mod tests { #[test] fn extract_peer_id_from_multi_address() { - let address = - format!("/memory/1234/p2p/12D3KooWGQmdpzHXCqLno4mMxWXKNFQHASBeF99gTm2JR8Vu5Bdc") - .parse() - .unwrap(); + let address = "/memory/1234/p2p/12D3KooWGQmdpzHXCqLno4mMxWXKNFQHASBeF99gTm2JR8Vu5Bdc" + .to_string() + .parse() + .unwrap(); let peer_id = PeerId::try_from_multiaddr(&address).unwrap(); @@ -303,7 +303,7 @@ mod tests { #[test] fn no_panic_on_extract_peer_id_from_multi_address_if_not_present() { - let address = format!("/memory/1234").parse().unwrap(); + let address = 
"/memory/1234".to_string().parse().unwrap(); let maybe_empty = PeerId::try_from_multiaddr(&address); diff --git a/misc/multistream-select/src/protocol.rs b/misc/multistream-select/src/protocol.rs index b762aa34343..5809e1768d6 100644 --- a/misc/multistream-select/src/protocol.rs +++ b/misc/multistream-select/src/protocol.rs @@ -487,7 +487,7 @@ mod tests { fn prop(msg: Message) { let mut buf = BytesMut::new(); msg.encode(&mut buf) - .expect(&format!("Encoding message failed: {:?}", msg)); + .unwrap_or_else(|_| panic!("Encoding message failed: {:?}", msg)); match Message::decode(buf.freeze()) { Ok(m) => assert_eq!(m, msg), Err(e) => panic!("Decoding failed: {:?}", e), diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs index c4946e337d0..8d6803880d6 100644 --- a/muxers/mplex/benches/split_send_size.rs +++ b/muxers/mplex/benches/split_send_size.rs @@ -53,7 +53,7 @@ const BENCH_SIZES: [usize; 8] = [ fn prepare(c: &mut Criterion) { let _ = env_logger::try_init(); - let payload: Vec = vec![1; 1024 * 1024 * 1]; + let payload: Vec = vec![1; 1024 * 1024]; let mut tcp = c.benchmark_group("tcp"); let tcp_addr = multiaddr![Ip4(std::net::Ipv4Addr::new(127, 0, 0, 1)), Tcp(0u16)]; diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs index c5ad6bf105d..9d0d9939c28 100644 --- a/muxers/mplex/src/io.rs +++ b/muxers/mplex/src/io.rs @@ -1326,7 +1326,7 @@ mod tests { w_buf: BytesMut::new(), eof: false, }; - let mut m = Multiplexed::new(conn, cfg.clone()); + let mut m = Multiplexed::new(conn, cfg); // Run the test. let mut opened = HashSet::new(); diff --git a/protocols/autonat/tests/test_client.rs b/protocols/autonat/tests/test_client.rs index 72088f6e232..d5dfe75eb85 100644 --- a/protocols/autonat/tests/test_client.rs +++ b/protocols/autonat/tests/test_client.rs @@ -276,7 +276,7 @@ async fn test_confidence() { } } else { let unreachable_addr: Multiaddr = "/ip4/127.0.0.1/tcp/42".parse().unwrap(); - client.add_external_address(unreachable_addr.clone(), AddressScore::Infinite); + client.add_external_address(unreachable_addr, AddressScore::Infinite); } for i in 0..MAX_CONFIDENCE + 1 { diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index cf2c07c8707..64aca18596b 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -53,7 +53,6 @@ fn connect() { let mut dst = build_client(); let dst_peer_id = *dst.local_peer_id(); let dst_relayed_addr = relay_addr - .clone() .with(Protocol::P2p(relay_peer_id.into())) .with(Protocol::P2pCircuit) .with(Protocol::P2p(dst_peer_id.into())); @@ -96,7 +95,7 @@ fn connect() { fn build_relay() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); let local_public_key = local_key.public(); - let local_peer_id = local_public_key.clone().to_peer_id(); + let local_peer_id = local_public_key.to_peer_id(); let transport = build_transport(MemoryTransport::default().boxed(), local_public_key); @@ -116,7 +115,7 @@ fn build_relay() -> Swarm { fn build_client() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); let local_public_key = local_key.public(); - let local_peer_id = local_public_key.clone().to_peer_id(); + let local_peer_id = local_public_key.to_peer_id(); let (relay_transport, behaviour) = client::Client::new_transport_and_behaviour(local_peer_id); let transport = build_transport( @@ -141,13 +140,11 @@ fn build_transport( where StreamSink: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - let transport = transport + transport .upgrade(Version::V1) 
.authenticate(PlainText2Config { local_public_key }) .multiplex(libp2p::yamux::YamuxConfig::default()) - .boxed(); - - transport + .boxed() } #[derive(NetworkBehaviour)] diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index f405daa0a2e..a68883e68a3 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -203,11 +203,7 @@ mod tests { 0, // first connection ); if let Some(kind) = kind { - gs.inject_event( - peer.clone(), - ConnectionId::new(1), - HandlerEvent::PeerKind(kind), - ); + gs.inject_event(peer, ConnectionId::new(1), HandlerEvent::PeerKind(kind)); } if explicit { gs.add_explicit_peer(&peer); @@ -426,11 +422,11 @@ mod tests { for topic_hash in &topic_hashes { assert!( - gs.topic_peers.get(&topic_hash).is_some(), + gs.topic_peers.get(topic_hash).is_some(), "Topic_peers contain a topic entry" ); assert!( - gs.mesh.get(&topic_hash).is_some(), + gs.mesh.get(topic_hash).is_some(), "mesh should contain a topic entry" ); } @@ -474,7 +470,7 @@ mod tests { // check we clean up internal structures for topic_hash in &topic_hashes { assert!( - gs.mesh.get(&topic_hash).is_none(), + gs.mesh.get(topic_hash).is_none(), "All topics should have been removed from the mesh" ); } @@ -643,7 +639,7 @@ mod tests { .fold(vec![], |mut collected_publish, e| match e { NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { GossipsubHandlerIn::Message(ref message) => { - let event = proto_to_message(&message); + let event = proto_to_message(message); for s in &event.messages { collected_publish.push(s.clone()); } @@ -665,7 +661,7 @@ mod tests { ) .unwrap(); - let msg_id = gs.config.message_id(&message); + let msg_id = gs.config.message_id(message); let config: GossipsubConfig = GossipsubConfig::default(); assert_eq!( @@ -719,7 +715,7 @@ mod tests { assert_eq!( gs.fanout - .get(&TopicHash::from_raw(fanout_topic.clone())) + .get(&TopicHash::from_raw(fanout_topic)) .unwrap() .len(), gs.config.mesh_n(), @@ -733,7 +729,7 @@ mod tests { .fold(vec![], |mut collected_publish, e| match e { NetworkBehaviourAction::NotifyHandler { event, .. 
} => match **event { GossipsubHandlerIn::Message(ref message) => { - let event = proto_to_message(&message); + let event = proto_to_message(message); for s in &event.messages { collected_publish.push(s.clone()); } @@ -755,7 +751,7 @@ mod tests { ) .unwrap(); - let msg_id = gs.config.message_id(&message); + let msg_id = gs.config.message_id(message); assert_eq!( publishes.len(), @@ -888,7 +884,7 @@ mod tests { for topic_hash in topic_hashes[..3].iter() { let topic_peers = gs.topic_peers.get(topic_hash).unwrap().clone(); assert!( - topic_peers == peers[..2].into_iter().cloned().collect(), + topic_peers == peers[..2].iter().cloned().collect(), "Two peers should be added to the first three topics" ); } @@ -896,7 +892,7 @@ mod tests { // Peer 0 unsubscribes from the first topic gs.handle_received_subscriptions( - &vec![GossipsubSubscription { + &[GossipsubSubscription { action: GossipsubSubscriptionAction::Unsubscribe, topic_hash: topic_hashes[0].clone(), }], @@ -905,13 +901,13 @@ mod tests { let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); assert!( - peer_topics == topic_hashes[1..3].into_iter().cloned().collect(), + peer_topics == topic_hashes[1..3].iter().cloned().collect(), "Peer should be subscribed to two topics" ); let topic_peers = gs.topic_peers.get(&topic_hashes[0]).unwrap().clone(); // only gossipsub at the moment assert!( - topic_peers == peers[1..2].into_iter().cloned().collect(), + topic_peers == peers[1..2].iter().cloned().collect(), "Only the second peers should be in the first topic" ); } @@ -928,7 +924,7 @@ mod tests { let mut gs: Gossipsub = Gossipsub::new(MessageAuthenticity::Anonymous, gs_config).unwrap(); // create a topic and fill it with some peers - let topic_hash = Topic::new("Test").hash().clone(); + let topic_hash = Topic::new("Test").hash(); let mut peers = vec![]; for _ in 0..20 { peers.push(PeerId::random()) @@ -941,7 +937,7 @@ mod tests { .iter() .map(|p| { ( - p.clone(), + *p, PeerConnections { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new(1)], @@ -983,13 +979,13 @@ mod tests { get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 0, |_| { true }); - assert!(random_peers.len() == 0, "Expected 0 peers to be returned"); + assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); // test the filter let random_peers = get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 5, |_| { false }); - assert!(random_peers.len() == 0, "Expected 0 peers to be returned"); + assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); let random_peers = get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 10, { |peer| peers.contains(peer) @@ -1007,7 +1003,7 @@ mod tests { .create_network(); let raw_message = RawGossipsubMessage { - source: Some(peers[11].clone()), + source: Some(peers[11]), data: vec![1, 2, 3, 4], sequence_number: Some(1u64), topic: TopicHash::from_raw("topic"), @@ -1022,7 +1018,7 @@ mod tests { .inbound_transform(raw_message.clone()) .unwrap(); - let msg_id = gs.config.message_id(&message); + let msg_id = gs.config.message_id(message); gs.mcache.put(&msg_id, raw_message); gs.handle_iwant(&peers[7], vec![msg_id.clone()]); @@ -1034,7 +1030,7 @@ mod tests { .fold(vec![], |mut collected_messages, e| match e { NetworkBehaviourAction::NotifyHandler { event, .. 
} => { if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(&m); + let event = proto_to_message(m); for c in &event.messages { collected_messages.push(c.clone()) } @@ -1065,7 +1061,7 @@ mod tests { // perform 10 memshifts and check that it leaves the cache for shift in 1..10 { let raw_message = RawGossipsubMessage { - source: Some(peers[11].clone()), + source: Some(peers[11]), data: vec![1, 2, 3, 4], sequence_number: Some(shift), topic: TopicHash::from_raw("topic"), @@ -1080,7 +1076,7 @@ mod tests { .inbound_transform(raw_message.clone()) .unwrap(); - let msg_id = gs.config.message_id(&message); + let msg_id = gs.config.message_id(message); gs.mcache.put(&msg_id, raw_message); for _ in 0..shift { gs.mcache.shift(); @@ -1266,7 +1262,7 @@ mod tests { let (mut gs, peers, topic_hashes) = inject_nodes1() .peer_no(20) - .topics(topics.clone()) + .topics(topics) .to_subscribe(true) .create_network(); @@ -1487,7 +1483,7 @@ mod tests { //only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!( gs.mesh[&topic_hashes[0]], - vec![peers[1].clone()].into_iter().collect() + vec![peers[1]].into_iter().collect() ); //assert that graft gets created to non-explicit peer @@ -1553,7 +1549,7 @@ mod tests { let local_id = PeerId::random(); let message = RawGossipsubMessage { - source: Some(peers[1].clone()), + source: Some(peers[1]), data: vec![12], sequence_number: Some(0), topic: topic_hashes[0].clone(), @@ -1604,7 +1600,7 @@ mod tests { let topic_hash = topic.hash(); for i in 0..2 { gs.handle_received_subscriptions( - &vec![GossipsubSubscription { + &[GossipsubSubscription { action: GossipsubSubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), }], @@ -1616,10 +1612,7 @@ mod tests { gs.subscribe(&topic).unwrap(); //only peer 1 is in the mesh not peer 0 (which is an explicit peer) - assert_eq!( - gs.mesh[&topic_hash], - vec![peers[1].clone()].into_iter().collect() - ); + assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); //assert that graft gets created to non-explicit peer assert!( @@ -1659,7 +1652,7 @@ mod tests { let topic_hash = topic.hash(); for i in 0..2 { gs.handle_received_subscriptions( - &vec![GossipsubSubscription { + &[GossipsubSubscription { action: GossipsubSubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), }], @@ -1674,10 +1667,7 @@ mod tests { gs.subscribe(&topic).unwrap(); //only peer 1 is in the mesh not peer 0 (which is an explicit peer) - assert_eq!( - gs.mesh[&topic_hash], - vec![peers[1].clone()].into_iter().collect() - ); + assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); //assert that graft gets created to non-explicit peer assert!( @@ -1715,7 +1705,7 @@ mod tests { let local_id = PeerId::random(); let message = RawGossipsubMessage { - source: Some(peers[1].clone()), + source: Some(peers[1]), data: vec![], sequence_number: Some(0), topic: topic_hashes[0].clone(), @@ -1725,7 +1715,7 @@ mod tests { }; //forward the message - gs.handle_received_message(message.clone(), &local_id); + gs.handle_received_message(message, &local_id); //simulate multiple gossip calls (for randomness) for _ in 0..3 { @@ -1860,7 +1850,7 @@ mod tests { //all dial peers must be in px assert!(dials_set.is_subset( &px.iter() - .map(|i| i.peer_id.as_ref().unwrap().clone()) + .map(|i| *i.peer_id.as_ref().unwrap()) .collect::>() )); } @@ -1879,7 +1869,7 @@ mod tests { //send prune to peer gs.send_graft_prune( HashMap::new(), - vec![(peers[0].clone(), vec![topics[0].clone()])] + vec![(peers[0], 
vec![topics[0].clone()])] .into_iter() .collect(), HashSet::new(), @@ -1921,7 +1911,7 @@ mod tests { gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[0]); gs.send_graft_prune( HashMap::new(), - vec![(peers[0].clone(), vec![topics[0].clone()])] + vec![(peers[0], vec![topics[0].clone()])] .into_iter() .collect(), HashSet::new(), @@ -2083,7 +2073,7 @@ mod tests { .gs_config(config) .create_network(); - let _ = gs.unsubscribe(&Topic::new(topic.clone())); + let _ = gs.unsubscribe(&Topic::new(topic)); assert_eq!( count_control_msgs(&gs, |_, m| match m { @@ -2177,7 +2167,7 @@ mod tests { ) .unwrap(); - let msg_id = gs.config.message_id(&message); + let msg_id = gs.config.message_id(message); let config: GossipsubConfig = GossipsubConfig::default(); assert_eq!( @@ -2220,12 +2210,9 @@ mod tests { gs.emit_gossip(); // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); - let msg_id = gs.config.message_id(&message); + let msg_id = gs.config.message_id(message); //check that exactly config.gossip_lazy() many gossip messages were sent. assert_eq!( @@ -2269,12 +2256,9 @@ mod tests { gs.emit_gossip(); // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); - let msg_id = gs.config.message_id(&message); + let msg_id = gs.config.message_id(message); //check that exactly config.gossip_lazy() many gossip messages were sent. assert_eq!( count_control_msgs(&gs, |_, action| match action { @@ -2343,7 +2327,7 @@ mod tests { .peer_no(n) .topics(vec!["test".into()]) .to_subscribe(true) - .gs_config(config.clone()) + .gs_config(config) .create_network(); // graft all the peers @@ -2355,7 +2339,7 @@ mod tests { let mut outbound = HashSet::new(); for _ in 0..m { let peer = add_peer(&mut gs, &topics, true, false); - outbound.insert(peer.clone()); + outbound.insert(peer); gs.handle_graft(&peer, topics.clone()); } @@ -2466,7 +2450,7 @@ mod tests { .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) - .gs_config(config.clone()) + .gs_config(config) .scoring(Some(( PeerScoreParams::default(), PeerScoreThresholds::default(), @@ -2524,7 +2508,7 @@ mod tests { &peers[0], vec![( topics[0].clone(), - px.clone(), + px, Some(config.prune_backoff().as_secs()), )], ); @@ -2555,7 +2539,7 @@ mod tests { .peer_no(3) .topics(vec!["test".into()]) .to_subscribe(true) - .gs_config(config.clone()) + .gs_config(config) .explicit(0) .outbound(0) .scoring(Some(( @@ -2570,7 +2554,7 @@ mod tests { // Prune second peer gs.send_graft_prune( HashMap::new(), - vec![(peers[1].clone(), vec![topics[0].clone()])] + vec![(peers[1], vec![topics[0].clone()])] .into_iter() .collect(), HashSet::new(), @@ -2606,7 +2590,7 @@ mod tests { .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) - .gs_config(config.clone()) + .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); @@ -2640,12 +2624,9 @@ mod tests { gs.handle_received_message(raw_message.clone(), &PeerId::random()); // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); - let msg_id = gs.config.message_id(&message); + let msg_id = gs.config.message_id(message); // Emit 
gossip gs.emit_gossip(); @@ -2682,7 +2663,7 @@ mod tests { .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) - .gs_config(config.clone()) + .gs_config(config) .explicit(0) .outbound(0) .scoring(Some((peer_score_params, peer_score_thresholds))) @@ -2718,12 +2699,9 @@ mod tests { gs.handle_received_message(raw_message.clone(), &PeerId::random()); // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); - let msg_id = gs.config.message_id(&message); + let msg_id = gs.config.message_id(message); gs.handle_iwant(&p1, vec![msg_id.clone()]); gs.handle_iwant(&p2, vec![msg_id.clone()]); @@ -2737,7 +2715,7 @@ mod tests { if let GossipsubHandlerIn::Message(ref m) = **event { let event = proto_to_message(m); for c in &event.messages { - collected_messages.push((peer_id.clone(), c.clone())) + collected_messages.push((*peer_id, c.clone())) } } collected_messages @@ -2775,7 +2753,7 @@ mod tests { .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) - .gs_config(config.clone()) + .gs_config(config) .explicit(0) .outbound(0) .scoring(Some((peer_score_params, peer_score_thresholds))) @@ -2810,12 +2788,9 @@ mod tests { }; // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); - let msg_id = gs.config.message_id(&message); + let msg_id = gs.config.message_id(message); gs.handle_ihave(&p1, vec![(topics[0].clone(), vec![msg_id.clone()])]); gs.handle_ihave(&p2, vec![(topics[0].clone(), vec![msg_id.clone()])]); @@ -2849,7 +2824,7 @@ mod tests { //build mesh with no peers and no subscribed topics let (mut gs, _, _) = inject_nodes1() - .gs_config(config.clone()) + .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); @@ -2885,7 +2860,7 @@ mod tests { if let GossipsubHandlerIn::Message(ref m) = **event { let event = proto_to_message(m); for s in &event.messages { - collected_publish.push((peer_id.clone(), s.clone())); + collected_publish.push((*peer_id, s.clone())); } } collected_publish @@ -2909,7 +2884,7 @@ mod tests { //build mesh with no peers let (mut gs, _, topics) = inject_nodes1() .topics(vec!["test".into()]) - .gs_config(config.clone()) + .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); @@ -2941,7 +2916,7 @@ mod tests { if let GossipsubHandlerIn::Message(ref m) = **event { let event = proto_to_message(m); for s in &event.messages { - collected_publish.push((peer_id.clone(), s.clone())); + collected_publish.push((*peer_id, s.clone())); } } collected_publish @@ -3023,16 +2998,10 @@ mod tests { }; // Transform the inbound message - let message2 = &gs - .data_transform - .inbound_transform(raw_message2.clone()) - .unwrap(); + let message2 = &gs.data_transform.inbound_transform(raw_message2).unwrap(); // Transform the inbound message - let message4 = &gs - .data_transform - .inbound_transform(raw_message4.clone()) - .unwrap(); + let message4 = &gs.data_transform.inbound_transform(raw_message4).unwrap(); let subscription = GossipsubSubscription { action: GossipsubSubscriptionAction::Subscribe, @@ -3041,7 +3010,7 @@ mod tests { let control_action = GossipsubControlAction::IHave { topic_hash: topics[0].clone(), - message_ids: vec![config.message_id(&message2)], + message_ids: 
vec![config.message_id(message2)], }; //clear events @@ -3049,7 +3018,7 @@ mod tests { //receive from p1 gs.inject_event( - p1.clone(), + p1, ConnectionId::new(0), HandlerEvent::Message { rpc: GossipsubRpc { @@ -3073,17 +3042,17 @@ mod tests { let control_action = GossipsubControlAction::IHave { topic_hash: topics[0].clone(), - message_ids: vec![config.message_id(&message4)], + message_ids: vec![config.message_id(message4)], }; //receive from p2 gs.inject_event( - p2.clone(), + p2, ConnectionId::new(0), HandlerEvent::Message { rpc: GossipsubRpc { messages: vec![raw_message3], - subscriptions: vec![subscription.clone()], + subscriptions: vec![subscription], control_msgs: vec![control_action], }, invalid_messages: Vec::new(), @@ -3127,7 +3096,7 @@ mod tests { &peers[0], vec![( topics[0].clone(), - px.clone(), + px, Some(config.prune_backoff().as_secs()), )], ); @@ -3152,7 +3121,7 @@ mod tests { &peers[1], vec![( topics[0].clone(), - px.clone(), + px, Some(config.prune_backoff().as_secs()), )], ); @@ -3236,7 +3205,7 @@ mod tests { topic_params.topic_weight = 0.7; peer_score_params .topics - .insert(topic_hash.clone(), topic_params.clone()); + .insert(topic_hash, topic_params.clone()); let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with one peer @@ -3244,7 +3213,7 @@ mod tests { .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) - .gs_config(config.clone()) + .gs_config(config) .explicit(0) .outbound(0) .scoring(Some((peer_score_params, peer_score_thresholds))) @@ -3319,7 +3288,7 @@ mod tests { topic_params.topic_weight = 0.7; peer_score_params .topics - .insert(topic_hash.clone(), topic_params.clone()); + .insert(topic_hash, topic_params.clone()); let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with one peer @@ -3327,7 +3296,7 @@ mod tests { .peer_no(2) .topics(vec!["test".into()]) .to_subscribe(true) - .gs_config(config.clone()) + .gs_config(config) .explicit(0) .outbound(0) .scoring(Some((peer_score_params, peer_score_thresholds))) @@ -3342,7 +3311,7 @@ mod tests { //peer 0 delivers message first deliver_message(&mut gs, 0, m1.clone()); //peer 1 delivers message second - deliver_message(&mut gs, 1, m1.clone()); + deliver_message(&mut gs, 1, m1); assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), @@ -3417,9 +3386,7 @@ mod tests { topic_params.mesh_message_deliveries_activation = Duration::from_secs(1); topic_params.mesh_message_deliveries_window = Duration::from_millis(100); topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); + peer_score_params.topics.insert(topic_hash, topic_params); let peer_score_thresholds = PeerScoreThresholds::default(); //build mesh with two peers @@ -3427,7 +3394,7 @@ mod tests { .peer_no(2) .topics(vec!["test".into()]) .to_subscribe(true) - .gs_config(config.clone()) + .gs_config(config) .explicit(0) .outbound(0) .scoring(Some((peer_score_params, peer_score_thresholds))) @@ -3517,9 +3484,7 @@ mod tests { topic_params.mesh_failure_penalty_weight = -3.0; topic_params.mesh_failure_penalty_decay = 0.95; topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); + peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3528,7 +3493,7 @@ mod tests { .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) - .gs_config(config.clone()) + .gs_config(config) 
.explicit(0) .outbound(0) .scoring(Some((peer_score_params, peer_score_thresholds))) @@ -3610,9 +3575,7 @@ mod tests { topic_params.invalid_message_deliveries_weight = -2.0; topic_params.invalid_message_deliveries_decay = 0.9; topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); + peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3637,13 +3600,13 @@ mod tests { deliver_message(&mut gs, 0, m1.clone()); // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); //message m1 gets validated gs.report_message_validation_result( - &config.message_id(&message1), + &config.message_id(message1), &peers[0], MessageAcceptance::Accept, ) @@ -3669,9 +3632,7 @@ mod tests { topic_params.invalid_message_deliveries_weight = -2.0; topic_params.invalid_message_deliveries_decay = 0.9; topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); + peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3680,7 +3641,7 @@ mod tests { .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) - .gs_config(config.clone()) + .gs_config(config) .explicit(0) .outbound(0) .scoring(Some((peer_score_params, peer_score_thresholds))) @@ -3692,7 +3653,7 @@ mod tests { let m = random_message(&mut seq, &topics); gs.inject_event( - peers[0].clone(), + peers[0], ConnectionId::new(0), HandlerEvent::Message { rpc: GossipsubRpc { @@ -3727,9 +3688,7 @@ mod tests { topic_params.invalid_message_deliveries_weight = -2.0; topic_params.invalid_message_deliveries_decay = 0.9; topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); + peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3738,7 +3697,7 @@ mod tests { .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) - .gs_config(config.clone()) + .gs_config(config) .explicit(0) .outbound(0) .scoring(Some((peer_score_params, peer_score_thresholds))) @@ -3751,9 +3710,9 @@ mod tests { //peer 0 delivers invalid message from self let mut m = random_message(&mut seq, &topics); - m.source = Some(gs.publish_config.get_own_id().unwrap().clone()); + m.source = Some(*gs.publish_config.get_own_id().unwrap()); - deliver_message(&mut gs, 0, m.clone()); + deliver_message(&mut gs, 0, m); assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), -2.0 * 0.7 @@ -3777,9 +3736,7 @@ mod tests { topic_params.invalid_message_deliveries_weight = -2.0; topic_params.invalid_message_deliveries_decay = 0.9; topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); + peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3806,11 +3763,11 @@ mod tests { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + let 
message1 = &gs.data_transform.inbound_transform(m1).unwrap(); //message m1 gets ignored gs.report_message_validation_result( - &config.message_id(&message1), + &config.message_id(message1), &peers[0], MessageAcceptance::Ignore, ) @@ -3836,9 +3793,7 @@ mod tests { topic_params.invalid_message_deliveries_weight = -2.0; topic_params.invalid_message_deliveries_decay = 0.9; topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); + peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3865,11 +3820,11 @@ mod tests { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); //message m1 gets rejected gs.report_message_validation_result( - &config.message_id(&message1), + &config.message_id(message1), &peers[0], MessageAcceptance::Reject, ) @@ -3898,9 +3853,7 @@ mod tests { topic_params.invalid_message_deliveries_weight = -2.0; topic_params.invalid_message_deliveries_decay = 0.9; topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); + peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3928,14 +3881,14 @@ mod tests { let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); //peer 1 delivers same message - deliver_message(&mut gs, 1, m1.clone()); + deliver_message(&mut gs, 1, m1); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 0.0); //message m1 gets rejected gs.report_message_validation_result( - &config.message_id(&message1), + &config.message_id(message1), &peers[0], MessageAcceptance::Reject, ) @@ -3968,9 +3921,7 @@ mod tests { topic_params.invalid_message_deliveries_weight = -2.0; topic_params.invalid_message_deliveries_decay = 0.9; topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); + peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3999,30 +3950,30 @@ mod tests { deliver_message(&mut gs, 0, m3.clone()); // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); // Transform the inbound message - let message2 = &gs.data_transform.inbound_transform(m2.clone()).unwrap(); + let message2 = &gs.data_transform.inbound_transform(m2).unwrap(); // Transform the inbound message - let message3 = &gs.data_transform.inbound_transform(m3.clone()).unwrap(); + let message3 = &gs.data_transform.inbound_transform(m3).unwrap(); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); //messages gets rejected gs.report_message_validation_result( - &config.message_id(&message1), + &config.message_id(message1), &peers[0], MessageAcceptance::Reject, ) .unwrap(); gs.report_message_validation_result( - &config.message_id(&message2), + &config.message_id(message2), &peers[0], MessageAcceptance::Reject, ) .unwrap(); gs.report_message_validation_result( - 
&config.message_id(&message3), + &config.message_id(message3), &peers[0], MessageAcceptance::Reject, ) @@ -4052,9 +4003,7 @@ mod tests { topic_params.invalid_message_deliveries_weight = -2.0; topic_params.invalid_message_deliveries_decay = 0.9; topic_params.topic_weight = 0.7; - peer_score_params - .topics - .insert(topic_hash.clone(), topic_params.clone()); + peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -4079,12 +4028,12 @@ mod tests { deliver_message(&mut gs, 0, m1.clone()); // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); //message m1 gets rejected gs.report_message_validation_result( - &config.message_id(&message1), + &config.message_id(message1), &peers[0], MessageAcceptance::Reject, ) @@ -4226,7 +4175,7 @@ mod tests { &peers[0], &ConnectionId::new(0), &ConnectedPoint::Dialer { - address: addr.clone(), + address: addr, role_override: Endpoint::Dialer, }, None, @@ -4278,7 +4227,7 @@ mod tests { gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[i]); gs.send_graft_prune( HashMap::new(), - vec![(peers[i].clone(), vec![topics[0].clone()])] + vec![(peers[i], vec![topics[0].clone()])] .into_iter() .collect(), HashSet::new(), @@ -4456,9 +4405,9 @@ mod tests { // Transform the inbound message let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); - let id = config.message_id(&message1); + let id = config.message_id(message1); - gs.handle_received_message(m1.clone(), &PeerId::random()); + gs.handle_received_message(m1, &PeerId::random()); //clear events gs.events.clear(); @@ -4520,7 +4469,7 @@ mod tests { gs.handle_ihave( &peer, - vec![(topics[0].clone(), vec![config.message_id(&message)])], + vec![(topics[0].clone(), vec![config.message_id(message)])], ); } @@ -4564,7 +4513,7 @@ mod tests { gs.handle_ihave( &peer, - vec![(topics[0].clone(), vec![config.message_id(&message)])], + vec![(topics[0].clone(), vec![config.message_id(message)])], ); } @@ -4610,31 +4559,19 @@ mod tests { let mut seq = 0; let message_ids: Vec<_> = (0..20) .map(|_| random_message(&mut seq, &topics)) - .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) + .map(|msg| gs.data_transform.inbound_transform(msg).unwrap()) .map(|msg| config.message_id(&msg)) .collect(); //peer sends us three ihaves + gs.handle_ihave(&peer, vec![(topics[0].clone(), message_ids[0..8].to_vec())]); gs.handle_ihave( &peer, - vec![( - topics[0].clone(), - message_ids[0..8].iter().cloned().collect(), - )], - ); - gs.handle_ihave( - &peer, - vec![( - topics[0].clone(), - message_ids[0..12].iter().cloned().collect(), - )], + vec![(topics[0].clone(), message_ids[0..12].to_vec())], ); gs.handle_ihave( &peer, - vec![( - topics[0].clone(), - message_ids[0..20].iter().cloned().collect(), - )], + vec![(topics[0].clone(), message_ids[0..20].to_vec())], ); let first_twelve: HashSet<_> = message_ids.iter().take(12).collect(); @@ -4661,10 +4598,7 @@ mod tests { gs.heartbeat(); gs.handle_ihave( &peer, - vec![( - topics[0].clone(), - message_ids[10..20].iter().cloned().collect(), - )], + vec![(topics[0].clone(), message_ids[10..20].to_vec())], ); //we sent 20 iwant messages @@ -4695,7 +4629,7 @@ mod tests { .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) - .gs_config(config.clone()) 
+ .gs_config(config) .create_network(); //graft to all peers to really fill the mesh with all the peers @@ -4817,14 +4751,14 @@ mod tests { peer, vec![( topics[0].clone(), - vec![config.message_id(&message1), config.message_id(&message2)], + vec![config.message_id(message1), config.message_id(message2)], )], ); } // the peers send us all the first message ids in time for (index, peer) in other_peers.iter().enumerate() { - gs.handle_received_message(first_messages[index].clone(), &peer); + gs.handle_received_message(first_messages[index].clone(), peer); } // now we do a heartbeat no penalization should have been applied yet @@ -4836,7 +4770,7 @@ mod tests { // receive the first twenty of the other peers then send their response for (index, peer) in other_peers.iter().enumerate().take(20) { - gs.handle_received_message(second_messages[index].clone(), &peer); + gs.handle_received_message(second_messages[index].clone(), peer); } // sleep for the promise duration @@ -4848,7 +4782,7 @@ mod tests { // now we get the second messages from the last 80 peers. for (index, peer) in other_peers.iter().enumerate() { if index > 19 { - gs.handle_received_message(second_messages[index].clone(), &peer); + gs.handle_received_message(second_messages[index].clone(), peer); } } @@ -5055,7 +4989,7 @@ mod tests { //prune the peer gs.send_graft_prune( HashMap::new(), - vec![(p1.clone(), topics.clone())].into_iter().collect(), + vec![(p1, topics.clone())].into_iter().collect(), HashSet::new(), ); @@ -5094,9 +5028,7 @@ mod tests { //prune only mesh node gs.send_graft_prune( HashMap::new(), - vec![(peers[0].clone(), topics.clone())] - .into_iter() - .collect(), + vec![(peers[0], topics.clone())].into_iter().collect(), HashSet::new(), ); @@ -5311,7 +5243,7 @@ mod tests { }); for message in messages_to_p1 { gs1.inject_event( - p2.clone(), + p2, connection_id, HandlerEvent::Message { rpc: proto_to_message(&message), diff --git a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs index 6d1c0465a76..ad19eaf08dd 100644 --- a/protocols/gossipsub/src/mcache.rs +++ b/protocols/gossipsub/src/mcache.rs @@ -270,7 +270,7 @@ mod tests { fn test_put_get_one() { let mut mc = new_cache(10, 15); - let topic1_hash = Topic::new("topic1").hash().clone(); + let topic1_hash = Topic::new("topic1").hash(); let (id, m) = gen_testm(10, topic1_hash); mc.put(&id, m.clone()); @@ -294,10 +294,10 @@ mod tests { fn test_get_wrong() { let mut mc = new_cache(10, 15); - let topic1_hash = Topic::new("topic1").hash().clone(); + let topic1_hash = Topic::new("topic1").hash(); let (id, m) = gen_testm(10, topic1_hash); - mc.put(&id, m.clone()); + mc.put(&id, m); // Try to get an incorrect ID let wrong_id = MessageId::new(b"wrongid"); @@ -321,7 +321,7 @@ mod tests { fn test_shift() { let mut mc = new_cache(1, 5); - let topic1_hash = Topic::new("topic1").hash().clone(); + let topic1_hash = Topic::new("topic1").hash(); // Build the message for i in 0..10 { @@ -332,7 +332,7 @@ mod tests { mc.shift(); // Ensure the shift occurred - assert!(mc.history[0].len() == 0); + assert!(mc.history[0].is_empty()); assert!(mc.history[1].len() == 10); // Make sure no messages deleted @@ -344,7 +344,7 @@ mod tests { fn test_empty_shift() { let mut mc = new_cache(1, 5); - let topic1_hash = Topic::new("topic1").hash().clone(); + let topic1_hash = Topic::new("topic1").hash(); // Build the message for i in 0..10 { @@ -355,14 +355,14 @@ mod tests { mc.shift(); // Ensure the shift occurred - assert!(mc.history[0].len() == 0); + assert!(mc.history[0].is_empty()); 
assert!(mc.history[1].len() == 10); mc.shift(); assert!(mc.history[2].len() == 10); - assert!(mc.history[1].len() == 0); - assert!(mc.history[0].len() == 0); + assert!(mc.history[1].is_empty()); + assert!(mc.history[0].is_empty()); } #[test] @@ -370,7 +370,7 @@ mod tests { fn test_remove_last_from_shift() { let mut mc = new_cache(4, 5); - let topic1_hash = Topic::new("topic1").hash().clone(); + let topic1_hash = Topic::new("topic1").hash(); // Build the message for i in 0..10 { diff --git a/protocols/gossipsub/src/peer_score/tests.rs b/protocols/gossipsub/src/peer_score/tests.rs index 4ede29b7c51..7f2658fed42 100644 --- a/protocols/gossipsub/src/peer_score/tests.rs +++ b/protocols/gossipsub/src/peer_score/tests.rs @@ -29,7 +29,7 @@ fn within_variance(value: f64, expected: f64, variance: f64) -> bool { if expected >= 0.0 { return value > expected * (1.0 - variance) && value < expected * (1.0 + variance); } - return value > expected * (1.0 + variance) && value < expected * (1.0 - variance); + value > expected * (1.0 + variance) && value < expected * (1.0 - variance) } // generates a random gossipsub message with sequence number i @@ -45,7 +45,7 @@ fn make_test_message(seq: u64) -> (MessageId, RawGossipsubMessage) { }; let message = GossipsubMessage { - source: raw_message.source.clone(), + source: raw_message.source, data: raw_message.data.clone(), sequence_number: raw_message.sequence_number, topic: raw_message.topic.clone(), @@ -62,7 +62,7 @@ fn default_message_id() -> fn(&GossipsubMessage) -> MessageId { let mut source_string = if let Some(peer_id) = message.source.as_ref() { peer_id.to_base58() } else { - PeerId::from_bytes(&vec![0, 1, 0]) + PeerId::from_bytes(&[0, 1, 0]) .expect("Valid peer id") .to_base58() }; @@ -91,7 +91,7 @@ fn test_score_time_in_mesh() { let mut peer_score = PeerScore::new(params); // Peer score should start at 0 - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(peer_id); let score = peer_score.score(&peer_id); assert!( @@ -137,7 +137,7 @@ fn test_score_time_in_mesh_cap() { let mut peer_score = PeerScore::new(params); // Peer score should start at 0 - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(peer_id); let score = peer_score.score(&peer_id); assert!( @@ -186,7 +186,7 @@ fn test_score_first_message_deliveries() { let mut peer_score = PeerScore::new(params); // Peer score should start at 0 - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(peer_id); peer_score.graft(&peer_id, topic); // deliver a bunch of messages from the peer @@ -230,7 +230,7 @@ fn test_score_first_message_deliveries_cap() { let mut peer_score = PeerScore::new(params); // Peer score should start at 0 - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(peer_id); peer_score.graft(&peer_id, topic); // deliver a bunch of messages from the peer @@ -271,7 +271,7 @@ fn test_score_first_message_deliveries_decay() { params.topics.insert(topic_hash, topic_params.clone()); let peer_id = PeerId::random(); let mut peer_score = PeerScore::new(params); - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(peer_id); peer_score.graft(&peer_id, topic); // deliver a bunch of messages from the peer @@ -341,11 +341,11 @@ fn test_score_mesh_message_deliveries() { let peer_id_b = PeerId::random(); let peer_id_c = PeerId::random(); - let peers = vec![peer_id_a.clone(), peer_id_b.clone(), peer_id_c.clone()]; + let peers = vec![peer_id_a, peer_id_b, peer_id_c]; for peer_id in &peers { - peer_score.add_peer(peer_id.clone()); - peer_score.graft(&peer_id, 
topic.clone()); + peer_score.add_peer(*peer_id); + peer_score.graft(peer_id, topic.clone()); } // assert that nobody has been penalized yet for not delivering messages before activation time @@ -436,8 +436,8 @@ fn test_score_mesh_message_deliveries_decay() { let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); - peer_score.add_peer(peer_id_a.clone()); - peer_score.graft(&peer_id_a, topic.clone()); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); // deliver a bunch of messages from peer A let messages = 100; @@ -505,11 +505,11 @@ fn test_score_mesh_failure_penalty() { let peer_id_a = PeerId::random(); let peer_id_b = PeerId::random(); - let peers = vec![peer_id_a.clone(), peer_id_b.clone()]; + let peers = vec![peer_id_a, peer_id_b]; for peer_id in &peers { - peer_score.add_peer(peer_id.clone()); - peer_score.graft(&peer_id, topic.clone()); + peer_score.add_peer(*peer_id); + peer_score.graft(peer_id, topic.clone()); } // deliver a bunch of messages from peer A @@ -581,8 +581,8 @@ fn test_score_invalid_message_deliveries() { let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); - peer_score.add_peer(peer_id_a.clone()); - peer_score.graft(&peer_id_a, topic.clone()); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); // reject a bunch of messages from peer A let messages = 100; @@ -627,8 +627,8 @@ fn test_score_invalid_message_deliveris_decay() { let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); - peer_score.add_peer(peer_id_a.clone()); - peer_score.graft(&peer_id_a, topic.clone()); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); // reject a bunch of messages from peer A let messages = 100; @@ -677,16 +677,16 @@ fn test_score_reject_message_deliveries() { topic_params.invalid_message_deliveries_weight = -1.0; topic_params.invalid_message_deliveries_decay = 1.0; - params.topics.insert(topic_hash, topic_params.clone()); + params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); let peer_id_b = PeerId::random(); - let peers = vec![peer_id_a.clone(), peer_id_b.clone()]; + let peers = vec![peer_id_a, peer_id_b]; for peer_id in &peers { - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(*peer_id); } let (id, msg) = make_test_message(1); @@ -790,12 +790,12 @@ fn test_application_score() { topic_params.invalid_message_deliveries_weight = 0.0; topic_params.invalid_message_deliveries_decay = 1.0; - params.topics.insert(topic_hash, topic_params.clone()); + params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); - peer_score.add_peer(peer_id_a.clone()); - peer_score.graft(&peer_id_a, topic.clone()); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); let messages = 100; for i in -100..messages { @@ -828,7 +828,7 @@ fn test_score_ip_colocation() { topic_params.time_in_mesh_quantum = Duration::from_secs(1); topic_params.invalid_message_deliveries_weight = 0.0; - params.topics.insert(topic_hash, topic_params.clone()); + params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); @@ -836,15 +836,10 @@ fn test_score_ip_colocation() { let peer_id_c = PeerId::random(); let peer_id_d = PeerId::random(); - let peers = vec![ - peer_id_a.clone(), - peer_id_b.clone(), - peer_id_c.clone(), - peer_id_d.clone(), - ]; + let peers = 
vec![peer_id_a, peer_id_b, peer_id_c, peer_id_d]; for peer_id in &peers { - peer_score.add_peer(peer_id.clone()); - peer_score.graft(&peer_id, topic.clone()); + peer_score.add_peer(*peer_id); + peer_score.graft(peer_id, topic.clone()); } // peerA should have no penalty, but B, C, and D should be penalized for sharing an IP @@ -893,7 +888,7 @@ fn test_score_behaviour_penality() { topic_params.time_in_mesh_quantum = Duration::from_secs(1); topic_params.invalid_message_deliveries_weight = 0.0; - params.topics.insert(topic_hash, topic_params.clone()); + params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); @@ -905,7 +900,7 @@ fn test_score_behaviour_penality() { assert_eq!(score_a, 0.0, "Peer A should be unaffected"); // add the peer and test penalties - peer_score.add_peer(peer_id_a.clone()); + peer_score.add_peer(peer_id_a); assert_eq!(score_a, 0.0, "Peer A should be unaffected"); peer_score.add_penalty(&peer_id_a, 1); @@ -942,12 +937,12 @@ fn test_score_retention() { topic_params.first_message_deliveries_weight = 0.0; topic_params.time_in_mesh_weight = 0.0; - params.topics.insert(topic_hash, topic_params.clone()); + params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); let peer_id_a = PeerId::random(); - peer_score.add_peer(peer_id_a.clone()); - peer_score.graft(&peer_id_a, topic.clone()); + peer_score.add_peer(peer_id_a); + peer_score.graft(&peer_id_a, topic); peer_score.set_application_score(&peer_id_a, app_score_value); diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 3695bcb9f97..c6aa2bdd56b 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -582,11 +582,8 @@ mod tests { // generate an arbitrary GossipsubMessage using the behaviour signing functionality let config = GossipsubConfig::default(); - let gs: Gossipsub = Gossipsub::new( - crate::MessageAuthenticity::Signed(keypair.0.clone()), - config, - ) - .unwrap(); + let gs: Gossipsub = + Gossipsub::new(crate::MessageAuthenticity::Signed(keypair.0), config).unwrap(); let data = (0..g.gen_range(10..10024u32)) .map(|_| u8::arbitrary(g)) .collect::>(); @@ -602,8 +599,7 @@ mod tests { fn arbitrary(g: &mut Gen) -> Self { let topic_string: String = (0..g.gen_range(20..1024u32)) .map(|_| char::arbitrary(g)) - .collect::() - .into(); + .collect::(); TopicId(Topic::new(topic_string).into()) } } @@ -654,7 +650,7 @@ mod tests { let mut codec = GossipsubCodec::new(codec::UviBytes::default(), ValidationMode::Strict); let mut buf = BytesMut::new(); - codec.encode(rpc.clone().into_protobuf(), &mut buf).unwrap(); + codec.encode(rpc.into_protobuf(), &mut buf).unwrap(); let decoded_rpc = codec.decode(&mut buf).unwrap().unwrap(); // mark as validated as its a published message match decoded_rpc { diff --git a/protocols/gossipsub/src/rpc_proto.rs b/protocols/gossipsub/src/rpc_proto.rs index 3903318fbfb..7b952ef0926 100644 --- a/protocols/gossipsub/src/rpc_proto.rs +++ b/protocols/gossipsub/src/rpc_proto.rs @@ -75,7 +75,7 @@ mod test { assert_eq!(new_message.topic, topic1.clone().into_string()); let new_message = super::Message::decode(&old_message2b[..]).unwrap(); - assert_eq!(new_message.topic, topic2.clone().into_string()); + assert_eq!(new_message.topic, topic2.into_string()); let old_message = compat_proto::Message::decode(&new_message1b[..]).unwrap(); assert_eq!(old_message.topic_ids, vec![topic1.into_string()]); diff --git 
a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index 960d0cb8a54..600a02c7a64 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -232,15 +232,15 @@ pub mod regex { let subscriptions = vec![ GossipsubSubscription { action: Subscribe, - topic_hash: t1.clone(), + topic_hash: t1, }, GossipsubSubscription { action: Subscribe, - topic_hash: t2.clone(), + topic_hash: t2, }, GossipsubSubscription { action: Subscribe, - topic_hash: t3.clone(), + topic_hash: t3, }, ]; @@ -277,7 +277,7 @@ mod test { }, GossipsubSubscription { action: Subscribe, - topic_hash: t2.clone(), + topic_hash: t2, }, GossipsubSubscription { action: Subscribe, @@ -285,7 +285,7 @@ mod test { }, GossipsubSubscription { action: Unsubscribe, - topic_hash: t1.clone(), + topic_hash: t1, }, ]; @@ -306,11 +306,11 @@ mod test { let subscriptions = vec![ GossipsubSubscription { action: Subscribe, - topic_hash: t1.clone(), + topic_hash: t1, }, GossipsubSubscription { action: Subscribe, - topic_hash: t2.clone(), + topic_hash: t2, }, ]; @@ -343,7 +343,7 @@ mod test { }, GossipsubSubscription { action: Subscribe, - topic_hash: t1.clone(), + topic_hash: t1, }, ]; @@ -434,11 +434,11 @@ mod test { let subscriptions = vec![ GossipsubSubscription { action: Subscribe, - topic_hash: t1.clone(), + topic_hash: t1, }, GossipsubSubscription { action: Subscribe, - topic_hash: t2.clone(), + topic_hash: t2, }, ]; diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index 1e7febbdaae..43ad944dccb 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -170,7 +170,7 @@ fn build_node() -> (Multiaddr, Swarm) { .validation_mode(ValidationMode::Permissive) .build() .unwrap(); - let behaviour = Gossipsub::new(MessageAuthenticity::Author(peer_id.clone()), config).unwrap(); + let behaviour = Gossipsub::new(MessageAuthenticity::Author(peer_id), config).unwrap(); let mut swarm = Swarm::new(transport, behaviour, peer_id); let port = 1 + random::(); @@ -187,7 +187,7 @@ fn multi_hop_propagation() { let _ = env_logger::try_init(); fn prop(num_nodes: u8, seed: u64) -> TestResult { - if num_nodes < 2 || num_nodes > 50 { + if !(2..=50).contains(&num_nodes) { return TestResult::discard(); } diff --git a/protocols/identify/src/identify.rs b/protocols/identify/src/identify.rs index e3839624180..76a331b728c 100644 --- a/protocols/identify/src/identify.rs +++ b/protocols/identify/src/identify.rs @@ -835,7 +835,7 @@ mod tests { let addr_without_peer_id: Multiaddr = addr.clone(); let mut addr_with_other_peer_id = addr.clone(); - addr.push(Protocol::P2p(peer_id.clone().into())); + addr.push(Protocol::P2p(peer_id.into())); addr_with_other_peer_id.push(Protocol::P2p(other_peer_id.into())); assert!(multiaddr_matches_peer_id(&addr, &peer_id)); diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index a30ff2d4311..b7e281af136 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -63,8 +63,8 @@ fn build_node_with_config(cfg: KademliaConfig) -> (Multiaddr, TestSwarm) { .boxed(); let local_id = local_public_key.to_peer_id(); - let store = MemoryStore::new(local_id.clone()); - let behaviour = Kademlia::with_config(local_id.clone(), store, cfg.clone()); + let store = MemoryStore::new(local_id); + let behaviour = Kademlia::with_config(local_id, store, cfg); let mut swarm = Swarm::new(transport, behaviour, local_id); @@ -129,7 +129,7 @@ 
fn build_fully_connected_nodes_with_config( for (_addr, swarm) in swarms.iter_mut() { for (addr, peer) in &swarm_addr_and_peer_id { - swarm.behaviour_mut().add_address(&peer, addr.clone()); + swarm.behaviour_mut().add_address(peer, addr.clone()); } } @@ -210,7 +210,7 @@ fn bootstrap() { let mut known = HashSet::new(); for b in swarm.behaviour_mut().kbuckets.iter() { for e in b.iter() { - known.insert(e.node.key.preimage().clone()); + known.insert(*e.node.key.preimage()); } } assert_eq!(expected_known, known); @@ -266,7 +266,7 @@ fn query_iter() { } // Set up expectations. - let expected_swarm_id = swarm_ids[0].clone(); + let expected_swarm_id = swarm_ids[0]; let expected_peer_ids: Vec<_> = swarm_ids.iter().skip(1).cloned().collect(); let mut expected_distances = distances(&search_target_key, expected_peer_ids.clone()); expected_distances.sort(); @@ -710,7 +710,7 @@ fn put_record() { ); assert_eq!(swarms[0].behaviour_mut().queries.size(), 0); for k in records.keys() { - swarms[0].behaviour_mut().store.remove(&k); + swarms[0].behaviour_mut().store.remove(k); } assert_eq!(swarms[0].behaviour_mut().store.records().count(), 0); // All records have been republished, thus the test is complete. @@ -740,7 +740,7 @@ fn get_record() { // Let first peer know of second peer and second peer know of third peer. for i in 0..2 { let (peer_id, address) = ( - Swarm::local_peer_id(&swarms[i + 1].1).clone(), + *Swarm::local_peer_id(&swarms[i + 1].1), swarms[i + 1].0.clone(), ); swarms[i].1.behaviour_mut().add_address(&peer_id, address); @@ -961,7 +961,7 @@ fn add_provider() { .skip(1) .filter_map(|swarm| { if swarm.behaviour().store.providers(&key).len() == 1 { - Some(Swarm::local_peer_id(&swarm).clone()) + Some(*Swarm::local_peer_id(swarm)) } else { None } @@ -1007,7 +1007,7 @@ fn add_provider() { keys.len() ); for k in &keys { - swarms[0].behaviour_mut().stop_providing(&k); + swarms[0].behaviour_mut().stop_providing(k); } assert_eq!(swarms[0].behaviour_mut().store.provided().count(), 0); // All records have been republished, thus the test is complete. @@ -1106,11 +1106,11 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { alice .1 .behaviour_mut() - .add_address(&trudy.1.local_peer_id(), trudy.0.clone()); + .add_address(trudy.1.local_peer_id(), trudy.0.clone()); alice .1 .behaviour_mut() - .add_address(&bob.1.local_peer_id(), bob.0.clone()); + .add_address(bob.1.local_peer_id(), bob.0.clone()); // Drop the swarm addresses. 
let (mut alice, mut bob, mut trudy) = (alice.1, bob.1, trudy.1); @@ -1169,12 +1169,12 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { assert_eq!( *records, vec![PeerRecord { - peer: Some(Swarm::local_peer_id(&trudy).clone()), + peer: Some(*Swarm::local_peer_id(&trudy)), record: record_trudy.clone(), }], ); } - i @ _ => panic!("Unexpected query info: {:?}", i), + i => panic!("Unexpected query info: {:?}", i), }); // Poll `alice` and `bob` expecting `alice` to return a successful query @@ -1211,11 +1211,11 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { assert_eq!(2, records.len()); assert!(records.contains(&PeerRecord { - peer: Some(Swarm::local_peer_id(&bob).clone()), + peer: Some(*Swarm::local_peer_id(&bob)), record: record_bob, })); assert!(records.contains(&PeerRecord { - peer: Some(Swarm::local_peer_id(&trudy).clone()), + peer: Some(*Swarm::local_peer_id(&trudy)), record: record_trudy, })); } @@ -1283,7 +1283,7 @@ fn network_behaviour_inject_address_change() { let old_address: Multiaddr = Protocol::Memory(1).into(); let new_address: Multiaddr = Protocol::Memory(2).into(); - let mut kademlia = Kademlia::new(local_peer_id.clone(), MemoryStore::new(local_peer_id)); + let mut kademlia = Kademlia::new(local_peer_id, MemoryStore::new(local_peer_id)); let endpoint = ConnectedPoint::Dialer { address: old_address.clone(), @@ -1301,8 +1301,8 @@ fn network_behaviour_inject_address_change() { // Mimick the connection handler confirming the protocol for // the test connection, so that the peer is added to the routing table. kademlia.inject_event( - remote_peer_id.clone(), - connection_id.clone(), + remote_peer_id, + connection_id, KademliaHandlerEvent::ProtocolConfirmed { endpoint }, ); @@ -1315,7 +1315,7 @@ fn network_behaviour_inject_address_change() { &remote_peer_id, &connection_id, &ConnectedPoint::Dialer { - address: old_address.clone(), + address: old_address, role_override: Endpoint::Dialer, }, &ConnectedPoint::Dialer { @@ -1325,7 +1325,7 @@ fn network_behaviour_inject_address_change() { ); assert_eq!( - vec![new_address.clone()], + vec![new_address], kademlia.addresses_of_peer(&remote_peer_id), ); } diff --git a/protocols/kad/src/jobs.rs b/protocols/kad/src/jobs.rs index b0fbe16aeb2..8855026e8d5 100644 --- a/protocols/kad/src/jobs.rs +++ b/protocols/kad/src/jobs.rs @@ -338,7 +338,7 @@ mod tests { let replicate_interval = Duration::from_secs(rng.gen_range(1..60)); let publish_interval = Some(replicate_interval * rng.gen_range(1..10)); let record_ttl = Some(Duration::from_secs(rng.gen_range(1..600))); - PutRecordJob::new(id.clone(), replicate_interval, publish_interval, record_ttl) + PutRecordJob::new(id, replicate_interval, publish_interval, record_ttl) } fn rand_add_provider_job() -> AddProviderJob { @@ -360,7 +360,7 @@ mod tests { fn prop(records: Vec) { let mut job = rand_put_record_job(); // Fill a record store. - let mut store = MemoryStore::new(job.local_id.clone()); + let mut store = MemoryStore::new(job.local_id); for r in records { let _ = store.put(r); } @@ -389,9 +389,9 @@ mod tests { let mut job = rand_add_provider_job(); let id = PeerId::random(); // Fill a record store. 
- let mut store = MemoryStore::new(id.clone()); + let mut store = MemoryStore::new(id); for mut r in records { - r.provider = id.clone(); + r.provider = id; let _ = store.add_provider(r); } diff --git a/protocols/kad/src/kbucket/bucket.rs b/protocols/kad/src/kbucket/bucket.rs index 9a6df073c56..a02b6f8abc1 100644 --- a/protocols/kad/src/kbucket/bucket.rs +++ b/protocols/kad/src/kbucket/bucket.rs @@ -533,7 +533,7 @@ mod tests { // All nodes before the first connected node must be disconnected and // in insertion order. Similarly, all remaining nodes must be connected // and in insertion order. - nodes == Vec::from(disconnected) && tail == Vec::from(connected) + disconnected == nodes && connected == tail } quickcheck(prop as fn(_) -> _); diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs index 87d452e8e40..b399f462195 100644 --- a/protocols/kad/src/query/peers/closest.rs +++ b/protocols/kad/src/query/peers/closest.rs @@ -593,7 +593,7 @@ mod tests { let mut num_failures = 0; 'finished: loop { - if expected.len() == 0 { + if expected.is_empty() { break; } // Split off the next up to `parallelism` expected peers. @@ -742,7 +742,7 @@ mod tests { } // Artificially advance the clock. - now = now + iter.config.peer_timeout; + now += iter.config.peer_timeout; // Advancing the iterator again should mark the first peer as unresponsive. let _ = iter.next(now); diff --git a/protocols/kad/src/query/peers/closest/disjoint.rs b/protocols/kad/src/query/peers/closest/disjoint.rs index df7ab70bedc..dbc6d34eb6c 100644 --- a/protocols/kad/src/query/peers/closest/disjoint.rs +++ b/protocols/kad/src/query/peers/closest/disjoint.rs @@ -465,7 +465,7 @@ mod tests { .map(Key::from) .collect::>(); - peers.sort_unstable_by(|a, b| target.distance(a).cmp(&target.distance(b))); + peers.sort_unstable_by_key(|a| target.distance(a)); peers.into_iter() }) @@ -640,7 +640,7 @@ mod tests { .map(|_| Key::from(PeerId::random())) .collect::>(); - pool.sort_unstable_by(|a, b| target.distance(a).cmp(&target.distance(b))); + pool.sort_unstable_by_key(|a| target.distance(a)); let known_closest_peers = pool.split_off(pool.len() - 3); @@ -650,11 +650,8 @@ mod tests { ..ClosestPeersIterConfig::default() }; - let mut peers_iter = ClosestDisjointPeersIter::with_config( - config.clone(), - target, - known_closest_peers.clone(), - ); + let mut peers_iter = + ClosestDisjointPeersIter::with_config(config, target, known_closest_peers.clone()); //////////////////////////////////////////////////////////////////////// // First round. @@ -681,19 +678,19 @@ mod tests { malicious_response_1 .clone() .into_iter() - .map(|k| k.preimage().clone()), + .map(|k| *k.preimage()), ); // Response from peer 2. peers_iter.on_success( known_closest_peers[1].preimage(), - response_2.clone().into_iter().map(|k| k.preimage().clone()), + response_2.clone().into_iter().map(|k| *k.preimage()), ); // Response from peer 3. peers_iter.on_success( known_closest_peers[2].preimage(), - response_3.clone().into_iter().map(|k| k.preimage().clone()), + response_3.clone().into_iter().map(|k| *k.preimage()), ); //////////////////////////////////////////////////////////////////////// @@ -752,7 +749,7 @@ mod tests { fn arbitrary(g: &mut Gen) -> Self { let mut peer_ids = random_peers(g.gen_range(K_VALUE.get()..200), g) .into_iter() - .map(|peer_id| (peer_id.clone(), Key::from(peer_id))) + .map(|peer_id| (peer_id, Key::from(peer_id))) .collect::>(); // Make each peer aware of its direct neighborhood. 
@@ -790,7 +787,7 @@ mod tests { .collect::>(); peer.known_peers.append(&mut random_peer_ids); - peer.known_peers = std::mem::replace(&mut peer.known_peers, vec![]) + peer.known_peers = std::mem::take(&mut peer.known_peers) // Deduplicate peer ids. .into_iter() .collect::>() @@ -804,7 +801,8 @@ mod tests { impl Graph { fn get_closest_peer(&self, target: &KeyBytes) -> PeerId { - self.0 + *self + .0 .iter() .map(|(peer_id, _)| (target.distance(&Key::from(*peer_id)), peer_id)) .fold(None, |acc, (distance_b, peer_id_b)| match acc { @@ -819,7 +817,6 @@ mod tests { }) .expect("Graph to have at least one peer.") .1 - .clone() } } @@ -892,8 +889,7 @@ mod tests { .take(K_VALUE.get()) .map(|(key, _peers)| Key::from(*key)) .collect::>(); - known_closest_peers - .sort_unstable_by(|a, b| target.distance(a).cmp(&target.distance(b))); + known_closest_peers.sort_unstable_by_key(|a| target.distance(a)); let cfg = ClosestPeersIterConfig { parallelism: parallelism.0, @@ -917,7 +913,7 @@ mod tests { target.clone(), known_closest_peers.clone(), )), - graph.clone(), + graph, &target, ); @@ -964,11 +960,8 @@ mod tests { match iter.next(now) { PeersIterState::Waiting(Some(peer_id)) => { let peer_id = peer_id.clone().into_owned(); - let closest_peers = graph - .0 - .get_mut(&peer_id) - .unwrap() - .get_closest_peers(&target); + let closest_peers = + graph.0.get_mut(&peer_id).unwrap().get_closest_peers(target); iter.on_success(&peer_id, closest_peers); } PeersIterState::WaitingAtCapacity | PeersIterState::Waiting(None) => { @@ -983,7 +976,7 @@ mod tests { .into_iter() .map(Key::from) .collect::>(); - result.sort_unstable_by(|a, b| target.distance(a).cmp(&target.distance(b))); + result.sort_unstable_by_key(|a| target.distance(a)); result.into_iter().map(|k| k.into_preimage()).collect() } @@ -998,7 +991,7 @@ mod tests { let peer = PeerId::random(); let mut iter = ClosestDisjointPeersIter::new( Key::from(PeerId::random()).into(), - iter::once(Key::from(peer.clone())), + iter::once(Key::from(peer)), ); assert!(matches!(iter.next(now), PeersIterState::Waiting(Some(_)))); diff --git a/protocols/kad/src/record/store/memory.rs b/protocols/kad/src/record/store/memory.rs index 93542683a57..39d17d37c2b 100644 --- a/protocols/kad/src/record/store/memory.rs +++ b/protocols/kad/src/record/store/memory.rs @@ -267,7 +267,7 @@ mod tests { assert!(store.add_provider(r.clone()).is_ok()); } - records.sort_by(|r1, r2| distance(r1).cmp(&distance(r2))); + records.sort_by_key(distance); records.truncate(store.config.max_providers_per_key); records == store.providers(&key).to_vec() @@ -279,9 +279,9 @@ mod tests { #[test] fn provided() { let id = PeerId::random(); - let mut store = MemoryStore::new(id.clone()); + let mut store = MemoryStore::new(id); let key = random_multihash(); - let rec = ProviderRecord::new(key, id.clone(), Vec::new()); + let rec = ProviderRecord::new(key, id, Vec::new()); assert!(store.add_provider(rec.clone()).is_ok()); assert_eq!( vec![Cow::Borrowed(&rec)], diff --git a/protocols/mdns/src/behaviour/iface/query.rs b/protocols/mdns/src/behaviour/iface/query.rs index 5b579254f1f..67dc2e0165d 100644 --- a/protocols/mdns/src/behaviour/iface/query.rs +++ b/protocols/mdns/src/behaviour/iface/query.rs @@ -301,8 +301,8 @@ mod tests { let mut addr1: Multiaddr = "/ip4/1.2.3.4/tcp/5000".parse().expect("bad multiaddress"); let mut addr2: Multiaddr = "/ip6/::1/udp/10000".parse().expect("bad multiaddress"); - addr1.push(Protocol::P2p(peer_id.clone().into())); - addr2.push(Protocol::P2p(peer_id.clone().into())); + 
addr1.push(Protocol::P2p(peer_id.into())); + addr2.push(Protocol::P2p(peer_id.into())); let packets = build_query_response( 0xf8f8, @@ -324,7 +324,7 @@ mod tests { RData::PTR(record) => record.0.to_string(), _ => return None, }; - return Some(record_value); + Some(record_value) }) .next() .expect("empty record value"); diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index cdf68466e64..3583b9b1faf 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -44,14 +44,14 @@ fn ping_pong() { .with_interval(Duration::from_millis(10)); let (peer1_id, trans) = mk_transport(muxer); - let mut swarm1 = Swarm::new(trans, ping::Behaviour::new(cfg.clone()), peer1_id.clone()); + let mut swarm1 = Swarm::new(trans, ping::Behaviour::new(cfg.clone()), peer1_id); let (peer2_id, trans) = mk_transport(muxer); - let mut swarm2 = Swarm::new(trans, ping::Behaviour::new(cfg), peer2_id.clone()); + let mut swarm2 = Swarm::new(trans, ping::Behaviour::new(cfg), peer2_id); let (mut tx, mut rx) = mpsc::channel::(1); - let pid1 = peer1_id.clone(); + let pid1 = peer1_id; let addr = "/ip4/127.0.0.1/tcp/0".parse().unwrap(); swarm1.listen_on(addr).unwrap(); @@ -68,7 +68,7 @@ fn ping_pong() { }) => { count1 -= 1; if count1 == 0 { - return (pid1.clone(), peer, rtt); + return (pid1, peer, rtt); } } SwarmEvent::Behaviour(ping::Event { result: Err(e), .. }) => { @@ -79,7 +79,7 @@ fn ping_pong() { } }; - let pid2 = peer2_id.clone(); + let pid2 = peer2_id; let peer2 = async move { swarm2.dial(rx.next().await.unwrap()).unwrap(); @@ -91,7 +91,7 @@ fn ping_pong() { }) => { count2 -= 1; if count2 == 0 { - return (pid2.clone(), peer, rtt); + return (pid2, peer, rtt); } } SwarmEvent::Behaviour(ping::Event { result: Err(e), .. }) => { @@ -123,10 +123,10 @@ fn max_failures() { .with_max_failures(max_failures.into()); let (peer1_id, trans) = mk_transport(muxer); - let mut swarm1 = Swarm::new(trans, ping::Behaviour::new(cfg.clone()), peer1_id.clone()); + let mut swarm1 = Swarm::new(trans, ping::Behaviour::new(cfg.clone()), peer1_id); let (peer2_id, trans) = mk_transport(muxer); - let mut swarm2 = Swarm::new(trans, ping::Behaviour::new(cfg), peer2_id.clone()); + let mut swarm2 = Swarm::new(trans, ping::Behaviour::new(cfg), peer2_id); let (mut tx, mut rx) = mpsc::channel::(1); @@ -190,14 +190,14 @@ fn unsupported_doesnt_fail() { let mut swarm1 = Swarm::new( trans, DummyBehaviour::with_keep_alive(KeepAlive::Yes), - peer1_id.clone(), + peer1_id, ); let (peer2_id, trans) = mk_transport(MuxerChoice::Mplex); let mut swarm2 = Swarm::new( trans, ping::Behaviour::new(ping::Config::new().with_keep_alive(true)), - peer2_id.clone(), + peer2_id, ); let (mut tx, mut rx) = mpsc::channel::(1); diff --git a/protocols/relay/src/v2/copy_future.rs b/protocols/relay/src/v2/copy_future.rs index 47652c92ed7..12a8c486d3a 100644 --- a/protocols/relay/src/v2/copy_future.rs +++ b/protocols/relay/src/v2/copy_future.rs @@ -197,7 +197,7 @@ mod tests { let n = std::cmp::min(self.read.len(), buf.len()); buf[0..n].copy_from_slice(&self.read[0..n]); self.read = self.read.split_off(n); - return Poll::Ready(Ok(n)); + Poll::Ready(Ok(n)) } } diff --git a/protocols/relay/tests/v2.rs b/protocols/relay/tests/v2.rs index 384e5e556ed..d4d58ee32c3 100644 --- a/protocols/relay/tests/v2.rs +++ b/protocols/relay/tests/v2.rs @@ -51,7 +51,6 @@ fn reservation() { spawn_swarm_on_pool(&pool, relay); let client_addr = relay_addr - .clone() .with(Protocol::P2p(relay_peer_id.into())) .with(Protocol::P2pCircuit); let mut client = build_client(); @@ 
-97,7 +96,6 @@ fn new_reservation_to_same_relay_replaces_old() { let mut client = build_client(); let client_peer_id = *client.local_peer_id(); let client_addr = relay_addr - .clone() .with(Protocol::P2p(relay_peer_id.into())) .with(Protocol::P2pCircuit); let client_addr_with_peer_id = client_addr @@ -118,7 +116,7 @@ fn new_reservation_to_same_relay_replaces_old() { )); // Trigger new reservation. - let new_listener = client.listen_on(client_addr.clone()).unwrap(); + let new_listener = client.listen_on(client_addr).unwrap(); // Wait for // - listener of old reservation to close @@ -191,7 +189,6 @@ fn connect() { let mut dst = build_client(); let dst_peer_id = *dst.local_peer_id(); let dst_addr = relay_addr - .clone() .with(Protocol::P2p(relay_peer_id.into())) .with(Protocol::P2pCircuit) .with(Protocol::P2p(dst_peer_id.into())); @@ -247,12 +244,11 @@ fn handle_dial_failure() { let mut client = build_client(); let client_peer_id = *client.local_peer_id(); let client_addr = relay_addr - .clone() .with(Protocol::P2p(relay_peer_id.into())) .with(Protocol::P2pCircuit) .with(Protocol::P2p(client_peer_id.into())); - client.listen_on(client_addr.clone()).unwrap(); + client.listen_on(client_addr).unwrap(); assert!(!pool.run_until(wait_for_dial(&mut client, relay_peer_id))); } @@ -292,7 +288,7 @@ fn reuse_connection() { fn build_relay() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); let local_public_key = local_key.public(); - let local_peer_id = local_public_key.clone().to_peer_id(); + let local_peer_id = local_public_key.to_peer_id(); let transport = upgrade_transport(MemoryTransport::default().boxed(), local_public_key); @@ -315,7 +311,7 @@ fn build_relay() -> Swarm { fn build_client() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); let local_public_key = local_key.public(); - let local_peer_id = local_public_key.clone().to_peer_id(); + let local_peer_id = local_public_key.to_peer_id(); let (relay_transport, behaviour) = client::Client::new_transport_and_behaviour(local_peer_id); let transport = upgrade_transport( diff --git a/protocols/rendezvous/examples/discover.rs b/protocols/rendezvous/examples/discover.rs index ceca71c6c4c..57ff5c237a9 100644 --- a/protocols/rendezvous/examples/discover.rs +++ b/protocols/rendezvous/examples/discover.rs @@ -55,7 +55,7 @@ async fn main() { log::info!("Local peer id: {}", swarm.local_peer_id()); - let _ = swarm.dial(rendezvous_point_address.clone()).unwrap(); + swarm.dial(rendezvous_point_address.clone()).unwrap(); let mut discover_tick = tokio::time::interval(Duration::from_secs(30)); let mut cookie = None; diff --git a/protocols/rendezvous/tests/harness.rs b/protocols/rendezvous/tests/harness.rs index 30dace245ff..cad3a087afb 100644 --- a/protocols/rendezvous/tests/harness.rs +++ b/protocols/rendezvous/tests/harness.rs @@ -62,11 +62,10 @@ where fn get_rand_memory_address() -> Multiaddr { let address_port = rand::random::(); - let addr = format!("/memory/{}", address_port) - .parse::() - .unwrap(); - addr + format!("/memory/{}", address_port) + .parse::() + .unwrap() } pub async fn await_event_or_timeout( diff --git a/protocols/rendezvous/tests/rendezvous.rs b/protocols/rendezvous/tests/rendezvous.rs index 2d1b715a540..2fe45f7ba8f 100644 --- a/protocols/rendezvous/tests/rendezvous.rs +++ b/protocols/rendezvous/tests/rendezvous.rs @@ -37,7 +37,7 @@ async fn given_successful_registration_then_successful_discovery() { let ([mut alice, mut bob], mut robert) = 
new_server_with_connected_clients(rendezvous::server::Config::default()).await; - let _ = alice + alice .behaviour_mut() .register(namespace.clone(), *robert.local_peer_id(), None); @@ -86,7 +86,7 @@ async fn given_successful_registration_then_refresh_ttl() { let roberts_peer_id = *robert.local_peer_id(); let refresh_ttl = 10_000; - let _ = alice + alice .behaviour_mut() .register(namespace.clone(), roberts_peer_id, None); diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index dc9a2eb92e3..22223fb9608 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -598,7 +598,7 @@ mod tests { let upgrade_timeout = Duration::from_secs(1); let mut connection = Connection::new( StreamMuxerBox::new(PendingStreamMuxer), - MockConnectionHandler::new(upgrade_timeout.clone()), + MockConnectionHandler::new(upgrade_timeout), None, 2, ); diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 94f5215e91e..69020aff2a9 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -1718,7 +1718,7 @@ mod tests { let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); - swarm1.listen_on(addr1.clone()).unwrap(); + swarm1.listen_on(addr1).unwrap(); swarm2.listen_on(addr2.clone()).unwrap(); let swarm1_id = *swarm1.local_peer_id(); @@ -1976,7 +1976,7 @@ mod tests { let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); - swarm1.listen_on(addr1.clone()).unwrap(); + swarm1.listen_on(addr1).unwrap(); swarm2.listen_on(addr2.clone()).unwrap(); let swarm1_id = *swarm1.local_peer_id(); @@ -2084,7 +2084,7 @@ mod tests { swarm .dial( DialOpts::peer_id(PeerId::random()) - .addresses(listen_addresses.into()) + .addresses(listen_addresses) .build(), ) .unwrap(); @@ -2144,11 +2144,7 @@ mod tests { } match network - .dial( - DialOpts::peer_id(target) - .addresses(vec![addr.clone()]) - .build(), - ) + .dial(DialOpts::peer_id(target).addresses(vec![addr]).build()) .expect_err("Unexpected dialing success.") { DialError::ConnectionLimit(limit) => { @@ -2206,7 +2202,7 @@ mod tests { // Spawn and block on the dialer. async_std::task::block_on({ let mut n = 0; - let _ = network2.dial(listen_addr.clone()).unwrap(); + network2.dial(listen_addr.clone()).unwrap(); let mut expected_closed = false; let mut network_1_established = false; diff --git a/swarm/src/registry.rs b/swarm/src/registry.rs index f0a6153ec86..05255876f32 100644 --- a/swarm/src/registry.rs +++ b/swarm/src/registry.rs @@ -476,7 +476,7 @@ mod tests { // Add all address reports to the collection. for r in records.iter() { - addresses.add(r.addr.clone(), r.score.clone()); + addresses.add(r.addr.clone(), r.score); } // Check that each address in the registry has the expected score. 
diff --git a/swarm/src/test.rs b/swarm/src/test.rs index 166e9185a47..093ee420cb5 100644 --- a/swarm/src/test.rs +++ b/swarm/src/test.rs @@ -232,7 +232,7 @@ where } fn addresses_of_peer(&mut self, p: &PeerId) -> Vec { - self.addresses_of_peer.push(p.clone()); + self.addresses_of_peer.push(*p); self.inner.addresses_of_peer(p) } @@ -271,12 +271,8 @@ where } else { assert_eq!(other_established, 0) } - self.inject_connection_established.push(( - p.clone(), - c.clone(), - e.clone(), - other_established, - )); + self.inject_connection_established + .push((*p, *c, e.clone(), other_established)); self.inner .inject_connection_established(p, c, e, errors, other_established); } @@ -349,7 +345,7 @@ where "`inject_event` is never called for closed connections." ); - self.inject_event.push((p.clone(), c.clone(), e.clone())); + self.inject_event.push((p, c, e.clone())); self.inner.inject_event(p, c, e); } @@ -389,7 +385,7 @@ where } fn inject_listener_error(&mut self, l: ListenerId, e: &(dyn std::error::Error + 'static)) { - self.inject_listener_error.push(l.clone()); + self.inject_listener_error.push(l); self.inner.inject_listener_error(l, e); } diff --git a/transports/noise/src/protocol/x25519.rs b/transports/noise/src/protocol/x25519.rs index 0ffa9991ae6..482f20245a6 100644 --- a/transports/noise/src/protocol/x25519.rs +++ b/transports/noise/src/protocol/x25519.rs @@ -300,7 +300,7 @@ mod tests { let sodium_sec = ed25519_sk_to_curve25519(&ed25519_compact::SecretKey::new(ed25519.encode())); let sodium_pub = ed25519_pk_to_curve25519(&ed25519_compact::PublicKey::new( - ed25519.public().encode().clone(), + ed25519.public().encode(), )); let our_pub = x25519.public.0; diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index 492f1afb029..221f8eb2dff 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -264,7 +264,6 @@ mod tests { Transport, }; use std::{self, borrow::Cow, path::Path}; - use tempfile; #[test] fn multiaddr_to_path_conversion() { From 6e0946ae3b47f52084ded6adbe16b5f52bc1c67e Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Wed, 28 Sep 2022 16:18:40 +1000 Subject: [PATCH 2/7] Fix most clippy warnings --- misc/multistream-select/tests/transport.rs | 7 +- muxers/mplex/tests/two_peers.rs | 11 +- protocols/autonat/examples/client.rs | 1 + protocols/autonat/examples/server.rs | 1 + protocols/autonat/tests/test_client.rs | 40 +- protocols/autonat/tests/test_server.rs | 26 +- protocols/dcutr/examples/client.rs | 1 + protocols/gossipsub/src/behaviour/tests.rs | 598 +++++++++--------- protocols/gossipsub/src/mcache.rs | 15 +- protocols/gossipsub/src/peer_score/tests.rs | 346 +++++----- protocols/identify/src/identify.rs | 10 +- protocols/kad/src/behaviour/test.rs | 26 +- protocols/kad/src/kbucket/bucket.rs | 21 +- protocols/kad/src/query/peers/closest.rs | 15 +- .../kad/src/query/peers/closest/disjoint.rs | 45 +- protocols/mdns/tests/use-async-std.rs | 57 +- protocols/mdns/tests/use-tokio.rs | 34 +- protocols/ping/tests/ping.rs | 5 +- protocols/relay/src/v2/relay/rate_limiter.rs | 5 +- .../examples/register_with_identify.rs | 1 + protocols/rendezvous/tests/rendezvous.rs | 1 + swarm-derive/tests/test.rs | 19 +- swarm/src/handler/one_shot.rs | 2 +- swarm/src/lib.rs | 1 - swarm/src/registry.rs | 26 +- transports/dns/src/lib.rs | 11 +- transports/plaintext/tests/smoke.rs | 4 +- transports/uds/src/lib.rs | 2 +- 28 files changed, 642 insertions(+), 689 deletions(-) diff --git a/misc/multistream-select/tests/transport.rs b/misc/multistream-select/tests/transport.rs 
index e941b31158a..7c9680ab893 100644 --- a/misc/multistream-select/tests/transport.rs +++ b/misc/multistream-select/tests/transport.rs @@ -71,9 +71,10 @@ fn transport_upgrade() { let addr = addr_receiver.await.unwrap(); dialer.dial(addr).unwrap(); futures::future::poll_fn(move |cx| loop { - match ready!(dialer.poll_next_unpin(cx)).unwrap() { - SwarmEvent::ConnectionEstablished { .. } => return Poll::Ready(()), - _ => {} + if let SwarmEvent::ConnectionEstablished { .. } = + ready!(dialer.poll_next_unpin(cx)).unwrap() + { + return Poll::Ready(()); } }) .await diff --git a/muxers/mplex/tests/two_peers.rs b/muxers/mplex/tests/two_peers.rs index 71581b89464..27766c3abd5 100644 --- a/muxers/mplex/tests/two_peers.rs +++ b/muxers/mplex/tests/two_peers.rs @@ -188,11 +188,10 @@ fn protocol_not_match() { let mut transport = TcpTransport::default() .and_then(move |c, e| upgrade::apply(c, mplex, e, upgrade::Version::V1)) .boxed(); - match transport.dial(rx.await.unwrap()).unwrap().await { - Ok(_) => { - assert!(false, "Dialing should fail here as protocols do not match") - } - _ => {} - } + + assert!( + transport.dial(rx.await.unwrap()).unwrap().await.is_err(), + "Dialing should fail here as protocols do not match" + ); }); } diff --git a/protocols/autonat/examples/client.rs b/protocols/autonat/examples/client.rs index c2030d99bca..39a84da983e 100644 --- a/protocols/autonat/examples/client.rs +++ b/protocols/autonat/examples/client.rs @@ -117,6 +117,7 @@ impl Behaviour { } #[derive(Debug)] +#[allow(clippy::large_enum_variant)] enum Event { AutoNat(autonat::Event), Identify(IdentifyEvent), diff --git a/protocols/autonat/examples/server.rs b/protocols/autonat/examples/server.rs index c4ea7a93e9e..64dc9643e80 100644 --- a/protocols/autonat/examples/server.rs +++ b/protocols/autonat/examples/server.rs @@ -96,6 +96,7 @@ impl Behaviour { } #[derive(Debug)] +#[allow(clippy::large_enum_variant)] enum Event { AutoNat(autonat::Event), Identify(IdentifyEvent), diff --git a/protocols/autonat/tests/test_client.rs b/protocols/autonat/tests/test_client.rs index d5dfe75eb85..420bcf99829 100644 --- a/protocols/autonat/tests/test_client.rs +++ b/protocols/autonat/tests/test_client.rs @@ -58,9 +58,8 @@ async fn spawn_server(kill: oneshot::Receiver<()>) -> (PeerId, Multiaddr) { .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); let addr = loop { - match server.select_next_some().await { - SwarmEvent::NewListenAddr { address, .. } => break address, - _ => {} + if let SwarmEvent::NewListenAddr { address, .. } = server.select_next_some().await { + break address; }; }; tx.send((peer_id, addr)).unwrap(); @@ -78,11 +77,8 @@ async fn spawn_server(kill: oneshot::Receiver<()>) -> (PeerId, Multiaddr) { async fn next_event(swarm: &mut Swarm) -> Event { loop { - match swarm.select_next_some().await { - SwarmEvent::Behaviour(event) => { - break event; - } - _ => {} + if let SwarmEvent::Behaviour(event) = swarm.select_next_some().await { + break event; } } } @@ -177,9 +173,8 @@ async fn test_auto_probe() { .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); loop { - match client.select_next_some().await { - SwarmEvent::NewListenAddr { .. } => break, - _ => {} + if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await { + break; } } @@ -269,9 +264,8 @@ async fn test_confidence() { .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); loop { - match client.select_next_some().await { - SwarmEvent::NewListenAddr { .. } => break, - _ => {} + if let SwarmEvent::NewListenAddr { .. 
} = client.select_next_some().await { + break; } } } else { @@ -357,9 +351,8 @@ async fn test_throttle_server_period() { .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); loop { - match client.select_next_some().await { - SwarmEvent::NewListenAddr { .. } => break, - _ => {} + if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await { + break; } } @@ -477,9 +470,8 @@ async fn test_outbound_failure() { .unwrap(); loop { - match client.select_next_some().await { - SwarmEvent::NewListenAddr { .. } => break, - _ => {} + if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await { + break; } } // First probe should be successful and flip status to public. @@ -497,7 +489,8 @@ async fn test_outbound_failure() { } let inactive = servers.split_off(1); - // Drop the handles of the inactive servers to kill them. + + #[allow(clippy::needless_collect)] // Drop the handles of the inactive servers to kill them. let inactive_ids: Vec<_> = inactive.into_iter().map(|(id, _handle)| id).collect(); // Expect to retry on outbound failure @@ -541,9 +534,8 @@ async fn test_global_ips_config() { .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); loop { - match client.select_next_some().await { - SwarmEvent::NewListenAddr { .. } => break, - _ => {} + if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await { + break; } } diff --git a/protocols/autonat/tests/test_server.rs b/protocols/autonat/tests/test_server.rs index ffe2aa3ea3d..b45ae7ecafc 100644 --- a/protocols/autonat/tests/test_server.rs +++ b/protocols/autonat/tests/test_server.rs @@ -56,9 +56,8 @@ async fn init_server(config: Option) -> (Swarm, PeerId, Multi .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); let addr = loop { - match server.select_next_some().await { - SwarmEvent::NewListenAddr { address, .. } => break address, - _ => {} + if let SwarmEvent::NewListenAddr { address, .. } = server.select_next_some().await { + break address; }; }; (server, peer_id, addr) @@ -91,12 +90,9 @@ async fn spawn_client( .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); loop { - match client.select_next_some().await { - SwarmEvent::NewListenAddr { address, .. } => { - addr = Some(address); - break; - } - _ => {} + if let SwarmEvent::NewListenAddr { address, .. 
} = client.select_next_some().await { + addr = Some(address); + break; }; } } @@ -119,11 +115,8 @@ async fn spawn_client( async fn next_event(swarm: &mut Swarm) -> Event { loop { - match swarm.select_next_some().await { - SwarmEvent::Behaviour(event) => { - break event; - } - _ => {} + if let SwarmEvent::Behaviour(event) = swarm.select_next_some().await { + break event; } } } @@ -161,9 +154,8 @@ async fn test_dial_back() { } => { assert_eq!(peer_id, client_id); let observed_client_ip = loop { - match send_back_addr.pop().unwrap() { - Protocol::Ip4(ip4_addr) => break ip4_addr, - _ => {} + if let Protocol::Ip4(ip4_addr) = send_back_addr.pop().unwrap() { + break ip4_addr; } }; break observed_client_ip; diff --git a/protocols/dcutr/examples/client.rs b/protocols/dcutr/examples/client.rs index e70e54cca3b..a6cf64c23e8 100644 --- a/protocols/dcutr/examples/client.rs +++ b/protocols/dcutr/examples/client.rs @@ -114,6 +114,7 @@ fn main() -> Result<(), Box> { } #[derive(Debug)] + #[allow(clippy::large_enum_variant)] enum Event { Ping(PingEvent), Identify(IdentifyEvent), diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index a68883e68a3..7f8e8878b34 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -21,26 +21,23 @@ // Collection of tests for the gossipsub network behaviour mod tests { - use byteorder::{BigEndian, ByteOrder}; - use std::thread::sleep; - use std::time::Duration; - - use async_std::net::Ipv4Addr; - use rand::Rng; - - use crate::{ - GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, IdentTopic as Topic, - TopicScoreParams, - }; - use super::super::*; use crate::error::ValidationError; use crate::subscription_filter::WhitelistSubscriptionFilter; use crate::transform::{DataTransform, IdentityTransform}; use crate::types::FastMessageId; + use crate::{ + GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, IdentTopic as Topic, + TopicScoreParams, + }; + use async_std::net::Ipv4Addr; + use byteorder::{BigEndian, ByteOrder}; use libp2p_core::Endpoint; + use rand::Rng; use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; + use std::thread::sleep; + use std::time::Duration; #[derive(Default, Builder, Debug)] #[builder(default)] @@ -313,10 +310,10 @@ mod tests { info.peer_id .and_then(|id| PeerId::from_bytes(&id).ok()) .map(|peer_id| - //TODO signedPeerRecord, see https://github.com/libp2p/specs/pull/217 - PeerInfo { - peer_id: Some(peer_id), - }) + //TODO signedPeerRecord, see https://github.com/libp2p/specs/pull/217 + PeerInfo { + peer_id: Some(peer_id), + }) }) .collect::>(); @@ -380,9 +377,8 @@ mod tests { NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { GossipsubHandlerIn::Message(ref message) => { for s in &message.subscriptions { - match s.subscribe { - Some(true) => collected_subscriptions.push(s.clone()), - _ => {} + if let Some(true) = s.subscribe { + collected_subscriptions.push(s.clone()) }; } collected_subscriptions @@ -449,9 +445,8 @@ mod tests { NetworkBehaviourAction::NotifyHandler { event, .. 
} => match **event { GossipsubHandlerIn::Message(ref message) => { for s in &message.subscriptions { - match s.subscribe { - Some(true) => collected_subscriptions.push(s.clone()), - _ => {} + if let Some(true) = s.subscribe { + collected_subscriptions.push(s.clone()) }; } collected_subscriptions @@ -527,11 +522,8 @@ mod tests { (_, controls): (&PeerId, &Vec), ) -> Vec { for c in controls.iter() { - match c { - GossipsubControlAction::Graft { topic_hash: _ } => { - collected_grafts.push(c.clone()) - } - _ => {} + if let GossipsubControlAction::Graft { topic_hash: _ } = c { + collected_grafts.push(c.clone()) } } collected_grafts @@ -793,16 +785,13 @@ mod tests { // check that there are two subscriptions sent to each peer for sevent in send_events.clone() { - match sevent { - NetworkBehaviourAction::NotifyHandler { event, .. } => { - if let GossipsubHandlerIn::Message(ref m) = **event { - assert!( - m.subscriptions.len() == 2, - "There should be two subscriptions sent to each peer (1 for each topic)." - ); - } + if let NetworkBehaviourAction::NotifyHandler { event, .. } = sevent { + if let GossipsubHandlerIn::Message(ref m) = **event { + assert!( + m.subscriptions.len() == 2, + "There should be two subscriptions sent to each peer (1 for each topic)." + ); } - _ => {} }; } @@ -1274,9 +1263,9 @@ mod tests { gs.handle_graft(&peers[7], their_topics.clone()); - for i in 0..2 { + for hash in topic_hashes.iter().take(2) { assert!( - gs.mesh.get(&topic_hashes[i]).unwrap().contains(&peers[7]), + gs.mesh.get(hash).unwrap().contains(&peers[7]), "Expected peer to be in the mesh for the first 2 topics" ); } @@ -1365,7 +1354,7 @@ mod tests { //add peer as explicit peer gs.add_explicit_peer(&peer); - let dial_events: Vec<_> = gs + let num_events = gs .events .iter() .filter(|e| match e { @@ -1374,11 +1363,10 @@ mod tests { } _ => false, }) - .collect(); + .count(); assert_eq!( - dial_events.len(), - 1, + num_events, 1, "There was no dial peer event for the explicit peer" ); } @@ -1489,10 +1477,7 @@ mod tests { //assert that graft gets created to non-explicit peer assert!( count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }) + && matches!(m, GossipsubControlAction::Graft { .. })) >= 1, "No graft message got created to non-explicit peer" ); @@ -1500,10 +1485,7 @@ mod tests { //assert that no graft gets created to explicit peer assert_eq!( count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), + && matches!(m, GossipsubControlAction::Graft { .. })), 0, "A graft message got created to an explicit peer" ); @@ -1527,10 +1509,7 @@ mod tests { //assert that no graft gets created to explicit peer assert_eq!( count_control_msgs(&gs, |peer_id, m| peer_id == &others[0] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), + && matches!(m, GossipsubControlAction::Graft { .. 
})), 0, "A graft message got created to an explicit peer" ); @@ -1598,13 +1577,13 @@ mod tests { //create new topic, both peers subscribing to it but we do not subscribe to it let topic = Topic::new(String::from("t")); let topic_hash = topic.hash(); - for i in 0..2 { + for peer in peers.iter().take(2) { gs.handle_received_subscriptions( &[GossipsubSubscription { action: GossipsubSubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), }], - &peers[i], + peer, ); } @@ -1617,10 +1596,7 @@ mod tests { //assert that graft gets created to non-explicit peer assert!( count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }) + && matches!(m, GossipsubControlAction::Graft { .. })) > 0, "No graft message got created to non-explicit peer" ); @@ -1628,10 +1604,7 @@ mod tests { //assert that no graft gets created to explicit peer assert_eq!( count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), + && matches!(m, GossipsubControlAction::Graft { .. })), 0, "A graft message got created to an explicit peer" ); @@ -1650,13 +1623,13 @@ mod tests { //create new topic, both peers subscribing to it but we do not subscribe to it let topic = Topic::new(String::from("t")); let topic_hash = topic.hash(); - for i in 0..2 { + for peer in peers.iter().take(2) { gs.handle_received_subscriptions( &[GossipsubSubscription { action: GossipsubSubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), }], - &peers[i], + peer, ); } @@ -1672,10 +1645,7 @@ mod tests { //assert that graft gets created to non-explicit peer assert!( count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }) + && matches!(m, GossipsubControlAction::Graft { .. })) >= 1, "No graft message got created to non-explicit peer" ); @@ -1683,10 +1653,7 @@ mod tests { //assert that no graft gets created to explicit peer assert_eq!( count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), + && matches!(m, GossipsubControlAction::Graft { .. })), 0, "A graft message got created to an explicit peer" ); @@ -1728,10 +1695,7 @@ mod tests { .get(&peers[0]) .unwrap_or(&Vec::new()) .iter() - .filter(|m| match m { - GossipsubControlAction::IHave { .. } => true, - _ => false, - }) + .filter(|m| matches!(m, GossipsubControlAction::IHave { .. 
})) .count(), 0, "Gossip got emitted to explicit peer" @@ -1752,9 +1716,9 @@ mod tests { let to_remove_peers = config.mesh_n() + 1 - config.mesh_n_low() - 1; - for index in 0..to_remove_peers { + for peer in peers.iter().take(to_remove_peers) { gs.handle_prune( - &peers[index], + peer, topics.iter().map(|h| (h.clone(), vec![], None)).collect(), ); } @@ -1885,11 +1849,11 @@ mod tests { backoff, } => topic_hash == &topics[0] && - peers.len() == config.prune_peers() && - //all peers are different - peers.iter().collect::>().len() == - config.prune_peers() && - backoff.unwrap() == config.prune_backoff().as_secs(), + peers.len() == config.prune_peers() && + //all peers are different + peers.iter().collect::>().len() == + config.prune_peers() && + backoff.unwrap() == config.prune_backoff().as_secs(), _ => false, }), 1 @@ -1933,9 +1897,9 @@ mod tests { backoff, } => topic_hash == &topics[0] && - //no px in this case - peers.is_empty() && - backoff.unwrap() == config.prune_backoff().as_secs(), + //no px in this case + peers.is_empty() && + backoff.unwrap() == config.prune_backoff().as_secs(), _ => false, }), 1 @@ -1975,10 +1939,10 @@ mod tests { //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). assert_eq!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )), 0, "Graft message created too early within backoff period" ); @@ -1989,10 +1953,10 @@ mod tests { //check that graft got created assert!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }) > 0, + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )) > 0, "No graft message was created after backoff period" ); } @@ -2030,10 +1994,10 @@ mod tests { //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). assert_eq!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )), 0, "Graft message created too early within backoff period" ); @@ -2044,10 +2008,10 @@ mod tests { //check that graft got created assert!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }) > 0, + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )) > 0, "No graft message was created after backoff period" ); } @@ -2101,10 +2065,10 @@ mod tests { // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). assert_eq!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }), + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )), 0, "Graft message created too early within backoff period" ); @@ -2115,10 +2079,10 @@ mod tests { // check that graft got created assert!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Graft { .. } => true, - _ => false, - }) > 0, + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )) > 0, "No graft message was created after backoff period" ); } @@ -2517,10 +2481,7 @@ mod tests { assert_eq!( gs.events .iter() - .filter(|e| match e { - NetworkBehaviourAction::Dial { .. 
} => true, - _ => false, - }) + .filter(|e| matches!(e, NetworkBehaviourAction::Dial { .. })) .count(), 0 ); @@ -2582,8 +2543,10 @@ mod tests { fn test_do_not_gossip_to_peers_below_gossip_threshold() { let config = GossipsubConfig::default(); let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.gossip_threshold = 3.0 * peer_score_params.behaviour_penalty_weight; + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; // Build full mesh let (mut gs, peers, topics) = inject_nodes1() @@ -2655,8 +2618,10 @@ mod tests { fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { let config = GossipsubConfig::default(); let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.gossip_threshold = 3.0 * peer_score_params.behaviour_penalty_weight; + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; // Build full mesh let (mut gs, peers, topics) = inject_nodes1() @@ -2730,7 +2695,7 @@ mod tests { peer_id, gs.data_transform.inbound_transform(msg.clone()).unwrap() )) - .any(|(peer_id, msg)| peer_id == &p2 && &gs.config.message_id(&msg) == &msg_id)); + .any(|(peer_id, msg)| peer_id == &p2 && gs.config.message_id(&msg) == msg_id)); //the message got not sent to p1 assert!(sent_messages .iter() @@ -2738,16 +2703,17 @@ mod tests { peer_id, gs.data_transform.inbound_transform(msg.clone()).unwrap() )) - .all(|(peer_id, msg)| !(peer_id == &p1 && &gs.config.message_id(&msg) == &msg_id))); + .all(|(peer_id, msg)| !(peer_id == &p1 && gs.config.message_id(&msg) == msg_id))); } #[test] fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { let config = GossipsubConfig::default(); let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.gossip_threshold = 3.0 * peer_score_params.behaviour_penalty_weight; - + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; //build full mesh let (mut gs, peers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) @@ -2818,9 +2784,11 @@ mod tests { .build() .unwrap(); let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.gossip_threshold = 0.5 * peer_score_params.behaviour_penalty_weight; - peer_score_thresholds.publish_threshold = 3.0 * peer_score_params.behaviour_penalty_weight; + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; //build mesh with no peers and no subscribed topics let (mut gs, _, _) = inject_nodes1() @@ -2877,10 +2845,11 @@ mod tests { fn test_do_not_flood_publish_to_peer_below_publish_threshold() { let config = GossipsubConfig::default(); let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.gossip_threshold = 0.5 * peer_score_params.behaviour_penalty_weight; - peer_score_thresholds.publish_threshold = 3.0 * 
peer_score_params.behaviour_penalty_weight; - + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; //build mesh with no peers let (mut gs, _, topics) = inject_nodes1() .topics(vec!["test".into()]) @@ -2933,10 +2902,12 @@ mod tests { fn test_ignore_rpc_from_peers_below_graylist_threshold() { let config = GossipsubConfig::default(); let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.gossip_threshold = 0.5 * peer_score_params.behaviour_penalty_weight; - peer_score_thresholds.publish_threshold = 0.5 * peer_score_params.behaviour_penalty_weight; - peer_score_thresholds.graylist_threshold = 3.0 * peer_score_params.behaviour_penalty_weight; + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + publish_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + graylist_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; //build mesh with no peers let (mut gs, _, topics) = inject_nodes1() @@ -3032,13 +3003,10 @@ mod tests { //only the subscription event gets processed, the rest is dropped assert_eq!(gs.events.len(), 1); - assert!(match &gs.events[0] { - NetworkBehaviourAction::GenerateEvent(event) => match event { - GossipsubEvent::Subscribed { .. } => true, - _ => false, - }, - _ => false, - }); + assert!(matches!( + gs.events[0], + NetworkBehaviourAction::GenerateEvent(GossipsubEvent::Subscribed { .. }) + )); let control_action = GossipsubControlAction::IHave { topic_hash: topics[0].clone(), @@ -3070,9 +3038,10 @@ mod tests { .build() .unwrap(); let peer_score_params = PeerScoreParams::default(); - let mut peer_score_thresholds = PeerScoreThresholds::default(); - peer_score_thresholds.accept_px_threshold = peer_score_params.app_specific_weight; - + let peer_score_thresholds = PeerScoreThresholds { + accept_px_threshold: peer_score_params.app_specific_weight, + ..PeerScoreThresholds::default() + }; // Build mesh with two peers let (mut gs, peers, topics) = inject_nodes1() .peer_no(2) @@ -3105,10 +3074,7 @@ mod tests { assert_eq!( gs.events .iter() - .filter(|e| match e { - NetworkBehaviourAction::Dial { .. } => true, - _ => false, - }) + .filter(|e| matches!(e, NetworkBehaviourAction::Dial { .. })) .count(), 0 ); @@ -3130,10 +3096,7 @@ mod tests { assert!( gs.events .iter() - .filter(|e| match e { - NetworkBehaviourAction::Dial { .. } => true, - _ => false, - }) + .filter(|e| matches!(e, NetworkBehaviourAction::Dial { .. 
})) .count() > 0 ); @@ -3198,11 +3161,13 @@ mod tests { let mut peer_score_params = PeerScoreParams::default(); let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 2.0; - topic_params.time_in_mesh_quantum = Duration::from_millis(50); - topic_params.time_in_mesh_cap = 10.0; - topic_params.topic_weight = 0.7; + let topic_params = TopicScoreParams { + time_in_mesh_weight: 2.0, + time_in_mesh_quantum: Duration::from_millis(50), + time_in_mesh_cap: 10.0, + topic_weight: 0.7, + ..TopicScoreParams::default() + }; peer_score_params .topics .insert(topic_hash, topic_params.clone()); @@ -3280,12 +3245,14 @@ mod tests { let mut peer_score_params = PeerScoreParams::default(); let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 2.0; - topic_params.first_message_deliveries_cap = 10.0; - topic_params.first_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 2.0, + first_message_deliveries_cap: 10.0, + first_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..TopicScoreParams::default() + }; peer_score_params .topics .insert(topic_hash, topic_params.clone()); @@ -3376,16 +3343,18 @@ mod tests { let mut peer_score_params = PeerScoreParams::default(); let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = -2.0; - topic_params.mesh_message_deliveries_decay = 0.9; - topic_params.mesh_message_deliveries_cap = 10.0; - topic_params.mesh_message_deliveries_threshold = 5.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(1); - topic_params.mesh_message_deliveries_window = Duration::from_millis(100); - topic_params.topic_weight = 0.7; + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: -2.0, + mesh_message_deliveries_decay: 0.9, + mesh_message_deliveries_cap: 0.0, + mesh_message_deliveries_threshold: 5.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(100), + topic_weight: 0.7, + ..TopicScoreParams::default() + }; peer_score_params.topics.insert(topic_hash, topic_params); let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3472,18 +3441,20 @@ mod tests { let mut peer_score_params = PeerScoreParams::default(); let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = -2.0; - topic_params.mesh_message_deliveries_decay = 0.9; - topic_params.mesh_message_deliveries_cap = 10.0; - topic_params.mesh_message_deliveries_threshold = 5.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(1); - 
topic_params.mesh_message_deliveries_window = Duration::from_millis(100); - topic_params.mesh_failure_penalty_weight = -3.0; - topic_params.mesh_failure_penalty_decay = 0.95; - topic_params.topic_weight = 0.7; + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: -2.0, + mesh_message_deliveries_decay: 0.9, + mesh_message_deliveries_cap: 10.0, + mesh_message_deliveries_threshold: 5.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(100), + mesh_failure_penalty_weight: -3.0, + mesh_failure_penalty_decay: 0.95, + topic_weight: 0.7, + ..Default::default() + }; peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3567,14 +3538,16 @@ mod tests { let mut peer_score_params = PeerScoreParams::default(); let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3624,14 +3597,16 @@ mod tests { let mut peer_score_params = PeerScoreParams::default(); let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3680,14 +3655,16 
@@ mod tests { let mut peer_score_params = PeerScoreParams::default(); let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3728,14 +3705,16 @@ mod tests { let mut peer_score_params = PeerScoreParams::default(); let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3785,14 +3764,16 @@ mod tests { let mut peer_score_params = PeerScoreParams::default(); let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure 
penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3845,14 +3826,16 @@ mod tests { let mut peer_score_params = PeerScoreParams::default(); let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3913,14 +3896,16 @@ mod tests { let mut peer_score_params = PeerScoreParams::default(); let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - topic_params.topic_weight = 0.7; + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -3995,14 +3980,16 @@ mod tests { let mut peer_score_params = PeerScoreParams::default(); let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut topic_params = TopicScoreParams::default(); - topic_params.time_in_mesh_weight = 0.0; //deactivate time in mesh - topic_params.first_message_deliveries_weight = 0.0; //deactivate first time deliveries - topic_params.mesh_message_deliveries_weight = 0.0; //deactivate message deliveries - topic_params.mesh_failure_penalty_weight = 0.0; //deactivate mesh failure penalties - topic_params.invalid_message_deliveries_weight = -2.0; - topic_params.invalid_message_deliveries_decay = 0.9; - 
topic_params.topic_weight = 0.7; + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; peer_score_params.topics.insert(topic_hash, topic_params); peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); @@ -4056,8 +4043,10 @@ mod tests { #[test] fn test_scoring_p5() { - let mut peer_score_params = PeerScoreParams::default(); - peer_score_params.app_specific_weight = 2.0; + let peer_score_params = PeerScoreParams { + app_specific_weight: 2.0, + ..PeerScoreParams::default() + }; //build mesh with one peer let (mut gs, peers, _) = inject_nodes1() @@ -4080,9 +4069,11 @@ mod tests { #[test] fn test_scoring_p6() { - let mut peer_score_params = PeerScoreParams::default(); - peer_score_params.ip_colocation_factor_threshold = 5.0; - peer_score_params.ip_colocation_factor_weight = -2.0; + let peer_score_params = PeerScoreParams { + ip_colocation_factor_threshold: 5.0, + ip_colocation_factor_weight: -2.0, + ..Default::default() + }; let (mut gs, _, _) = inject_nodes1() .peer_no(0) @@ -4119,9 +4110,9 @@ mod tests { } //add additional connection for 3 others with addr - for i in 0..3 { + for id in others.iter().take(3) { gs.inject_connection_established( - &others[i], + id, &ConnectionId::new(0), &ConnectedPoint::Dialer { address: addr.clone(), @@ -4140,9 +4131,9 @@ mod tests { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&others[3]), 0.0); //add additional connection for 3 of the peers to addr2 - for i in 0..3 { + for peer in peers.iter().take(3) { gs.inject_connection_established( - &peers[i], + peer, &ConnectionId::new(0), &ConnectedPoint::Dialer { address: addr2.clone(), @@ -4208,9 +4199,11 @@ mod tests { .graft_flood_threshold(Duration::from_millis(100)) .build() .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - peer_score_params.behaviour_penalty_weight = -2.0; - peer_score_params.behaviour_penalty_decay = 0.9; + let peer_score_params = PeerScoreParams { + behaviour_penalty_weight: -2.0, + behaviour_penalty_decay: 0.9, + ..Default::default() + }; let (mut gs, peers, topics) = inject_nodes1() .peer_no(2) @@ -4223,13 +4216,11 @@ mod tests { .create_network(); //remove peers from mesh and send prune to them => this adds a backoff for the peers - for i in 0..2 { - gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[i]); + for peer in peers.iter().take(2) { + gs.mesh.get_mut(&topics[0]).unwrap().remove(peer); gs.send_graft_prune( HashMap::new(), - vec![(peers[i], vec![topics[0].clone()])] - .into_iter() - .collect(), + HashMap::from([(*peer, vec![topics[0].clone()])]), HashSet::new(), ); } @@ -4282,10 +4273,14 @@ mod tests { .opportunistic_graft_peers(2) .build() .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - peer_score_params.app_specific_weight = 1.0; - let mut thresholds = PeerScoreThresholds::default(); - thresholds.opportunistic_graft_threshold = 2.0; + let peer_score_params = PeerScoreParams { + app_specific_weight: 1.0, + ..Default::default() + }; + let thresholds = PeerScoreThresholds { + opportunistic_graft_threshold: 2.0, + ..Default::default() + }; let (mut gs, peers, topics) = inject_nodes1() .peer_no(5) @@ 
-4312,13 +4307,13 @@ mod tests { assert_eq!(gs.mesh[&topics[0]], peers.iter().cloned().collect()); //give others high scores (but the first two have not high enough scores) - for i in 0..5 { - gs.set_application_score(&peers[i], 0.0 + i as f64); + for (i, peer) in peers.iter().enumerate().take(5) { + gs.set_application_score(peer, 0.0 + i as f64); } //set scores for peers in the mesh - for i in 0..5 { - gs.set_application_score(&others[i], 0.0 + i as f64); + for (i, peer) in others.iter().enumerate().take(5) { + gs.set_application_score(peer, 0.0 + i as f64); } //this gives a median of exactly 2.0 => should not apply opportunistic grafting @@ -4376,10 +4371,10 @@ mod tests { //assert that no prune got created assert_eq!( - count_control_msgs(&gs, |_, a| match a { - GossipsubControlAction::Prune { .. } => true, - _ => false, - }), + count_control_msgs(&gs, |_, a| matches!( + a, + GossipsubControlAction::Prune { .. } + )), 0, "we should not prune after graft in unknown topic" ); @@ -4482,22 +4477,8 @@ mod tests { //we send iwant only for the first 10 messages assert_eq!( - count_control_msgs(&gs, |p, action| match action { - GossipsubControlAction::IWant { message_ids } => - p == &peer && { - assert_eq!( - message_ids.len(), - 1, - "each iwant should have one message \ - corresponding to one ihave" - ); - - assert!(first_ten.contains(&message_ids[0])); - - true - }, - _ => false, - }), + count_control_msgs(&gs, |p, action| p == &peer + && matches!(action, GossipsubControlAction::IWant { message_ids } if message_ids.len() == 1 && first_ten.contains(&message_ids[0]))), 10, "exactly the first ten ihaves should be processed and one iwant for each created" ); @@ -4519,19 +4500,8 @@ mod tests { //we sent iwant for all 20 messages assert_eq!( - count_control_msgs(&gs, |p, action| match action { - GossipsubControlAction::IWant { message_ids } => - p == &peer && { - assert_eq!( - message_ids.len(), - 1, - "each iwant should have one message \ - corresponding to one ihave" - ); - true - }, - _ => false, - }), + count_control_msgs(&gs, |p, action| p == &peer + && matches!(action, GossipsubControlAction::IWant { message_ids } if message_ids.len() == 1)), 20, "all 20 should get sent" ); @@ -4706,8 +4676,10 @@ mod tests { .iwant_followup_time(Duration::from_secs(4)) .build() .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - peer_score_params.behaviour_penalty_weight = -1.0; + let peer_score_params = PeerScoreParams { + behaviour_penalty_weight: -1.0, + ..Default::default() + }; // fill the mesh let (mut gs, peers, topics) = inject_nodes1() @@ -4807,7 +4779,7 @@ mod tests { } else { println!("{}", peer); println!("{}", score); - assert!(false, "Invalid score of peer") + panic!("Invalid score of peer"); } } @@ -5229,7 +5201,7 @@ mod tests { //collect messages to p1 let messages_to_p1 = gs2.events.drain(..).filter_map(|e| match e { NetworkBehaviourAction::NotifyHandler { peer_id, event, .. 
} => { - if &peer_id == &p1 { + if peer_id == p1 { if let GossipsubHandlerIn::Message(m) = Arc::try_unwrap(event).unwrap() { Some(m) } else { diff --git a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs index ad19eaf08dd..ef838c82a8d 100644 --- a/protocols/gossipsub/src/mcache.rs +++ b/protocols/gossipsub/src/mcache.rs @@ -275,18 +275,11 @@ mod tests { mc.put(&id, m.clone()); - assert!(mc.history[0].len() == 1); + assert_eq!(mc.history[0].len(), 1); let fetched = mc.get(&id); - assert_eq!(fetched.is_none(), false); - assert_eq!(fetched.is_some(), true); - - // Make sure it is the same fetched message - match fetched { - Some(x) => assert_eq!(*x, m), - _ => assert!(false), - } + assert_eq!(fetched.unwrap(), &m); } #[test] @@ -302,7 +295,7 @@ mod tests { // Try to get an incorrect ID let wrong_id = MessageId::new(b"wrongid"); let fetched = mc.get(&wrong_id); - assert_eq!(fetched.is_none(), true); + assert!(fetched.is_none()); } #[test] @@ -313,7 +306,7 @@ mod tests { // Try to get an incorrect ID let wrong_string = MessageId::new(b"imempty"); let fetched = mc.get(&wrong_string); - assert_eq!(fetched.is_none(), true); + assert!(fetched.is_none()); } #[test] diff --git a/protocols/gossipsub/src/peer_score/tests.rs b/protocols/gossipsub/src/peer_score/tests.rs index 7f2658fed42..1c47eeeb183 100644 --- a/protocols/gossipsub/src/peer_score/tests.rs +++ b/protocols/gossipsub/src/peer_score/tests.rs @@ -76,14 +76,18 @@ fn test_score_time_in_mesh() { // Create parameters with reasonable default values let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut params = PeerScoreParams::default(); - params.topic_score_cap = 1000.0; + let mut params = PeerScoreParams { + topic_score_cap: 1000.0, + ..Default::default() + }; - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 0.5; - topic_params.time_in_mesh_weight = 1.0; - topic_params.time_in_mesh_quantum = Duration::from_millis(1); - topic_params.time_in_mesh_cap = 3600.0; + let topic_params = TopicScoreParams { + topic_weight: 0.5, + time_in_mesh_weight: 1.0, + time_in_mesh_quantum: Duration::from_millis(1), + time_in_mesh_cap: 3600.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); @@ -125,11 +129,13 @@ fn test_score_time_in_mesh_cap() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 0.5; - topic_params.time_in_mesh_weight = 1.0; - topic_params.time_in_mesh_quantum = Duration::from_millis(1); - topic_params.time_in_mesh_cap = 10.0; + let topic_params = TopicScoreParams { + topic_weight: 0.5, + time_in_mesh_weight: 1.0, + time_in_mesh_quantum: Duration::from_millis(1), + time_in_mesh_cap: 10.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); @@ -173,12 +179,14 @@ fn test_score_first_message_deliveries() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.first_message_deliveries_weight = 1.0; - topic_params.first_message_deliveries_decay = 1.0; - topic_params.first_message_deliveries_cap = 2000.0; - topic_params.time_in_mesh_weight = 0.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + first_message_deliveries_weight: 1.0, + first_message_deliveries_decay: 1.0, + first_message_deliveries_cap: 2000.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; 
params.topics.insert(topic_hash, topic_params.clone()); @@ -217,12 +225,14 @@ fn test_score_first_message_deliveries_cap() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.first_message_deliveries_weight = 1.0; - topic_params.first_message_deliveries_decay = 1.0; // test without decay - topic_params.first_message_deliveries_cap = 50.0; - topic_params.time_in_mesh_weight = 0.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + first_message_deliveries_weight: 1.0, + first_message_deliveries_decay: 1.0, + first_message_deliveries_cap: 50.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); @@ -261,12 +271,14 @@ fn test_score_first_message_deliveries_decay() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.first_message_deliveries_weight = 1.0; - topic_params.first_message_deliveries_decay = 0.9; // decay 10% per decay interval - topic_params.first_message_deliveries_cap = 2000.0; - topic_params.time_in_mesh_weight = 0.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + first_message_deliveries_weight: 1.0, + first_message_deliveries_decay: 0.9, + first_message_deliveries_cap: 2000.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); let peer_id = PeerId::random(); @@ -317,17 +329,19 @@ fn test_score_mesh_message_deliveries() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = -1.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(1); - topic_params.mesh_message_deliveries_window = Duration::from_millis(10); - topic_params.mesh_message_deliveries_threshold = 20.0; - topic_params.mesh_message_deliveries_cap = 100.0; - topic_params.mesh_message_deliveries_decay = 1.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: -1.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 1.0, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); let mut peer_score = PeerScore::new(params); @@ -419,18 +433,20 @@ fn test_score_mesh_message_deliveries_decay() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = -1.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(0); - topic_params.mesh_message_deliveries_window = Duration::from_millis(10); - topic_params.mesh_message_deliveries_threshold = 20.0; - topic_params.mesh_message_deliveries_cap = 100.0; - topic_params.mesh_message_deliveries_decay = 0.9; - 
topic_params.first_message_deliveries_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.time_in_mesh_quantum = Duration::from_secs(1); - topic_params.mesh_failure_penalty_weight = 0.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: -1.0, + mesh_message_deliveries_activation: Duration::from_secs(0), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 0.9, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + mesh_failure_penalty_weight: 0.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); let mut peer_score = PeerScore::new(params); @@ -480,24 +496,26 @@ fn test_score_mesh_failure_penalty() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - // the mesh failure penalty is applied when a peer is pruned while their - // mesh deliveries are under the threshold. - // for this test, we set the mesh delivery threshold, but set - // mesh_message_deliveries to zero, so the only affect on the score - // is from the mesh failure penalty - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(0); - topic_params.mesh_message_deliveries_window = Duration::from_millis(10); - topic_params.mesh_message_deliveries_threshold = 20.0; - topic_params.mesh_message_deliveries_cap = 100.0; - topic_params.mesh_message_deliveries_decay = 1.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.time_in_mesh_quantum = Duration::from_secs(1); - topic_params.mesh_failure_penalty_weight = -1.0; - topic_params.mesh_failure_penalty_decay = 1.0; + let topic_params = TopicScoreParams { + // the mesh failure penalty is applied when a peer is pruned while their + // mesh deliveries are under the threshold. 
+ // for this test, we set the mesh delivery threshold, but set + // mesh_message_deliveries to zero, so the only affect on the score + // is from the mesh failure penalty + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + mesh_message_deliveries_activation: Duration::from_secs(0), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 1.0, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + mesh_failure_penalty_weight: -1.0, + mesh_failure_penalty_decay: 1.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); let mut peer_score = PeerScore::new(params); @@ -562,20 +580,21 @@ fn test_score_invalid_message_deliveries() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(1); - topic_params.mesh_message_deliveries_window = Duration::from_millis(10); - topic_params.mesh_message_deliveries_threshold = 20.0; - topic_params.mesh_message_deliveries_cap = 100.0; - topic_params.mesh_message_deliveries_decay = 1.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; - - topic_params.invalid_message_deliveries_weight = -1.0; - topic_params.invalid_message_deliveries_decay = 1.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 1.0, + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + invalid_message_deliveries_weight: -1.0, + invalid_message_deliveries_decay: 1.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); let mut peer_score = PeerScore::new(params); @@ -608,20 +627,21 @@ fn test_score_invalid_message_deliveris_decay() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(1); - topic_params.mesh_message_deliveries_window = Duration::from_millis(10); - topic_params.mesh_message_deliveries_threshold = 20.0; - topic_params.mesh_message_deliveries_cap = 100.0; - topic_params.mesh_message_deliveries_decay = 1.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; - - topic_params.invalid_message_deliveries_weight = -1.0; - topic_params.invalid_message_deliveries_decay = 0.9; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(10), + mesh_message_deliveries_threshold: 20.0, + mesh_message_deliveries_cap: 100.0, + mesh_message_deliveries_decay: 1.0, + first_message_deliveries_weight: 0.0, + 
time_in_mesh_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + invalid_message_deliveries_weight: -1.0, + invalid_message_deliveries_decay: 0.9, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params.clone()); let mut peer_score = PeerScore::new(params); @@ -667,15 +687,17 @@ fn test_score_reject_message_deliveries() { let topic_hash = topic.hash(); let mut params = PeerScoreParams::default(); - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.time_in_mesh_quantum = Duration::from_secs(1); - topic_params.invalid_message_deliveries_weight = -1.0; - topic_params.invalid_message_deliveries_decay = 1.0; + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + first_message_deliveries_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + invalid_message_deliveries_weight: -1.0, + invalid_message_deliveries_decay: 1.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); @@ -777,18 +799,22 @@ fn test_application_score() { let app_specific_weight = 0.5; let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut params = PeerScoreParams::default(); - params.app_specific_weight = app_specific_weight; - - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.time_in_mesh_quantum = Duration::from_secs(1); - topic_params.invalid_message_deliveries_weight = 0.0; - topic_params.invalid_message_deliveries_decay = 1.0; + let mut params = PeerScoreParams { + app_specific_weight, + ..Default::default() + }; + + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + first_message_deliveries_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + invalid_message_deliveries_weight: 0.0, + invalid_message_deliveries_decay: 1.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); @@ -815,18 +841,22 @@ fn test_score_ip_colocation() { let ip_colocation_factor_threshold = 1.0; let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut params = PeerScoreParams::default(); - params.ip_colocation_factor_weight = ip_colocation_factor_weight; - params.ip_colocation_factor_threshold = ip_colocation_factor_threshold; - - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.time_in_mesh_quantum = Duration::from_secs(1); - topic_params.invalid_message_deliveries_weight = 0.0; + let mut params = PeerScoreParams { + ip_colocation_factor_weight, + ip_colocation_factor_threshold, + ..Default::default() + }; + + let topic_params = TopicScoreParams { + topic_weight: 1.0, + 
mesh_message_deliveries_weight: 0.0, + first_message_deliveries_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + invalid_message_deliveries_weight: 0.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); @@ -875,18 +905,22 @@ fn test_score_behaviour_penality() { let topic = Topic::new("test"); let topic_hash = topic.hash(); - let mut params = PeerScoreParams::default(); - params.behaviour_penalty_decay = behaviour_penalty_decay; - params.behaviour_penalty_weight = behaviour_penalty_weight; - - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 1.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.first_message_deliveries_weight = 0.0; - topic_params.mesh_failure_penalty_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; - topic_params.time_in_mesh_quantum = Duration::from_secs(1); - topic_params.invalid_message_deliveries_weight = 0.0; + let mut params = PeerScoreParams { + behaviour_penalty_decay, + behaviour_penalty_weight, + ..Default::default() + }; + + let topic_params = TopicScoreParams { + topic_weight: 1.0, + mesh_message_deliveries_weight: 0.0, + first_message_deliveries_weight: 0.0, + mesh_failure_penalty_weight: 0.0, + time_in_mesh_weight: 0.0, + time_in_mesh_quantum: Duration::from_secs(1), + invalid_message_deliveries_weight: 0.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); @@ -926,16 +960,20 @@ fn test_score_retention() { let app_specific_weight = 1.0; let app_score_value = -1000.0; let retain_score = Duration::from_secs(1); - let mut params = PeerScoreParams::default(); - params.app_specific_weight = app_specific_weight; - params.retain_score = retain_score; - - let mut topic_params = TopicScoreParams::default(); - topic_params.topic_weight = 0.0; - topic_params.mesh_message_deliveries_weight = 0.0; - topic_params.mesh_message_deliveries_activation = Duration::from_secs(0); - topic_params.first_message_deliveries_weight = 0.0; - topic_params.time_in_mesh_weight = 0.0; + let mut params = PeerScoreParams { + app_specific_weight, + retain_score, + ..Default::default() + }; + + let topic_params = TopicScoreParams { + topic_weight: 0.0, + mesh_message_deliveries_weight: 0.0, + mesh_message_deliveries_activation: Duration::from_secs(0), + first_message_deliveries_weight: 0.0, + time_in_mesh_weight: 0.0, + ..Default::default() + }; params.topics.insert(topic_hash, topic_params); let mut peer_score = PeerScore::new(params); diff --git a/protocols/identify/src/identify.rs b/protocols/identify/src/identify.rs index 76a331b728c..c141713cade 100644 --- a/protocols/identify/src/identify.rs +++ b/protocols/identify/src/identify.rs @@ -602,9 +602,8 @@ mod tests { loop { let swarm1_fut = swarm1.select_next_some(); pin_mut!(swarm1_fut); - match swarm1_fut.await { - SwarmEvent::NewListenAddr { address, .. } => return address, - _ => {} + if let SwarmEvent::NewListenAddr { address, .. } = swarm1_fut.await { + return address; } } }); @@ -681,9 +680,8 @@ mod tests { loop { let swarm1_fut = swarm1.select_next_some(); pin_mut!(swarm1_fut); - match swarm1_fut.await { - SwarmEvent::NewListenAddr { address, .. } => return address, - _ => {} + if let SwarmEvent::NewListenAddr { address, .. 
} = swarm1_fut.await { + return address; } } }); diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index b7e281af136..c61ffaf158f 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -510,11 +510,11 @@ fn put_record() { let mut single_swarm = build_node_with_config(config); // Connect `single_swarm` to three bootnodes. - for i in 0..3 { - single_swarm.1.behaviour_mut().add_address( - fully_connected_swarms[i].1.local_peer_id(), - fully_connected_swarms[i].0.clone(), - ); + for swarm in fully_connected_swarms.iter().take(3) { + single_swarm + .1 + .behaviour_mut() + .add_address(swarm.1.local_peer_id(), swarm.0.clone()); } let mut swarms = vec![single_swarm]; @@ -527,6 +527,7 @@ fn put_record() { .collect::>() }; + #[allow(clippy::mutable_key_type)] // False positive, we never modify `Bytes`. let records = records .into_iter() .take(num_total) @@ -810,8 +811,8 @@ fn get_record_many() { let record = Record::new(random_multihash(), vec![4, 5, 6]); - for i in 0..num_nodes { - swarms[i].behaviour_mut().store.put(record.clone()).unwrap(); + for swarm in swarms.iter_mut().take(num_nodes) { + swarm.behaviour_mut().store.put(record.clone()).unwrap(); } let quorum = Quorum::N(NonZeroUsize::new(num_results).unwrap()); @@ -870,11 +871,11 @@ fn add_provider() { let mut single_swarm = build_node_with_config(config); // Connect `single_swarm` to three bootnodes. - for i in 0..3 { - single_swarm.1.behaviour_mut().add_address( - fully_connected_swarms[i].1.local_peer_id(), - fully_connected_swarms[i].0.clone(), - ); + for swarm in fully_connected_swarms.iter().take(3) { + single_swarm + .1 + .behaviour_mut() + .add_address(swarm.1.local_peer_id(), swarm.0.clone()); } let mut swarms = vec![single_swarm]; @@ -887,6 +888,7 @@ fn add_provider() { .collect::>() }; + #[allow(clippy::mutable_key_type)] // False positive, we never modify `Bytes`. let keys: HashSet<_> = keys.into_iter().take(num_total).collect(); // Each test run publishes all records twice. diff --git a/protocols/kad/src/kbucket/bucket.rs b/protocols/kad/src/kbucket/bucket.rs index a02b6f8abc1..0a6b69003bd 100644 --- a/protocols/kad/src/kbucket/bucket.rs +++ b/protocols/kad/src/kbucket/bucket.rs @@ -504,18 +504,15 @@ mod tests { value: (), }; let full = bucket.num_entries() == K_VALUE.get(); - match bucket.insert(node, status) { - InsertResult::Inserted => { - let vec = match status { - NodeStatus::Connected => &mut connected, - NodeStatus::Disconnected => &mut disconnected, - }; - if full { - vec.pop_front(); - } - vec.push_back((status, key.clone())); + if let InsertResult::Inserted = bucket.insert(node, status) { + let vec = match status { + NodeStatus::Connected => &mut connected, + NodeStatus::Disconnected => &mut disconnected, + }; + if full { + vec.pop_front(); } - _ => {} + vec.push_back((status, key.clone())); } } @@ -635,7 +632,7 @@ mod tests { // The pending node has been discarded. assert!(bucket.pending().is_none()); - assert!(bucket.iter().all(|(n, _)| &n.key != &key)); + assert!(bucket.iter().all(|(n, _)| n.key != key)); // The initially disconnected node is now the most-recently connected. 
assert_eq!( diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs index b399f462195..7fe87b7fe4d 100644 --- a/protocols/kad/src/query/peers/closest.rs +++ b/protocols/kad/src/query/peers/closest.rs @@ -494,7 +494,7 @@ mod tests { .collect() } - fn sorted>(target: &T, peers: &Vec>) -> bool { + fn sorted>(target: &T, peers: &[Key]) -> bool { peers .windows(2) .all(|w| w[0].distance(&target) < w[1].distance(&target)) @@ -549,10 +549,7 @@ mod tests { .map(|e| (e.key.clone(), &e.state)) .unzip(); - let none_contacted = states.iter().all(|s| match s { - PeerState::NotContacted => true, - _ => false, - }); + let none_contacted = states.iter().all(|s| matches!(s, PeerState::NotContacted)); assert!(none_contacted, "Unexpected peer state in new iterator."); assert!( @@ -650,10 +647,10 @@ mod tests { // Determine if all peers have been contacted by the iterator. This _must_ be // the case if the iterator finished with fewer than the requested number // of results. - let all_contacted = iter.closest_peers.values().all(|e| match e.state { - PeerState::NotContacted | PeerState::Waiting { .. } => false, - _ => true, - }); + let all_contacted = iter + .closest_peers + .values() + .all(|e| !matches!(e.state, PeerState::NotContacted | PeerState::Waiting { .. })); let target = iter.target.clone(); let num_results = iter.config.num_results; diff --git a/protocols/kad/src/query/peers/closest/disjoint.rs b/protocols/kad/src/query/peers/closest/disjoint.rs index dbc6d34eb6c..2272b8e46fc 100644 --- a/protocols/kad/src/query/peers/closest/disjoint.rs +++ b/protocols/kad/src/query/peers/closest/disjoint.rs @@ -456,22 +456,20 @@ mod tests { let num_closest_iters = g.gen_range(0..20 + 1); let peers = random_peers(g.gen_range(0..20 * num_closest_iters + 1), g); - let iters: Vec<_> = (0..num_closest_iters) - .map(|_| { - let num_peers = g.gen_range(0..20 + 1); - let mut peers = g - .choose_multiple(&peers, num_peers) - .cloned() - .map(Key::from) - .collect::>(); + let iters = (0..num_closest_iters).map(|_| { + let num_peers = g.gen_range(0..20 + 1); + let mut peers = g + .choose_multiple(&peers, num_peers) + .cloned() + .map(Key::from) + .collect::>(); - peers.sort_unstable_by_key(|a| target.distance(a)); + peers.sort_unstable_by_key(|a| target.distance(a)); - peers.into_iter() - }) - .collect(); + peers.into_iter() + }); - ResultIter::new(target, iters.into_iter()) + ResultIter::new(target.clone(), iters) } fn shrink(&self) -> Box> { @@ -514,20 +512,15 @@ mod tests { // The peer that should not be included. let peer = self.peers.pop()?; - let iters = self - .iters - .clone() - .into_iter() - .filter_map(|mut iter| { - iter.retain(|p| p != &peer); - if iter.is_empty() { - return None; - } - Some(iter.into_iter()) - }) - .collect::>(); + let iters = self.iters.clone().into_iter().filter_map(|mut iter| { + iter.retain(|p| p != &peer); + if iter.is_empty() { + return None; + } + Some(iter.into_iter()) + }); - Some(ResultIter::new(self.target.clone(), iters.into_iter())) + Some(ResultIter::new(self.target.clone(), iters)) } } diff --git a/protocols/mdns/tests/use-async-std.rs b/protocols/mdns/tests/use-async-std.rs index 683aed338ce..1bd311dbcbe 100644 --- a/protocols/mdns/tests/use-async-std.rs +++ b/protocols/mdns/tests/use-async-std.rs @@ -75,33 +75,27 @@ async fn run_discovery_test(config: MdnsConfig) -> Result<(), Box> { let mut discovered_b = false; loop { futures::select! 
{ - ev = a.select_next_some() => match ev { - SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => { - for (peer, _addr) in peers { - if peer == *b.local_peer_id() { - if discovered_a { - return Ok(()); - } else { - discovered_b = true; - } + ev = a.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) = ev { + for (peer, _addr) in peers { + if peer == *b.local_peer_id() { + if discovered_a { + return Ok(()); + } else { + discovered_b = true; } } } - _ => {} }, - ev = b.select_next_some() => match ev { - SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => { - for (peer, _addr) in peers { - if peer == *a.local_peer_id() { - if discovered_b { - return Ok(()); - } else { - discovered_a = true; - } + ev = b.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) = ev { + for (peer, _addr) in peers { + if peer == *a.local_peer_id() { + if discovered_b { + return Ok(()); + } else { + discovered_a = true; } } } - _ => {} } } } @@ -113,27 +107,20 @@ async fn run_peer_expiration_test(config: MdnsConfig) -> Result<(), Box match ev { - SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) => { - for (peer, _addr) in peers { - if peer == *b.local_peer_id() { - return Ok(()); - } + ev = a.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) = ev { + for (peer, _addr) in peers { + if peer == *b.local_peer_id() { + return Ok(()); } } - _ => {} }, - ev = b.select_next_some() => match ev { - SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) => { - for (peer, _addr) in peers { - if peer == *a.local_peer_id() { - return Ok(()); - } + ev = b.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) = ev { + for (peer, _addr) in peers { + if peer == *a.local_peer_id() { + return Ok(()); } } - _ => {} } - } } } diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs index 9d6cacd76cb..040b32a984e 100644 --- a/protocols/mdns/tests/use-tokio.rs +++ b/protocols/mdns/tests/use-tokio.rs @@ -71,33 +71,27 @@ async fn run_discovery_test(config: MdnsConfig) -> Result<(), Box> { let mut discovered_b = false; loop { futures::select! { - ev = a.select_next_some() => match ev { - SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => { - for (peer, _addr) in peers { - if peer == *b.local_peer_id() { - if discovered_a { - return Ok(()); - } else { - discovered_b = true; - } + ev = a.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) = ev { + for (peer, _addr) in peers { + if peer == *b.local_peer_id() { + if discovered_a { + return Ok(()); + } else { + discovered_b = true; } } } - _ => {} }, - ev = b.select_next_some() => match ev { - SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => { - for (peer, _addr) in peers { - if peer == *a.local_peer_id() { - if discovered_b { - return Ok(()); - } else { - discovered_a = true; - } + ev = b.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) = ev { + for (peer, _addr) in peers { + if peer == *a.local_peer_id() { + if discovered_b { + return Ok(()); + } else { + discovered_a = true; } } } - _ => {} } } } diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index 3583b9b1faf..a9df0874b73 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -207,9 +207,8 @@ fn unsupported_doesnt_fail() { async_std::task::spawn(async move { loop { - match swarm1.select_next_some().await { - SwarmEvent::NewListenAddr { address, .. 
} => tx.send(address).await.unwrap(), - _ => {} + if let SwarmEvent::NewListenAddr { address, .. } = swarm1.select_next_some().await { + tx.send(address).await.unwrap() } } }); diff --git a/protocols/relay/src/v2/relay/rate_limiter.rs b/protocols/relay/src/v2/relay/rate_limiter.rs index d0b4b4e631f..00d70aa3541 100644 --- a/protocols/relay/src/v2/relay/rate_limiter.rs +++ b/protocols/relay/src/v2/relay/rate_limiter.rs @@ -279,10 +279,7 @@ mod generic { } let mut now = Instant::now(); - let mut l = RateLimiter::new(RateLimiterConfig { - limit: limit.try_into().unwrap(), - interval, - }); + let mut l = RateLimiter::new(RateLimiterConfig { limit, interval }); for (id, d) in events { now = if let Some(now) = now.checked_add(d) { diff --git a/protocols/rendezvous/examples/register_with_identify.rs b/protocols/rendezvous/examples/register_with_identify.rs index 3896db3e3d1..11b5c653ebc 100644 --- a/protocols/rendezvous/examples/register_with_identify.rs +++ b/protocols/rendezvous/examples/register_with_identify.rs @@ -114,6 +114,7 @@ async fn main() { } #[derive(Debug)] +#[allow(clippy::large_enum_variant)] enum MyEvent { Rendezvous(rendezvous::client::Event), Identify(IdentifyEvent), diff --git a/protocols/rendezvous/tests/rendezvous.rs b/protocols/rendezvous/tests/rendezvous.rs index 2fe45f7ba8f..e9c4d871b1c 100644 --- a/protocols/rendezvous/tests/rendezvous.rs +++ b/protocols/rendezvous/tests/rendezvous.rs @@ -374,6 +374,7 @@ struct CombinedBehaviour { } #[derive(Debug)] +#[allow(clippy::large_enum_variant)] enum CombinedEvent { Client(rendezvous::client::Event), Server(rendezvous::server::Event), diff --git a/swarm-derive/tests/test.rs b/swarm-derive/tests/test.rs index e0f77eefd30..5b1376354cf 100644 --- a/swarm-derive/tests/test.rs +++ b/swarm-derive/tests/test.rs @@ -43,8 +43,7 @@ fn one_field() { ping: libp2p::ping::Ping, } - #[allow(dead_code)] - #[allow(unreachable_code)] + #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] fn foo() { let _out_event: ::OutEvent = unimplemented!(); match _out_event { @@ -62,8 +61,7 @@ fn two_fields() { identify: libp2p::identify::Identify, } - #[allow(dead_code)] - #[allow(unreachable_code)] + #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] fn foo() { let _out_event: ::OutEvent = unimplemented!(); match _out_event { @@ -85,8 +83,7 @@ fn three_fields() { kad: libp2p::kad::Kademlia, } - #[allow(dead_code)] - #[allow(unreachable_code)] + #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] fn foo() { let _out_event: ::OutEvent = unimplemented!(); match _out_event { @@ -111,6 +108,7 @@ fn custom_event() { identify: libp2p::identify::Identify, } + #[allow(clippy::large_enum_variant)] enum MyEvent { Ping(libp2p::ping::PingEvent), Identify(libp2p::identify::IdentifyEvent), @@ -144,6 +142,7 @@ fn custom_event_mismatching_field_names() { b: libp2p::identify::Identify, } + #[allow(clippy::large_enum_variant)] enum MyEvent { Ping(libp2p::ping::PingEvent), Identify(libp2p::identify::IdentifyEvent), @@ -208,8 +207,7 @@ fn nested_derives_with_import() { foo: Foo, } - #[allow(dead_code)] - #[allow(unreachable_code)] + #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] fn foo() { let _out_event: ::OutEvent = unimplemented!(); match _out_event { @@ -220,6 +218,7 @@ fn nested_derives_with_import() { #[test] fn custom_event_emit_event_through_poll() { + #[allow(clippy::large_enum_variant)] enum BehaviourOutEvent { Ping(libp2p::ping::PingEvent), Identify(libp2p::identify::IdentifyEvent), 
@@ -237,7 +236,7 @@ fn custom_event_emit_event_through_poll() { } } - #[allow(dead_code)] + #[allow(dead_code, clippy::large_enum_variant)] #[derive(NetworkBehaviour)] #[behaviour(out_event = "BehaviourOutEvent")] struct Foo { @@ -245,7 +244,7 @@ fn custom_event_emit_event_through_poll() { identify: libp2p::identify::Identify, } - #[allow(dead_code, unreachable_code)] + #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] fn bar() { require_net_behaviour::(); diff --git a/swarm/src/handler/one_shot.rs b/swarm/src/handler/one_shot.rs index 5db6b4d10a6..c599ff801e1 100644 --- a/swarm/src/handler/one_shot.rs +++ b/swarm/src/handler/one_shot.rs @@ -252,7 +252,7 @@ mod tests { ); block_on(poll_fn(|cx| loop { - if let Poll::Pending = handler.poll(cx) { + if handler.poll(cx).is_pending() { return Poll::Ready(()); } })); diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 69020aff2a9..d6a07af10ac 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -2139,7 +2139,6 @@ mod tests { .addresses(vec![addr.clone()]) .build(), ) - .ok() .expect("Unexpected connection limit."); } diff --git a/swarm/src/registry.rs b/swarm/src/registry.rs index 05255876f32..04204930580 100644 --- a/swarm/src/registry.rs +++ b/swarm/src/registry.rs @@ -388,7 +388,7 @@ mod tests { // Add the first address. addresses.add(first.addr.clone(), first.score); - assert!(addresses.iter().any(|a| &a.addr == &first.addr)); + assert!(addresses.iter().any(|a| a.addr == first.addr)); // Add another address so often that the initial report of // the first address may be purged and, since it was the @@ -397,7 +397,7 @@ mod tests { addresses.add(other.addr.clone(), other.score); } - let exists = addresses.iter().any(|a| &a.addr == &first.addr); + let exists = addresses.iter().any(|a| a.addr == first.addr); match (first.score, other.score) { // Only finite scores push out other finite scores. @@ -428,14 +428,14 @@ mod tests { // Add the first address. addresses.add(first.addr.clone(), first.score); - assert!(addresses.iter().any(|a| &a.addr == &first.addr)); + assert!(addresses.iter().any(|a| a.addr == first.addr)); // Add another address so the first will address be purged, // because its score is finite(0) addresses.add(other.addr.clone(), other.score); - assert!(addresses.iter().any(|a| &a.addr == &other.addr)); - assert!(!addresses.iter().any(|a| &a.addr == &first.addr)); + assert!(addresses.iter().any(|a| a.addr == other.addr)); + assert!(!addresses.iter().any(|a| a.addr == first.addr)); } #[test] @@ -451,12 +451,14 @@ mod tests { // Count the finitely scored addresses. let num_finite = addresses .iter() - .filter(|r| match r { - AddressRecord { - score: AddressScore::Finite(_), - .. - } => true, - _ => false, + .filter(|r| { + matches!( + r, + AddressRecord { + score: AddressScore::Finite(_), + .. + } + ) }) .count(); @@ -482,7 +484,7 @@ mod tests { // Check that each address in the registry has the expected score. for r in &addresses.registry { let expected_score = records.iter().fold(None::, |sum, rec| { - if &rec.addr == &r.addr { + if rec.addr == r.addr { sum.map_or(Some(rec.score), |s| Some(s + rec.score)) } else { sum diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index 0ee89f78373..7f76a378990 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -607,13 +607,10 @@ mod tests { fn dial(&mut self, addr: Multiaddr) -> Result> { // Check that all DNS components have been resolved, i.e. replaced. 
- assert!(!addr.iter().any(|p| match p { - Protocol::Dns(_) - | Protocol::Dns4(_) - | Protocol::Dns6(_) - | Protocol::Dnsaddr(_) => true, - _ => false, - })); + assert!(!addr.iter().any(|p| matches!( + p, + Protocol::Dns(_) | Protocol::Dns4(_) | Protocol::Dns6(_) | Protocol::Dnsaddr(_) + ))); Ok(Box::pin(future::ready(Ok(())))) } diff --git a/transports/plaintext/tests/smoke.rs b/transports/plaintext/tests/smoke.rs index ea62f0a9dfa..d1316309c64 100644 --- a/transports/plaintext/tests/smoke.rs +++ b/transports/plaintext/tests/smoke.rs @@ -32,7 +32,7 @@ fn variable_msg_length() { let _ = env_logger::try_init(); fn prop(msg: Vec) { - let mut msg_to_send = msg.clone(); + let msg_to_send = msg.clone(); let msg_to_receive = msg; let server_id = identity::Keypair::generate_ed25519(); @@ -91,7 +91,7 @@ fn variable_msg_length() { debug!("Client: writing message."); client_channel - .write_all(&mut msg_to_send) + .write_all(&msg_to_send) .await .expect("no error"); debug!("Client: flushing channel."); diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index 221f8eb2dff..71e6c094013 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -319,7 +319,7 @@ mod tests { let mut uds = UdsConfig::new(); let addr = rx.await.unwrap(); let mut socket = uds.dial(addr).unwrap().await.unwrap(); - socket.write(&[1, 2, 3]).await.unwrap(); + let _ = socket.write(&[1, 2, 3]).await.unwrap(); }); } From 9443c7a4f7481b3185deb490f14c11be2951fe7a Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Wed, 28 Sep 2022 16:35:46 +1000 Subject: [PATCH 3/7] Remove one level of indentation --- protocols/gossipsub/src/behaviour/tests.rs | 9562 ++++++++++---------- 1 file changed, 4781 insertions(+), 4781 deletions(-) diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 7f8e8878b34..d58d5373b77 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -21,5253 +21,5253 @@ // Collection of tests for the gossipsub network behaviour mod tests { - use super::super::*; - use crate::error::ValidationError; - use crate::subscription_filter::WhitelistSubscriptionFilter; - use crate::transform::{DataTransform, IdentityTransform}; - use crate::types::FastMessageId; - use crate::{ - GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, IdentTopic as Topic, - TopicScoreParams, - }; - use async_std::net::Ipv4Addr; - use byteorder::{BigEndian, ByteOrder}; - use libp2p_core::Endpoint; - use rand::Rng; - use std::collections::hash_map::DefaultHasher; - use std::hash::{Hash, Hasher}; - use std::thread::sleep; - use std::time::Duration; - - #[derive(Default, Builder, Debug)] - #[builder(default)] - struct InjectNodes - // TODO: remove trait bound Default when this issue is fixed: - // https://github.com/colin-kiegel/rust-derive-builder/issues/93 - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - peer_no: usize, - topics: Vec, - to_subscribe: bool, - gs_config: GossipsubConfig, - explicit: usize, - outbound: usize, - scoring: Option<(PeerScoreParams, PeerScoreThresholds)>, - data_transform: D, - subscription_filter: F, - } - - impl InjectNodes - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - pub fn create_network(self) -> (Gossipsub, Vec, Vec) { - let keypair = libp2p_core::identity::Keypair::generate_ed25519(); - // create a gossipsub struct 
- let mut gs: Gossipsub = Gossipsub::new_with_subscription_filter_and_transform( - MessageAuthenticity::Signed(keypair), - self.gs_config, - None, - self.subscription_filter, - self.data_transform, - ) - .unwrap(); +use super::super::*; +use crate::error::ValidationError; +use crate::subscription_filter::WhitelistSubscriptionFilter; +use crate::transform::{DataTransform, IdentityTransform}; +use crate::types::FastMessageId; +use crate::{ + GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, IdentTopic as Topic, + TopicScoreParams, +}; +use async_std::net::Ipv4Addr; +use byteorder::{BigEndian, ByteOrder}; +use libp2p_core::Endpoint; +use rand::Rng; +use std::collections::hash_map::DefaultHasher; +use std::hash::{Hash, Hasher}; +use std::thread::sleep; +use std::time::Duration; + +#[derive(Default, Builder, Debug)] +#[builder(default)] +struct InjectNodes +// TODO: remove trait bound Default when this issue is fixed: +// https://github.com/colin-kiegel/rust-derive-builder/issues/93 +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + peer_no: usize, + topics: Vec, + to_subscribe: bool, + gs_config: GossipsubConfig, + explicit: usize, + outbound: usize, + scoring: Option<(PeerScoreParams, PeerScoreThresholds)>, + data_transform: D, + subscription_filter: F, +} - if let Some((scoring_params, scoring_thresholds)) = self.scoring { - gs.with_peer_score(scoring_params, scoring_thresholds) - .unwrap(); - } +impl InjectNodes +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + pub fn create_network(self) -> (Gossipsub, Vec, Vec) { + let keypair = libp2p_core::identity::Keypair::generate_ed25519(); + // create a gossipsub struct + let mut gs: Gossipsub = Gossipsub::new_with_subscription_filter_and_transform( + MessageAuthenticity::Signed(keypair), + self.gs_config, + None, + self.subscription_filter, + self.data_transform, + ) + .unwrap(); - let mut topic_hashes = vec![]; + if let Some((scoring_params, scoring_thresholds)) = self.scoring { + gs.with_peer_score(scoring_params, scoring_thresholds) + .unwrap(); + } - // subscribe to the topics - for t in self.topics { - let topic = Topic::new(t); - gs.subscribe(&topic).unwrap(); - topic_hashes.push(topic.hash().clone()); - } + let mut topic_hashes = vec![]; - // build and connect peer_no random peers - let mut peers = vec![]; + // subscribe to the topics + for t in self.topics { + let topic = Topic::new(t); + gs.subscribe(&topic).unwrap(); + topic_hashes.push(topic.hash().clone()); + } - let empty = vec![]; - for i in 0..self.peer_no { - peers.push(add_peer( - &mut gs, - if self.to_subscribe { - &topic_hashes - } else { - &empty - }, - i < self.outbound, - i < self.explicit, - )); - } + // build and connect peer_no random peers + let mut peers = vec![]; - (gs, peers, topic_hashes) + let empty = vec![]; + for i in 0..self.peer_no { + peers.push(add_peer( + &mut gs, + if self.to_subscribe { + &topic_hashes + } else { + &empty + }, + i < self.outbound, + i < self.explicit, + )); } - } - impl InjectNodesBuilder - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - pub fn create_network(&self) -> (Gossipsub, Vec, Vec) { - self.build().unwrap().create_network() - } + (gs, peers, topic_hashes) } +} - fn inject_nodes() -> InjectNodesBuilder - where - D: DataTransform + Default + Clone + Send + 'static, - F: 
TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - InjectNodesBuilder::default() +impl InjectNodesBuilder +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + pub fn create_network(&self) -> (Gossipsub, Vec, Vec) { + self.build().unwrap().create_network() } +} - fn inject_nodes1() -> InjectNodesBuilder { - inject_nodes() - } +fn inject_nodes() -> InjectNodesBuilder +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + InjectNodesBuilder::default() +} - // helper functions for testing - - fn add_peer( - gs: &mut Gossipsub, - topic_hashes: &Vec, - outbound: bool, - explicit: bool, - ) -> PeerId - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - add_peer_with_addr(gs, topic_hashes, outbound, explicit, Multiaddr::empty()) - } +fn inject_nodes1() -> InjectNodesBuilder { + inject_nodes() +} - fn add_peer_with_addr( - gs: &mut Gossipsub, - topic_hashes: &Vec, - outbound: bool, - explicit: bool, - address: Multiaddr, - ) -> PeerId - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - add_peer_with_addr_and_kind( - gs, - topic_hashes, - outbound, - explicit, - address, - Some(PeerKind::Gossipsubv1_1), - ) - } +// helper functions for testing + +fn add_peer( + gs: &mut Gossipsub, + topic_hashes: &Vec, + outbound: bool, + explicit: bool, +) -> PeerId +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + add_peer_with_addr(gs, topic_hashes, outbound, explicit, Multiaddr::empty()) +} - fn add_peer_with_addr_and_kind( - gs: &mut Gossipsub, - topic_hashes: &Vec, - outbound: bool, - explicit: bool, - address: Multiaddr, - kind: Option, - ) -> PeerId - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - let peer = PeerId::random(); - gs.inject_connection_established( +fn add_peer_with_addr( + gs: &mut Gossipsub, + topic_hashes: &Vec, + outbound: bool, + explicit: bool, + address: Multiaddr, +) -> PeerId +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + add_peer_with_addr_and_kind( + gs, + topic_hashes, + outbound, + explicit, + address, + Some(PeerKind::Gossipsubv1_1), + ) +} + +fn add_peer_with_addr_and_kind( + gs: &mut Gossipsub, + topic_hashes: &Vec, + outbound: bool, + explicit: bool, + address: Multiaddr, + kind: Option, +) -> PeerId +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + let peer = PeerId::random(); + gs.inject_connection_established( + &peer, + &ConnectionId::new(0), + &if outbound { + ConnectedPoint::Dialer { + address, + role_override: Endpoint::Dialer, + } + } else { + ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: address, + } + }, + None, + 0, // first connection + ); + if let Some(kind) = kind { + gs.inject_event(peer, ConnectionId::new(1), HandlerEvent::PeerKind(kind)); + } + if explicit { + gs.add_explicit_peer(&peer); + } + if !topic_hashes.is_empty() { + gs.handle_received_subscriptions( + &topic_hashes + .iter() + .cloned() + .map(|t| GossipsubSubscription { + action: 
GossipsubSubscriptionAction::Subscribe, + topic_hash: t, + }) + .collect::>(), &peer, - &ConnectionId::new(0), - &if outbound { - ConnectedPoint::Dialer { - address, - role_override: Endpoint::Dialer, - } - } else { - ConnectedPoint::Listener { - local_addr: Multiaddr::empty(), - send_back_addr: address, - } - }, - None, - 0, // first connection ); - if let Some(kind) = kind { - gs.inject_event(peer, ConnectionId::new(1), HandlerEvent::PeerKind(kind)); - } - if explicit { - gs.add_explicit_peer(&peer); - } - if !topic_hashes.is_empty() { - gs.handle_received_subscriptions( - &topic_hashes - .iter() - .cloned() - .map(|t| GossipsubSubscription { - action: GossipsubSubscriptionAction::Subscribe, - topic_hash: t, - }) - .collect::>(), - &peer, + } + peer +} + +fn disconnect_peer(gs: &mut Gossipsub, peer_id: &PeerId) +where + D: DataTransform + Default + Clone + Send + 'static, + F: TopicSubscriptionFilter + Clone + Default + Send + 'static, +{ + if let Some(peer_connections) = gs.connected_peers.get(peer_id) { + let fake_endpoint = ConnectedPoint::Dialer { + address: Multiaddr::empty(), + role_override: Endpoint::Dialer, + }; // this is not relevant + // peer_connections.connections should never be empty. + let mut active_connections = peer_connections.connections.len(); + for conn_id in peer_connections.connections.clone() { + let handler = gs.new_handler(); + active_connections = active_connections.checked_sub(1).unwrap(); + gs.inject_connection_closed( + peer_id, + &conn_id, + &fake_endpoint, + handler, + active_connections, ); } - peer } +} - fn disconnect_peer(gs: &mut Gossipsub, peer_id: &PeerId) - where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, - { - if let Some(peer_connections) = gs.connected_peers.get(peer_id) { - let fake_endpoint = ConnectedPoint::Dialer { - address: Multiaddr::empty(), - role_override: Endpoint::Dialer, - }; // this is not relevant - // peer_connections.connections should never be empty. - let mut active_connections = peer_connections.connections.len(); - for conn_id in peer_connections.connections.clone() { - let handler = gs.new_handler(); - active_connections = active_connections.checked_sub(1).unwrap(); - gs.inject_connection_closed( - peer_id, - &conn_id, - &fake_endpoint, - handler, - active_connections, - ); - } - } +// Converts a protobuf message into a gossipsub message for reading the Gossipsub event queue. +fn proto_to_message(rpc: &crate::rpc_proto::Rpc) -> GossipsubRpc { + // Store valid messages. 
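// Note on the conversion below: it mirrors how these tests read the wire format. Optional
// protobuf fields fall back to defaults, `seqno` is decoded as an 8-byte big-endian integer
// via `BigEndian::read_u64`, and the IHAVE/IWANT/GRAFT/PRUNE control messages are flattened
// into a single `control_msgs` list so the assertions further down can filter them with one
// closure.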
+ let mut messages = Vec::with_capacity(rpc.publish.len()); + let rpc = rpc.clone(); + for message in rpc.publish.into_iter() { + messages.push(RawGossipsubMessage { + source: message.from.map(|x| PeerId::from_bytes(&x).unwrap()), + data: message.data.unwrap_or_default(), + sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), // don't inform the application + topic: TopicHash::from_raw(message.topic), + signature: message.signature, // don't inform the application + key: None, + validated: false, + }); } + let mut control_msgs = Vec::new(); + if let Some(rpc_control) = rpc.control { + // Collect the gossipsub control messages + let ihave_msgs: Vec = rpc_control + .ihave + .into_iter() + .map(|ihave| GossipsubControlAction::IHave { + topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()), + message_ids: ihave + .message_ids + .into_iter() + .map(MessageId::from) + .collect::>(), + }) + .collect(); - // Converts a protobuf message into a gossipsub message for reading the Gossipsub event queue. - fn proto_to_message(rpc: &crate::rpc_proto::Rpc) -> GossipsubRpc { - // Store valid messages. - let mut messages = Vec::with_capacity(rpc.publish.len()); - let rpc = rpc.clone(); - for message in rpc.publish.into_iter() { - messages.push(RawGossipsubMessage { - source: message.from.map(|x| PeerId::from_bytes(&x).unwrap()), - data: message.data.unwrap_or_default(), - sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), // don't inform the application - topic: TopicHash::from_raw(message.topic), - signature: message.signature, // don't inform the application - key: None, - validated: false, - }); - } - let mut control_msgs = Vec::new(); - if let Some(rpc_control) = rpc.control { - // Collect the gossipsub control messages - let ihave_msgs: Vec = rpc_control - .ihave - .into_iter() - .map(|ihave| GossipsubControlAction::IHave { - topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()), - message_ids: ihave - .message_ids - .into_iter() - .map(MessageId::from) - .collect::>(), - }) - .collect(); + let iwant_msgs: Vec = rpc_control + .iwant + .into_iter() + .map(|iwant| GossipsubControlAction::IWant { + message_ids: iwant + .message_ids + .into_iter() + .map(MessageId::from) + .collect::>(), + }) + .collect(); - let iwant_msgs: Vec = rpc_control - .iwant - .into_iter() - .map(|iwant| GossipsubControlAction::IWant { - message_ids: iwant - .message_ids - .into_iter() - .map(MessageId::from) - .collect::>(), - }) - .collect(); + let graft_msgs: Vec = rpc_control + .graft + .into_iter() + .map(|graft| GossipsubControlAction::Graft { + topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()), + }) + .collect(); - let graft_msgs: Vec = rpc_control - .graft + let mut prune_msgs = Vec::new(); + + for prune in rpc_control.prune { + // filter out invalid peers + let peers = prune + .peers .into_iter() - .map(|graft| GossipsubControlAction::Graft { - topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()), + .filter_map(|info| { + info.peer_id + .and_then(|id| PeerId::from_bytes(&id).ok()) + .map(|peer_id| + //TODO signedPeerRecord, see https://github.com/libp2p/specs/pull/217 + PeerInfo { + peer_id: Some(peer_id), + }) }) - .collect(); - - let mut prune_msgs = Vec::new(); - - for prune in rpc_control.prune { - // filter out invalid peers - let peers = prune - .peers - .into_iter() - .filter_map(|info| { - info.peer_id - .and_then(|id| PeerId::from_bytes(&id).ok()) - .map(|peer_id| - //TODO signedPeerRecord, see 
https://github.com/libp2p/specs/pull/217 - PeerInfo { - peer_id: Some(peer_id), - }) - }) - .collect::>(); - - let topic_hash = TopicHash::from_raw(prune.topic_id.unwrap_or_default()); - prune_msgs.push(GossipsubControlAction::Prune { - topic_hash, - peers, - backoff: prune.backoff, - }); - } + .collect::>(); - control_msgs.extend(ihave_msgs); - control_msgs.extend(iwant_msgs); - control_msgs.extend(graft_msgs); - control_msgs.extend(prune_msgs); + let topic_hash = TopicHash::from_raw(prune.topic_id.unwrap_or_default()); + prune_msgs.push(GossipsubControlAction::Prune { + topic_hash, + peers, + backoff: prune.backoff, + }); } - GossipsubRpc { - messages, - subscriptions: rpc - .subscriptions - .into_iter() - .map(|sub| GossipsubSubscription { - action: if Some(true) == sub.subscribe { - GossipsubSubscriptionAction::Subscribe - } else { - GossipsubSubscriptionAction::Unsubscribe - }, - topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()), - }) - .collect(), - control_msgs, - } + control_msgs.extend(ihave_msgs); + control_msgs.extend(iwant_msgs); + control_msgs.extend(graft_msgs); + control_msgs.extend(prune_msgs); } - #[test] - /// Test local node subscribing to a topic - fn test_subscribe() { - // The node should: - // - Create an empty vector in mesh[topic] - // - Send subscription request to all peers - // - run JOIN(topic) - - let subscribe_topic = vec![String::from("test_subscribe")]; - let (gs, _, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(subscribe_topic) - .to_subscribe(true) - .create_network(); - - assert!( - gs.mesh.get(&topic_hashes[0]).is_some(), - "Subscribe should add a new entry to the mesh[topic] hashmap" - ); - - // collect all the subscriptions - let subscriptions = - gs.events - .iter() - .fold(vec![], |mut collected_subscriptions, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { - GossipsubHandlerIn::Message(ref message) => { - for s in &message.subscriptions { - if let Some(true) = s.subscribe { - collected_subscriptions.push(s.clone()) - }; - } - collected_subscriptions - } - _ => collected_subscriptions, - }, - _ => collected_subscriptions, - }); - - // we sent a subscribe to all known peers - assert!( - subscriptions.len() == 20, - "Should send a subscription to all known peers" - ); + GossipsubRpc { + messages, + subscriptions: rpc + .subscriptions + .into_iter() + .map(|sub| GossipsubSubscription { + action: if Some(true) == sub.subscribe { + GossipsubSubscriptionAction::Subscribe + } else { + GossipsubSubscriptionAction::Unsubscribe + }, + topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()), + }) + .collect(), + control_msgs, } +} - #[test] - /// Test unsubscribe. 
- fn test_unsubscribe() { - // Unsubscribe should: - // - Remove the mesh entry for topic - // - Send UNSUBSCRIBE to all known peers - // - Call Leave - - let topic_strings = vec![String::from("topic1"), String::from("topic2")]; - let topics = topic_strings +#[test] +/// Test local node subscribing to a topic +fn test_subscribe() { + // The node should: + // - Create an empty vector in mesh[topic] + // - Send subscription request to all peers + // - run JOIN(topic) + + let subscribe_topic = vec![String::from("test_subscribe")]; + let (gs, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(subscribe_topic) + .to_subscribe(true) + .create_network(); + + assert!( + gs.mesh.get(&topic_hashes[0]).is_some(), + "Subscribe should add a new entry to the mesh[topic] hashmap" + ); + + // collect all the subscriptions + let subscriptions = + gs.events .iter() - .map(|t| Topic::new(t.clone())) - .collect::>(); - - // subscribe to topic_strings - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(topic_strings) - .to_subscribe(true) - .create_network(); + .fold(vec![], |mut collected_subscriptions, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { + GossipsubHandlerIn::Message(ref message) => { + for s in &message.subscriptions { + if let Some(true) = s.subscribe { + collected_subscriptions.push(s.clone()) + }; + } + collected_subscriptions + } + _ => collected_subscriptions, + }, + _ => collected_subscriptions, + }); - for topic_hash in &topic_hashes { - assert!( - gs.topic_peers.get(topic_hash).is_some(), - "Topic_peers contain a topic entry" - ); - assert!( - gs.mesh.get(topic_hash).is_some(), - "mesh should contain a topic entry" - ); - } + // we sent a subscribe to all known peers + assert!( + subscriptions.len() == 20, + "Should send a subscription to all known peers" + ); +} - // unsubscribe from both topics - assert!( - gs.unsubscribe(&topics[0]).unwrap(), - "should be able to unsubscribe successfully from each topic", - ); +#[test] +/// Test unsubscribe. +fn test_unsubscribe() { + // Unsubscribe should: + // - Remove the mesh entry for topic + // - Send UNSUBSCRIBE to all known peers + // - Call Leave + + let topic_strings = vec![String::from("topic1"), String::from("topic2")]; + let topics = topic_strings + .iter() + .map(|t| Topic::new(t.clone())) + .collect::>(); + + // subscribe to topic_strings + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(topic_strings) + .to_subscribe(true) + .create_network(); + + for topic_hash in &topic_hashes { assert!( - gs.unsubscribe(&topics[1]).unwrap(), - "should be able to unsubscribe successfully from each topic", + gs.topic_peers.get(topic_hash).is_some(), + "Topic_peers contain a topic entry" ); - - // collect all the subscriptions - let subscriptions = - gs.events - .iter() - .fold(vec![], |mut collected_subscriptions, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. 
} => match **event { - GossipsubHandlerIn::Message(ref message) => { - for s in &message.subscriptions { - if let Some(true) = s.subscribe { - collected_subscriptions.push(s.clone()) - }; - } - collected_subscriptions - } - _ => collected_subscriptions, - }, - _ => collected_subscriptions, - }); - - // we sent a unsubscribe to all known peers, for two topics assert!( - subscriptions.len() == 40, - "Should send an unsubscribe event to all known peers" + gs.mesh.get(topic_hash).is_some(), + "mesh should contain a topic entry" ); - - // check we clean up internal structures - for topic_hash in &topic_hashes { - assert!( - gs.mesh.get(topic_hash).is_none(), - "All topics should have been removed from the mesh" - ); - } } - #[test] - /// Test JOIN(topic) functionality. - fn test_join() { - // The Join function should: - // - Remove peers from fanout[topic] - // - Add any fanout[topic] peers to the mesh (up to mesh_n) - // - Fill up to mesh_n peers from known gossipsub peers in the topic - // - Send GRAFT messages to all nodes added to the mesh - - // This test is not an isolated unit test, rather it uses higher level, - // subscribe/unsubscribe to perform the test. - - let topic_strings = vec![String::from("topic1"), String::from("topic2")]; - let topics = topic_strings + // unsubscribe from both topics + assert!( + gs.unsubscribe(&topics[0]).unwrap(), + "should be able to unsubscribe successfully from each topic", + ); + assert!( + gs.unsubscribe(&topics[1]).unwrap(), + "should be able to unsubscribe successfully from each topic", + ); + + // collect all the subscriptions + let subscriptions = + gs.events .iter() - .map(|t| Topic::new(t.clone())) - .collect::>(); - - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(topic_strings) - .to_subscribe(true) - .create_network(); - - // unsubscribe, then call join to invoke functionality - assert!( - gs.unsubscribe(&topics[0]).unwrap(), - "should be able to unsubscribe successfully" - ); - assert!( - gs.unsubscribe(&topics[1]).unwrap(), - "should be able to unsubscribe successfully" - ); + .fold(vec![], |mut collected_subscriptions, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { + GossipsubHandlerIn::Message(ref message) => { + for s in &message.subscriptions { + if let Some(true) = s.subscribe { + collected_subscriptions.push(s.clone()) + }; + } + collected_subscriptions + } + _ => collected_subscriptions, + }, + _ => collected_subscriptions, + }); - // re-subscribe - there should be peers associated with the topic - assert!( - gs.subscribe(&topics[0]).unwrap(), - "should be able to subscribe successfully" - ); + // we sent a unsubscribe to all known peers, for two topics + assert!( + subscriptions.len() == 40, + "Should send an unsubscribe event to all known peers" + ); - // should have added mesh_n nodes to the mesh + // check we clean up internal structures + for topic_hash in &topic_hashes { assert!( - gs.mesh.get(&topic_hashes[0]).unwrap().len() == 6, - "Should have added 6 nodes to the mesh" + gs.mesh.get(topic_hash).is_none(), + "All topics should have been removed from the mesh" ); + } +} - fn collect_grafts( - mut collected_grafts: Vec, - (_, controls): (&PeerId, &Vec), - ) -> Vec { - for c in controls.iter() { - if let GossipsubControlAction::Graft { topic_hash: _ } = c { - collected_grafts.push(c.clone()) - } +#[test] +/// Test JOIN(topic) functionality. 
+fn test_join() { + // The Join function should: + // - Remove peers from fanout[topic] + // - Add any fanout[topic] peers to the mesh (up to mesh_n) + // - Fill up to mesh_n peers from known gossipsub peers in the topic + // - Send GRAFT messages to all nodes added to the mesh + + // This test is not an isolated unit test, rather it uses higher level, + // subscribe/unsubscribe to perform the test. + + let topic_strings = vec![String::from("topic1"), String::from("topic2")]; + let topics = topic_strings + .iter() + .map(|t| Topic::new(t.clone())) + .collect::>(); + + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(topic_strings) + .to_subscribe(true) + .create_network(); + + // unsubscribe, then call join to invoke functionality + assert!( + gs.unsubscribe(&topics[0]).unwrap(), + "should be able to unsubscribe successfully" + ); + assert!( + gs.unsubscribe(&topics[1]).unwrap(), + "should be able to unsubscribe successfully" + ); + + // re-subscribe - there should be peers associated with the topic + assert!( + gs.subscribe(&topics[0]).unwrap(), + "should be able to subscribe successfully" + ); + + // should have added mesh_n nodes to the mesh + assert!( + gs.mesh.get(&topic_hashes[0]).unwrap().len() == 6, + "Should have added 6 nodes to the mesh" + ); + + fn collect_grafts( + mut collected_grafts: Vec, + (_, controls): (&PeerId, &Vec), + ) -> Vec { + for c in controls.iter() { + if let GossipsubControlAction::Graft { topic_hash: _ } = c { + collected_grafts.push(c.clone()) } - collected_grafts } + collected_grafts + } - // there should be mesh_n GRAFT messages. - let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts); - - assert_eq!( - graft_messages.len(), - 6, - "There should be 6 grafts messages sent to peers" + // there should be mesh_n GRAFT messages. + let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts); + + assert_eq!( + graft_messages.len(), + 6, + "There should be 6 grafts messages sent to peers" + ); + + // verify fanout nodes + // add 3 random peers to the fanout[topic1] + gs.fanout + .insert(topic_hashes[1].clone(), Default::default()); + let new_peers: Vec = vec![]; + for _ in 0..3 { + let random_peer = PeerId::random(); + // inform the behaviour of a new peer + gs.inject_connection_established( + &random_peer, + &ConnectionId::new(1), + &ConnectedPoint::Dialer { + address: "/ip4/127.0.0.1".parse::().unwrap(), + role_override: Endpoint::Dialer, + }, + None, + 0, ); - // verify fanout nodes - // add 3 random peers to the fanout[topic1] - gs.fanout - .insert(topic_hashes[1].clone(), Default::default()); - let new_peers: Vec = vec![]; - for _ in 0..3 { - let random_peer = PeerId::random(); - // inform the behaviour of a new peer - gs.inject_connection_established( - &random_peer, - &ConnectionId::new(1), - &ConnectedPoint::Dialer { - address: "/ip4/127.0.0.1".parse::().unwrap(), - role_override: Endpoint::Dialer, - }, - None, - 0, - ); - - // add the new peer to the fanout - let fanout_peers = gs.fanout.get_mut(&topic_hashes[1]).unwrap(); - fanout_peers.insert(random_peer); - } - - // subscribe to topic1 - gs.subscribe(&topics[1]).unwrap(); - - // the three new peers should have been added, along with 3 more from the pool. 
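// The count asserted below follows from JOIN's two sources: the 3 fanout peers inserted
// above plus 3 more drawn from the remaining known peers for the topic, which fills the
// mesh up to mesh_n (6 under the defaults these tests assume). The later check then expects
// 12 grafts in total: 6 from the earlier re-subscribe to the first topic and 6 from here.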
- assert!( - gs.mesh.get(&topic_hashes[1]).unwrap().len() == 6, - "Should have added 6 nodes to the mesh" - ); - let mesh_peers = gs.mesh.get(&topic_hashes[1]).unwrap(); - for new_peer in new_peers { - assert!( - mesh_peers.contains(&new_peer), - "Fanout peer should be included in the mesh" - ); - } + // add the new peer to the fanout + let fanout_peers = gs.fanout.get_mut(&topic_hashes[1]).unwrap(); + fanout_peers.insert(random_peer); + } - // there should now be 12 graft messages to be sent - let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts); + // subscribe to topic1 + gs.subscribe(&topics[1]).unwrap(); + // the three new peers should have been added, along with 3 more from the pool. + assert!( + gs.mesh.get(&topic_hashes[1]).unwrap().len() == 6, + "Should have added 6 nodes to the mesh" + ); + let mesh_peers = gs.mesh.get(&topic_hashes[1]).unwrap(); + for new_peer in new_peers { assert!( - graft_messages.len() == 12, - "There should be 12 grafts messages sent to peers" + mesh_peers.contains(&new_peer), + "Fanout peer should be included in the mesh" ); } - /// Test local node publish to subscribed topic - #[test] - fn test_publish_without_flood_publishing() { - // node should: - // - Send publish message to all peers - // - Insert message into gs.mcache and gs.received - - //turn off flood publish to test old behaviour - let config = GossipsubConfigBuilder::default() - .flood_publish(false) - .build() - .unwrap(); - - let publish_topic = String::from("test_publish"); - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![publish_topic.clone()]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - assert!( - gs.mesh.get(&topic_hashes[0]).is_some(), - "Subscribe should add a new entry to the mesh[topic] hashmap" - ); + // there should now be 12 graft messages to be sent + let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts); - // all peers should be subscribed to the topic - assert_eq!( - gs.topic_peers.get(&topic_hashes[0]).map(|p| p.len()), - Some(20), - "Peers should be subscribed to the topic" - ); + assert!( + graft_messages.len() == 12, + "There should be 12 grafts messages sent to peers" + ); +} - // publish on topic - let publish_data = vec![0; 42]; - gs.publish(Topic::new(publish_topic), publish_data).unwrap(); +/// Test local node publish to subscribed topic +#[test] +fn test_publish_without_flood_publishing() { + // node should: + // - Send publish message to all peers + // - Insert message into gs.mcache and gs.received + + //turn off flood publish to test old behaviour + let config = GossipsubConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); - // Collect all publish messages - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. 
} => match **event { - GossipsubHandlerIn::Message(ref message) => { - let event = proto_to_message(message); - for s in &event.messages { - collected_publish.push(s.clone()); - } - collected_publish + let publish_topic = String::from("test_publish"); + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![publish_topic.clone()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + assert!( + gs.mesh.get(&topic_hashes[0]).is_some(), + "Subscribe should add a new entry to the mesh[topic] hashmap" + ); + + // all peers should be subscribed to the topic + assert_eq!( + gs.topic_peers.get(&topic_hashes[0]).map(|p| p.len()), + Some(20), + "Peers should be subscribed to the topic" + ); + + // publish on topic + let publish_data = vec![0; 42]; + gs.publish(Topic::new(publish_topic), publish_data).unwrap(); + + // Collect all publish messages + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { + GossipsubHandlerIn::Message(ref message) => { + let event = proto_to_message(message); + for s in &event.messages { + collected_publish.push(s.clone()); } - _ => collected_publish, - }, + collected_publish + } _ => collected_publish, - }); - - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform( - publishes - .first() - .expect("Should contain > 0 entries") - .clone(), - ) - .unwrap(); - - let msg_id = gs.config.message_id(message); - - let config: GossipsubConfig = GossipsubConfig::default(); - assert_eq!( - publishes.len(), - config.mesh_n_low(), - "Should send a publish message to all known peers" - ); + }, + _ => collected_publish, + }); - assert!( - gs.mcache.get(&msg_id).is_some(), - "Message cache should contain published message" - ); - } + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform( + publishes + .first() + .expect("Should contain > 0 entries") + .clone(), + ) + .unwrap(); - /// Test local node publish to unsubscribed topic - #[test] - fn test_fanout() { - // node should: - // - Populate fanout peers - // - Send publish message to fanout peers - // - Insert message into gs.mcache and gs.received - - //turn off flood publish to test fanout behaviour - let config = GossipsubConfigBuilder::default() - .flood_publish(false) - .build() - .unwrap(); + let msg_id = gs.config.message_id(message); - let fanout_topic = String::from("test_fanout"); - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![fanout_topic.clone()]) - .to_subscribe(true) - .gs_config(config) - .create_network(); + let config: GossipsubConfig = GossipsubConfig::default(); + assert_eq!( + publishes.len(), + config.mesh_n_low(), + "Should send a publish message to all known peers" + ); - assert!( - gs.mesh.get(&topic_hashes[0]).is_some(), - "Subscribe should add a new entry to the mesh[topic] hashmap" - ); - // Unsubscribe from topic - assert!( - gs.unsubscribe(&Topic::new(fanout_topic.clone())).unwrap(), - "should be able to unsubscribe successfully from topic" - ); + assert!( + gs.mcache.get(&msg_id).is_some(), + "Message cache should contain published message" + ); +} - // Publish on unsubscribed topic - let publish_data = vec![0; 42]; - gs.publish(Topic::new(fanout_topic.clone()), publish_data) - .unwrap(); +/// Test local node publish to unsubscribed topic +#[test] +fn test_fanout() { + // node should: + // - Populate fanout peers + // - Send publish message to fanout 
peers + // - Insert message into gs.mcache and gs.received + + //turn off flood publish to test fanout behaviour + let config = GossipsubConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); - assert_eq!( - gs.fanout - .get(&TopicHash::from_raw(fanout_topic)) - .unwrap() - .len(), - gs.config.mesh_n(), - "Fanout should contain `mesh_n` peers for fanout topic" - ); + let fanout_topic = String::from("test_fanout"); + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![fanout_topic.clone()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + assert!( + gs.mesh.get(&topic_hashes[0]).is_some(), + "Subscribe should add a new entry to the mesh[topic] hashmap" + ); + // Unsubscribe from topic + assert!( + gs.unsubscribe(&Topic::new(fanout_topic.clone())).unwrap(), + "should be able to unsubscribe successfully from topic" + ); + + // Publish on unsubscribed topic + let publish_data = vec![0; 42]; + gs.publish(Topic::new(fanout_topic.clone()), publish_data) + .unwrap(); - // Collect all publish messages - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { - GossipsubHandlerIn::Message(ref message) => { - let event = proto_to_message(message); - for s in &event.messages { - collected_publish.push(s.clone()); - } - collected_publish + assert_eq!( + gs.fanout + .get(&TopicHash::from_raw(fanout_topic)) + .unwrap() + .len(), + gs.config.mesh_n(), + "Fanout should contain `mesh_n` peers for fanout topic" + ); + + // Collect all publish messages + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { + GossipsubHandlerIn::Message(ref message) => { + let event = proto_to_message(message); + for s in &event.messages { + collected_publish.push(s.clone()); } - _ => collected_publish, - }, + collected_publish + } _ => collected_publish, - }); + }, + _ => collected_publish, + }); - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform( - publishes - .first() - .expect("Should contain > 0 entries") - .clone(), - ) - .unwrap(); + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform( + publishes + .first() + .expect("Should contain > 0 entries") + .clone(), + ) + .unwrap(); - let msg_id = gs.config.message_id(message); + let msg_id = gs.config.message_id(message); - assert_eq!( - publishes.len(), - gs.config.mesh_n(), - "Should send a publish message to `mesh_n` fanout peers" - ); + assert_eq!( + publishes.len(), + gs.config.mesh_n(), + "Should send a publish message to `mesh_n` fanout peers" + ); - assert!( - gs.mcache.get(&msg_id).is_some(), - "Message cache should contain published message" - ); - } + assert!( + gs.mcache.get(&msg_id).is_some(), + "Message cache should contain published message" + ); +} - #[test] - /// Test the gossipsub NetworkBehaviour peer connection logic. - fn test_inject_connected() { - let (gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![String::from("topic1"), String::from("topic2")]) - .to_subscribe(true) - .create_network(); - - // check that our subscriptions are sent to each of the peers - // collect all the SendEvents - let send_events: Vec<_> = gs - .events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. 
} => { - if let GossipsubHandlerIn::Message(ref m) = **event { - !m.subscriptions.is_empty() - } else { - false - } - } - _ => false, - }) - .collect(); - - // check that there are two subscriptions sent to each peer - for sevent in send_events.clone() { - if let NetworkBehaviourAction::NotifyHandler { event, .. } = sevent { +#[test] +/// Test the gossipsub NetworkBehaviour peer connection logic. +fn test_inject_connected() { + let (gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1"), String::from("topic2")]) + .to_subscribe(true) + .create_network(); + + // check that our subscriptions are sent to each of the peers + // collect all the SendEvents + let send_events: Vec<_> = gs + .events + .iter() + .filter(|e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => { if let GossipsubHandlerIn::Message(ref m) = **event { - assert!( - m.subscriptions.len() == 2, - "There should be two subscriptions sent to each peer (1 for each topic)." - ); - } - }; - } - - // check that there are 20 send events created - assert!( - send_events.len() == 20, - "There should be a subscription event sent to each peer." - ); - - // should add the new peers to `peer_topics` with an empty vec as a gossipsub node - for peer in peers { - let known_topics = gs.peer_topics.get(&peer).unwrap(); - assert!( - known_topics == &topic_hashes.iter().cloned().collect(), - "The topics for each node should all topics" - ); - } - } - - #[test] - /// Test subscription handling - fn test_handle_received_subscriptions() { - // For every subscription: - // SUBSCRIBE: - Add subscribed topic to peer_topics for peer. - // - Add peer to topics_peer. - // UNSUBSCRIBE - Remove topic from peer_topics for peer. - // - Remove peer from topic_peers. 
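// A minimal sketch of the bookkeeping the comment block above describes, using plain string
// keys instead of `PeerId`/`TopicHash` so it stands on its own. The behaviour under test keeps
// the same two maps (`peer_topics` and `topic_peers`) in sync with richer types; this is an
// illustration only, not part of the test suite.
fn sketch_subscription_bookkeeping() {
    use std::collections::{HashMap, HashSet};

    let mut peer_topics: HashMap<&str, HashSet<&str>> = HashMap::new();
    let mut topic_peers: HashMap<&str, HashSet<&str>> = HashMap::new();

    // SUBSCRIBE: record the topic for the peer and the peer for the topic.
    let (peer, topic) = ("peer-a", "topic1");
    peer_topics.entry(peer).or_default().insert(topic);
    topic_peers.entry(topic).or_default().insert(peer);
    assert!(peer_topics[peer].contains(topic));
    assert!(topic_peers[topic].contains(peer));

    // UNSUBSCRIBE: remove both directions again so neither map keeps stale entries.
    if let Some(topics) = peer_topics.get_mut(peer) {
        topics.remove(topic);
    }
    if let Some(peers) = topic_peers.get_mut(topic) {
        peers.remove(peer);
    }
    assert!(!topic_peers[topic].contains(peer));
}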
- - let topics = vec!["topic1", "topic2", "topic3", "topic4"] - .iter() - .map(|&t| String::from(t)) - .collect(); - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(topics) - .to_subscribe(false) - .create_network(); - - // The first peer sends 3 subscriptions and 1 unsubscription - let mut subscriptions = topic_hashes[..3] - .iter() - .map(|topic_hash| GossipsubSubscription { - action: GossipsubSubscriptionAction::Subscribe, - topic_hash: topic_hash.clone(), - }) - .collect::>(); - - subscriptions.push(GossipsubSubscription { - action: GossipsubSubscriptionAction::Unsubscribe, - topic_hash: topic_hashes[topic_hashes.len() - 1].clone(), - }); - - let unknown_peer = PeerId::random(); - // process the subscriptions - // first and second peers send subscriptions - gs.handle_received_subscriptions(&subscriptions, &peers[0]); - gs.handle_received_subscriptions(&subscriptions, &peers[1]); - // unknown peer sends the same subscriptions - gs.handle_received_subscriptions(&subscriptions, &unknown_peer); - - // verify the result - - let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); - assert!( - peer_topics == topic_hashes.iter().take(3).cloned().collect(), - "First peer should be subscribed to three topics" - ); - let peer_topics = gs.peer_topics.get(&peers[1]).unwrap().clone(); - assert!( - peer_topics == topic_hashes.iter().take(3).cloned().collect(), - "Second peer should be subscribed to three topics" - ); - - assert!( - gs.peer_topics.get(&unknown_peer).is_none(), - "Unknown peer should not have been added" - ); - - for topic_hash in topic_hashes[..3].iter() { - let topic_peers = gs.topic_peers.get(topic_hash).unwrap().clone(); - assert!( - topic_peers == peers[..2].iter().cloned().collect(), - "Two peers should be added to the first three topics" - ); - } - - // Peer 0 unsubscribes from the first topic - - gs.handle_received_subscriptions( - &[GossipsubSubscription { - action: GossipsubSubscriptionAction::Unsubscribe, - topic_hash: topic_hashes[0].clone(), - }], - &peers[0], - ); - - let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); - assert!( - peer_topics == topic_hashes[1..3].iter().cloned().collect(), - "Peer should be subscribed to two topics" - ); - - let topic_peers = gs.topic_peers.get(&topic_hashes[0]).unwrap().clone(); // only gossipsub at the moment - assert!( - topic_peers == peers[1..2].iter().cloned().collect(), - "Only the second peers should be in the first topic" - ); - } - - #[test] - /// Test Gossipsub.get_random_peers() function - fn test_get_random_peers() { - // generate a default GossipsubConfig - let gs_config = GossipsubConfigBuilder::default() - .validation_mode(ValidationMode::Anonymous) - .build() - .unwrap(); - // create a gossipsub struct - let mut gs: Gossipsub = Gossipsub::new(MessageAuthenticity::Anonymous, gs_config).unwrap(); - - // create a topic and fill it with some peers - let topic_hash = Topic::new("Test").hash(); - let mut peers = vec![]; - for _ in 0..20 { - peers.push(PeerId::random()) - } - - gs.topic_peers - .insert(topic_hash.clone(), peers.iter().cloned().collect()); - - gs.connected_peers = peers - .iter() - .map(|p| { - ( - *p, - PeerConnections { - kind: PeerKind::Gossipsubv1_1, - connections: vec![ConnectionId::new(1)], - }, - ) - }) - .collect(); - - let random_peers = - get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 5, |_| { - true - }); - assert_eq!(random_peers.len(), 5, "Expected 5 peers to be returned"); - let random_peers = get_random_peers( - 
&gs.topic_peers, - &gs.connected_peers, - &topic_hash, - 30, - |_| true, - ); - assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); - assert!( - random_peers == peers.iter().cloned().collect(), - "Expected no shuffling" - ); - let random_peers = get_random_peers( - &gs.topic_peers, - &gs.connected_peers, - &topic_hash, - 20, - |_| true, - ); - assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); - assert!( - random_peers == peers.iter().cloned().collect(), - "Expected no shuffling" - ); - let random_peers = - get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 0, |_| { - true - }); - assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); - // test the filter - let random_peers = - get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 5, |_| { - false - }); - assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); - let random_peers = - get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 10, { - |peer| peers.contains(peer) - }); - assert!(random_peers.len() == 10, "Expected 10 peers to be returned"); - } - - /// Tests that the correct message is sent when a peer asks for a message in our cache. - #[test] - fn test_handle_iwant_msg_cached() { - let (mut gs, peers, _) = inject_nodes1() - .peer_no(20) - .topics(Vec::new()) - .to_subscribe(true) - .create_network(); - - let raw_message = RawGossipsubMessage { - source: Some(peers[11]), - data: vec![1, 2, 3, 4], - sequence_number: Some(1u64), - topic: TopicHash::from_raw("topic"), - signature: None, - key: None, - validated: true, - }; - - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); - - let msg_id = gs.config.message_id(message); - gs.mcache.put(&msg_id, raw_message); - - gs.handle_iwant(&peers[7], vec![msg_id.clone()]); - - // the messages we are sending - let sent_messages = gs - .events - .iter() - .fold(vec![], |mut collected_messages, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for c in &event.messages { - collected_messages.push(c.clone()) - } - } - collected_messages - } - _ => collected_messages, - }); - - assert!( - sent_messages - .iter() - .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) - .any(|msg| gs.config.message_id(&msg) == msg_id), - "Expected the cached message to be sent to an IWANT peer" - ); - } - - /// Tests that messages are sent correctly depending on the shifting of the message cache. - #[test] - fn test_handle_iwant_msg_cached_shifted() { - let (mut gs, peers, _) = inject_nodes1() - .peer_no(20) - .topics(Vec::new()) - .to_subscribe(true) - .create_network(); - - // perform 10 memshifts and check that it leaves the cache - for shift in 1..10 { - let raw_message = RawGossipsubMessage { - source: Some(peers[11]), - data: vec![1, 2, 3, 4], - sequence_number: Some(shift), - topic: TopicHash::from_raw("topic"), - signature: None, - key: None, - validated: true, - }; - - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); - - let msg_id = gs.config.message_id(message); - gs.mcache.put(&msg_id, raw_message); - for _ in 0..shift { - gs.mcache.shift(); - } - - gs.handle_iwant(&peers[7], vec![msg_id.clone()]); - - // is the message is being sent? 
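// A self-contained sketch of the sliding-window idea the checks below rely on: the cache keeps
// one bucket per heartbeat, each shift pushes a fresh bucket and drops the oldest once `HISTORY`
// buckets exist, so an entry survives at most `HISTORY` shifts (5 by default in these tests).
// Illustration only, not the crate's `MessageCache` API.
fn sketch_history_window() {
    use std::collections::VecDeque;

    const HISTORY: usize = 5;
    let mut window: VecDeque<Vec<&str>> = VecDeque::new();
    window.push_front(vec!["msg-id"]); // `put` stores into the newest bucket

    for shifts in 1..=6 {
        // `shift()` adds a fresh bucket and drops the oldest once HISTORY buckets exist.
        window.push_front(Vec::new());
        if window.len() > HISTORY {
            window.pop_back();
        }

        let still_cached = window.iter().flatten().any(|id| *id == "msg-id");
        // Present for the first few shifts, gone once its bucket falls off the back.
        assert_eq!(still_cached, shifts < HISTORY);
    }
}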
- let message_exists = gs.events.iter().any(|e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - event - .messages - .iter() - .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) - .any(|msg| gs.config.message_id(&msg) == msg_id) - } else { - false - } + !m.subscriptions.is_empty() + } else { + false } - _ => false, - }); - // default history_length is 5, expect no messages after shift > 5 - if shift < 5 { - assert!( - message_exists, - "Expected the cached message to be sent to an IWANT peer before 5 shifts" - ); - } else { - assert!( - !message_exists, - "Expected the cached message to not be sent to an IWANT peer after 5 shifts" - ); } - } - } - - #[test] - // tests that an event is not created when a peers asks for a message not in our cache - fn test_handle_iwant_msg_not_cached() { - let (mut gs, peers, _) = inject_nodes1() - .peer_no(20) - .topics(Vec::new()) - .to_subscribe(true) - .create_network(); - - let events_before = gs.events.len(); - gs.handle_iwant(&peers[7], vec![MessageId::new(b"unknown id")]); - let events_after = gs.events.len(); - - assert_eq!( - events_before, events_after, - "Expected event count to stay the same" - ); - } - - #[test] - // tests that an event is created when a peer shares that it has a message we want - fn test_handle_ihave_subscribed_and_msg_not_cached() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .create_network(); - - gs.handle_ihave( - &peers[7], - vec![(topic_hashes[0].clone(), vec![MessageId::new(b"unknown id")])], - ); - - // check that we sent an IWANT request for `unknown id` - let iwant_exists = match gs.control_pool.get(&peers[7]) { - Some(controls) => controls.iter().any(|c| match c { - GossipsubControlAction::IWant { message_ids } => message_ids - .iter() - .any(|m| *m == MessageId::new(b"unknown id")), - _ => false, - }), _ => false, - }; - - assert!( - iwant_exists, - "Expected to send an IWANT control message for unkown message id" - ); - } - - #[test] - // tests that an event is not created when a peer shares that it has a message that - // we already have - fn test_handle_ihave_subscribed_and_msg_cached() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .create_network(); - - let msg_id = MessageId::new(b"known id"); - - let events_before = gs.events.len(); - gs.handle_ihave(&peers[7], vec![(topic_hashes[0].clone(), vec![msg_id])]); - let events_after = gs.events.len(); - - assert_eq!( - events_before, events_after, - "Expected event count to stay the same" - ) - } - - #[test] - // test that an event is not created when a peer shares that it has a message in - // a topic that we are not subscribed to - fn test_handle_ihave_not_subscribed() { - let (mut gs, peers, _) = inject_nodes1() - .peer_no(20) - .topics(vec![]) - .to_subscribe(true) - .create_network(); - - let events_before = gs.events.len(); - gs.handle_ihave( - &peers[7], - vec![( - TopicHash::from_raw(String::from("unsubscribed topic")), - vec![MessageId::new(b"irrelevant id")], - )], - ); - let events_after = gs.events.len(); - - assert_eq!( - events_before, events_after, - "Expected event count to stay the same" - ) - } - - #[test] - // tests that a peer is added to our mesh when we are both subscribed - // to the same topic - fn test_handle_graft_is_subscribed() { - 
let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .create_network(); - - gs.handle_graft(&peers[7], topic_hashes.clone()); - - assert!( - gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), - "Expected peer to have been added to mesh" - ); - } - - #[test] - // tests that a peer is not added to our mesh when they are subscribed to - // a topic that we are not - fn test_handle_graft_is_not_subscribed() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .create_network(); - - gs.handle_graft( - &peers[7], - vec![TopicHash::from_raw(String::from("unsubscribed topic"))], - ); - - assert!( - !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), - "Expected peer to have been added to mesh" - ); - } - - #[test] - // tests multiple topics in a single graft message - fn test_handle_graft_multiple_topics() { - let topics: Vec = vec!["topic1", "topic2", "topic3", "topic4"] - .iter() - .map(|&t| String::from(t)) - .collect(); - - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(topics) - .to_subscribe(true) - .create_network(); - - let mut their_topics = topic_hashes.clone(); - // their_topics = [topic1, topic2, topic3] - // our_topics = [topic1, topic2, topic4] - their_topics.pop(); - gs.leave(&their_topics[2]); - - gs.handle_graft(&peers[7], their_topics.clone()); - - for hash in topic_hashes.iter().take(2) { - assert!( - gs.mesh.get(hash).unwrap().contains(&peers[7]), - "Expected peer to be in the mesh for the first 2 topics" - ); - } - - assert!( - gs.mesh.get(&topic_hashes[2]).is_none(), - "Expected the second topic to not be in the mesh" - ); - } - - #[test] - // tests that a peer is removed from our mesh - fn test_handle_prune_peer_in_mesh() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(20) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .create_network(); - - // insert peer into our mesh for 'topic1' - gs.mesh - .insert(topic_hashes[0].clone(), peers.iter().cloned().collect()); - assert!( - gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), - "Expected peer to be in mesh" - ); - - gs.handle_prune( - &peers[7], - topic_hashes - .iter() - .map(|h| (h.clone(), vec![], None)) - .collect(), - ); - assert!( - !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), - "Expected peer to be removed from mesh" - ); - } - - fn count_control_msgs( - gs: &Gossipsub, - mut filter: impl FnMut(&PeerId, &GossipsubControlAction) -> bool, - ) -> usize { - gs.control_pool - .iter() - .map(|(peer_id, actions)| actions.iter().filter(|m| filter(peer_id, m)).count()) - .sum::() - + gs.events - .iter() - .map(|e| match e { - NetworkBehaviourAction::NotifyHandler { peer_id, event, .. 
} => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - event - .control_msgs - .iter() - .filter(|m| filter(peer_id, m)) - .count() - } else { - 0 - } - } - _ => 0, - }) - .sum::() - } - - fn flush_events(gs: &mut Gossipsub) { - gs.control_pool.clear(); - gs.events.clear(); - } - - #[test] - // tests that a peer added as explicit peer gets connected to - fn test_explicit_peer_gets_connected() { - let (mut gs, _, _) = inject_nodes1() - .peer_no(0) - .topics(Vec::new()) - .to_subscribe(true) - .create_network(); - - //create new peer - let peer = PeerId::random(); - - //add peer as explicit peer - gs.add_explicit_peer(&peer); - - let num_events = gs - .events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::Dial { opts, handler: _ } => { - opts.get_peer_id() == Some(peer) - } - _ => false, - }) - .count(); - - assert_eq!( - num_events, 1, - "There was no dial peer event for the explicit peer" - ); - } - - #[test] - fn test_explicit_peer_reconnects() { - let config = GossipsubConfigBuilder::default() - .check_explicit_peers_ticks(2) - .build() - .unwrap(); - let (mut gs, others, _) = inject_nodes1() - .peer_no(1) - .topics(Vec::new()) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - let peer = others.get(0).unwrap(); - - //add peer as explicit peer - gs.add_explicit_peer(peer); - - flush_events(&mut gs); - - //disconnect peer - disconnect_peer(&mut gs, peer); - - gs.heartbeat(); - - //check that no reconnect after first heartbeat since `explicit_peer_ticks == 2` - assert_eq!( - gs.events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::Dial { opts, handler: _ } => - opts.get_peer_id() == Some(*peer), - _ => false, - }) - .count(), - 0, - "There was a dial peer event before explicit_peer_ticks heartbeats" - ); - - gs.heartbeat(); - - //check that there is a reconnect after second heartbeat - assert!( - gs.events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::Dial { opts, handler: _ } => - opts.get_peer_id() == Some(*peer), - _ => false, - }) - .count() - >= 1, - "There was no dial peer event for the explicit peer" - ); - } - - #[test] - fn test_handle_graft_explicit_peer() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(1) - .topics(vec![String::from("topic1"), String::from("topic2")]) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - let peer = peers.get(0).unwrap(); - - gs.handle_graft(peer, topic_hashes.clone()); - - //peer got not added to mesh - assert!(gs.mesh[&topic_hashes[0]].is_empty()); - assert!(gs.mesh[&topic_hashes[1]].is_empty()); - - //check prunes - assert!( - count_control_msgs(&gs, |peer_id, m| peer_id == peer - && match m { - GossipsubControlAction::Prune { topic_hash, .. 
} => - topic_hash == &topic_hashes[0] || topic_hash == &topic_hashes[1], - _ => false, - }) - >= 2, - "Not enough prunes sent when grafting from explicit peer" - ); - } - - #[test] - fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { - let (gs, peers, topic_hashes) = inject_nodes1() - .peer_no(2) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) - assert_eq!( - gs.mesh[&topic_hashes[0]], - vec![peers[1]].into_iter().collect() - ); - - //assert that graft gets created to non-explicit peer - assert!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && matches!(m, GossipsubControlAction::Graft { .. })) - >= 1, - "No graft message got created to non-explicit peer" - ); - - //assert that no graft gets created to explicit peer - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && matches!(m, GossipsubControlAction::Graft { .. })), - 0, - "A graft message got created to an explicit peer" - ); - } - - #[test] - fn do_not_graft_explicit_peer() { - let (mut gs, others, topic_hashes) = inject_nodes1() - .peer_no(1) - .topics(vec![String::from("topic")]) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - gs.heartbeat(); - - //mesh stays empty - assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new()); - - //assert that no graft gets created to explicit peer - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &others[0] - && matches!(m, GossipsubControlAction::Graft { .. })), - 0, - "A graft message got created to an explicit peer" - ); - } - - #[test] - fn do_forward_messages_to_explicit_peers() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(2) - .topics(vec![String::from("topic1"), String::from("topic2")]) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - let local_id = PeerId::random(); - - let message = RawGossipsubMessage { - source: Some(peers[1]), - data: vec![12], - sequence_number: Some(0), - topic: topic_hashes[0].clone(), - signature: None, - key: None, - validated: true, - }; - gs.handle_received_message(message.clone(), &local_id); - - assert_eq!( - gs.events - .iter() - .filter(|e| match e { - NetworkBehaviourAction::NotifyHandler { peer_id, event, .. 
} => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - peer_id == &peers[0] - && event - .messages - .iter() - .filter(|m| m.data == message.data) - .count() - > 0 - } else { - false - } - } - _ => false, - }) - .count(), - 1, - "The message did not get forwarded to the explicit peer" - ); - } - - #[test] - fn explicit_peers_not_added_to_mesh_on_subscribe() { - let (mut gs, peers, _) = inject_nodes1() - .peer_no(2) - .topics(Vec::new()) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - //create new topic, both peers subscribing to it but we do not subscribe to it - let topic = Topic::new(String::from("t")); - let topic_hash = topic.hash(); - for peer in peers.iter().take(2) { - gs.handle_received_subscriptions( - &[GossipsubSubscription { - action: GossipsubSubscriptionAction::Subscribe, - topic_hash: topic_hash.clone(), - }], - peer, - ); - } - - //subscribe now to topic - gs.subscribe(&topic).unwrap(); - - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) - assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); - - //assert that graft gets created to non-explicit peer - assert!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && matches!(m, GossipsubControlAction::Graft { .. })) - > 0, - "No graft message got created to non-explicit peer" - ); - - //assert that no graft gets created to explicit peer - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && matches!(m, GossipsubControlAction::Graft { .. })), - 0, - "A graft message got created to an explicit peer" - ); - } - - #[test] - fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { - let (mut gs, peers, _) = inject_nodes1() - .peer_no(2) - .topics(Vec::new()) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - //create new topic, both peers subscribing to it but we do not subscribe to it - let topic = Topic::new(String::from("t")); - let topic_hash = topic.hash(); - for peer in peers.iter().take(2) { - gs.handle_received_subscriptions( - &[GossipsubSubscription { - action: GossipsubSubscriptionAction::Subscribe, - topic_hash: topic_hash.clone(), - }], - peer, - ); - } - - //we send a message for this topic => this will initialize the fanout - gs.publish(topic.clone(), vec![1, 2, 3]).unwrap(); - - //subscribe now to topic - gs.subscribe(&topic).unwrap(); - - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) - assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); - - //assert that graft gets created to non-explicit peer - assert!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && matches!(m, GossipsubControlAction::Graft { .. })) - >= 1, - "No graft message got created to non-explicit peer" - ); - - //assert that no graft gets created to explicit peer - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && matches!(m, GossipsubControlAction::Graft { .. 
})), - 0, - "A graft message got created to an explicit peer" - ); - } - - #[test] - fn no_gossip_gets_sent_to_explicit_peers() { - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(2) - .topics(vec![String::from("topic1"), String::from("topic2")]) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(1) - .create_network(); - - let local_id = PeerId::random(); - - let message = RawGossipsubMessage { - source: Some(peers[1]), - data: vec![], - sequence_number: Some(0), - topic: topic_hashes[0].clone(), - signature: None, - key: None, - validated: true, - }; - - //forward the message - gs.handle_received_message(message, &local_id); - - //simulate multiple gossip calls (for randomness) - for _ in 0..3 { - gs.emit_gossip(); - } - - //assert that no gossip gets sent to explicit peer - assert_eq!( - gs.control_pool - .get(&peers[0]) - .unwrap_or(&Vec::new()) - .iter() - .filter(|m| matches!(m, GossipsubControlAction::IHave { .. })) - .count(), - 0, - "Gossip got emitted to explicit peer" - ); - } - - // Tests the mesh maintenance addition - #[test] - fn test_mesh_addition() { - let config: GossipsubConfig = GossipsubConfig::default(); - - // Adds mesh_low peers and PRUNE 2 giving us a deficit. - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n() + 1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - let to_remove_peers = config.mesh_n() + 1 - config.mesh_n_low() - 1; - - for peer in peers.iter().take(to_remove_peers) { - gs.handle_prune( - peer, - topics.iter().map(|h| (h.clone(), vec![], None)).collect(), - ); - } - - // Verify the pruned peers are removed from the mesh. - assert_eq!( - gs.mesh.get(&topics[0]).unwrap().len(), - config.mesh_n_low() - 1 - ); - - // run a heartbeat - gs.heartbeat(); - - // Peers should be added to reach mesh_n - assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); - } - - // Tests the mesh maintenance subtraction - #[test] - fn test_mesh_subtraction() { - let config = GossipsubConfig::default(); - - // Adds mesh_low peers and PRUNE 2 giving us a deficit. 
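// A small sketch of the heartbeat arithmetic exercised by test_mesh_addition above and
// test_mesh_subtraction below: when the mesh falls below mesh_n_low it is topped back up to
// mesh_n, and when it grows past mesh_n_high it is trimmed back down to mesh_n. The concrete
// numbers (5/6/12) are illustrative defaults assumed here, not a statement about the crate's
// configuration.
fn mesh_maintenance_delta(
    current: usize,
    mesh_n_low: usize,
    mesh_n: usize,
    mesh_n_high: usize,
) -> isize {
    if current < mesh_n_low {
        (mesh_n - current) as isize // deficit: graft this many peers
    } else if current > mesh_n_high {
        -((current - mesh_n) as isize) // excess: prune this many peers
    } else {
        0 // within [mesh_n_low, mesh_n_high], nothing to do
    }
}

fn sketch_mesh_maintenance() {
    let (low, target, high) = (5, 6, 12);
    assert_eq!(mesh_maintenance_delta(4, low, target, high), 2); // topped up to mesh_n
    assert_eq!(mesh_maintenance_delta(22, low, target, high), -16); // trimmed down to mesh_n
    assert_eq!(mesh_maintenance_delta(8, low, target, high), 0); // already within bounds
}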
- let n = config.mesh_n_high() + 10; - //make all outbound connections so that we allow grafting to all - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(n) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .outbound(n) - .create_network(); - - // graft all the peers - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - // run a heartbeat - gs.heartbeat(); - - // Peers should be removed to reach mesh_n - assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); - } - - #[test] - fn test_connect_to_px_peers_on_handle_prune() { - let config: GossipsubConfig = GossipsubConfig::default(); - - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - //handle prune from single peer with px peers - - let mut px = Vec::new(); - //propose more px peers than config.prune_peers() - for _ in 0..config.prune_peers() + 5 { - px.push(PeerInfo { - peer_id: Some(PeerId::random()), - }); - } - - gs.handle_prune( - &peers[0], - vec![( - topics[0].clone(), - px.clone(), - Some(config.prune_backoff().as_secs()), - )], - ); - - //Check DialPeer events for px peers - let dials: Vec<_> = gs - .events - .iter() - .filter_map(|e| match e { - NetworkBehaviourAction::Dial { opts, handler: _ } => opts.get_peer_id(), - _ => None, - }) - .collect(); - - // Exactly config.prune_peers() many random peers should be dialled - assert_eq!(dials.len(), config.prune_peers()); - - let dials_set: HashSet<_> = dials.into_iter().collect(); - - // No duplicates - assert_eq!(dials_set.len(), config.prune_peers()); - - //all dial peers must be in px - assert!(dials_set.is_subset( - &px.iter() - .map(|i| *i.peer_id.as_ref().unwrap()) - .collect::>() - )); - } - - #[test] - fn test_send_px_and_backoff_in_prune() { - let config: GossipsubConfig = GossipsubConfig::default(); - - //build mesh with enough peers for px - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.prune_peers() + 1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - //send prune to peer - gs.send_graft_prune( - HashMap::new(), - vec![(peers[0], vec![topics[0].clone()])] - .into_iter() - .collect(), - HashSet::new(), - ); - - //check prune message - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && match m { - GossipsubControlAction::Prune { - topic_hash, - peers, - backoff, - } => - topic_hash == &topics[0] && - peers.len() == config.prune_peers() && - //all peers are different - peers.iter().collect::>().len() == - config.prune_peers() && - backoff.unwrap() == config.prune_backoff().as_secs(), - _ => false, - }), - 1 - ); - } - - #[test] - fn test_prune_backoffed_peer_on_graft() { - let config: GossipsubConfig = GossipsubConfig::default(); - - //build mesh with enough peers for px - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.prune_peers() + 1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - //remove peer from mesh and send prune to peer => this adds a backoff for this peer - gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[0]); - gs.send_graft_prune( - HashMap::new(), - vec![(peers[0], vec![topics[0].clone()])] - .into_iter() - .collect(), - HashSet::new(), - ); - - //ignore all messages until now - gs.events.clear(); - - //handle graft - gs.handle_graft(&peers[0], vec![topics[0].clone()]); - - //check prune message - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && match 
m { - GossipsubControlAction::Prune { - topic_hash, - peers, - backoff, - } => - topic_hash == &topics[0] && - //no px in this case - peers.is_empty() && - backoff.unwrap() == config.prune_backoff().as_secs(), - _ => false, - }), - 1 - ); - } - - #[test] - fn test_do_not_graft_within_backoff_period() { - let config = GossipsubConfigBuilder::default() - .backoff_slack(1) - .heartbeat_interval(Duration::from_millis(100)) - .build() - .unwrap(); - //only one peer => mesh too small and will try to regraft as early as possible - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - //handle prune from peer with backoff of one second - gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]); - - //forget all events until now - flush_events(&mut gs); - - //call heartbeat - gs.heartbeat(); - - //Sleep for one second and apply 10 regular heartbeats (interval = 100ms). - for _ in 0..10 { - sleep(Duration::from_millis(100)); - gs.heartbeat(); - } + }) + .collect(); - //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat - // is needed). - assert_eq!( - count_control_msgs(&gs, |_, m| matches!( - m, - GossipsubControlAction::Graft { .. } - )), - 0, - "Graft message created too early within backoff period" - ); - - //Heartbeat one more time this should graft now - sleep(Duration::from_millis(100)); - gs.heartbeat(); - - //check that graft got created - assert!( - count_control_msgs(&gs, |_, m| matches!( - m, - GossipsubControlAction::Graft { .. } - )) > 0, - "No graft message was created after backoff period" - ); - } - - #[test] - fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without_backoff() { - //set default backoff period to 1 second - let config = GossipsubConfigBuilder::default() - .prune_backoff(Duration::from_millis(90)) - .backoff_slack(1) - .heartbeat_interval(Duration::from_millis(100)) - .build() - .unwrap(); - //only one peer => mesh too small and will try to regraft as early as possible - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - //handle prune from peer without a specified backoff - gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]); - - //forget all events until now - flush_events(&mut gs); - - //call heartbeat - gs.heartbeat(); - - //Apply one more heartbeat - sleep(Duration::from_millis(100)); - gs.heartbeat(); - - //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat - // is needed). - assert_eq!( - count_control_msgs(&gs, |_, m| matches!( - m, - GossipsubControlAction::Graft { .. } - )), - 0, - "Graft message created too early within backoff period" - ); - - //Heartbeat one more time this should graft now - sleep(Duration::from_millis(100)); - gs.heartbeat(); - - //check that graft got created - assert!( - count_control_msgs(&gs, |_, m| matches!( - m, - GossipsubControlAction::Graft { .. 
} - )) > 0, - "No graft message was created after backoff period" - ); - } - - #[test] - fn test_unsubscribe_backoff() { - const HEARTBEAT_INTERVAL: Duration = Duration::from_millis(100); - let config = GossipsubConfigBuilder::default() - .backoff_slack(1) - // ensure a prune_backoff > unsubscribe_backoff - .prune_backoff(Duration::from_secs(5)) - .unsubscribe_backoff(1) - .heartbeat_interval(HEARTBEAT_INTERVAL) - .build() - .unwrap(); - - let topic = String::from("test"); - // only one peer => mesh too small and will try to regraft as early as possible - let (mut gs, _, topics) = inject_nodes1() - .peer_no(1) - .topics(vec![topic.clone()]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - let _ = gs.unsubscribe(&Topic::new(topic)); - - assert_eq!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Prune { backoff, .. } => backoff == &Some(1), - _ => false, - }), - 1, - "Peer should be pruned with `unsubscribe_backoff`." - ); - - let _ = gs.subscribe(&Topic::new(topics[0].to_string())); - - // forget all events until now - flush_events(&mut gs); - - // call heartbeat - gs.heartbeat(); - - // Sleep for one second and apply 10 regular heartbeats (interval = 100ms). - for _ in 0..10 { - sleep(HEARTBEAT_INTERVAL); - gs.heartbeat(); - } - - // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat - // is needed). - assert_eq!( - count_control_msgs(&gs, |_, m| matches!( - m, - GossipsubControlAction::Graft { .. } - )), - 0, - "Graft message created too early within backoff period" - ); - - // Heartbeat one more time this should graft now - sleep(HEARTBEAT_INTERVAL); - gs.heartbeat(); - - // check that graft got created - assert!( - count_control_msgs(&gs, |_, m| matches!( - m, - GossipsubControlAction::Graft { .. } - )) > 0, - "No graft message was created after backoff period" - ); - } - - #[test] - fn test_flood_publish() { - let config: GossipsubConfig = GossipsubConfig::default(); - - let topic = "test"; - // Adds more peers than mesh can hold to test flood publishing - let (mut gs, _, _) = inject_nodes1() - .peer_no(config.mesh_n_high() + 10) - .topics(vec![topic.into()]) - .to_subscribe(true) - .create_network(); - - //publish message - let publish_data = vec![0; 42]; - gs.publish(Topic::new(topic), publish_data).unwrap(); - - // Collect all publish messages - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } - } - collected_publish - } - _ => collected_publish, - }); - - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform( - publishes - .first() - .expect("Should contain > 0 entries") - .clone(), - ) - .unwrap(); + // check that there are two subscriptions sent to each peer + for sevent in send_events.clone() { + if let NetworkBehaviourAction::NotifyHandler { event, .. } = sevent { + if let GossipsubHandlerIn::Message(ref m) = **event { + assert!( + m.subscriptions.len() == 2, + "There should be two subscriptions sent to each peer (1 for each topic)." + ); + } + }; + } - let msg_id = gs.config.message_id(message); + // check that there are 20 send events created + assert!( + send_events.len() == 20, + "There should be a subscription event sent to each peer." 
+ ); - let config: GossipsubConfig = GossipsubConfig::default(); - assert_eq!( - publishes.len(), - config.mesh_n_high() + 10, - "Should send a publish message to all known peers" + // should add the new peers to `peer_topics` with an empty vec as a gossipsub node + for peer in peers { + let known_topics = gs.peer_topics.get(&peer).unwrap(); + assert!( + known_topics == &topic_hashes.iter().cloned().collect(), + "The topics for each node should all topics" ); + } +} +#[test] +/// Test subscription handling +fn test_handle_received_subscriptions() { + // For every subscription: + // SUBSCRIBE: - Add subscribed topic to peer_topics for peer. + // - Add peer to topics_peer. + // UNSUBSCRIBE - Remove topic from peer_topics for peer. + // - Remove peer from topic_peers. + + let topics = vec!["topic1", "topic2", "topic3", "topic4"] + .iter() + .map(|&t| String::from(t)) + .collect(); + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(topics) + .to_subscribe(false) + .create_network(); + + // The first peer sends 3 subscriptions and 1 unsubscription + let mut subscriptions = topic_hashes[..3] + .iter() + .map(|topic_hash| GossipsubSubscription { + action: GossipsubSubscriptionAction::Subscribe, + topic_hash: topic_hash.clone(), + }) + .collect::>(); + + subscriptions.push(GossipsubSubscription { + action: GossipsubSubscriptionAction::Unsubscribe, + topic_hash: topic_hashes[topic_hashes.len() - 1].clone(), + }); + + let unknown_peer = PeerId::random(); + // process the subscriptions + // first and second peers send subscriptions + gs.handle_received_subscriptions(&subscriptions, &peers[0]); + gs.handle_received_subscriptions(&subscriptions, &peers[1]); + // unknown peer sends the same subscriptions + gs.handle_received_subscriptions(&subscriptions, &unknown_peer); + + // verify the result + + let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); + assert!( + peer_topics == topic_hashes.iter().take(3).cloned().collect(), + "First peer should be subscribed to three topics" + ); + let peer_topics = gs.peer_topics.get(&peers[1]).unwrap().clone(); + assert!( + peer_topics == topic_hashes.iter().take(3).cloned().collect(), + "Second peer should be subscribed to three topics" + ); + + assert!( + gs.peer_topics.get(&unknown_peer).is_none(), + "Unknown peer should not have been added" + ); + + for topic_hash in topic_hashes[..3].iter() { + let topic_peers = gs.topic_peers.get(topic_hash).unwrap().clone(); assert!( - gs.mcache.get(&msg_id).is_some(), - "Message cache should contain published message" + topic_peers == peers[..2].iter().cloned().collect(), + "Two peers should be added to the first three topics" ); } - #[test] - fn test_gossip_to_at_least_gossip_lazy_peers() { - let config: GossipsubConfig = GossipsubConfig::default(); + // Peer 0 unsubscribes from the first topic - //add more peers than in mesh to test gossipping - //by default only mesh_n_low peers will get added to mesh - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(config.mesh_n_low() + config.gossip_lazy() + 1) - .topics(vec!["topic".into()]) - .to_subscribe(true) - .create_network(); + gs.handle_received_subscriptions( + &[GossipsubSubscription { + action: GossipsubSubscriptionAction::Unsubscribe, + topic_hash: topic_hashes[0].clone(), + }], + &peers[0], + ); + + let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); + assert!( + peer_topics == topic_hashes[1..3].iter().cloned().collect(), + "Peer should be subscribed to two topics" + ); + + let topic_peers = 
gs.topic_peers.get(&topic_hashes[0]).unwrap().clone(); // only gossipsub at the moment + assert!( + topic_peers == peers[1..2].iter().cloned().collect(), + "Only the second peers should be in the first topic" + ); +} - //receive message - let raw_message = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![], - sequence_number: Some(0), - topic: topic_hashes[0].clone(), - signature: None, - key: None, - validated: true, - }; - gs.handle_received_message(raw_message.clone(), &PeerId::random()); +#[test] +/// Test Gossipsub.get_random_peers() function +fn test_get_random_peers() { + // generate a default GossipsubConfig + let gs_config = GossipsubConfigBuilder::default() + .validation_mode(ValidationMode::Anonymous) + .build() + .unwrap(); + // create a gossipsub struct + let mut gs: Gossipsub = Gossipsub::new(MessageAuthenticity::Anonymous, gs_config).unwrap(); + + // create a topic and fill it with some peers + let topic_hash = Topic::new("Test").hash(); + let mut peers = vec![]; + for _ in 0..20 { + peers.push(PeerId::random()) + } - //emit gossip - gs.emit_gossip(); + gs.topic_peers + .insert(topic_hash.clone(), peers.iter().cloned().collect()); + + gs.connected_peers = peers + .iter() + .map(|p| { + ( + *p, + PeerConnections { + kind: PeerKind::Gossipsubv1_1, + connections: vec![ConnectionId::new(1)], + }, + ) + }) + .collect(); - // Transform the inbound message - let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + let random_peers = + get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 5, |_| { + true + }); + assert_eq!(random_peers.len(), 5, "Expected 5 peers to be returned"); + let random_peers = get_random_peers( + &gs.topic_peers, + &gs.connected_peers, + &topic_hash, + 30, + |_| true, + ); + assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); + assert!( + random_peers == peers.iter().cloned().collect(), + "Expected no shuffling" + ); + let random_peers = get_random_peers( + &gs.topic_peers, + &gs.connected_peers, + &topic_hash, + 20, + |_| true, + ); + assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); + assert!( + random_peers == peers.iter().cloned().collect(), + "Expected no shuffling" + ); + let random_peers = + get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 0, |_| { + true + }); + assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); + // test the filter + let random_peers = + get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 5, |_| { + false + }); + assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); + let random_peers = + get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 10, { + |peer| peers.contains(peer) + }); + assert!(random_peers.len() == 10, "Expected 10 peers to be returned"); +} - let msg_id = gs.config.message_id(message); +/// Tests that the correct message is sent when a peer asks for a message in our cache. +#[test] +fn test_handle_iwant_msg_cached() { + let (mut gs, peers, _) = inject_nodes1() + .peer_no(20) + .topics(Vec::new()) + .to_subscribe(true) + .create_network(); + + let raw_message = RawGossipsubMessage { + source: Some(peers[11]), + data: vec![1, 2, 3, 4], + sequence_number: Some(1u64), + topic: TopicHash::from_raw("topic"), + signature: None, + key: None, + validated: true, + }; - //check that exactly config.gossip_lazy() many gossip messages were sent. 
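// Illustration (hypothetical `HistoryCache`, not code from the crate): a minimal
// model of the shift-based history window that `test_handle_iwant_msg_cached_shifted`
// below pins down. A message stays retrievable for `history_length` shifts and is
// dropped afterwards, which is why that test expects the cached message to be sent
// for `shift < 5` and not once the default history length of 5 is exceeded.
use std::collections::{HashMap, VecDeque};
use std::hash::Hash;

struct HistoryCache<K: Eq + Hash + Clone, V> {
    history_length: usize,
    windows: VecDeque<Vec<K>>, // most recent window at the front
    entries: HashMap<K, V>,
}

impl<K: Eq + Hash + Clone, V> HistoryCache<K, V> {
    fn new(history_length: usize) -> Self {
        let mut windows = VecDeque::new();
        windows.push_front(Vec::new());
        Self {
            history_length,
            windows,
            entries: HashMap::new(),
        }
    }

    fn put(&mut self, key: K, value: V) {
        self.windows
            .front_mut()
            .expect("one window always exists")
            .push(key.clone());
        self.entries.insert(key, value);
    }

    // One heartbeat "shift": open a new window and evict whatever has now fallen
    // out of the last `history_length` windows.
    fn shift(&mut self) {
        self.windows.push_front(Vec::new());
        while self.windows.len() > self.history_length {
            for key in self.windows.pop_back().unwrap() {
                self.entries.remove(&key);
            }
        }
    }

    fn get(&self, key: &K) -> Option<&V> {
        self.entries.get(key)
    }
}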
- assert_eq!( - count_control_msgs(&gs, |_, action| match action { - GossipsubControlAction::IHave { - topic_hash, - message_ids, - } => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), - _ => false, - }), - config.gossip_lazy() - ); - } + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform(raw_message.clone()) + .unwrap(); + + let msg_id = gs.config.message_id(message); + gs.mcache.put(&msg_id, raw_message); - #[test] - fn test_gossip_to_at_most_gossip_factor_peers() { - let config: GossipsubConfig = GossipsubConfig::default(); + gs.handle_iwant(&peers[7], vec![msg_id.clone()]); + + // the messages we are sending + let sent_messages = gs + .events + .iter() + .fold(vec![], |mut collected_messages, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for c in &event.messages { + collected_messages.push(c.clone()) + } + } + collected_messages + } + _ => collected_messages, + }); - //add a lot of peers - let m = - config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(m) - .topics(vec!["topic".into()]) - .to_subscribe(true) - .create_network(); + assert!( + sent_messages + .iter() + .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) + .any(|msg| gs.config.message_id(&msg) == msg_id), + "Expected the cached message to be sent to an IWANT peer" + ); +} - //receive message +/// Tests that messages are sent correctly depending on the shifting of the message cache. +#[test] +fn test_handle_iwant_msg_cached_shifted() { + let (mut gs, peers, _) = inject_nodes1() + .peer_no(20) + .topics(Vec::new()) + .to_subscribe(true) + .create_network(); + + // perform 10 memshifts and check that it leaves the cache + for shift in 1..10 { let raw_message = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![], - sequence_number: Some(0), - topic: topic_hashes[0].clone(), + source: Some(peers[11]), + data: vec![1, 2, 3, 4], + sequence_number: Some(shift), + topic: TopicHash::from_raw("topic"), signature: None, key: None, validated: true, }; - gs.handle_received_message(raw_message.clone(), &PeerId::random()); - - //emit gossip - gs.emit_gossip(); // Transform the inbound message - let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + let message = &gs + .data_transform + .inbound_transform(raw_message.clone()) + .unwrap(); let msg_id = gs.config.message_id(message); - //check that exactly config.gossip_lazy() many gossip messages were sent. 
- assert_eq!( - count_control_msgs(&gs, |_, action| match action { - GossipsubControlAction::IHave { - topic_hash, - message_ids, - } => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), - _ => false, - }), - ((m - config.mesh_n_low()) as f64 * config.gossip_factor()) as usize - ); - } - - #[test] - fn test_accept_only_outbound_peer_grafts_when_mesh_full() { - let config: GossipsubConfig = GossipsubConfig::default(); + gs.mcache.put(&msg_id, raw_message); + for _ in 0..shift { + gs.mcache.shift(); + } - //enough peers to fill the mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); + gs.handle_iwant(&peers[7], vec![msg_id.clone()]); - // graft all the peers => this will fill the mesh - for peer in peers { - gs.handle_graft(&peer, topics.clone()); + // is the message is being sent? + let message_exists = gs.events.iter().any(|e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + event + .messages + .iter() + .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) + .any(|msg| gs.config.message_id(&msg) == msg_id) + } else { + false + } + } + _ => false, + }); + // default history_length is 5, expect no messages after shift > 5 + if shift < 5 { + assert!( + message_exists, + "Expected the cached message to be sent to an IWANT peer before 5 shifts" + ); + } else { + assert!( + !message_exists, + "Expected the cached message to not be sent to an IWANT peer after 5 shifts" + ); } + } +} - //assert current mesh size - assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); +#[test] +// tests that an event is not created when a peers asks for a message not in our cache +fn test_handle_iwant_msg_not_cached() { + let (mut gs, peers, _) = inject_nodes1() + .peer_no(20) + .topics(Vec::new()) + .to_subscribe(true) + .create_network(); + + let events_before = gs.events.len(); + gs.handle_iwant(&peers[7], vec![MessageId::new(b"unknown id")]); + let events_after = gs.events.len(); + + assert_eq!( + events_before, events_after, + "Expected event count to stay the same" + ); +} - //create an outbound and an inbound peer - let inbound = add_peer(&mut gs, &topics, false, false); - let outbound = add_peer(&mut gs, &topics, true, false); +#[test] +// tests that an event is created when a peer shares that it has a message we want +fn test_handle_ihave_subscribed_and_msg_not_cached() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + gs.handle_ihave( + &peers[7], + vec![(topic_hashes[0].clone(), vec![MessageId::new(b"unknown id")])], + ); + + // check that we sent an IWANT request for `unknown id` + let iwant_exists = match gs.control_pool.get(&peers[7]) { + Some(controls) => controls.iter().any(|c| match c { + GossipsubControlAction::IWant { message_ids } => message_ids + .iter() + .any(|m| *m == MessageId::new(b"unknown id")), + _ => false, + }), + _ => false, + }; - //send grafts - gs.handle_graft(&inbound, vec![topics[0].clone()]); - gs.handle_graft(&outbound, vec![topics[0].clone()]); + assert!( + iwant_exists, + "Expected to send an IWANT control message for unkown message id" + ); +} - //assert mesh size - assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1); +#[test] +// tests that an event is not created when a peer shares that 
it has a message that +// we already have +fn test_handle_ihave_subscribed_and_msg_cached() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + let msg_id = MessageId::new(b"known id"); + + let events_before = gs.events.len(); + gs.handle_ihave(&peers[7], vec![(topic_hashes[0].clone(), vec![msg_id])]); + let events_after = gs.events.len(); + + assert_eq!( + events_before, events_after, + "Expected event count to stay the same" + ) +} - //inbound is not in mesh - assert!(!gs.mesh[&topics[0]].contains(&inbound)); +#[test] +// test that an event is not created when a peer shares that it has a message in +// a topic that we are not subscribed to +fn test_handle_ihave_not_subscribed() { + let (mut gs, peers, _) = inject_nodes1() + .peer_no(20) + .topics(vec![]) + .to_subscribe(true) + .create_network(); + + let events_before = gs.events.len(); + gs.handle_ihave( + &peers[7], + vec![( + TopicHash::from_raw(String::from("unsubscribed topic")), + vec![MessageId::new(b"irrelevant id")], + )], + ); + let events_after = gs.events.len(); + + assert_eq!( + events_before, events_after, + "Expected event count to stay the same" + ) +} - //outbound is in mesh - assert!(gs.mesh[&topics[0]].contains(&outbound)); - } +#[test] +// tests that a peer is added to our mesh when we are both subscribed +// to the same topic +fn test_handle_graft_is_subscribed() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + gs.handle_graft(&peers[7], topic_hashes.clone()); + + assert!( + gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), + "Expected peer to have been added to mesh" + ); +} - #[test] - fn test_do_not_remove_too_many_outbound_peers() { - //use an extreme case to catch errors with high probability - let m = 50; - let n = 2 * m; - let config = GossipsubConfigBuilder::default() - .mesh_n_high(n) - .mesh_n(n) - .mesh_n_low(n) - .mesh_outbound_min(m) - .build() - .unwrap(); +#[test] +// tests that a peer is not added to our mesh when they are subscribed to +// a topic that we are not +fn test_handle_graft_is_not_subscribed() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + + gs.handle_graft( + &peers[7], + vec![TopicHash::from_raw(String::from("unsubscribed topic"))], + ); + + assert!( + !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), + "Expected peer to have been added to mesh" + ); +} - //fill the mesh with inbound connections - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(n) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - // graft all the peers - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } +#[test] +// tests multiple topics in a single graft message +fn test_handle_graft_multiple_topics() { + let topics: Vec = vec!["topic1", "topic2", "topic3", "topic4"] + .iter() + .map(|&t| String::from(t)) + .collect(); + + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(20) + .topics(topics) + .to_subscribe(true) + .create_network(); + + let mut their_topics = topic_hashes.clone(); + // their_topics = [topic1, topic2, topic3] + // our_topics = [topic1, topic2, topic4] + their_topics.pop(); + gs.leave(&their_topics[2]); + + gs.handle_graft(&peers[7], their_topics.clone()); + + for hash in 
topic_hashes.iter().take(2) {
+        assert!(
+            gs.mesh.get(hash).unwrap().contains(&peers[7]),
+            "Expected peer to be in the mesh for the first 2 topics"
+        );
+    }
-        //create m outbound connections and graft (we will accept the graft)
-        let mut outbound = HashSet::new();
-        for _ in 0..m {
-            let peer = add_peer(&mut gs, &topics, true, false);
-            outbound.insert(peer);
-            gs.handle_graft(&peer, topics.clone());
-        }
+    assert!(
+        gs.mesh.get(&topic_hashes[2]).is_none(),
+        "Expected the second topic to not be in the mesh"
+    );
+}
-        //mesh is overly full
-        assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m);
+#[test]
+// tests that a peer is removed from our mesh
+fn test_handle_prune_peer_in_mesh() {
+    let (mut gs, peers, topic_hashes) = inject_nodes1()
+        .peer_no(20)
+        .topics(vec![String::from("topic1")])
+        .to_subscribe(true)
+        .create_network();
+
+    // insert peer into our mesh for 'topic1'
+    gs.mesh
+        .insert(topic_hashes[0].clone(), peers.iter().cloned().collect());
+    assert!(
+        gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]),
+        "Expected peer to be in mesh"
+    );
+
+    gs.handle_prune(
+        &peers[7],
+        topic_hashes
+            .iter()
+            .map(|h| (h.clone(), vec![], None))
+            .collect(),
+    );
+    assert!(
+        !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]),
+        "Expected peer to be removed from mesh"
+    );
+}
-        // run a heartbeat
-        gs.heartbeat();
+fn count_control_msgs(
+    gs: &Gossipsub,
+    mut filter: impl FnMut(&PeerId, &GossipsubControlAction) -> bool,
+) -> usize {
+    gs.control_pool
+        .iter()
+        .map(|(peer_id, actions)| actions.iter().filter(|m| filter(peer_id, m)).count())
+        .sum::<usize>()
+        + gs.events
+            .iter()
+            .map(|e| match e {
+                NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => {
+                    if let GossipsubHandlerIn::Message(ref m) = **event {
+                        let event = proto_to_message(m);
+                        event
+                            .control_msgs
+                            .iter()
+                            .filter(|m| filter(peer_id, m))
+                            .count()
+                    } else {
+                        0
+                    }
+                }
+                _ => 0,
+            })
+            .sum::<usize>()
+}
-        // Peers should be removed to reach n
-        assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n);
+fn flush_events(gs: &mut Gossipsub) {
+    gs.control_pool.clear();
+    gs.events.clear();
+}
-        //all outbound peers are still in the mesh
-        assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p)));
-    }
+#[test]
+// tests that a peer added as explicit peer gets connected to
+fn test_explicit_peer_gets_connected() {
+    let (mut gs, _, _) = inject_nodes1()
+        .peer_no(0)
+        .topics(Vec::new())
+        .to_subscribe(true)
+        .create_network();
+
+    //create new peer
+    let peer = PeerId::random();
+
+    //add peer as explicit peer
+    gs.add_explicit_peer(&peer);
+
+    let num_events = gs
+        .events
+        .iter()
+        .filter(|e| match e {
+            NetworkBehaviourAction::Dial { opts, handler: _ } => {
+                opts.get_peer_id() == Some(peer)
+            }
+            _ => false,
+        })
+        .count();
-    #[test]
-    fn test_add_outbound_peers_if_min_is_not_satisfied() {
-        let config: GossipsubConfig = GossipsubConfig::default();
+    assert_eq!(
+        num_events, 1,
+        "There was no dial peer event for the explicit peer"
+    );
+}
-        // Fill full mesh with inbound peers
-        let (mut gs, peers, topics) = inject_nodes1()
-            .peer_no(config.mesh_n_high())
-            .topics(vec!["test".into()])
-            .to_subscribe(true)
-            .create_network();
+#[test]
+fn test_explicit_peer_reconnects() {
+    let config = GossipsubConfigBuilder::default()
+        .check_explicit_peers_ticks(2)
+        .build()
+        .unwrap();
+    let (mut gs, others, _) = inject_nodes1()
+        .peer_no(1)
+        .topics(Vec::new())
+        .to_subscribe(true)
+        .gs_config(config)
+        .create_network();
-        // graft all the peers
-        for peer in 
peers { - gs.handle_graft(&peer, topics.clone()); - } + let peer = others.get(0).unwrap(); - //create config.mesh_outbound_min() many outbound connections without grafting - for _ in 0..config.mesh_outbound_min() { - add_peer(&mut gs, &topics, true, false); - } + //add peer as explicit peer + gs.add_explicit_peer(peer); - // Nothing changed in the mesh yet - assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); + flush_events(&mut gs); - // run a heartbeat - gs.heartbeat(); + //disconnect peer + disconnect_peer(&mut gs, peer); - // The outbound peers got additionally added - assert_eq!( - gs.mesh[&topics[0]].len(), - config.mesh_n_high() + config.mesh_outbound_min() - ); - } + gs.heartbeat(); - //TODO add a test that ensures that new outbound connections are recognized as such. - // This is at the moment done in behaviour with relying on the fact that the call to - // `inject_connection_established` for the first connection is done before `inject_connected` - // gets called. For all further connections `inject_connection_established` should get called - // after `inject_connected`. - - #[test] - fn test_prune_negative_scored_peers() { - let config = GossipsubConfig::default(); - - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some(( - PeerScoreParams::default(), - PeerScoreThresholds::default(), - ))) - .create_network(); - - //add penalty to peer - gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); - - //execute heartbeat - gs.heartbeat(); + //check that no reconnect after first heartbeat since `explicit_peer_ticks == 2` + assert_eq!( + gs.events + .iter() + .filter(|e| match e { + NetworkBehaviourAction::Dial { opts, handler: _ } => + opts.get_peer_id() == Some(*peer), + _ => false, + }) + .count(), + 0, + "There was a dial peer event before explicit_peer_ticks heartbeats" + ); - //peer should not be in mesh anymore - assert!(gs.mesh[&topics[0]].is_empty()); + gs.heartbeat(); - //check prune message - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] - && match m { - GossipsubControlAction::Prune { - topic_hash, - peers, - backoff, - } => - topic_hash == &topics[0] && - //no px in this case - peers.is_empty() && - backoff.unwrap() == config.prune_backoff().as_secs(), - _ => false, - }), - 1 - ); - } + //check that there is a reconnect after second heartbeat + assert!( + gs.events + .iter() + .filter(|e| match e { + NetworkBehaviourAction::Dial { opts, handler: _ } => + opts.get_peer_id() == Some(*peer), + _ => false, + }) + .count() + >= 1, + "There was no dial peer event for the explicit peer" + ); +} - #[test] - fn test_dont_graft_to_negative_scored_peers() { - let config = GossipsubConfig::default(); - //init full mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .scoring(Some(( - PeerScoreParams::default(), - PeerScoreThresholds::default(), - ))) - .create_network(); - - //add two additional peers that will not be part of the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); - - //reduce score of p1 to negative - gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 1); - - //handle prunes of all other peers - for p in peers { - gs.handle_prune(&p, vec![(topics[0].clone(), Vec::new(), None)]); - } +#[test] +fn 
test_handle_graft_explicit_peer() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(1) + .topics(vec![String::from("topic1"), String::from("topic2")]) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + let peer = peers.get(0).unwrap(); + + gs.handle_graft(peer, topic_hashes.clone()); + + //peer got not added to mesh + assert!(gs.mesh[&topic_hashes[0]].is_empty()); + assert!(gs.mesh[&topic_hashes[1]].is_empty()); + + //check prunes + assert!( + count_control_msgs(&gs, |peer_id, m| peer_id == peer + && match m { + GossipsubControlAction::Prune { topic_hash, .. } => + topic_hash == &topic_hashes[0] || topic_hash == &topic_hashes[1], + _ => false, + }) + >= 2, + "Not enough prunes sent when grafting from explicit peer" + ); +} - //heartbeat - gs.heartbeat(); +#[test] +fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { + let (gs, peers, topic_hashes) = inject_nodes1() + .peer_no(2) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + assert_eq!( + gs.mesh[&topic_hashes[0]], + vec![peers[1]].into_iter().collect() + ); + + //assert that graft gets created to non-explicit peer + assert!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] + && matches!(m, GossipsubControlAction::Graft { .. })) + >= 1, + "No graft message got created to non-explicit peer" + ); + + //assert that no graft gets created to explicit peer + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + && matches!(m, GossipsubControlAction::Graft { .. })), + 0, + "A graft message got created to an explicit peer" + ); +} - //assert that mesh only contains p2 - assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), 1); - assert!(gs.mesh.get(&topics[0]).unwrap().contains(&p2)); - } +#[test] +fn do_not_graft_explicit_peer() { + let (mut gs, others, topic_hashes) = inject_nodes1() + .peer_no(1) + .topics(vec![String::from("topic")]) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + gs.heartbeat(); + + //mesh stays empty + assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new()); + + //assert that no graft gets created to explicit peer + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &others[0] + && matches!(m, GossipsubControlAction::Graft { .. })), + 0, + "A graft message got created to an explicit peer" + ); +} - ///Note that in this test also without a penalty the px would be ignored because of the - /// acceptPXThreshold, but the spec still explicitely states the rule that px from negative - /// peers should get ignored, therefore we test it here. 
- #[test] - fn test_ignore_px_from_negative_scored_peer() { - let config = GossipsubConfig::default(); - - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .scoring(Some(( - PeerScoreParams::default(), - PeerScoreThresholds::default(), - ))) - .create_network(); - - //penalize peer - gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); - - //handle prune from single peer with px peers - let px = vec![PeerInfo { - peer_id: Some(PeerId::random()), - }]; +#[test] +fn do_forward_messages_to_explicit_peers() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(2) + .topics(vec![String::from("topic1"), String::from("topic2")]) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawGossipsubMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(message.clone(), &local_id); - gs.handle_prune( - &peers[0], - vec![( - topics[0].clone(), - px, - Some(config.prune_backoff().as_secs()), - )], - ); + assert_eq!( + gs.events + .iter() + .filter(|e| match e { + NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + peer_id == &peers[0] + && event + .messages + .iter() + .filter(|m| m.data == message.data) + .count() + > 0 + } else { + false + } + } + _ => false, + }) + .count(), + 1, + "The message did not get forwarded to the explicit peer" + ); +} - //assert no dials - assert_eq!( - gs.events - .iter() - .filter(|e| matches!(e, NetworkBehaviourAction::Dial { .. 
})) - .count(), - 0 +#[test] +fn explicit_peers_not_added_to_mesh_on_subscribe() { + let (mut gs, peers, _) = inject_nodes1() + .peer_no(2) + .topics(Vec::new()) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + //create new topic, both peers subscribing to it but we do not subscribe to it + let topic = Topic::new(String::from("t")); + let topic_hash = topic.hash(); + for peer in peers.iter().take(2) { + gs.handle_received_subscriptions( + &[GossipsubSubscription { + action: GossipsubSubscriptionAction::Subscribe, + topic_hash: topic_hash.clone(), + }], + peer, ); } - #[test] - fn test_only_send_nonnegative_scoring_peers_in_px() { - let config = GossipsubConfigBuilder::default() - .prune_peers(16) - .do_px() - .build() - .unwrap(); - - // Build mesh with three peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(3) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some(( - PeerScoreParams::default(), - PeerScoreThresholds::default(), - ))) - .create_network(); - - // Penalize first peer - gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); - - // Prune second peer - gs.send_graft_prune( - HashMap::new(), - vec![(peers[1], vec![topics[0].clone()])] - .into_iter() - .collect(), - HashSet::new(), - ); + //subscribe now to topic + gs.subscribe(&topic).unwrap(); + + //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); + + //assert that graft gets created to non-explicit peer + assert!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] + && matches!(m, GossipsubControlAction::Graft { .. })) + > 0, + "No graft message got created to non-explicit peer" + ); + + //assert that no graft gets created to explicit peer + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + && matches!(m, GossipsubControlAction::Graft { .. })), + 0, + "A graft message got created to an explicit peer" + ); +} - // Check that px in prune message only contains third peer - assert_eq!( - count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] - && match m { - GossipsubControlAction::Prune { - topic_hash, - peers: px, - .. 
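// Illustration (hypothetical helpers, not code from the crate): the peer-exchange
// rules the surrounding tests pin down. On a PRUNE carrying PX, at most
// `prune_peers` of the proposed peers are dialled, and the whole PX list is ignored
// when the pruning peer has a negative score (`test_connect_to_px_peers_on_handle_prune`,
// `test_ignore_px_from_negative_scored_peer`). Symmetrically, negatively scored peers
// are left out of the PX carried in our own PRUNE messages
// (`test_only_send_nonnegative_scoring_peers_in_px`). `PeerId` here is a placeholder type.
type PeerId = u64;

fn px_peers_to_dial(sender_score: f64, proposed: &[PeerId], prune_peers: usize) -> Vec<PeerId> {
    if sender_score < 0.0 {
        // PX from a negatively scored peer is ignored entirely.
        return Vec::new();
    }
    // The real behaviour picks at random; truncation stands in for
    // "at most `prune_peers` distinct peers out of the proposal".
    proposed.iter().copied().take(prune_peers).collect()
}

fn px_peers_to_advertise(candidates: &[(PeerId, f64)], prune_peers: usize) -> Vec<PeerId> {
    candidates
        .iter()
        .filter(|(_, score)| *score >= 0.0)
        .map(|(peer, _)| *peer)
        .take(prune_peers)
        .collect()
}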
- } => - topic_hash == &topics[0] - && px.len() == 1 - && px[0].peer_id.as_ref().unwrap() == &peers[2], - _ => false, - }), - 1 +#[test] +fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { + let (mut gs, peers, _) = inject_nodes1() + .peer_no(2) + .topics(Vec::new()) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + //create new topic, both peers subscribing to it but we do not subscribe to it + let topic = Topic::new(String::from("t")); + let topic_hash = topic.hash(); + for peer in peers.iter().take(2) { + gs.handle_received_subscriptions( + &[GossipsubSubscription { + action: GossipsubSubscriptionAction::Subscribe, + topic_hash: topic_hash.clone(), + }], + peer, ); } - #[test] - fn test_do_not_gossip_to_peers_below_gossip_threshold() { - let config = GossipsubConfig::default(); - let peer_score_params = PeerScoreParams::default(); - let peer_score_thresholds = PeerScoreThresholds { - gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, - ..PeerScoreThresholds::default() - }; - - // Build full mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - // Graft all the peer - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } + //we send a message for this topic => this will initialize the fanout + gs.publish(topic.clone(), vec![1, 2, 3]).unwrap(); + + //subscribe now to topic + gs.subscribe(&topic).unwrap(); + + //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); + + //assert that graft gets created to non-explicit peer + assert!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] + && matches!(m, GossipsubControlAction::Graft { .. })) + >= 1, + "No graft message got created to non-explicit peer" + ); + + //assert that no graft gets created to explicit peer + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + && matches!(m, GossipsubControlAction::Graft { .. })), + 0, + "A graft message got created to an explicit peer" + ); +} - // Add two additional peers that will not be part of the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); +#[test] +fn no_gossip_gets_sent_to_explicit_peers() { + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(2) + .topics(vec![String::from("topic1"), String::from("topic2")]) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(1) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawGossipsubMessage { + source: Some(peers[1]), + data: vec![], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; - // Reduce score of p1 below peer_score_thresholds.gossip_threshold - // note that penalties get squared so two penalties means a score of - // 4 * peer_score_params.behaviour_penalty_weight. 
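// Illustration (hypothetical helpers, not code from the crate): the arithmetic behind
// the recurring "penalties get squared" comments in these scoring tests. With the
// default parameters used here, the behaviour penalty contributes
// `count² * behaviour_penalty_weight` to the score and the weight is negative, so
// against a threshold of `3 * weight` a single penalty (1 * weight) still sits above
// the threshold while two penalties (4 * weight) fall below it.
fn behaviour_penalty_score(penalty_count: u32, behaviour_penalty_weight: f64) -> f64 {
    (penalty_count as f64).powi(2) * behaviour_penalty_weight
}

fn is_below_threshold(score: f64, threshold_factor: f64, behaviour_penalty_weight: f64) -> bool {
    // `behaviour_penalty_weight` is negative, so larger penalty counts push the
    // score further down: 4*w < 3*w < 1*w < 0 for w < 0.
    score < threshold_factor * behaviour_penalty_weight
}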
- gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + //forward the message + gs.handle_received_message(message, &local_id); - // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold - gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + //simulate multiple gossip calls (for randomness) + for _ in 0..3 { + gs.emit_gossip(); + } - // Receive message - let raw_message = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![], - sequence_number: Some(0), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; - gs.handle_received_message(raw_message.clone(), &PeerId::random()); + //assert that no gossip gets sent to explicit peer + assert_eq!( + gs.control_pool + .get(&peers[0]) + .unwrap_or(&Vec::new()) + .iter() + .filter(|m| matches!(m, GossipsubControlAction::IHave { .. })) + .count(), + 0, + "Gossip got emitted to explicit peer" + ); +} - // Transform the inbound message - let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); +// Tests the mesh maintenance addition +#[test] +fn test_mesh_addition() { + let config: GossipsubConfig = GossipsubConfig::default(); - let msg_id = gs.config.message_id(message); + // Adds mesh_low peers and PRUNE 2 giving us a deficit. + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n() + 1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); - // Emit gossip - gs.emit_gossip(); + let to_remove_peers = config.mesh_n() + 1 - config.mesh_n_low() - 1; - // Check that exactly one gossip messages got sent and it got sent to p2 - assert_eq!( - count_control_msgs(&gs, |peer, action| match action { - GossipsubControlAction::IHave { - topic_hash, - message_ids, - } => { - if topic_hash == &topics[0] && message_ids.iter().any(|id| id == &msg_id) { - assert_eq!(peer, &p2); - true - } else { - false - } - } - _ => false, - }), - 1 + for peer in peers.iter().take(to_remove_peers) { + gs.handle_prune( + peer, + topics.iter().map(|h| (h.clone(), vec![], None)).collect(), ); } - #[test] - fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { - let config = GossipsubConfig::default(); - let peer_score_params = PeerScoreParams::default(); - let peer_score_thresholds = PeerScoreThresholds { - gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, - ..PeerScoreThresholds::default() - }; - - // Build full mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - // Graft all the peer - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } + // Verify the pruned peers are removed from the mesh. + assert_eq!( + gs.mesh.get(&topics[0]).unwrap().len(), + config.mesh_n_low() - 1 + ); - // Add two additional peers that will not be part of the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); + // run a heartbeat + gs.heartbeat(); - // Reduce score of p1 below peer_score_thresholds.gossip_threshold - // note that penalties get squared so two penalties means a score of - // 4 * peer_score_params.behaviour_penalty_weight. 
- gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + // Peers should be added to reach mesh_n + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); +} - // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold - gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); +// Tests the mesh maintenance subtraction +#[test] +fn test_mesh_subtraction() { + let config = GossipsubConfig::default(); + + // Adds mesh_low peers and PRUNE 2 giving us a deficit. + let n = config.mesh_n_high() + 10; + //make all outbound connections so that we allow grafting to all + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(n) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .outbound(n) + .create_network(); + + // graft all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } - // Receive message - let raw_message = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![], - sequence_number: Some(0), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; - gs.handle_received_message(raw_message.clone(), &PeerId::random()); + // run a heartbeat + gs.heartbeat(); - // Transform the inbound message - let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + // Peers should be removed to reach mesh_n + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n()); +} - let msg_id = gs.config.message_id(message); +#[test] +fn test_connect_to_px_peers_on_handle_prune() { + let config: GossipsubConfig = GossipsubConfig::default(); - gs.handle_iwant(&p1, vec![msg_id.clone()]); - gs.handle_iwant(&p2, vec![msg_id.clone()]); + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); - // the messages we are sending - let sent_messages = gs - .events - .iter() - .fold(vec![], |mut collected_messages, e| match e { - NetworkBehaviourAction::NotifyHandler { event, peer_id, .. 
} => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for c in &event.messages { - collected_messages.push((*peer_id, c.clone())) - } - } - collected_messages - } - _ => collected_messages, - }); + //handle prune from single peer with px peers - //the message got sent to p2 - assert!(sent_messages - .iter() - .map(|(peer_id, msg)| ( - peer_id, - gs.data_transform.inbound_transform(msg.clone()).unwrap() - )) - .any(|(peer_id, msg)| peer_id == &p2 && gs.config.message_id(&msg) == msg_id)); - //the message got not sent to p1 - assert!(sent_messages - .iter() - .map(|(peer_id, msg)| ( - peer_id, - gs.data_transform.inbound_transform(msg.clone()).unwrap() - )) - .all(|(peer_id, msg)| !(peer_id == &p1 && gs.config.message_id(&msg) == msg_id))); + let mut px = Vec::new(); + //propose more px peers than config.prune_peers() + for _ in 0..config.prune_peers() + 5 { + px.push(PeerInfo { + peer_id: Some(PeerId::random()), + }); } - #[test] - fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { - let config = GossipsubConfig::default(); - let peer_score_params = PeerScoreParams::default(); - let peer_score_thresholds = PeerScoreThresholds { - gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, - ..PeerScoreThresholds::default() - }; - //build full mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - // graft all the peer - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } + gs.handle_prune( + &peers[0], + vec![( + topics[0].clone(), + px.clone(), + Some(config.prune_backoff().as_secs()), + )], + ); + + //Check DialPeer events for px peers + let dials: Vec<_> = gs + .events + .iter() + .filter_map(|e| match e { + NetworkBehaviourAction::Dial { opts, handler: _ } => opts.get_peer_id(), + _ => None, + }) + .collect(); + + // Exactly config.prune_peers() many random peers should be dialled + assert_eq!(dials.len(), config.prune_peers()); + + let dials_set: HashSet<_> = dials.into_iter().collect(); + + // No duplicates + assert_eq!(dials_set.len(), config.prune_peers()); + + //all dial peers must be in px + assert!(dials_set.is_subset( + &px.iter() + .map(|i| *i.peer_id.as_ref().unwrap()) + .collect::>() + )); +} - //add two additional peers that will not be part of the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); +#[test] +fn test_send_px_and_backoff_in_prune() { + let config: GossipsubConfig = GossipsubConfig::default(); + + //build mesh with enough peers for px + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.prune_peers() + 1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); + + //send prune to peer + gs.send_graft_prune( + HashMap::new(), + vec![(peers[0], vec![topics[0].clone()])] + .into_iter() + .collect(), + HashSet::new(), + ); + + //check prune message + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + && match m { + GossipsubControlAction::Prune { + topic_hash, + peers, + backoff, + } => + topic_hash == &topics[0] && + peers.len() == config.prune_peers() && + //all peers are different + peers.iter().collect::>().len() == + config.prune_peers() && + backoff.unwrap() == config.prune_backoff().as_secs(), + _ => false, + }), + 1 + ); +} - //reduce score of 
p1 below peer_score_thresholds.gossip_threshold - //note that penalties get squared so two penalties means a score of - // 4 * peer_score_params.behaviour_penalty_weight. - gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); +#[test] +fn test_prune_backoffed_peer_on_graft() { + let config: GossipsubConfig = GossipsubConfig::default(); + + //build mesh with enough peers for px + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.prune_peers() + 1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); + + //remove peer from mesh and send prune to peer => this adds a backoff for this peer + gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[0]); + gs.send_graft_prune( + HashMap::new(), + vec![(peers[0], vec![topics[0].clone()])] + .into_iter() + .collect(), + HashSet::new(), + ); - //reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold - gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + //ignore all messages until now + gs.events.clear(); - //message that other peers have - let raw_message = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![], - sequence_number: Some(0), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; + //handle graft + gs.handle_graft(&peers[0], vec![topics[0].clone()]); + + //check prune message + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + && match m { + GossipsubControlAction::Prune { + topic_hash, + peers, + backoff, + } => + topic_hash == &topics[0] && + //no px in this case + peers.is_empty() && + backoff.unwrap() == config.prune_backoff().as_secs(), + _ => false, + }), + 1 + ); +} - // Transform the inbound message - let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); +#[test] +fn test_do_not_graft_within_backoff_period() { + let config = GossipsubConfigBuilder::default() + .backoff_slack(1) + .heartbeat_interval(Duration::from_millis(100)) + .build() + .unwrap(); + //only one peer => mesh too small and will try to regraft as early as possible + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); - let msg_id = gs.config.message_id(message); + //handle prune from peer with backoff of one second + gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]); - gs.handle_ihave(&p1, vec![(topics[0].clone(), vec![msg_id.clone()])]); - gs.handle_ihave(&p2, vec![(topics[0].clone(), vec![msg_id.clone()])]); + //forget all events until now + flush_events(&mut gs); - // check that we sent exactly one IWANT request to p2 - assert_eq!( - count_control_msgs(&gs, |peer, c| match c { - GossipsubControlAction::IWant { message_ids } => - if message_ids.iter().any(|m| m == &msg_id) { - assert_eq!(peer, &p2); - true - } else { - false - }, - _ => false, - }), - 1 - ); - } + //call heartbeat + gs.heartbeat(); - #[test] - fn test_do_not_publish_to_peer_below_publish_threshold() { - let config = GossipsubConfigBuilder::default() - .flood_publish(false) - .build() - .unwrap(); - let peer_score_params = PeerScoreParams::default(); - let peer_score_thresholds = PeerScoreThresholds { - gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, - publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, - ..PeerScoreThresholds::default() - }; + //Sleep for one second and apply 10 regular heartbeats (interval = 100ms). 
+ for _ in 0..10 { + sleep(Duration::from_millis(100)); + gs.heartbeat(); + } - //build mesh with no peers and no subscribed topics - let (mut gs, _, _) = inject_nodes1() - .gs_config(config) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); + //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat + // is needed). + assert_eq!( + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )), + 0, + "Graft message created too early within backoff period" + ); + + //Heartbeat one more time this should graft now + sleep(Duration::from_millis(100)); + gs.heartbeat(); + + //check that graft got created + assert!( + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )) > 0, + "No graft message was created after backoff period" + ); +} - //create a new topic for which we are not subscribed - let topic = Topic::new("test"); - let topics = vec![topic.hash()]; +#[test] +fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without_backoff() { + //set default backoff period to 1 second + let config = GossipsubConfigBuilder::default() + .prune_backoff(Duration::from_millis(90)) + .backoff_slack(1) + .heartbeat_interval(Duration::from_millis(100)) + .build() + .unwrap(); + //only one peer => mesh too small and will try to regraft as early as possible + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + //handle prune from peer without a specified backoff + gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]); + + //forget all events until now + flush_events(&mut gs); + + //call heartbeat + gs.heartbeat(); + + //Apply one more heartbeat + sleep(Duration::from_millis(100)); + gs.heartbeat(); + + //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat + // is needed). + assert_eq!( + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )), + 0, + "Graft message created too early within backoff period" + ); + + //Heartbeat one more time this should graft now + sleep(Duration::from_millis(100)); + gs.heartbeat(); + + //check that graft got created + assert!( + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )) > 0, + "No graft message was created after backoff period" + ); +} - //add two additional peers that will be added to the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); +#[test] +fn test_unsubscribe_backoff() { + const HEARTBEAT_INTERVAL: Duration = Duration::from_millis(100); + let config = GossipsubConfigBuilder::default() + .backoff_slack(1) + // ensure a prune_backoff > unsubscribe_backoff + .prune_backoff(Duration::from_secs(5)) + .unsubscribe_backoff(1) + .heartbeat_interval(HEARTBEAT_INTERVAL) + .build() + .unwrap(); - //reduce score of p1 below peer_score_thresholds.publish_threshold - //note that penalties get squared so two penalties means a score of - // 4 * peer_score_params.behaviour_penalty_weight. 
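// Illustration (hypothetical helper, not code from the crate): the timing rule the
// backoff tests above exercise. After a PRUNE, re-grafting stays blocked for the
// announced (or default) backoff plus `backoff_slack` extra heartbeat intervals.
// With the numbers used above (backoff = 1 s, heartbeat = 100 ms, slack = 1), the
// ten heartbeats inside the first second are still blocked and the next one may
// graft again.
use std::time::Duration;

fn may_regraft(
    elapsed_since_prune: Duration,
    backoff: Duration,
    heartbeat_interval: Duration,
    backoff_slack: u32,
) -> bool {
    elapsed_since_prune >= backoff + heartbeat_interval * backoff_slack
}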
- gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + let topic = String::from("test"); + // only one peer => mesh too small and will try to regraft as early as possible + let (mut gs, _, topics) = inject_nodes1() + .peer_no(1) + .topics(vec![topic.clone()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); - //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold - gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + let _ = gs.unsubscribe(&Topic::new(topic)); - //a heartbeat will remove the peers from the mesh - gs.heartbeat(); + assert_eq!( + count_control_msgs(&gs, |_, m| match m { + GossipsubControlAction::Prune { backoff, .. } => backoff == &Some(1), + _ => false, + }), + 1, + "Peer should be pruned with `unsubscribe_backoff`." + ); - // publish on topic - let publish_data = vec![0; 42]; - gs.publish(topic, publish_data).unwrap(); + let _ = gs.subscribe(&Topic::new(topics[0].to_string())); - // Collect all publish messages - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { event, peer_id, .. } => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push((*peer_id, s.clone())); - } - } - collected_publish - } - _ => collected_publish, - }); + // forget all events until now + flush_events(&mut gs); - //assert only published to p2 - assert_eq!(publishes.len(), 1); - assert_eq!(publishes[0].0, p2); - } + // call heartbeat + gs.heartbeat(); - #[test] - fn test_do_not_flood_publish_to_peer_below_publish_threshold() { - let config = GossipsubConfig::default(); - let peer_score_params = PeerScoreParams::default(); - let peer_score_thresholds = PeerScoreThresholds { - gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, - publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, - ..PeerScoreThresholds::default() - }; - //build mesh with no peers - let (mut gs, _, topics) = inject_nodes1() - .topics(vec!["test".into()]) - .gs_config(config) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - //add two additional peers that will be added to the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); - - //reduce score of p1 below peer_score_thresholds.publish_threshold - //note that penalties get squared so two penalties means a score of - // 4 * peer_score_params.behaviour_penalty_weight. - gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - - //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold - gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - - //a heartbeat will remove the peers from the mesh + // Sleep for one second and apply 10 regular heartbeats (interval = 100ms). + for _ in 0..10 { + sleep(HEARTBEAT_INTERVAL); gs.heartbeat(); + } - // publish on topic - let publish_data = vec![0; 42]; - gs.publish(Topic::new("test"), publish_data).unwrap(); + // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat + // is needed). + assert_eq!( + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. 
} + )), + 0, + "Graft message created too early within backoff period" + ); + + // Heartbeat one more time this should graft now + sleep(HEARTBEAT_INTERVAL); + gs.heartbeat(); + + // check that graft got created + assert!( + count_control_msgs(&gs, |_, m| matches!( + m, + GossipsubControlAction::Graft { .. } + )) > 0, + "No graft message was created after backoff period" + ); +} - // Collect all publish messages - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { event, peer_id, .. } => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push((*peer_id, s.clone())); - } +#[test] +fn test_flood_publish() { + let config: GossipsubConfig = GossipsubConfig::default(); + + let topic = "test"; + // Adds more peers than mesh can hold to test flood publishing + let (mut gs, _, _) = inject_nodes1() + .peer_no(config.mesh_n_high() + 10) + .topics(vec![topic.into()]) + .to_subscribe(true) + .create_network(); + + //publish message + let publish_data = vec![0; 42]; + gs.publish(Topic::new(topic), publish_data).unwrap(); + + // Collect all publish messages + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for s in &event.messages { + collected_publish.push(s.clone()); } - collected_publish } - _ => collected_publish, - }); - - //assert only published to p2 - assert_eq!(publishes.len(), 1); - assert!(publishes[0].0 == p2); - } - - #[test] - fn test_ignore_rpc_from_peers_below_graylist_threshold() { - let config = GossipsubConfig::default(); - let peer_score_params = PeerScoreParams::default(); - let peer_score_thresholds = PeerScoreThresholds { - gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, - publish_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, - graylist_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, - ..PeerScoreThresholds::default() - }; + collected_publish + } + _ => collected_publish, + }); - //build mesh with no peers - let (mut gs, _, topics) = inject_nodes1() - .topics(vec!["test".into()]) - .gs_config(config.clone()) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform( + publishes + .first() + .expect("Should contain > 0 entries") + .clone(), + ) + .unwrap(); - //add two additional peers that will be added to the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); + let msg_id = gs.config.message_id(message); - //reduce score of p1 below peer_score_thresholds.graylist_threshold - //note that penalties get squared so two penalties means a score of - // 4 * peer_score_params.behaviour_penalty_weight. 
- gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + let config: GossipsubConfig = GossipsubConfig::default(); + assert_eq!( + publishes.len(), + config.mesh_n_high() + 10, + "Should send a publish message to all known peers" + ); - //reduce score of p2 below publish_threshold but not below graylist_threshold - gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + assert!( + gs.mcache.get(&msg_id).is_some(), + "Message cache should contain published message" + ); +} - let raw_message1 = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![1, 2, 3, 4], - sequence_number: Some(1u64), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; +#[test] +fn test_gossip_to_at_least_gossip_lazy_peers() { + let config: GossipsubConfig = GossipsubConfig::default(); + + //add more peers than in mesh to test gossipping + //by default only mesh_n_low peers will get added to mesh + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(config.mesh_n_low() + config.gossip_lazy() + 1) + .topics(vec!["topic".into()]) + .to_subscribe(true) + .create_network(); + + //receive message + let raw_message = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(raw_message.clone(), &PeerId::random()); - let raw_message2 = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![1, 2, 3, 4, 5], - sequence_number: Some(2u64), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; + //emit gossip + gs.emit_gossip(); - let raw_message3 = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![1, 2, 3, 4, 5, 6], - sequence_number: Some(3u64), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); - let raw_message4 = RawGossipsubMessage { - source: Some(PeerId::random()), - data: vec![1, 2, 3, 4, 5, 6, 7], - sequence_number: Some(4u64), - topic: topics[0].clone(), - signature: None, - key: None, - validated: true, - }; + let msg_id = gs.config.message_id(message); - // Transform the inbound message - let message2 = &gs.data_transform.inbound_transform(raw_message2).unwrap(); + //check that exactly config.gossip_lazy() many gossip messages were sent. 
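+ // Gossip goes out as IHAVE control messages to peers outside the mesh; with only
+ // gossip_lazy + 1 non-mesh peers here, the gossip_lazy floor (rather than the
+ // gossip_factor fraction) determines how many peers get selected.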
+ assert_eq!( + count_control_msgs(&gs, |_, action| match action { + GossipsubControlAction::IHave { + topic_hash, + message_ids, + } => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), + _ => false, + }), + config.gossip_lazy() + ); +} - // Transform the inbound message - let message4 = &gs.data_transform.inbound_transform(raw_message4).unwrap(); +#[test] +fn test_gossip_to_at_most_gossip_factor_peers() { + let config: GossipsubConfig = GossipsubConfig::default(); + + //add a lot of peers + let m = + config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(m) + .topics(vec!["topic".into()]) + .to_subscribe(true) + .create_network(); + + //receive message + let raw_message = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(raw_message.clone(), &PeerId::random()); + + //emit gossip + gs.emit_gossip(); + + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + + let msg_id = gs.config.message_id(message); + //check that exactly config.gossip_lazy() many gossip messages were sent. + assert_eq!( + count_control_msgs(&gs, |_, action| match action { + GossipsubControlAction::IHave { + topic_hash, + message_ids, + } => topic_hash == &topic_hashes[0] && message_ids.iter().any(|id| id == &msg_id), + _ => false, + }), + ((m - config.mesh_n_low()) as f64 * config.gossip_factor()) as usize + ); +} - let subscription = GossipsubSubscription { - action: GossipsubSubscriptionAction::Subscribe, - topic_hash: topics[0].clone(), - }; +#[test] +fn test_accept_only_outbound_peer_grafts_when_mesh_full() { + let config: GossipsubConfig = GossipsubConfig::default(); - let control_action = GossipsubControlAction::IHave { - topic_hash: topics[0].clone(), - message_ids: vec![config.message_id(message2)], - }; + //enough peers to fill the mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); - //clear events - gs.events.clear(); - - //receive from p1 - gs.inject_event( - p1, - ConnectionId::new(0), - HandlerEvent::Message { - rpc: GossipsubRpc { - messages: vec![raw_message1], - subscriptions: vec![subscription.clone()], - control_msgs: vec![control_action], - }, - invalid_messages: Vec::new(), - }, - ); + // graft all the peers => this will fill the mesh + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } - //only the subscription event gets processed, the rest is dropped - assert_eq!(gs.events.len(), 1); - assert!(matches!( - gs.events[0], - NetworkBehaviourAction::GenerateEvent(GossipsubEvent::Subscribed { .. 
}) - )); + //assert current mesh size + assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); - let control_action = GossipsubControlAction::IHave { - topic_hash: topics[0].clone(), - message_ids: vec![config.message_id(message4)], - }; + //create an outbound and an inbound peer + let inbound = add_peer(&mut gs, &topics, false, false); + let outbound = add_peer(&mut gs, &topics, true, false); - //receive from p2 - gs.inject_event( - p2, - ConnectionId::new(0), - HandlerEvent::Message { - rpc: GossipsubRpc { - messages: vec![raw_message3], - subscriptions: vec![subscription], - control_msgs: vec![control_action], - }, - invalid_messages: Vec::new(), - }, - ); + //send grafts + gs.handle_graft(&inbound, vec![topics[0].clone()]); + gs.handle_graft(&outbound, vec![topics[0].clone()]); - //events got processed - assert!(gs.events.len() > 1); - } + //assert mesh size + assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1); - #[test] - fn test_ignore_px_from_peers_below_accept_px_threshold() { - let config = GossipsubConfigBuilder::default() - .prune_peers(16) - .build() - .unwrap(); - let peer_score_params = PeerScoreParams::default(); - let peer_score_thresholds = PeerScoreThresholds { - accept_px_threshold: peer_score_params.app_specific_weight, - ..PeerScoreThresholds::default() - }; - // Build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(2) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - // Decrease score of first peer to less than accept_px_threshold - gs.set_application_score(&peers[0], 0.99); - - // Increase score of second peer to accept_px_threshold - gs.set_application_score(&peers[1], 1.0); - - // Handle prune from peer peers[0] with px peers - let px = vec![PeerInfo { - peer_id: Some(PeerId::random()), - }]; - gs.handle_prune( - &peers[0], - vec![( - topics[0].clone(), - px, - Some(config.prune_backoff().as_secs()), - )], - ); + //inbound is not in mesh + assert!(!gs.mesh[&topics[0]].contains(&inbound)); - // Assert no dials - assert_eq!( - gs.events - .iter() - .filter(|e| matches!(e, NetworkBehaviourAction::Dial { .. })) - .count(), - 0 - ); + //outbound is in mesh + assert!(gs.mesh[&topics[0]].contains(&outbound)); +} - //handle prune from peer peers[1] with px peers - let px = vec![PeerInfo { - peer_id: Some(PeerId::random()), - }]; - gs.handle_prune( - &peers[1], - vec![( - topics[0].clone(), - px, - Some(config.prune_backoff().as_secs()), - )], - ); +#[test] +fn test_do_not_remove_too_many_outbound_peers() { + //use an extreme case to catch errors with high probability + let m = 50; + let n = 2 * m; + let config = GossipsubConfigBuilder::default() + .mesh_n_high(n) + .mesh_n(n) + .mesh_n_low(n) + .mesh_outbound_min(m) + .build() + .unwrap(); - //assert there are dials now - assert!( - gs.events - .iter() - .filter(|e| matches!(e, NetworkBehaviourAction::Dial { .. 
})) - .count() - > 0 - ); + //fill the mesh with inbound connections + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(n) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + // graft all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); } - #[test] - fn test_keep_best_scoring_peers_on_oversubscription() { - let config = GossipsubConfigBuilder::default() - .mesh_n_low(15) - .mesh_n(30) - .mesh_n_high(60) - .retain_scores(29) - .build() - .unwrap(); + //create m outbound connections and graft (we will accept the graft) + let mut outbound = HashSet::new(); + for _ in 0..m { + let peer = add_peer(&mut gs, &topics, true, false); + outbound.insert(peer); + gs.handle_graft(&peer, topics.clone()); + } - //build mesh with more peers than mesh can hold - let n = config.mesh_n_high() + 1; - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(n) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(n) - .scoring(Some(( - PeerScoreParams::default(), - PeerScoreThresholds::default(), - ))) - .create_network(); - - // graft all, will be accepted since the are outbound - for peer in &peers { - gs.handle_graft(peer, topics.clone()); - } + //mesh is overly full + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m); - //assign scores to peers equalling their index + // run a heartbeat + gs.heartbeat(); - //set random positive scores - for (index, peer) in peers.iter().enumerate() { - gs.set_application_score(peer, index as f64); - } + // Peers should be removed to reach n + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n); - assert_eq!(gs.mesh[&topics[0]].len(), n); + //all outbound peers are still in the mesh + assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p))); +} - //heartbeat to prune some peers - gs.heartbeat(); +#[test] +fn test_add_outbound_peers_if_min_is_not_satisfied() { + let config: GossipsubConfig = GossipsubConfig::default(); - assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n()); + // Fill full mesh with inbound peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); - //mesh contains retain_scores best peers - assert!(gs.mesh[&topics[0]].is_superset( - &peers[(n - config.retain_scores())..] 
- .iter() - .cloned() - .collect() - )); + // graft all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); } - #[test] - fn test_scoring_p1() { - let config = GossipsubConfig::default(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let topic_params = TopicScoreParams { - time_in_mesh_weight: 2.0, - time_in_mesh_quantum: Duration::from_millis(50), - time_in_mesh_cap: 10.0, - topic_weight: 0.7, - ..TopicScoreParams::default() - }; - peer_score_params - .topics - .insert(topic_hash, topic_params.clone()); - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with one peer - let (mut gs, peers, _) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - //sleep for 2 times the mesh_quantum - sleep(topic_params.time_in_mesh_quantum * 2); - //refresh scores - gs.peer_score.as_mut().unwrap().0.refresh_scores(); - assert!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]) - >= 2.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight, - "score should be at least 2 * time_in_mesh_weight * topic_weight" - ); - assert!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]) - < 3.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight, - "score should be less than 3 * time_in_mesh_weight * topic_weight" - ); - - //sleep again for 2 times the mesh_quantum - sleep(topic_params.time_in_mesh_quantum * 2); - //refresh scores - gs.peer_score.as_mut().unwrap().0.refresh_scores(); - assert!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]) - >= 2.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight, - "score should be at least 4 * time_in_mesh_weight * topic_weight" - ); - - //sleep for enough periods to reach maximum - sleep(topic_params.time_in_mesh_quantum * (topic_params.time_in_mesh_cap - 3.0) as u32); - //refresh scores - gs.peer_score.as_mut().unwrap().0.refresh_scores(); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - topic_params.time_in_mesh_cap - * topic_params.time_in_mesh_weight - * topic_params.topic_weight, - "score should be exactly time_in_mesh_cap * time_in_mesh_weight * topic_weight" - ); + //create config.mesh_outbound_min() many outbound connections without grafting + for _ in 0..config.mesh_outbound_min() { + add_peer(&mut gs, &topics, true, false); } - fn random_message(seq: &mut u64, topics: &Vec) -> RawGossipsubMessage { - let mut rng = rand::thread_rng(); - *seq += 1; - RawGossipsubMessage { - source: Some(PeerId::random()), - data: (0..rng.gen_range(10..30)) - .into_iter() - .map(|_| rng.gen()) - .collect(), - sequence_number: Some(*seq), - topic: topics[rng.gen_range(0..topics.len())].clone(), - signature: None, - key: None, - validated: true, - } - } + // Nothing changed in the mesh yet + assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); - #[test] - fn test_scoring_p2() { - let config = GossipsubConfig::default(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 2.0, - first_message_deliveries_cap: 10.0, - first_message_deliveries_decay: 0.9, - topic_weight: 0.7, - ..TopicScoreParams::default() - }; - peer_score_params - 
.topics - .insert(topic_hash, topic_params.clone()); - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(2) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + // run a heartbeat + gs.heartbeat(); - let m1 = random_message(&mut seq, &topics); - //peer 0 delivers message first - deliver_message(&mut gs, 0, m1.clone()); - //peer 1 delivers message second - deliver_message(&mut gs, 1, m1); + // The outbound peers got additionally added + assert_eq!( + gs.mesh[&topics[0]].len(), + config.mesh_n_high() + config.mesh_outbound_min() + ); +} - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 1.0 * topic_params.first_message_deliveries_weight * topic_params.topic_weight, - "score should be exactly first_message_deliveries_weight * topic_weight" - ); +//TODO add a test that ensures that new outbound connections are recognized as such. +// This is at the moment done in behaviour with relying on the fact that the call to +// `inject_connection_established` for the first connection is done before `inject_connected` +// gets called. For all further connections `inject_connection_established` should get called +// after `inject_connected`. + +#[test] +fn test_prune_negative_scored_peers() { + let config = GossipsubConfig::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + //add penalty to peer + gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); + + //execute heartbeat + gs.heartbeat(); + + //peer should not be in mesh anymore + assert!(gs.mesh[&topics[0]].is_empty()); + + //check prune message + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[0] + && match m { + GossipsubControlAction::Prune { + topic_hash, + peers, + backoff, + } => + topic_hash == &topics[0] && + //no px in this case + peers.is_empty() && + backoff.unwrap() == config.prune_backoff().as_secs(), + _ => false, + }), + 1 + ); +} - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - 0.0, - "there should be no score for second message deliveries * topic_weight" - ); +#[test] +fn test_dont_graft_to_negative_scored_peers() { + let config = GossipsubConfig::default(); + //init full mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + //add two additional peers that will not be part of the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + //reduce score of p1 to negative + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 1); + + //handle prunes of all other peers + for p in peers { + gs.handle_prune(&p, vec![(topics[0].clone(), Vec::new(), None)]); + } - //peer 2 delivers two new messages - deliver_message(&mut gs, 1, 
random_message(&mut seq, &topics)); - deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - 2.0 * topic_params.first_message_deliveries_weight * topic_params.topic_weight, - "score should be exactly 2 * first_message_deliveries_weight * topic_weight" - ); + //heartbeat + gs.heartbeat(); - //test decaying - gs.peer_score.as_mut().unwrap().0.refresh_scores(); + //assert that mesh only contains p2 + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), 1); + assert!(gs.mesh.get(&topics[0]).unwrap().contains(&p2)); +} - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 1.0 * topic_params.first_message_deliveries_decay - * topic_params.first_message_deliveries_weight - * topic_params.topic_weight, - "score should be exactly first_message_deliveries_decay * \ - first_message_deliveries_weight * topic_weight" - ); +///Note that in this test also without a penalty the px would be ignored because of the +/// acceptPXThreshold, but the spec still explicitely states the rule that px from negative +/// peers should get ignored, therefore we test it here. +#[test] +fn test_ignore_px_from_negative_scored_peer() { + let config = GossipsubConfig::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + //penalize peer + gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); + + //handle prune from single peer with px peers + let px = vec![PeerInfo { + peer_id: Some(PeerId::random()), + }]; + + gs.handle_prune( + &peers[0], + vec![( + topics[0].clone(), + px, + Some(config.prune_backoff().as_secs()), + )], + ); + + //assert no dials + assert_eq!( + gs.events + .iter() + .filter(|e| matches!(e, NetworkBehaviourAction::Dial { .. })) + .count(), + 0 + ); +} - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - 2.0 * topic_params.first_message_deliveries_decay - * topic_params.first_message_deliveries_weight - * topic_params.topic_weight, - "score should be exactly 2 * first_message_deliveries_decay * \ - first_message_deliveries_weight * topic_weight" - ); +#[test] +fn test_only_send_nonnegative_scoring_peers_in_px() { + let config = GossipsubConfigBuilder::default() + .prune_peers(16) + .do_px() + .build() + .unwrap(); - //test cap - for _ in 0..topic_params.first_message_deliveries_cap as u64 { - deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); - } + // Build mesh with three peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(3) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + // Penalize first peer + gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); + + // Prune second peer + gs.send_graft_prune( + HashMap::new(), + vec![(peers[1], vec![topics[0].clone()])] + .into_iter() + .collect(), + HashSet::new(), + ); + + // Check that px in prune message only contains third peer + assert_eq!( + count_control_msgs(&gs, |peer_id, m| peer_id == &peers[1] + && match m { + GossipsubControlAction::Prune { + topic_hash, + peers: px, + .. 
+ } => + topic_hash == &topics[0] + && px.len() == 1 + && px[0].peer_id.as_ref().unwrap() == &peers[2], + _ => false, + }), + 1 + ); +} - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - topic_params.first_message_deliveries_cap - * topic_params.first_message_deliveries_weight - * topic_params.topic_weight, - "score should be exactly first_message_deliveries_cap * \ - first_message_deliveries_weight * topic_weight" - ); - } +#[test] +fn test_do_not_gossip_to_peers_below_gossip_threshold() { + let config = GossipsubConfig::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; - #[test] - fn test_scoring_p3() { - let config = GossipsubConfig::default(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: -2.0, - mesh_message_deliveries_decay: 0.9, - mesh_message_deliveries_cap: 0.0, - mesh_message_deliveries_threshold: 5.0, - mesh_message_deliveries_activation: Duration::from_secs(1), - mesh_message_deliveries_window: Duration::from_millis(100), - topic_weight: 0.7, - ..TopicScoreParams::default() - }; - peer_score_params.topics.insert(topic_hash, topic_params); - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(2) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + // Build full mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + // Graft all the peer + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } - let mut expected_message_deliveries = 0.0; + // Add two additional peers that will not be part of the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + // Reduce score of p1 below peer_score_thresholds.gossip_threshold + // note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. 
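+ // i.e. add_penalty(&p1, 2) yields a squared behaviour penalty of 2^2 = 4 times
+ // behaviour_penalty_weight, dropping p1 below the gossip_threshold of
+ // 3 * behaviour_penalty_weight configured above, while p2's single penalty below does not.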
+ gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + // Receive message + let raw_message = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(raw_message.clone(), &PeerId::random()); + + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); + + let msg_id = gs.config.message_id(message); + + // Emit gossip + gs.emit_gossip(); + + // Check that exactly one gossip messages got sent and it got sent to p2 + assert_eq!( + count_control_msgs(&gs, |peer, action| match action { + GossipsubControlAction::IHave { + topic_hash, + message_ids, + } => { + if topic_hash == &topics[0] && message_ids.iter().any(|id| id == &msg_id) { + assert_eq!(peer, &p2); + true + } else { + false + } + } + _ => false, + }), + 1 + ); +} - //messages used to test window - let m1 = random_message(&mut seq, &topics); - let m2 = random_message(&mut seq, &topics); +#[test] +fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { + let config = GossipsubConfig::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; - //peer 1 delivers m1 - deliver_message(&mut gs, 1, m1.clone()); + // Build full mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + // Graft all the peer + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } - //peer 0 delivers two message - deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); - deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); - expected_message_deliveries += 2.0; + // Add two additional peers that will not be part of the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + // Reduce score of p1 below peer_score_thresholds.gossip_threshold + // note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. 
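+ // Two penalties square to 4 * behaviour_penalty_weight, pushing p1 below the gossip
+ // threshold, so its IWANT later in this test must be ignored; p2's single penalty
+ // keeps it above the threshold and its IWANT is still answered.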
+ gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + // Receive message + let raw_message = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(raw_message.clone(), &PeerId::random()); - sleep(Duration::from_millis(60)); + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); - //peer 1 delivers m2 - deliver_message(&mut gs, 1, m2.clone()); + let msg_id = gs.config.message_id(message); - sleep(Duration::from_millis(70)); - //peer 0 delivers m1 and m2 only m2 gets counted - deliver_message(&mut gs, 0, m1); - deliver_message(&mut gs, 0, m2); - expected_message_deliveries += 1.0; + gs.handle_iwant(&p1, vec![msg_id.clone()]); + gs.handle_iwant(&p2, vec![msg_id.clone()]); - sleep(Duration::from_millis(900)); + // the messages we are sending + let sent_messages = gs + .events + .iter() + .fold(vec![], |mut collected_messages, e| match e { + NetworkBehaviourAction::NotifyHandler { event, peer_id, .. } => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for c in &event.messages { + collected_messages.push((*peer_id, c.clone())) + } + } + collected_messages + } + _ => collected_messages, + }); - //message deliveries penalties get activated, peer 0 has only delivered 3 messages and - // therefore gets a penalty - gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + //the message got sent to p2 + assert!(sent_messages + .iter() + .map(|(peer_id, msg)| ( + peer_id, + gs.data_transform.inbound_transform(msg.clone()).unwrap() + )) + .any(|(peer_id, msg)| peer_id == &p2 && gs.config.message_id(&msg) == msg_id)); + //the message got not sent to p1 + assert!(sent_messages + .iter() + .map(|(peer_id, msg)| ( + peer_id, + gs.data_transform.inbound_transform(msg.clone()).unwrap() + )) + .all(|(peer_id, msg)| !(peer_id == &p1 && gs.config.message_id(&msg) == msg_id))); +} - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 - ); +#[test] +fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { + let config = GossipsubConfig::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; + //build full mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + // graft all the peer + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } - // peer 0 delivers a lot of messages => message_deliveries should be capped at 10 - for _ in 0..20 { - deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); - } + //add two additional peers that will not be part of the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + //reduce score of p1 below peer_score_thresholds.gossip_threshold + //note that penalties get squared so two 
penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + //reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + //message that other peers have + let raw_message = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![], + sequence_number: Some(0), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; - expected_message_deliveries = 10.0; + // Transform the inbound message + let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + let msg_id = gs.config.message_id(message); - //apply 10 decays - for _ in 0..10 { - gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay - } + gs.handle_ihave(&p1, vec![(topics[0].clone(), vec![msg_id.clone()])]); + gs.handle_ihave(&p2, vec![(topics[0].clone(), vec![msg_id.clone()])]); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 - ); - } + // check that we sent exactly one IWANT request to p2 + assert_eq!( + count_control_msgs(&gs, |peer, c| match c { + GossipsubControlAction::IWant { message_ids } => + if message_ids.iter().any(|m| m == &msg_id) { + assert_eq!(peer, &p2); + true + } else { + false + }, + _ => false, + }), + 1 + ); +} - #[test] - fn test_scoring_p3b() { - let config = GossipsubConfigBuilder::default() - .prune_backoff(Duration::from_millis(100)) - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: -2.0, - mesh_message_deliveries_decay: 0.9, - mesh_message_deliveries_cap: 10.0, - mesh_message_deliveries_threshold: 5.0, - mesh_message_deliveries_activation: Duration::from_secs(1), - mesh_message_deliveries_window: Duration::from_millis(100), - mesh_failure_penalty_weight: -3.0, - mesh_failure_penalty_decay: 0.95, - topic_weight: 0.7, - ..Default::default() - }; - peer_score_params.topics.insert(topic_hash, topic_params); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; +#[test] +fn test_do_not_publish_to_peer_below_publish_threshold() { + let config = GossipsubConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; - let mut expected_message_deliveries = 0.0; + //build mesh with no peers and no subscribed topics + let (mut gs, _, _) = 
inject_nodes1() + .gs_config(config) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + //create a new topic for which we are not subscribed + let topic = Topic::new("test"); + let topics = vec![topic.hash()]; + + //add two additional peers that will be added to the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + //reduce score of p1 below peer_score_thresholds.publish_threshold + //note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + //a heartbeat will remove the peers from the mesh + gs.heartbeat(); + + // publish on topic + let publish_data = vec![0; 42]; + gs.publish(topic, publish_data).unwrap(); + + // Collect all publish messages + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { event, peer_id, .. } => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for s in &event.messages { + collected_publish.push((*peer_id, s.clone())); + } + } + collected_publish + } + _ => collected_publish, + }); - //add some positive score - gs.peer_score - .as_mut() - .unwrap() - .0 - .set_application_score(&peers[0], 100.0); + //assert only published to p2 + assert_eq!(publishes.len(), 1); + assert_eq!(publishes[0].0, p2); +} - //peer 0 delivers two message - deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); - deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); - expected_message_deliveries += 2.0; +#[test] +fn test_do_not_flood_publish_to_peer_below_publish_threshold() { + let config = GossipsubConfig::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; + //build mesh with no peers + let (mut gs, _, topics) = inject_nodes1() + .topics(vec!["test".into()]) + .gs_config(config) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + //add two additional peers that will be added to the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + //reduce score of p1 below peer_score_thresholds.publish_threshold + //note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + //a heartbeat will remove the peers from the mesh + gs.heartbeat(); + + // publish on topic + let publish_data = vec![0; 42]; + gs.publish(Topic::new("test"), publish_data).unwrap(); + + // Collect all publish messages + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { event, peer_id, .. 
} => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for s in &event.messages { + collected_publish.push((*peer_id, s.clone())); + } + } + collected_publish + } + _ => collected_publish, + }); - sleep(Duration::from_millis(1050)); + //assert only published to p2 + assert_eq!(publishes.len(), 1); + assert!(publishes[0].0 == p2); +} - //activation kicks in - gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay +#[test] +fn test_ignore_rpc_from_peers_below_graylist_threshold() { + let config = GossipsubConfig::default(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + gossip_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + publish_threshold: 0.5 * peer_score_params.behaviour_penalty_weight, + graylist_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, + ..PeerScoreThresholds::default() + }; - //prune peer - gs.handle_prune(&peers[0], vec![(topics[0].clone(), vec![], None)]); + //build mesh with no peers + let (mut gs, _, topics) = inject_nodes1() + .topics(vec!["test".into()]) + .gs_config(config.clone()) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + //add two additional peers that will be added to the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); + + //reduce score of p1 below peer_score_thresholds.graylist_threshold + //note that penalties get squared so two penalties means a score of + // 4 * peer_score_params.behaviour_penalty_weight. + gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); + + //reduce score of p2 below publish_threshold but not below graylist_threshold + gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + + let raw_message1 = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![1, 2, 3, 4], + sequence_number: Some(1u64), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; - //wait backoff - sleep(Duration::from_millis(130)); + let raw_message2 = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![1, 2, 3, 4, 5], + sequence_number: Some(2u64), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; - //regraft peer - gs.handle_graft(&peers[0], topics.clone()); + let raw_message3 = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![1, 2, 3, 4, 5, 6], + sequence_number: Some(3u64), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; - //the score should now consider p3b - let mut expected_b3 = (5f64 - expected_message_deliveries).powi(2); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 100.0 + expected_b3 * -3.0 * 0.7 - ); + let raw_message4 = RawGossipsubMessage { + source: Some(PeerId::random()), + data: vec![1, 2, 3, 4, 5, 6, 7], + sequence_number: Some(4u64), + topic: topics[0].clone(), + signature: None, + key: None, + validated: true, + }; - //we can also add a new p3 to the score + // Transform the inbound message + let message2 = &gs.data_transform.inbound_transform(raw_message2).unwrap(); - //peer 0 delivers one message - deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); - expected_message_deliveries += 1.0; + // Transform the inbound message + let message4 = &gs.data_transform.inbound_transform(raw_message4).unwrap(); - sleep(Duration::from_millis(1050)); - 
gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay - expected_b3 *= 0.95; + let subscription = GossipsubSubscription { + action: GossipsubSubscriptionAction::Subscribe, + topic_hash: topics[0].clone(), + }; - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 100.0 - + (expected_b3 * -3.0 + (5f64 - expected_message_deliveries).powi(2) * -2.0) * 0.7 - ); - } + let control_action = GossipsubControlAction::IHave { + topic_hash: topics[0].clone(), + message_ids: vec![config.message_id(message2)], + }; - #[test] - fn test_scoring_p4_valid_message() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties - invalid_message_deliveries_weight: -2.0, - invalid_message_deliveries_decay: 0.9, - topic_weight: 0.7, - ..Default::default() - }; - peer_score_params.topics.insert(topic_hash, topic_params); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + //clear events + gs.events.clear(); + + //receive from p1 + gs.inject_event( + p1, + ConnectionId::new(0), + HandlerEvent::Message { + rpc: GossipsubRpc { + messages: vec![raw_message1], + subscriptions: vec![subscription.clone()], + control_msgs: vec![control_action], + }, + invalid_messages: Vec::new(), + }, + ); + + //only the subscription event gets processed, the rest is dropped + assert_eq!(gs.events.len(), 1); + assert!(matches!( + gs.events[0], + NetworkBehaviourAction::GenerateEvent(GossipsubEvent::Subscribed { .. 
}) + )); + + let control_action = GossipsubControlAction::IHave { + topic_hash: topics[0].clone(), + message_ids: vec![config.message_id(message4)], + }; - //peer 0 delivers valid message - let m1 = random_message(&mut seq, &topics); - deliver_message(&mut gs, 0, m1.clone()); + //receive from p2 + gs.inject_event( + p2, + ConnectionId::new(0), + HandlerEvent::Message { + rpc: GossipsubRpc { + messages: vec![raw_message3], + subscriptions: vec![subscription], + control_msgs: vec![control_action], + }, + invalid_messages: Vec::new(), + }, + ); - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + //events got processed + assert!(gs.events.len() > 1); +} - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +#[test] +fn test_ignore_px_from_peers_below_accept_px_threshold() { + let config = GossipsubConfigBuilder::default() + .prune_peers(16) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams::default(); + let peer_score_thresholds = PeerScoreThresholds { + accept_px_threshold: peer_score_params.app_specific_weight, + ..PeerScoreThresholds::default() + }; + // Build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + // Decrease score of first peer to less than accept_px_threshold + gs.set_application_score(&peers[0], 0.99); + + // Increase score of second peer to accept_px_threshold + gs.set_application_score(&peers[1], 1.0); + + // Handle prune from peer peers[0] with px peers + let px = vec![PeerInfo { + peer_id: Some(PeerId::random()), + }]; + gs.handle_prune( + &peers[0], + vec![( + topics[0].clone(), + px, + Some(config.prune_backoff().as_secs()), + )], + ); + + // Assert no dials + assert_eq!( + gs.events + .iter() + .filter(|e| matches!(e, NetworkBehaviourAction::Dial { .. })) + .count(), + 0 + ); + + //handle prune from peer peers[1] with px peers + let px = vec![PeerInfo { + peer_id: Some(PeerId::random()), + }]; + gs.handle_prune( + &peers[1], + vec![( + topics[0].clone(), + px, + Some(config.prune_backoff().as_secs()), + )], + ); + + //assert there are dials now + assert!( + gs.events + .iter() + .filter(|e| matches!(e, NetworkBehaviourAction::Dial { .. 
})) + .count() + > 0 + ); +} - //message m1 gets validated - gs.report_message_validation_result( - &config.message_id(message1), - &peers[0], - MessageAcceptance::Accept, - ) +#[test] +fn test_keep_best_scoring_peers_on_oversubscription() { + let config = GossipsubConfigBuilder::default() + .mesh_n_low(15) + .mesh_n(30) + .mesh_n_high(60) + .retain_scores(29) + .build() .unwrap(); - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + //build mesh with more peers than mesh can hold + let n = config.mesh_n_high() + 1; + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(n) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(n) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); + + // graft all, will be accepted since the are outbound + for peer in &peers { + gs.handle_graft(peer, topics.clone()); } - #[test] - fn test_scoring_p4_invalid_signature() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties - invalid_message_deliveries_weight: -2.0, - invalid_message_deliveries_decay: 0.9, - topic_weight: 0.7, - ..Default::default() - }; - peer_score_params.topics.insert(topic_hash, topic_params); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - - //peer 0 delivers message with invalid signature - let m = random_message(&mut seq, &topics); - - gs.inject_event( - peers[0], - ConnectionId::new(0), - HandlerEvent::Message { - rpc: GossipsubRpc { - messages: vec![], - subscriptions: vec![], - control_msgs: vec![], - }, - invalid_messages: vec![(m, ValidationError::InvalidSignature)], - }, - ); + //assign scores to peers equalling their index - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - -2.0 * 0.7 - ); + //set random positive scores + for (index, peer) in peers.iter().enumerate() { + gs.set_application_score(peer, index as f64); } - #[test] - fn test_scoring_p4_message_from_self() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties - invalid_message_deliveries_weight: -2.0, - invalid_message_deliveries_decay: 0.9, - topic_weight: 0.7, - ..Default::default() - }; - peer_score_params.topics.insert(topic_hash, topic_params); - 
peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; - - //peer 0 delivers invalid message from self - let mut m = random_message(&mut seq, &topics); - m.source = Some(*gs.publish_config.get_own_id().unwrap()); + assert_eq!(gs.mesh[&topics[0]].len(), n); - deliver_message(&mut gs, 0, m); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - -2.0 * 0.7 - ); - } + //heartbeat to prune some peers + gs.heartbeat(); - #[test] - fn test_scoring_p4_ignored_message() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties - invalid_message_deliveries_weight: -2.0, - invalid_message_deliveries_decay: 0.9, - topic_weight: 0.7, - ..Default::default() - }; - peer_score_params.topics.insert(topic_hash, topic_params); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n()); - //peer 0 delivers ignored message - let m1 = random_message(&mut seq, &topics); - deliver_message(&mut gs, 0, m1.clone()); + //mesh contains retain_scores best peers + assert!(gs.mesh[&topics[0]].is_superset( + &peers[(n - config.retain_scores())..] 
+ .iter() + .cloned() + .collect() + )); +} - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +#[test] +fn test_scoring_p1() { + let config = GossipsubConfig::default(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 2.0, + time_in_mesh_quantum: Duration::from_millis(50), + time_in_mesh_cap: 10.0, + topic_weight: 0.7, + ..TopicScoreParams::default() + }; + peer_score_params + .topics + .insert(topic_hash, topic_params.clone()); + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, _) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + //sleep for 2 times the mesh_quantum + sleep(topic_params.time_in_mesh_quantum * 2); + //refresh scores + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + assert!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]) + >= 2.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight, + "score should be at least 2 * time_in_mesh_weight * topic_weight" + ); + assert!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]) + < 3.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight, + "score should be less than 3 * time_in_mesh_weight * topic_weight" + ); + + //sleep again for 2 times the mesh_quantum + sleep(topic_params.time_in_mesh_quantum * 2); + //refresh scores + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + assert!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]) + >= 2.0 * topic_params.time_in_mesh_weight * topic_params.topic_weight, + "score should be at least 4 * time_in_mesh_weight * topic_weight" + ); + + //sleep for enough periods to reach maximum + sleep(topic_params.time_in_mesh_quantum * (topic_params.time_in_mesh_cap - 3.0) as u32); + //refresh scores + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + topic_params.time_in_mesh_cap + * topic_params.time_in_mesh_weight + * topic_params.topic_weight, + "score should be exactly time_in_mesh_cap * time_in_mesh_weight * topic_weight" + ); +} - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); +fn random_message(seq: &mut u64, topics: &Vec) -> RawGossipsubMessage { + let mut rng = rand::thread_rng(); + *seq += 1; + RawGossipsubMessage { + source: Some(PeerId::random()), + data: (0..rng.gen_range(10..30)) + .into_iter() + .map(|_| rng.gen()) + .collect(), + sequence_number: Some(*seq), + topic: topics[rng.gen_range(0..topics.len())].clone(), + signature: None, + key: None, + validated: true, + } +} - //message m1 gets ignored - gs.report_message_validation_result( - &config.message_id(message1), - &peers[0], - MessageAcceptance::Ignore, - ) - .unwrap(); +#[test] +fn test_scoring_p2() { + let config = GossipsubConfig::default(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 2.0, + first_message_deliveries_cap: 10.0, + first_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..TopicScoreParams::default() + }; + peer_score_params + .topics + .insert(topic_hash, 
topic_params.clone()); + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + let m1 = random_message(&mut seq, &topics); + //peer 0 delivers message first + deliver_message(&mut gs, 0, m1.clone()); + //peer 1 delivers message second + deliver_message(&mut gs, 1, m1); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 1.0 * topic_params.first_message_deliveries_weight * topic_params.topic_weight, + "score should be exactly first_message_deliveries_weight * topic_weight" + ); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + 0.0, + "there should be no score for second message deliveries * topic_weight" + ); + + //peer 2 delivers two new messages + deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); + deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + 2.0 * topic_params.first_message_deliveries_weight * topic_params.topic_weight, + "score should be exactly 2 * first_message_deliveries_weight * topic_weight" + ); + + //test decaying + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 1.0 * topic_params.first_message_deliveries_decay + * topic_params.first_message_deliveries_weight + * topic_params.topic_weight, + "score should be exactly first_message_deliveries_decay * \ + first_message_deliveries_weight * topic_weight" + ); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + 2.0 * topic_params.first_message_deliveries_decay + * topic_params.first_message_deliveries_weight + * topic_params.topic_weight, + "score should be exactly 2 * first_message_deliveries_decay * \ + first_message_deliveries_weight * topic_weight" + ); + + //test cap + for _ in 0..topic_params.first_message_deliveries_cap as u64 { + deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); } - #[test] - fn test_scoring_p4_application_invalidated_message() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties - invalid_message_deliveries_weight: -2.0, - invalid_message_deliveries_decay: 0.9, - topic_weight: 0.7, - ..Default::default() - }; - peer_score_params.topics.insert(topic_hash, topic_params); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - 
.outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + topic_params.first_message_deliveries_cap + * topic_params.first_message_deliveries_weight + * topic_params.topic_weight, + "score should be exactly first_message_deliveries_cap * \ + first_message_deliveries_weight * topic_weight" + ); +} - //peer 0 delivers invalid message - let m1 = random_message(&mut seq, &topics); - deliver_message(&mut gs, 0, m1.clone()); +#[test] +fn test_scoring_p3() { + let config = GossipsubConfig::default(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: -2.0, + mesh_message_deliveries_decay: 0.9, + mesh_message_deliveries_cap: 0.0, + mesh_message_deliveries_threshold: 5.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(100), + topic_weight: 0.7, + ..TopicScoreParams::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + let mut expected_message_deliveries = 0.0; - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + //messages used to test window + let m1 = random_message(&mut seq, &topics); + let m2 = random_message(&mut seq, &topics); - //message m1 gets rejected - gs.report_message_validation_result( - &config.message_id(message1), - &peers[0], - MessageAcceptance::Reject, - ) - .unwrap(); + //peer 1 delivers m1 + deliver_message(&mut gs, 1, m1.clone()); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - -2.0 * 0.7 - ); - } + //peer 0 delivers two message + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + expected_message_deliveries += 2.0; - #[test] - fn test_scoring_p4_application_invalid_message_from_two_peers() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties - invalid_message_deliveries_weight: -2.0, - invalid_message_deliveries_decay: 0.9, - topic_weight: 0.7, - 
..Default::default() - }; - peer_score_params.topics.insert(topic_hash, topic_params); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with two peers - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(2) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + sleep(Duration::from_millis(60)); - //peer 0 delivers invalid message - let m1 = random_message(&mut seq, &topics); - deliver_message(&mut gs, 0, m1.clone()); + //peer 1 delivers m2 + deliver_message(&mut gs, 1, m2.clone()); - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + sleep(Duration::from_millis(70)); + //peer 0 delivers m1 and m2 only m2 gets counted + deliver_message(&mut gs, 0, m1); + deliver_message(&mut gs, 0, m2); + expected_message_deliveries += 1.0; - //peer 1 delivers same message - deliver_message(&mut gs, 1, m1); + sleep(Duration::from_millis(900)); - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 0.0); + //message deliveries penalties get activated, peer 0 has only delivered 3 messages and + // therefore gets a penalty + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + expected_message_deliveries *= 0.9; //decay - //message m1 gets rejected - gs.report_message_validation_result( - &config.message_id(message1), - &peers[0], - MessageAcceptance::Reject, - ) - .unwrap(); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 + ); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - -2.0 * 0.7 - ); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - -2.0 * 0.7 - ); + // peer 0 delivers a lot of messages => message_deliveries should be capped at 10 + for _ in 0..20 { + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); } - #[test] - fn test_scoring_p4_three_application_invalid_messages() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties - invalid_message_deliveries_weight: -2.0, - invalid_message_deliveries_decay: 0.9, - topic_weight: 0.7, - ..Default::default() - }; - peer_score_params.topics.insert(topic_hash, topic_params); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, 
msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; - - //peer 0 delivers two invalid message - let m1 = random_message(&mut seq, &topics); - let m2 = random_message(&mut seq, &topics); - let m3 = random_message(&mut seq, &topics); - deliver_message(&mut gs, 0, m1.clone()); - deliver_message(&mut gs, 0, m2.clone()); - deliver_message(&mut gs, 0, m3.clone()); + expected_message_deliveries = 10.0; - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - // Transform the inbound message - let message2 = &gs.data_transform.inbound_transform(m2).unwrap(); - // Transform the inbound message - let message3 = &gs.data_transform.inbound_transform(m3).unwrap(); + //apply 10 decays + for _ in 0..10 { + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + expected_message_deliveries *= 0.9; //decay + } - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 + ); +} - //messages gets rejected - gs.report_message_validation_result( - &config.message_id(message1), - &peers[0], - MessageAcceptance::Reject, - ) - .unwrap(); - gs.report_message_validation_result( - &config.message_id(message2), - &peers[0], - MessageAcceptance::Reject, - ) - .unwrap(); - gs.report_message_validation_result( - &config.message_id(message3), - &peers[0], - MessageAcceptance::Reject, - ) +#[test] +fn test_scoring_p3b() { + let config = GossipsubConfigBuilder::default() + .prune_backoff(Duration::from_millis(100)) + .build() .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: -2.0, + mesh_message_deliveries_decay: 0.9, + mesh_message_deliveries_cap: 10.0, + mesh_message_deliveries_threshold: 5.0, + mesh_message_deliveries_activation: Duration::from_secs(1), + mesh_message_deliveries_window: Duration::from_millis(100), + mesh_failure_penalty_weight: -3.0, + mesh_failure_penalty_decay: 0.95, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - //number of invalid messages gets squared - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 9.0 * -2.0 * 0.7 - ); - } + let mut expected_message_deliveries = 0.0; - #[test] - fn test_scoring_p4_decay() { - let config = GossipsubConfigBuilder::default() - .validate_messages() - .build() - .unwrap(); - let mut peer_score_params = PeerScoreParams::default(); - let topic = Topic::new("test"); - let topic_hash = topic.hash(); - let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate 
time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties - invalid_message_deliveries_weight: -2.0, - invalid_message_deliveries_decay: 0.9, - topic_weight: 0.7, - ..Default::default() - }; - peer_score_params.topics.insert(topic_hash, topic_params); - peer_score_params.app_specific_weight = 1.0; - let peer_score_thresholds = PeerScoreThresholds::default(); - - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, peer_score_thresholds))) - .create_network(); - - let mut seq = 0; - let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { - gs.handle_received_message(msg, &peers[index]); - }; + //add some positive score + gs.peer_score + .as_mut() + .unwrap() + .0 + .set_application_score(&peers[0], 100.0); - //peer 0 delivers invalid message - let m1 = random_message(&mut seq, &topics); - deliver_message(&mut gs, 0, m1.clone()); + //peer 0 delivers two message + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + expected_message_deliveries += 2.0; - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - - //message m1 gets rejected - gs.report_message_validation_result( - &config.message_id(message1), - &peers[0], - MessageAcceptance::Reject, - ) - .unwrap(); + sleep(Duration::from_millis(1050)); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - -2.0 * 0.7 - ); + //activation kicks in + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + expected_message_deliveries *= 0.9; //decay - //we decay - gs.peer_score.as_mut().unwrap().0.refresh_scores(); + //prune peer + gs.handle_prune(&peers[0], vec![(topics[0].clone(), vec![], None)]); - // the number of invalids gets decayed to 0.9 and then squared in the score - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 0.9 * 0.9 * -2.0 * 0.7 - ); - } + //wait backoff + sleep(Duration::from_millis(130)); - #[test] - fn test_scoring_p5() { - let peer_score_params = PeerScoreParams { - app_specific_weight: 2.0, - ..PeerScoreParams::default() - }; + //regraft peer + gs.handle_graft(&peers[0], topics.clone()); - //build mesh with one peer - let (mut gs, peers, _) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .gs_config(GossipsubConfig::default()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) - .create_network(); + //the score should now consider p3b + let mut expected_b3 = (5f64 - expected_message_deliveries).powi(2); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 100.0 + expected_b3 * -3.0 * 0.7 + ); - gs.set_application_score(&peers[0], 1.1); + //we can also add a new p3 to the score - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 1.1 * 2.0 - ); - } + //peer 0 delivers one message + deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); + expected_message_deliveries += 1.0; - #[test] - fn test_scoring_p6() { - let peer_score_params = PeerScoreParams { - ip_colocation_factor_threshold: 5.0, - 
ip_colocation_factor_weight: -2.0, - ..Default::default() - }; + sleep(Duration::from_millis(1050)); + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + expected_message_deliveries *= 0.9; //decay + expected_b3 *= 0.95; - let (mut gs, _, _) = inject_nodes1() - .peer_no(0) - .topics(vec![]) - .to_subscribe(false) - .gs_config(GossipsubConfig::default()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) - .create_network(); - - //create 5 peers with the same ip - let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); - let peers = vec![ - add_peer_with_addr(&mut gs, &vec![], false, false, addr.clone()), - add_peer_with_addr(&mut gs, &vec![], false, false, addr.clone()), - add_peer_with_addr(&mut gs, &vec![], true, false, addr.clone()), - add_peer_with_addr(&mut gs, &vec![], true, false, addr.clone()), - add_peer_with_addr(&mut gs, &vec![], true, true, addr.clone()), - ]; - - //create 4 other peers with other ip - let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); - let others = vec![ - add_peer_with_addr(&mut gs, &vec![], false, false, addr2.clone()), - add_peer_with_addr(&mut gs, &vec![], false, false, addr2.clone()), - add_peer_with_addr(&mut gs, &vec![], true, false, addr2.clone()), - add_peer_with_addr(&mut gs, &vec![], true, false, addr2.clone()), - ]; - - //no penalties yet - for peer in peers.iter().chain(others.iter()) { - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); - } + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 100.0 + + (expected_b3 * -3.0 + (5f64 - expected_message_deliveries).powi(2) * -2.0) * 0.7 + ); +} - //add additional connection for 3 others with addr - for id in others.iter().take(3) { - gs.inject_connection_established( - id, - &ConnectionId::new(0), - &ConnectedPoint::Dialer { - address: addr.clone(), - role_override: Endpoint::Dialer, - }, - None, - 0, - ); - } +#[test] +fn test_scoring_p4_valid_message() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - //penalties apply squared - for peer in peers.iter().chain(others.iter().take(3)) { - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); - } - //fourth other peer still no penalty - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&others[3]), 0.0); - - //add additional 
connection for 3 of the peers to addr2 - for peer in peers.iter().take(3) { - gs.inject_connection_established( - peer, - &ConnectionId::new(0), - &ConnectedPoint::Dialer { - address: addr2.clone(), - role_override: Endpoint::Dialer, - }, - None, - 1, - ); - } + //peer 0 delivers valid message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); - //double penalties for the first three of each - for peer in peers.iter().take(3).chain(others.iter().take(3)) { - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(peer), - (9.0 + 4.0) * -2.0 - ); - } + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); - //single penalties for the rest - for peer in peers.iter().skip(3) { - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); - } - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&others[3]), - 4.0 * -2.0 - ); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //two times same ip doesn't count twice - gs.inject_connection_established( - &peers[0], - &ConnectionId::new(0), - &ConnectedPoint::Dialer { - address: addr, - role_override: Endpoint::Dialer, - }, - None, - 2, - ); + //message m1 gets validated + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Accept, + ) + .unwrap(); - //nothing changed - //double penalties for the first three of each - for peer in peers.iter().take(3).chain(others.iter().take(3)) { - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(peer), - (9.0 + 4.0) * -2.0 - ); - } + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +} - //single penalties for the rest - for peer in peers.iter().skip(3) { - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); - } - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&others[3]), - 4.0 * -2.0 - ); - } +#[test] +fn test_scoring_p4_invalid_signature() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + + //peer 0 delivers message with invalid signature + let m = random_message(&mut seq, &topics); + + gs.inject_event( + peers[0], + ConnectionId::new(0), + HandlerEvent::Message { + rpc: GossipsubRpc { + messages: vec![], + subscriptions: vec![], + control_msgs: vec![], + }, + invalid_messages: vec![(m, ValidationError::InvalidSignature)], + }, + ); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); +} - 
#[test] - fn test_scoring_p7_grafts_before_backoff() { - let config = GossipsubConfigBuilder::default() - .prune_backoff(Duration::from_millis(200)) - .graft_flood_threshold(Duration::from_millis(100)) - .build() - .unwrap(); - let peer_score_params = PeerScoreParams { - behaviour_penalty_weight: -2.0, - behaviour_penalty_decay: 0.9, - ..Default::default() - }; +#[test] +fn test_scoring_p4_message_from_self() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(2) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) - .create_network(); - - //remove peers from mesh and send prune to them => this adds a backoff for the peers - for peer in peers.iter().take(2) { - gs.mesh.get_mut(&topics[0]).unwrap().remove(peer); - gs.send_graft_prune( - HashMap::new(), - HashMap::from([(*peer, vec![topics[0].clone()])]), - HashSet::new(), - ); - } + //peer 0 delivers invalid message from self + let mut m = random_message(&mut seq, &topics); + m.source = Some(*gs.publish_config.get_own_id().unwrap()); - //wait 50 millisecs - sleep(Duration::from_millis(50)); + deliver_message(&mut gs, 0, m); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); +} - //first peer tries to graft - gs.handle_graft(&peers[0], vec![topics[0].clone()]); +#[test] +fn test_scoring_p4_ignored_message() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = 
PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - //double behaviour penalty for first peer (squared) - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 4.0 * -2.0 - ); + //peer 0 delivers ignored message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); - //wait 100 millisecs - sleep(Duration::from_millis(100)); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //second peer tries to graft - gs.handle_graft(&peers[1], vec![topics[0].clone()]); + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); - //single behaviour penalty for second peer - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - 1.0 * -2.0 - ); + //message m1 gets ignored + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Ignore, + ) + .unwrap(); - //test decay - gs.peer_score.as_mut().unwrap().0.refresh_scores(); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); +} - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 4.0 * 0.9 * 0.9 * -2.0 - ); - assert_eq!( - gs.peer_score.as_ref().unwrap().0.score(&peers[1]), - 1.0 * 0.9 * 0.9 * -2.0 - ); - } +#[test] +fn test_scoring_p4_application_invalidated_message() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - #[test] - fn test_opportunistic_grafting() { - let config = GossipsubConfigBuilder::default() - .mesh_n_low(3) - .mesh_n(5) - .mesh_n_high(7) - .mesh_outbound_min(0) //deactivate outbound handling - .opportunistic_graft_ticks(2) - .opportunistic_graft_peers(2) - .build() - .unwrap(); - let peer_score_params = PeerScoreParams { - app_specific_weight: 1.0, - ..Default::default() - }; - let thresholds = PeerScoreThresholds { - opportunistic_graft_threshold: 2.0, - ..Default::default() - }; + //peer 
0 delivers invalid message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(5) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, thresholds))) - .create_network(); - - //fill mesh with 5 peers - for peer in &peers { - gs.handle_graft(peer, topics.clone()); - } + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //add additional 5 peers - let others: Vec<_> = (0..5) - .into_iter() - .map(|_| add_peer(&mut gs, &topics, false, false)) - .collect(); + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); - //currently mesh equals peers - assert_eq!(gs.mesh[&topics[0]], peers.iter().cloned().collect()); + //message m1 gets rejected + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); - //give others high scores (but the first two have not high enough scores) - for (i, peer) in peers.iter().enumerate().take(5) { - gs.set_application_score(peer, 0.0 + i as f64); - } + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); +} - //set scores for peers in the mesh - for (i, peer) in others.iter().enumerate().take(5) { - gs.set_application_score(peer, 0.0 + i as f64); - } +#[test] +fn test_scoring_p4_application_invalid_message_from_two_peers() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with two peers + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - //this gives a median of exactly 2.0 => should not apply opportunistic grafting - gs.heartbeat(); - gs.heartbeat(); + //peer 0 delivers invalid message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); + + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); + + //peer 1 delivers same message + deliver_message(&mut gs, 1, m1); + + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 0.0); + + //message m1 gets rejected + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + + assert_eq!( + 
gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + -2.0 * 0.7 + ); +} - assert_eq!( - gs.mesh[&topics[0]].len(), - 5, - "should not apply opportunistic grafting" - ); +#[test] +fn test_scoring_p4_three_application_invalid_messages() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - //reduce middle score to 1.0 giving a median of 1.0 - gs.set_application_score(&peers[2], 1.0); + //peer 0 delivers two invalid message + let m1 = random_message(&mut seq, &topics); + let m2 = random_message(&mut seq, &topics); + let m3 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); + deliver_message(&mut gs, 0, m2.clone()); + deliver_message(&mut gs, 0, m3.clone()); + + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + + // Transform the inbound message + let message2 = &gs.data_transform.inbound_transform(m2).unwrap(); + // Transform the inbound message + let message3 = &gs.data_transform.inbound_transform(m3).unwrap(); + + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + + //messages gets rejected + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + gs.report_message_validation_result( + &config.message_id(message2), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + gs.report_message_validation_result( + &config.message_id(message3), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + + //number of invalid messages gets squared + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 9.0 * -2.0 * 0.7 + ); +} - //opportunistic grafting after two heartbeats +#[test] +fn test_scoring_p4_decay() { + let config = GossipsubConfigBuilder::default() + .validate_messages() + .build() + .unwrap(); + let mut peer_score_params = PeerScoreParams::default(); + let topic = Topic::new("test"); + let topic_hash = topic.hash(); + let topic_params = TopicScoreParams { + time_in_mesh_weight: 0.0, //deactivate time in mesh + first_message_deliveries_weight: 0.0, //deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, //deactivate message deliveries + mesh_failure_penalty_weight: 0.0, 
//deactivate mesh failure penalties + invalid_message_deliveries_weight: -2.0, + invalid_message_deliveries_decay: 0.9, + topic_weight: 0.7, + ..Default::default() + }; + peer_score_params.topics.insert(topic_hash, topic_params); + peer_score_params.app_specific_weight = 1.0; + let peer_score_thresholds = PeerScoreThresholds::default(); + + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, peer_score_thresholds))) + .create_network(); + + let mut seq = 0; + let deliver_message = |gs: &mut Gossipsub, index: usize, msg: RawGossipsubMessage| { + gs.handle_received_message(msg, &peers[index]); + }; - gs.heartbeat(); - assert_eq!( - gs.mesh[&topics[0]].len(), - 5, - "should not apply opportunistic grafting after first tick" - ); + //peer 0 delivers invalid message + let m1 = random_message(&mut seq, &topics); + deliver_message(&mut gs, 0, m1.clone()); + + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); + + //message m1 gets rejected + gs.report_message_validation_result( + &config.message_id(message1), + &peers[0], + MessageAcceptance::Reject, + ) + .unwrap(); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + -2.0 * 0.7 + ); + + //we decay + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + + // the number of invalids gets decayed to 0.9 and then squared in the score + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 0.9 * 0.9 * -2.0 * 0.7 + ); +} - gs.heartbeat(); +#[test] +fn test_scoring_p5() { + let peer_score_params = PeerScoreParams { + app_specific_weight: 2.0, + ..PeerScoreParams::default() + }; - assert_eq!( - gs.mesh[&topics[0]].len(), - 7, - "opportunistic grafting should have added 2 peers" - ); + //build mesh with one peer + let (mut gs, peers, _) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .gs_config(GossipsubConfig::default()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) + .create_network(); + + gs.set_application_score(&peers[0], 1.1); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 1.1 * 2.0 + ); +} - assert!( - gs.mesh[&topics[0]].is_superset(&peers.iter().cloned().collect()), - "old peers are still part of the mesh" - ); +#[test] +fn test_scoring_p6() { + let peer_score_params = PeerScoreParams { + ip_colocation_factor_threshold: 5.0, + ip_colocation_factor_weight: -2.0, + ..Default::default() + }; - assert!( - gs.mesh[&topics[0]].is_disjoint(&others.iter().cloned().take(2).collect()), - "peers below or equal to median should not be added in opportunistic grafting" - ); + let (mut gs, _, _) = inject_nodes1() + .peer_no(0) + .topics(vec![]) + .to_subscribe(false) + .gs_config(GossipsubConfig::default()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) + .create_network(); + + //create 5 peers with the same ip + let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); + let peers = vec![ + add_peer_with_addr(&mut gs, &vec![], false, false, addr.clone()), + add_peer_with_addr(&mut gs, &vec![], false, false, addr.clone()), + add_peer_with_addr(&mut gs, &vec![], true, false, addr.clone()), + add_peer_with_addr(&mut gs, &vec![], true, false, addr.clone()), + 
add_peer_with_addr(&mut gs, &vec![], true, true, addr.clone()), + ]; + + //create 4 other peers with other ip + let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); + let others = vec![ + add_peer_with_addr(&mut gs, &vec![], false, false, addr2.clone()), + add_peer_with_addr(&mut gs, &vec![], false, false, addr2.clone()), + add_peer_with_addr(&mut gs, &vec![], true, false, addr2.clone()), + add_peer_with_addr(&mut gs, &vec![], true, false, addr2.clone()), + ]; + + //no penalties yet + for peer in peers.iter().chain(others.iter()) { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); } - #[test] - fn test_ignore_graft_from_unknown_topic() { - //build gossipsub without subscribing to any topics - let (mut gs, _, _) = inject_nodes1() - .peer_no(0) - .topics(vec![]) - .to_subscribe(false) - .create_network(); - - //handle an incoming graft for some topic - gs.handle_graft(&PeerId::random(), vec![Topic::new("test").hash()]); - - //assert that no prune got created - assert_eq!( - count_control_msgs(&gs, |_, a| matches!( - a, - GossipsubControlAction::Prune { .. } - )), + //add additional connection for 3 others with addr + for id in others.iter().take(3) { + gs.inject_connection_established( + id, + &ConnectionId::new(0), + &ConnectedPoint::Dialer { + address: addr.clone(), + role_override: Endpoint::Dialer, + }, + None, 0, - "we should not prune after graft in unknown topic" ); } - #[test] - fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { - let config = GossipsubConfig::default(); - //build gossipsub with full mesh - let (mut gs, _, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(false) - .create_network(); - - //add another peer not in the mesh - let peer = add_peer(&mut gs, &topics, false, false); - - //receive a message - let mut seq = 0; - let m1 = random_message(&mut seq, &topics); - - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); - - let id = config.message_id(message1); - - gs.handle_received_message(m1, &PeerId::random()); - - //clear events - gs.events.clear(); + //penalties apply squared + for peer in peers.iter().chain(others.iter().take(3)) { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); + } + //fourth other peer still no penalty + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&others[3]), 0.0); - //the first gossip_retransimission many iwants return the valid message, all others are - // ignored. - for _ in 0..(2 * config.gossip_retransimission() + 10) { - gs.handle_iwant(&peer, vec![id.clone()]); - } + //add additional connection for 3 of the peers to addr2 + for peer in peers.iter().take(3) { + gs.inject_connection_established( + peer, + &ConnectionId::new(0), + &ConnectedPoint::Dialer { + address: addr2.clone(), + role_override: Endpoint::Dialer, + }, + None, + 1, + ); + } + //double penalties for the first three of each + for peer in peers.iter().take(3).chain(others.iter().take(3)) { assert_eq!( - gs.events - .iter() - .map(|e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. 
} => { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - event.messages.len() - } else { - 0 - } - } - _ => 0, - }) - .sum::(), - config.gossip_retransimission() as usize, - "not more then gossip_retransmission many messages get sent back" + gs.peer_score.as_ref().unwrap().0.score(peer), + (9.0 + 4.0) * -2.0 ); } - #[test] - fn test_ignore_too_many_ihaves() { - let config = GossipsubConfigBuilder::default() - .max_ihave_messages(10) - .build() - .unwrap(); - //build gossipsub with full mesh - let (mut gs, _, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config.clone()) - .create_network(); - - //add another peer not in the mesh - let peer = add_peer(&mut gs, &topics, false, false); - - //peer has 20 messages - let mut seq = 0; - let messages: Vec<_> = (0..20).map(|_| random_message(&mut seq, &topics)).collect(); - - //peer sends us one ihave for each message in order - for raw_message in &messages { - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); + //single penalties for the rest + for peer in peers.iter().skip(3) { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); + } + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&others[3]), + 4.0 * -2.0 + ); + + //two times same ip doesn't count twice + gs.inject_connection_established( + &peers[0], + &ConnectionId::new(0), + &ConnectedPoint::Dialer { + address: addr, + role_override: Endpoint::Dialer, + }, + None, + 2, + ); + + //nothing changed + //double penalties for the first three of each + for peer in peers.iter().take(3).chain(others.iter().take(3)) { + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(peer), + (9.0 + 4.0) * -2.0 + ); + } - gs.handle_ihave( - &peer, - vec![(topics[0].clone(), vec![config.message_id(message)])], - ); - } + //single penalties for the rest + for peer in peers.iter().skip(3) { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); + } + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&others[3]), + 4.0 * -2.0 + ); +} - let first_ten: HashSet<_> = messages - .iter() - .take(10) - .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) - .map(|m| config.message_id(&m)) - .collect(); +#[test] +fn test_scoring_p7_grafts_before_backoff() { + let config = GossipsubConfigBuilder::default() + .prune_backoff(Duration::from_millis(200)) + .graft_flood_threshold(Duration::from_millis(100)) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams { + behaviour_penalty_weight: -2.0, + behaviour_penalty_decay: 0.9, + ..Default::default() + }; - //we send iwant only for the first 10 messages - assert_eq!( - count_control_msgs(&gs, |p, action| p == &peer - && matches!(action, GossipsubControlAction::IWant { message_ids } if message_ids.len() == 1 && first_ten.contains(&message_ids[0]))), - 10, - "exactly the first ten ihaves should be processed and one iwant for each created" + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) + .create_network(); + + //remove peers from mesh and send prune to them => this adds a backoff for the peers + for peer in peers.iter().take(2) { + gs.mesh.get_mut(&topics[0]).unwrap().remove(peer); + gs.send_graft_prune( + HashMap::new(), + 
HashMap::from([(*peer, vec![topics[0].clone()])]), + HashSet::new(), ); + } - //after a heartbeat everything is forgotten - gs.heartbeat(); - for raw_message in messages[10..].iter() { - // Transform the inbound message - let message = &gs - .data_transform - .inbound_transform(raw_message.clone()) - .unwrap(); + //wait 50 millisecs + sleep(Duration::from_millis(50)); + + //first peer tries to graft + gs.handle_graft(&peers[0], vec![topics[0].clone()]); + + //double behaviour penalty for first peer (squared) + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 4.0 * -2.0 + ); + + //wait 100 millisecs + sleep(Duration::from_millis(100)); + + //second peer tries to graft + gs.handle_graft(&peers[1], vec![topics[0].clone()]); + + //single behaviour penalty for second peer + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + 1.0 * -2.0 + ); + + //test decay + gs.peer_score.as_mut().unwrap().0.refresh_scores(); + + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[0]), + 4.0 * 0.9 * 0.9 * -2.0 + ); + assert_eq!( + gs.peer_score.as_ref().unwrap().0.score(&peers[1]), + 1.0 * 0.9 * 0.9 * -2.0 + ); +} - gs.handle_ihave( - &peer, - vec![(topics[0].clone(), vec![config.message_id(message)])], - ); - } +#[test] +fn test_opportunistic_grafting() { + let config = GossipsubConfigBuilder::default() + .mesh_n_low(3) + .mesh_n(5) + .mesh_n_high(7) + .mesh_outbound_min(0) //deactivate outbound handling + .opportunistic_graft_ticks(2) + .opportunistic_graft_peers(2) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams { + app_specific_weight: 1.0, + ..Default::default() + }; + let thresholds = PeerScoreThresholds { + opportunistic_graft_threshold: 2.0, + ..Default::default() + }; - //we sent iwant for all 20 messages - assert_eq!( - count_control_msgs(&gs, |p, action| p == &peer - && matches!(action, GossipsubControlAction::IWant { message_ids } if message_ids.len() == 1)), - 20, - "all 20 should get sent" - ); + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(5) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, thresholds))) + .create_network(); + + //fill mesh with 5 peers + for peer in &peers { + gs.handle_graft(peer, topics.clone()); } - #[test] - fn test_ignore_too_many_messages_in_ihave() { - let config = GossipsubConfigBuilder::default() - .max_ihave_messages(10) - .max_ihave_length(10) - .build() - .unwrap(); - //build gossipsub with full mesh - let (mut gs, _, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config.clone()) - .create_network(); - - //add another peer not in the mesh - let peer = add_peer(&mut gs, &topics, false, false); - - //peer has 20 messages - let mut seq = 0; - let message_ids: Vec<_> = (0..20) - .map(|_| random_message(&mut seq, &topics)) - .map(|msg| gs.data_transform.inbound_transform(msg).unwrap()) - .map(|msg| config.message_id(&msg)) - .collect(); + //add additional 5 peers + let others: Vec<_> = (0..5) + .into_iter() + .map(|_| add_peer(&mut gs, &topics, false, false)) + .collect(); - //peer sends us three ihaves - gs.handle_ihave(&peer, vec![(topics[0].clone(), message_ids[0..8].to_vec())]); - gs.handle_ihave( - &peer, - vec![(topics[0].clone(), message_ids[0..12].to_vec())], - ); - gs.handle_ihave( - &peer, - vec![(topics[0].clone(), message_ids[0..20].to_vec())], - ); + //currently mesh equals peers + assert_eq!(gs.mesh[&topics[0]], 
peers.iter().cloned().collect()); - let first_twelve: HashSet<_> = message_ids.iter().take(12).collect(); + //give others high scores (but the first two have not high enough scores) + for (i, peer) in peers.iter().enumerate().take(5) { + gs.set_application_score(peer, 0.0 + i as f64); + } - //we send iwant only for the first 10 messages - let mut sum = 0; - assert_eq!( - count_control_msgs(&gs, |p, action| match action { - GossipsubControlAction::IWant { message_ids } => - p == &peer && { - assert!(first_twelve.is_superset(&message_ids.iter().collect())); - sum += message_ids.len(); - true - }, - _ => false, - }), - 2, - "the third ihave should get ignored and no iwant sent" - ); + //set scores for peers in the mesh + for (i, peer) in others.iter().enumerate().take(5) { + gs.set_application_score(peer, 0.0 + i as f64); + } - assert_eq!(sum, 10, "exactly the first ten ihaves should be processed"); + //this gives a median of exactly 2.0 => should not apply opportunistic grafting + gs.heartbeat(); + gs.heartbeat(); + + assert_eq!( + gs.mesh[&topics[0]].len(), + 5, + "should not apply opportunistic grafting" + ); + + //reduce middle score to 1.0 giving a median of 1.0 + gs.set_application_score(&peers[2], 1.0); + + //opportunistic grafting after two heartbeats + + gs.heartbeat(); + assert_eq!( + gs.mesh[&topics[0]].len(), + 5, + "should not apply opportunistic grafting after first tick" + ); + + gs.heartbeat(); + + assert_eq!( + gs.mesh[&topics[0]].len(), + 7, + "opportunistic grafting should have added 2 peers" + ); + + assert!( + gs.mesh[&topics[0]].is_superset(&peers.iter().cloned().collect()), + "old peers are still part of the mesh" + ); + + assert!( + gs.mesh[&topics[0]].is_disjoint(&others.iter().cloned().take(2).collect()), + "peers below or equal to median should not be added in opportunistic grafting" + ); +} - //after a heartbeat everything is forgotten - gs.heartbeat(); - gs.handle_ihave( - &peer, - vec![(topics[0].clone(), message_ids[10..20].to_vec())], - ); +#[test] +fn test_ignore_graft_from_unknown_topic() { + //build gossipsub without subscribing to any topics + let (mut gs, _, _) = inject_nodes1() + .peer_no(0) + .topics(vec![]) + .to_subscribe(false) + .create_network(); + + //handle an incoming graft for some topic + gs.handle_graft(&PeerId::random(), vec![Topic::new("test").hash()]); + + //assert that no prune got created + assert_eq!( + count_control_msgs(&gs, |_, a| matches!( + a, + GossipsubControlAction::Prune { .. 
} + )), + 0, + "we should not prune after graft in unknown topic" + ); +} - //we sent 20 iwant messages - let mut sum = 0; - assert_eq!( - count_control_msgs(&gs, |p, action| match action { - GossipsubControlAction::IWant { message_ids } => - p == &peer && { - sum += message_ids.len(); - true - }, - _ => false, - }), - 3 - ); - assert_eq!(sum, 20, "exactly 20 iwants should get sent"); - } +#[test] +fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { + let config = GossipsubConfig::default(); + //build gossipsub with full mesh + let (mut gs, _, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(false) + .create_network(); - #[test] - fn test_limit_number_of_message_ids_inside_ihave() { - let config = GossipsubConfigBuilder::default() - .max_ihave_messages(10) - .max_ihave_length(100) - .build() - .unwrap(); - //build gossipsub with full mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config) - .create_network(); - - //graft to all peers to really fill the mesh with all the peers - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } + //add another peer not in the mesh + let peer = add_peer(&mut gs, &topics, false, false); - //add two other peers not in the mesh - let p1 = add_peer(&mut gs, &topics, false, false); - let p2 = add_peer(&mut gs, &topics, false, false); + //receive a message + let mut seq = 0; + let m1 = random_message(&mut seq, &topics); - //receive 200 messages from another peer - let mut seq = 0; - for _ in 0..200 { - gs.handle_received_message(random_message(&mut seq, &topics), &PeerId::random()); - } + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); - //emit gossip - gs.emit_gossip(); + let id = config.message_id(message1); - // both peers should have gotten 100 random ihave messages, to asser the randomness, we - // assert that both have not gotten the same set of messages, but have an intersection - // (which is the case with very high probability, the probabiltity of failure is < 10^-58). + gs.handle_received_message(m1, &PeerId::random()); - let mut ihaves1 = HashSet::new(); - let mut ihaves2 = HashSet::new(); + //clear events + gs.events.clear(); - assert_eq!( - count_control_msgs(&gs, |p, action| match action { - GossipsubControlAction::IHave { message_ids, .. } => { - if p == &p1 { - ihaves1 = message_ids.iter().cloned().collect(); - true - } else if p == &p2 { - ihaves2 = message_ids.iter().cloned().collect(); - true + //the first gossip_retransimission many iwants return the valid message, all others are + // ignored. + for _ in 0..(2 * config.gossip_retransimission() + 10) { + gs.handle_iwant(&peer, vec![id.clone()]); + } + + assert_eq!( + gs.events + .iter() + .map(|e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. 
} => { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + event.messages.len() } else { - false + 0 } } - _ => false, - }), - 2, - "should have emitted one ihave to p1 and one to p2" - ); + _ => 0, + }) + .sum::(), + config.gossip_retransimission() as usize, + "not more then gossip_retransmission many messages get sent back" + ); +} - assert_eq!( - ihaves1.len(), - 100, - "should have sent 100 message ids in ihave to p1" - ); - assert_eq!( - ihaves2.len(), - 100, - "should have sent 100 message ids in ihave to p2" - ); - assert!( - ihaves1 != ihaves2, - "should have sent different random messages to p1 and p2 \ - (this may fail with a probability < 10^-58" - ); - assert!( - ihaves1.intersection(&ihaves2).into_iter().count() > 0, - "should have sent random messages with some common messages to p1 and p2 \ - (this may fail with a probability < 10^-58" +#[test] +fn test_ignore_too_many_ihaves() { + let config = GossipsubConfigBuilder::default() + .max_ihave_messages(10) + .build() + .unwrap(); + //build gossipsub with full mesh + let (mut gs, _, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config.clone()) + .create_network(); + + //add another peer not in the mesh + let peer = add_peer(&mut gs, &topics, false, false); + + //peer has 20 messages + let mut seq = 0; + let messages: Vec<_> = (0..20).map(|_| random_message(&mut seq, &topics)).collect(); + + //peer sends us one ihave for each message in order + for raw_message in &messages { + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform(raw_message.clone()) + .unwrap(); + + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), vec![config.message_id(message)])], ); } - #[test] - fn test_iwant_penalties() { - let _ = env_logger::try_init(); - - let config = GossipsubConfigBuilder::default() - .iwant_followup_time(Duration::from_secs(4)) - .build() + let first_ten: HashSet<_> = messages + .iter() + .take(10) + .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) + .map(|m| config.message_id(&m)) + .collect(); + + //we send iwant only for the first 10 messages + assert_eq!( + count_control_msgs(&gs, |p, action| p == &peer + && matches!(action, GossipsubControlAction::IWant { message_ids } if message_ids.len() == 1 && first_ten.contains(&message_ids[0]))), + 10, + "exactly the first ten ihaves should be processed and one iwant for each created" + ); + + //after a heartbeat everything is forgotten + gs.heartbeat(); + for raw_message in messages[10..].iter() { + // Transform the inbound message + let message = &gs + .data_transform + .inbound_transform(raw_message.clone()) .unwrap(); - let peer_score_params = PeerScoreParams { - behaviour_penalty_weight: -1.0, - ..Default::default() - }; - - // fill the mesh - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(2) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config.clone()) - .explicit(0) - .outbound(0) - .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) - .create_network(); - - // graft to all peers to really fill the mesh with all the peers - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - // add 100 more peers - let other_peers: Vec<_> = (0..100) - .map(|_| add_peer(&mut gs, &topics, false, false)) - .collect(); - - // each peer sends us an ihave containing each two message ids - let mut first_messages = Vec::new(); - let mut second_messages = 
Vec::new(); - let mut seq = 0; - for peer in &other_peers { - let msg1 = random_message(&mut seq, &topics); - let msg2 = random_message(&mut seq, &topics); - - // Decompress the raw message and calculate the message id. - // Transform the inbound message - let message1 = &gs.data_transform.inbound_transform(msg1.clone()).unwrap(); - - // Transform the inbound message - let message2 = &gs.data_transform.inbound_transform(msg2.clone()).unwrap(); - - first_messages.push(msg1.clone()); - second_messages.push(msg2.clone()); - gs.handle_ihave( - peer, - vec![( - topics[0].clone(), - vec![config.message_id(message1), config.message_id(message2)], - )], - ); - } - // the peers send us all the first message ids in time - for (index, peer) in other_peers.iter().enumerate() { - gs.handle_received_message(first_messages[index].clone(), peer); - } + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), vec![config.message_id(message)])], + ); + } - // now we do a heartbeat no penalization should have been applied yet - gs.heartbeat(); + //we sent iwant for all 20 messages + assert_eq!( + count_control_msgs(&gs, |p, action| p == &peer + && matches!(action, GossipsubControlAction::IWant { message_ids } if message_ids.len() == 1)), + 20, + "all 20 should get sent" + ); +} - for peer in &other_peers { - assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); - } +#[test] +fn test_ignore_too_many_messages_in_ihave() { + let config = GossipsubConfigBuilder::default() + .max_ihave_messages(10) + .max_ihave_length(10) + .build() + .unwrap(); + //build gossipsub with full mesh + let (mut gs, _, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config.clone()) + .create_network(); + + //add another peer not in the mesh + let peer = add_peer(&mut gs, &topics, false, false); + + //peer has 20 messages + let mut seq = 0; + let message_ids: Vec<_> = (0..20) + .map(|_| random_message(&mut seq, &topics)) + .map(|msg| gs.data_transform.inbound_transform(msg).unwrap()) + .map(|msg| config.message_id(&msg)) + .collect(); + + //peer sends us three ihaves + gs.handle_ihave(&peer, vec![(topics[0].clone(), message_ids[0..8].to_vec())]); + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), message_ids[0..12].to_vec())], + ); + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), message_ids[0..20].to_vec())], + ); + + let first_twelve: HashSet<_> = message_ids.iter().take(12).collect(); + + //we send iwant only for the first 10 messages + let mut sum = 0; + assert_eq!( + count_control_msgs(&gs, |p, action| match action { + GossipsubControlAction::IWant { message_ids } => + p == &peer && { + assert!(first_twelve.is_superset(&message_ids.iter().collect())); + sum += message_ids.len(); + true + }, + _ => false, + }), + 2, + "the third ihave should get ignored and no iwant sent" + ); + + assert_eq!(sum, 10, "exactly the first ten ihaves should be processed"); + + //after a heartbeat everything is forgotten + gs.heartbeat(); + gs.handle_ihave( + &peer, + vec![(topics[0].clone(), message_ids[10..20].to_vec())], + ); + + //we sent 20 iwant messages + let mut sum = 0; + assert_eq!( + count_control_msgs(&gs, |p, action| match action { + GossipsubControlAction::IWant { message_ids } => + p == &peer && { + sum += message_ids.len(); + true + }, + _ => false, + }), + 3 + ); + assert_eq!(sum, 20, "exactly 20 iwants should get sent"); +} - // receive the first twenty of the other peers then send their response - for (index, peer) in 
other_peers.iter().enumerate().take(20) { - gs.handle_received_message(second_messages[index].clone(), peer); - } +#[test] +fn test_limit_number_of_message_ids_inside_ihave() { + let config = GossipsubConfigBuilder::default() + .max_ihave_messages(10) + .max_ihave_length(100) + .build() + .unwrap(); + //build gossipsub with full mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(config.mesh_n_high()) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config) + .create_network(); + + //graft to all peers to really fill the mesh with all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } - // sleep for the promise duration - sleep(Duration::from_secs(4)); + //add two other peers not in the mesh + let p1 = add_peer(&mut gs, &topics, false, false); + let p2 = add_peer(&mut gs, &topics, false, false); - // now we do a heartbeat to apply penalization - gs.heartbeat(); + //receive 200 messages from another peer + let mut seq = 0; + for _ in 0..200 { + gs.handle_received_message(random_message(&mut seq, &topics), &PeerId::random()); + } - // now we get the second messages from the last 80 peers. - for (index, peer) in other_peers.iter().enumerate() { - if index > 19 { - gs.handle_received_message(second_messages[index].clone(), peer); + //emit gossip + gs.emit_gossip(); + + // both peers should have gotten 100 random ihave messages, to asser the randomness, we + // assert that both have not gotten the same set of messages, but have an intersection + // (which is the case with very high probability, the probabiltity of failure is < 10^-58). + + let mut ihaves1 = HashSet::new(); + let mut ihaves2 = HashSet::new(); + + assert_eq!( + count_control_msgs(&gs, |p, action| match action { + GossipsubControlAction::IHave { message_ids, .. 
} => { + if p == &p1 { + ihaves1 = message_ids.iter().cloned().collect(); + true + } else if p == &p2 { + ihaves2 = message_ids.iter().cloned().collect(); + true + } else { + false + } } - } + _ => false, + }), + 2, + "should have emitted one ihave to p1 and one to p2" + ); + + assert_eq!( + ihaves1.len(), + 100, + "should have sent 100 message ids in ihave to p1" + ); + assert_eq!( + ihaves2.len(), + 100, + "should have sent 100 message ids in ihave to p2" + ); + assert!( + ihaves1 != ihaves2, + "should have sent different random messages to p1 and p2 \ + (this may fail with a probability < 10^-58" + ); + assert!( + ihaves1.intersection(&ihaves2).into_iter().count() > 0, + "should have sent random messages with some common messages to p1 and p2 \ + (this may fail with a probability < 10^-58" + ); +} - // no further penalizations should get applied - gs.heartbeat(); +#[test] +fn test_iwant_penalties() { + let _ = env_logger::try_init(); - // Only the last 80 peers should be penalized for not responding in time - let mut not_penalized = 0; - let mut single_penalized = 0; - let mut double_penalized = 0; - - for (i, peer) in other_peers.iter().enumerate() { - let score = gs.peer_score.as_ref().unwrap().0.score(peer); - if score == 0.0 { - not_penalized += 1; - } else if score == -1.0 { - assert!(i > 9); - single_penalized += 1; - } else if score == -4.0 { - assert!(i > 9); - double_penalized += 1 - } else { - println!("{}", peer); - println!("{}", score); - panic!("Invalid score of peer"); - } - } + let config = GossipsubConfigBuilder::default() + .iwant_followup_time(Duration::from_secs(4)) + .build() + .unwrap(); + let peer_score_params = PeerScoreParams { + behaviour_penalty_weight: -1.0, + ..Default::default() + }; - assert_eq!(not_penalized, 20); - assert_eq!(single_penalized, 80); - assert_eq!(double_penalized, 0); + // fill the mesh + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(2) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config.clone()) + .explicit(0) + .outbound(0) + .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) + .create_network(); + + // graft to all peers to really fill the mesh with all the peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); } - #[test] - fn test_publish_to_floodsub_peers_without_flood_publish() { - let config = GossipsubConfigBuilder::default() - .flood_publish(false) - .build() - .unwrap(); - let (mut gs, _, topics) = inject_nodes1() - .peer_no(config.mesh_n_low() - 1) - .topics(vec!["test".into()]) - .to_subscribe(false) - .gs_config(config) - .create_network(); - - //add two floodsub peer, one explicit, one implicit - let p1 = add_peer_with_addr_and_kind( - &mut gs, - &topics, - false, - false, - Multiaddr::empty(), - Some(PeerKind::Floodsub), - ); - let p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + // add 100 more peers + let other_peers: Vec<_> = (0..100) + .map(|_| add_peer(&mut gs, &topics, false, false)) + .collect(); - //p1 and p2 are not in the mesh - assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2)); + // each peer sends us an ihave containing each two message ids + let mut first_messages = Vec::new(); + let mut second_messages = Vec::new(); + let mut seq = 0; + for peer in &other_peers { + let msg1 = random_message(&mut seq, &topics); + let msg2 = random_message(&mut seq, &topics); - //publish a message - let publish_data = vec![0; 42]; - gs.publish(Topic::new("test"), publish_data).unwrap(); 
+ // Decompress the raw message and calculate the message id. + // Transform the inbound message + let message1 = &gs.data_transform.inbound_transform(msg1.clone()).unwrap(); - // Collect publish messages to floodsub peers - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => { - if peer_id == &p1 || peer_id == &p2 { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } - } - } - collected_publish - } - _ => collected_publish, - }); + // Transform the inbound message + let message2 = &gs.data_transform.inbound_transform(msg2.clone()).unwrap(); - assert_eq!( - publishes.len(), - 2, - "Should send a publish message to all floodsub peers" + first_messages.push(msg1.clone()); + second_messages.push(msg2.clone()); + gs.handle_ihave( + peer, + vec![( + topics[0].clone(), + vec![config.message_id(message1), config.message_id(message2)], + )], ); } - #[test] - fn test_do_not_use_floodsub_in_fanout() { - let config = GossipsubConfigBuilder::default() - .flood_publish(false) - .build() - .unwrap(); - let (mut gs, _, _) = inject_nodes1() - .peer_no(config.mesh_n_low() - 1) - .topics(Vec::new()) - .to_subscribe(false) - .gs_config(config) - .create_network(); - - let topic = Topic::new("test"); - let topics = vec![topic.hash()]; - - //add two floodsub peer, one explicit, one implicit - let p1 = add_peer_with_addr_and_kind( - &mut gs, - &topics, - false, - false, - Multiaddr::empty(), - Some(PeerKind::Floodsub), - ); - let p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); - - //publish a message - let publish_data = vec![0; 42]; - gs.publish(Topic::new("test"), publish_data).unwrap(); + // the peers send us all the first message ids in time + for (index, peer) in other_peers.iter().enumerate() { + gs.handle_received_message(first_messages[index].clone(), peer); + } - // Collect publish messages to floodsub peers - let publishes = gs - .events - .iter() - .fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { peer_id, event, .. 
} => { - if peer_id == &p1 || peer_id == &p2 { - if let GossipsubHandlerIn::Message(ref m) = **event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } - } - } - collected_publish - } - _ => collected_publish, - }); + // now we do a heartbeat no penalization should have been applied yet + gs.heartbeat(); - assert_eq!( - publishes.len(), - 2, - "Should send a publish message to all floodsub peers" - ); + for peer in &other_peers { + assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); + } - assert!( - !gs.fanout[&topics[0]].contains(&p1) && !gs.fanout[&topics[0]].contains(&p2), - "Floodsub peers are not allowed in fanout" - ); + // receive the first twenty of the other peers then send their response + for (index, peer) in other_peers.iter().enumerate().take(20) { + gs.handle_received_message(second_messages[index].clone(), peer); } - #[test] - fn test_dont_add_floodsub_peers_to_mesh_on_join() { - let (mut gs, _, _) = inject_nodes1() - .peer_no(0) - .topics(Vec::new()) - .to_subscribe(false) - .create_network(); - - let topic = Topic::new("test"); - let topics = vec![topic.hash()]; - - //add two floodsub peer, one explicit, one implicit - let _p1 = add_peer_with_addr_and_kind( - &mut gs, - &topics, - false, - false, - Multiaddr::empty(), - Some(PeerKind::Floodsub), - ); - let _p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + // sleep for the promise duration + sleep(Duration::from_secs(4)); - gs.join(&topics[0]); + // now we do a heartbeat to apply penalization + gs.heartbeat(); - assert!( - gs.mesh[&topics[0]].is_empty(), - "Floodsub peers should not get added to mesh" - ); + // now we get the second messages from the last 80 peers. + for (index, peer) in other_peers.iter().enumerate() { + if index > 19 { + gs.handle_received_message(second_messages[index].clone(), peer); + } } - #[test] - fn test_dont_send_px_to_old_gossipsub_peers() { - let (mut gs, _, topics) = inject_nodes1() - .peer_no(0) - .topics(vec!["test".into()]) - .to_subscribe(false) - .create_network(); - - //add an old gossipsub peer - let p1 = add_peer_with_addr_and_kind( - &mut gs, - &topics, - false, - false, - Multiaddr::empty(), - Some(PeerKind::Gossipsub), - ); + // no further penalizations should get applied + gs.heartbeat(); + + // Only the last 80 peers should be penalized for not responding in time + let mut not_penalized = 0; + let mut single_penalized = 0; + let mut double_penalized = 0; + + for (i, peer) in other_peers.iter().enumerate() { + let score = gs.peer_score.as_ref().unwrap().0.score(peer); + if score == 0.0 { + not_penalized += 1; + } else if score == -1.0 { + assert!(i > 9); + single_penalized += 1; + } else if score == -4.0 { + assert!(i > 9); + double_penalized += 1 + } else { + println!("{}", peer); + println!("{}", score); + panic!("Invalid score of peer"); + } + } - //prune the peer - gs.send_graft_prune( - HashMap::new(), - vec![(p1, topics.clone())].into_iter().collect(), - HashSet::new(), - ); + assert_eq!(not_penalized, 20); + assert_eq!(single_penalized, 80); + assert_eq!(double_penalized, 0); +} - //check that prune does not contain px - assert_eq!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Prune { peers: px, .. 
} => !px.is_empty(), - _ => false, - }), - 0, - "Should not send px to floodsub peers" - ); - } +#[test] +fn test_publish_to_floodsub_peers_without_flood_publish() { + let config = GossipsubConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); + let (mut gs, _, topics) = inject_nodes1() + .peer_no(config.mesh_n_low() - 1) + .topics(vec!["test".into()]) + .to_subscribe(false) + .gs_config(config) + .create_network(); + + //add two floodsub peer, one explicit, one implicit + let p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let p2 = + add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + + //p1 and p2 are not in the mesh + assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2)); + + //publish a message + let publish_data = vec![0; 42]; + gs.publish(Topic::new("test"), publish_data).unwrap(); + + // Collect publish messages to floodsub peers + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => { + if peer_id == &p1 || peer_id == &p2 { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for s in &event.messages { + collected_publish.push(s.clone()); + } + } + } + collected_publish + } + _ => collected_publish, + }); - #[test] - fn test_dont_send_floodsub_peers_in_px() { - //build mesh with one peer - let (mut gs, peers, topics) = inject_nodes1() - .peer_no(1) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - //add two floodsub peers - let _p1 = add_peer_with_addr_and_kind( - &mut gs, - &topics, - false, - false, - Multiaddr::empty(), - Some(PeerKind::Floodsub), - ); - let _p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + assert_eq!( + publishes.len(), + 2, + "Should send a publish message to all floodsub peers" + ); +} - //prune only mesh node - gs.send_graft_prune( - HashMap::new(), - vec![(peers[0], topics.clone())].into_iter().collect(), - HashSet::new(), - ); +#[test] +fn test_do_not_use_floodsub_in_fanout() { + let config = GossipsubConfigBuilder::default() + .flood_publish(false) + .build() + .unwrap(); + let (mut gs, _, _) = inject_nodes1() + .peer_no(config.mesh_n_low() - 1) + .topics(Vec::new()) + .to_subscribe(false) + .gs_config(config) + .create_network(); + + let topic = Topic::new("test"); + let topics = vec![topic.hash()]; + + //add two floodsub peer, one explicit, one implicit + let p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let p2 = + add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + + //publish a message + let publish_data = vec![0; 42]; + gs.publish(Topic::new("test"), publish_data).unwrap(); + + // Collect publish messages to floodsub peers + let publishes = gs + .events + .iter() + .fold(vec![], |mut collected_publish, e| match e { + NetworkBehaviourAction::NotifyHandler { peer_id, event, .. 
} => { + if peer_id == &p1 || peer_id == &p2 { + if let GossipsubHandlerIn::Message(ref m) = **event { + let event = proto_to_message(m); + for s in &event.messages { + collected_publish.push(s.clone()); + } + } + } + collected_publish + } + _ => collected_publish, + }); - //check that px in prune message is empty - assert_eq!( - count_control_msgs(&gs, |_, m| match m { - GossipsubControlAction::Prune { peers: px, .. } => !px.is_empty(), - _ => false, - }), - 0, - "Should not include floodsub peers in px" - ); - } + assert_eq!( + publishes.len(), + 2, + "Should send a publish message to all floodsub peers" + ); - #[test] - fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() { - let (mut gs, _, topics) = inject_nodes1() - .peer_no(0) - .topics(vec!["test".into()]) - .to_subscribe(false) - .create_network(); - - //add two floodsub peer, one explicit, one implicit - let _p1 = add_peer_with_addr_and_kind( - &mut gs, - &topics, - true, - false, - Multiaddr::empty(), - Some(PeerKind::Floodsub), - ); - let _p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, true, false, Multiaddr::empty(), None); + assert!( + !gs.fanout[&topics[0]].contains(&p1) && !gs.fanout[&topics[0]].contains(&p2), + "Floodsub peers are not allowed in fanout" + ); +} - gs.heartbeat(); +#[test] +fn test_dont_add_floodsub_peers_to_mesh_on_join() { + let (mut gs, _, _) = inject_nodes1() + .peer_no(0) + .topics(Vec::new()) + .to_subscribe(false) + .create_network(); + + let topic = Topic::new("test"); + let topics = vec![topic.hash()]; + + //add two floodsub peer, one explicit, one implicit + let _p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let _p2 = + add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + + gs.join(&topics[0]); + + assert!( + gs.mesh[&topics[0]].is_empty(), + "Floodsub peers should not get added to mesh" + ); +} - assert!( - gs.mesh[&topics[0]].is_empty(), - "Floodsub peers should not get added to mesh" - ); - } +#[test] +fn test_dont_send_px_to_old_gossipsub_peers() { + let (mut gs, _, topics) = inject_nodes1() + .peer_no(0) + .topics(vec!["test".into()]) + .to_subscribe(false) + .create_network(); + + //add an old gossipsub peer + let p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Gossipsub), + ); + + //prune the peer + gs.send_graft_prune( + HashMap::new(), + vec![(p1, topics.clone())].into_iter().collect(), + HashSet::new(), + ); + + //check that prune does not contain px + assert_eq!( + count_control_msgs(&gs, |_, m| match m { + GossipsubControlAction::Prune { peers: px, .. } => !px.is_empty(), + _ => false, + }), + 0, + "Should not send px to floodsub peers" + ); +} - // Some very basic test of public api methods. 
- #[test] - fn test_public_api() { - let (gs, peers, topic_hashes) = inject_nodes1() - .peer_no(4) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .create_network(); - let peers = peers.into_iter().collect::>(); +#[test] +fn test_dont_send_floodsub_peers_in_px() { + //build mesh with one peer + let (mut gs, peers, topics) = inject_nodes1() + .peer_no(1) + .topics(vec!["test".into()]) + .to_subscribe(true) + .create_network(); + + //add two floodsub peers + let _p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + false, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let _p2 = + add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + + //prune only mesh node + gs.send_graft_prune( + HashMap::new(), + vec![(peers[0], topics.clone())].into_iter().collect(), + HashSet::new(), + ); + + //check that px in prune message is empty + assert_eq!( + count_control_msgs(&gs, |_, m| match m { + GossipsubControlAction::Prune { peers: px, .. } => !px.is_empty(), + _ => false, + }), + 0, + "Should not include floodsub peers in px" + ); +} - assert_eq!( - gs.topics().cloned().collect::>(), - topic_hashes, - "Expected topics to match registered topic." - ); +#[test] +fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() { + let (mut gs, _, topics) = inject_nodes1() + .peer_no(0) + .topics(vec!["test".into()]) + .to_subscribe(false) + .create_network(); + + //add two floodsub peer, one explicit, one implicit + let _p1 = add_peer_with_addr_and_kind( + &mut gs, + &topics, + true, + false, + Multiaddr::empty(), + Some(PeerKind::Floodsub), + ); + let _p2 = + add_peer_with_addr_and_kind(&mut gs, &topics, true, false, Multiaddr::empty(), None); + + gs.heartbeat(); + + assert!( + gs.mesh[&topics[0]].is_empty(), + "Floodsub peers should not get added to mesh" + ); +} - assert_eq!( - gs.mesh_peers(&TopicHash::from_raw("topic1")) - .cloned() - .collect::>(), - peers, - "Expected peers for a registered topic to contain all peers." - ); +// Some very basic test of public api methods. +#[test] +fn test_public_api() { + let (gs, peers, topic_hashes) = inject_nodes1() + .peer_no(4) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .create_network(); + let peers = peers.into_iter().collect::>(); + + assert_eq!( + gs.topics().cloned().collect::>(), + topic_hashes, + "Expected topics to match registered topic." + ); + + assert_eq!( + gs.mesh_peers(&TopicHash::from_raw("topic1")) + .cloned() + .collect::>(), + peers, + "Expected peers for a registered topic to contain all peers." + ); + + assert_eq!( + gs.all_mesh_peers().cloned().collect::>(), + peers, + "Expected all_peers to contain all peers." + ); +} - assert_eq!( - gs.all_mesh_peers().cloned().collect::>(), - peers, - "Expected all_peers to contain all peers." - ); +#[test] +fn test_msg_id_fn_only_called_once_with_fast_message_ids() { + struct Pointers { + slow_counter: u32, + fast_counter: u32, } - #[test] - fn test_msg_id_fn_only_called_once_with_fast_message_ids() { - struct Pointers { - slow_counter: u32, - fast_counter: u32, - } + let mut counters = Pointers { + slow_counter: 0, + fast_counter: 0, + }; - let mut counters = Pointers { - slow_counter: 0, - fast_counter: 0, - }; + let counters_pointer: *mut Pointers = &mut counters; - let counters_pointer: *mut Pointers = &mut counters; + let counters_address = counters_pointer as u64; - let counters_address = counters_pointer as u64; + macro_rules! 
get_counters_pointer { + ($m: expr) => {{ + let mut address_bytes: [u8; 8] = Default::default(); + address_bytes.copy_from_slice($m.as_slice()); + let address = u64::from_be_bytes(address_bytes); + address as *mut Pointers + }}; + } - macro_rules! get_counters_pointer { - ($m: expr) => {{ - let mut address_bytes: [u8; 8] = Default::default(); - address_bytes.copy_from_slice($m.as_slice()); - let address = u64::from_be_bytes(address_bytes); - address as *mut Pointers - }}; - } + macro_rules! get_counters_and_hash { + ($m: expr) => {{ + let mut hasher = DefaultHasher::new(); + $m.hash(&mut hasher); + let id = hasher.finish().to_be_bytes().into(); + (id, get_counters_pointer!($m)) + }}; + } - macro_rules! get_counters_and_hash { - ($m: expr) => {{ - let mut hasher = DefaultHasher::new(); - $m.hash(&mut hasher); - let id = hasher.finish().to_be_bytes().into(); - (id, get_counters_pointer!($m)) - }}; + let message_id_fn = |m: &GossipsubMessage| -> MessageId { + let (mut id, mut counters_pointer): (MessageId, *mut Pointers) = + get_counters_and_hash!(&m.data); + unsafe { + (*counters_pointer).slow_counter += 1; } - - let message_id_fn = |m: &GossipsubMessage| -> MessageId { - let (mut id, mut counters_pointer): (MessageId, *mut Pointers) = - get_counters_and_hash!(&m.data); - unsafe { - (*counters_pointer).slow_counter += 1; - } - id.0.reverse(); - id - }; - let fast_message_id_fn = |m: &RawGossipsubMessage| -> FastMessageId { - let (id, mut counters_pointer) = get_counters_and_hash!(&m.data); - unsafe { - (*counters_pointer).fast_counter += 1; - } - id - }; - let config = GossipsubConfigBuilder::default() - .message_id_fn(message_id_fn) - .fast_message_id_fn(fast_message_id_fn) - .build() - .unwrap(); - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(0) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - let message = RawGossipsubMessage { - source: None, - data: counters_address.to_be_bytes().to_vec(), - sequence_number: None, - topic: topic_hashes[0].clone(), - signature: None, - key: None, - validated: true, - }; - - for _ in 0..5 { - gs.handle_received_message(message.clone(), &PeerId::random()); + id.0.reverse(); + id + }; + let fast_message_id_fn = |m: &RawGossipsubMessage| -> FastMessageId { + let (id, mut counters_pointer) = get_counters_and_hash!(&m.data); + unsafe { + (*counters_pointer).fast_counter += 1; } + id + }; + let config = GossipsubConfigBuilder::default() + .message_id_fn(message_id_fn) + .fast_message_id_fn(fast_message_id_fn) + .build() + .unwrap(); + let (mut gs, _, topic_hashes) = inject_nodes1() + .peer_no(0) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(config) + .create_network(); + + let message = RawGossipsubMessage { + source: None, + data: counters_address.to_be_bytes().to_vec(), + sequence_number: None, + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; - assert_eq!(counters.fast_counter, 5); - assert_eq!(counters.slow_counter, 1); + for _ in 0..5 { + gs.handle_received_message(message.clone(), &PeerId::random()); } - #[test] - fn test_subscribe_to_invalid_topic() { - let t1 = Topic::new("t1"); - let t2 = Topic::new("t2"); - let (mut gs, _, _) = inject_nodes::() - .subscription_filter(WhitelistSubscriptionFilter( - vec![t1.hash()].into_iter().collect(), - )) - .to_subscribe(false) - .create_network(); - - assert!(gs.subscribe(&t1).is_ok()); - assert!(gs.subscribe(&t2).is_err()); - } + assert_eq!(counters.fast_counter, 5); + 
assert_eq!(counters.slow_counter, 1); +} - #[test] - fn test_subscribe_and_graft_with_negative_score() { - //simulate a communication between two gossipsub instances - let (mut gs1, _, topic_hashes) = inject_nodes1() - .topics(vec!["test".into()]) - .scoring(Some(( - PeerScoreParams::default(), - PeerScoreThresholds::default(), - ))) - .create_network(); +#[test] +fn test_subscribe_to_invalid_topic() { + let t1 = Topic::new("t1"); + let t2 = Topic::new("t2"); + let (mut gs, _, _) = inject_nodes::() + .subscription_filter(WhitelistSubscriptionFilter( + vec![t1.hash()].into_iter().collect(), + )) + .to_subscribe(false) + .create_network(); + + assert!(gs.subscribe(&t1).is_ok()); + assert!(gs.subscribe(&t2).is_err()); +} - let (mut gs2, _, _) = inject_nodes1().create_network(); +#[test] +fn test_subscribe_and_graft_with_negative_score() { + //simulate a communication between two gossipsub instances + let (mut gs1, _, topic_hashes) = inject_nodes1() + .topics(vec!["test".into()]) + .scoring(Some(( + PeerScoreParams::default(), + PeerScoreThresholds::default(), + ))) + .create_network(); - let connection_id = ConnectionId::new(0); + let (mut gs2, _, _) = inject_nodes1().create_network(); - let topic = Topic::new("test"); + let connection_id = ConnectionId::new(0); - let p2 = add_peer(&mut gs1, &Vec::new(), true, false); - let p1 = add_peer(&mut gs2, &topic_hashes, false, false); + let topic = Topic::new("test"); - //add penalty to peer p2 - gs1.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); + let p2 = add_peer(&mut gs1, &Vec::new(), true, false); + let p1 = add_peer(&mut gs2, &topic_hashes, false, false); - let original_score = gs1.peer_score.as_ref().unwrap().0.score(&p2); + //add penalty to peer p2 + gs1.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - //subscribe to topic in gs2 - gs2.subscribe(&topic).unwrap(); + let original_score = gs1.peer_score.as_ref().unwrap().0.score(&p2); - let forward_messages_to_p1 = |gs1: &mut Gossipsub<_, _>, gs2: &mut Gossipsub<_, _>| { - //collect messages to p1 - let messages_to_p1 = gs2.events.drain(..).filter_map(|e| match e { - NetworkBehaviourAction::NotifyHandler { peer_id, event, .. } => { - if peer_id == p1 { - if let GossipsubHandlerIn::Message(m) = Arc::try_unwrap(event).unwrap() { - Some(m) - } else { - None - } + //subscribe to topic in gs2 + gs2.subscribe(&topic).unwrap(); + + let forward_messages_to_p1 = |gs1: &mut Gossipsub<_, _>, gs2: &mut Gossipsub<_, _>| { + //collect messages to p1 + let messages_to_p1 = gs2.events.drain(..).filter_map(|e| match e { + NetworkBehaviourAction::NotifyHandler { peer_id, event, .. 
} => { + if peer_id == p1 { + if let GossipsubHandlerIn::Message(m) = Arc::try_unwrap(event).unwrap() { + Some(m) } else { None } + } else { + None } - _ => None, - }); - for message in messages_to_p1 { - gs1.inject_event( - p2, - connection_id, - HandlerEvent::Message { - rpc: proto_to_message(&message), - invalid_messages: vec![], - }, - ); } - }; - - //forward the subscribe message - forward_messages_to_p1(&mut gs1, &mut gs2); - - //heartbeats on both - gs1.heartbeat(); - gs2.heartbeat(); - - //forward messages again - forward_messages_to_p1(&mut gs1, &mut gs2); - - //nobody got penalized - assert!(gs1.peer_score.as_ref().unwrap().0.score(&p2) >= original_score); - } + _ => None, + }); + for message in messages_to_p1 { + gs1.inject_event( + p2, + connection_id, + HandlerEvent::Message { + rpc: proto_to_message(&message), + invalid_messages: vec![], + }, + ); + } + }; - #[test] - /// Test nodes that send grafts without subscriptions. - fn test_graft_without_subscribe() { - // The node should: - // - Create an empty vector in mesh[topic] - // - Send subscription request to all peers - // - run JOIN(topic) - - let topic = String::from("test_subscribe"); - let subscribe_topic = vec![topic.clone()]; - let subscribe_topic_hash = vec![Topic::new(topic.clone()).hash()]; - let (mut gs, peers, topic_hashes) = inject_nodes1() - .peer_no(1) - .topics(subscribe_topic) - .to_subscribe(false) - .create_network(); + //forward the subscribe message + forward_messages_to_p1(&mut gs1, &mut gs2); - assert!( - gs.mesh.get(&topic_hashes[0]).is_some(), - "Subscribe should add a new entry to the mesh[topic] hashmap" - ); + //heartbeats on both + gs1.heartbeat(); + gs2.heartbeat(); - // The node sends a graft for the subscribe topic. - gs.handle_graft(&peers[0], subscribe_topic_hash); + //forward messages again + forward_messages_to_p1(&mut gs1, &mut gs2); - // The node disconnects - disconnect_peer(&mut gs, &peers[0]); + //nobody got penalized + assert!(gs1.peer_score.as_ref().unwrap().0.score(&p2) >= original_score); +} - // We unsubscribe from the topic. - let _ = gs.unsubscribe(&Topic::new(topic)); - } +#[test] +/// Test nodes that send grafts without subscriptions. +fn test_graft_without_subscribe() { + // The node should: + // - Create an empty vector in mesh[topic] + // - Send subscription request to all peers + // - run JOIN(topic) + + let topic = String::from("test_subscribe"); + let subscribe_topic = vec![topic.clone()]; + let subscribe_topic_hash = vec![Topic::new(topic.clone()).hash()]; + let (mut gs, peers, topic_hashes) = inject_nodes1() + .peer_no(1) + .topics(subscribe_topic) + .to_subscribe(false) + .create_network(); + + assert!( + gs.mesh.get(&topic_hashes[0]).is_some(), + "Subscribe should add a new entry to the mesh[topic] hashmap" + ); + + // The node sends a graft for the subscribe topic. + gs.handle_graft(&peers[0], subscribe_topic_hash); + + // The node disconnects + disconnect_peer(&mut gs, &peers[0]); + + // We unsubscribe from the topic. 
+ let _ = gs.unsubscribe(&Topic::new(topic)); +} } From a6638350b12ebb5bb341bc79572477a635570577 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Wed, 28 Sep 2022 16:36:27 +1000 Subject: [PATCH 4/7] Remove module --- protocols/gossipsub/src/behaviour/tests.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index d58d5373b77..d8c20b37c62 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -20,8 +20,7 @@ // Collection of tests for the gossipsub network behaviour -mod tests { -use super::super::*; +use super::*; use crate::error::ValidationError; use crate::subscription_filter::WhitelistSubscriptionFilter; use crate::transform::{DataTransform, IdentityTransform}; @@ -5270,4 +5269,3 @@ fn test_graft_without_subscribe() { // We unsubscribe from the topic. let _ = gs.unsubscribe(&Topic::new(topic)); } -} From b2bed821cf7a26037b0cd53a4a3589e30f964d2f Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Wed, 28 Sep 2022 16:46:03 +1000 Subject: [PATCH 5/7] Run cargo fmt --- protocols/gossipsub/src/behaviour/tests.rs | 92 ++++++++++------------ 1 file changed, 41 insertions(+), 51 deletions(-) diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index d8c20b37c62..1540a59175d 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -369,23 +369,23 @@ fn test_subscribe() { ); // collect all the subscriptions - let subscriptions = - gs.events - .iter() - .fold(vec![], |mut collected_subscriptions, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { - GossipsubHandlerIn::Message(ref message) => { - for s in &message.subscriptions { - if let Some(true) = s.subscribe { - collected_subscriptions.push(s.clone()) - }; - } - collected_subscriptions + let subscriptions = gs + .events + .iter() + .fold(vec![], |mut collected_subscriptions, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { + GossipsubHandlerIn::Message(ref message) => { + for s in &message.subscriptions { + if let Some(true) = s.subscribe { + collected_subscriptions.push(s.clone()) + }; } - _ => collected_subscriptions, - }, + collected_subscriptions + } _ => collected_subscriptions, - }); + }, + _ => collected_subscriptions, + }); // we sent a subscribe to all known peers assert!( @@ -437,23 +437,23 @@ fn test_unsubscribe() { ); // collect all the subscriptions - let subscriptions = - gs.events - .iter() - .fold(vec![], |mut collected_subscriptions, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => match **event { - GossipsubHandlerIn::Message(ref message) => { - for s in &message.subscriptions { - if let Some(true) = s.subscribe { - collected_subscriptions.push(s.clone()) - }; - } - collected_subscriptions + let subscriptions = gs + .events + .iter() + .fold(vec![], |mut collected_subscriptions, e| match e { + NetworkBehaviourAction::NotifyHandler { event, .. 
} => match **event { + GossipsubHandlerIn::Message(ref message) => { + for s in &message.subscriptions { + if let Some(true) = s.subscribe { + collected_subscriptions.push(s.clone()) + }; } - _ => collected_subscriptions, - }, + collected_subscriptions + } _ => collected_subscriptions, - }); + }, + _ => collected_subscriptions, + }); // we sent a unsubscribe to all known peers, for two topics assert!( @@ -974,10 +974,9 @@ fn test_get_random_peers() { false }); assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); - let random_peers = - get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 10, { - |peer| peers.contains(peer) - }); + let random_peers = get_random_peers(&gs.topic_peers, &gs.connected_peers, &topic_hash, 10, { + |peer| peers.contains(peer) + }); assert!(random_peers.len() == 10, "Expected 10 peers to be returned"); } @@ -1357,9 +1356,7 @@ fn test_explicit_peer_gets_connected() { .events .iter() .filter(|e| match e { - NetworkBehaviourAction::Dial { opts, handler: _ } => { - opts.get_peer_id() == Some(peer) - } + NetworkBehaviourAction::Dial { opts, handler: _ } => opts.get_peer_id() == Some(peer), _ => false, }) .count(); @@ -2195,8 +2192,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() { let config: GossipsubConfig = GossipsubConfig::default(); //add a lot of peers - let m = - config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; + let m = config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; let (mut gs, _, topic_hashes) = inject_nodes1() .peer_no(m) .topics(vec!["topic".into()]) @@ -3523,8 +3519,7 @@ fn test_scoring_p3b() { assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), - 100.0 - + (expected_b3 * -3.0 + (5f64 - expected_message_deliveries).powi(2) * -2.0) * 0.7 + 100.0 + (expected_b3 * -3.0 + (5f64 - expected_message_deliveries).powi(2) * -2.0) * 0.7 ); } @@ -4809,8 +4804,7 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { Multiaddr::empty(), Some(PeerKind::Floodsub), ); - let p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + let p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); //p1 and p2 are not in the mesh assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2)); @@ -4870,8 +4864,7 @@ fn test_do_not_use_floodsub_in_fanout() { Multiaddr::empty(), Some(PeerKind::Floodsub), ); - let p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + let p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); //publish a message let publish_data = vec![0; 42]; @@ -4928,8 +4921,7 @@ fn test_dont_add_floodsub_peers_to_mesh_on_join() { Multiaddr::empty(), Some(PeerKind::Floodsub), ); - let _p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); gs.join(&topics[0]); @@ -4993,8 +4985,7 @@ fn test_dont_send_floodsub_peers_in_px() { Multiaddr::empty(), Some(PeerKind::Floodsub), ); - let _p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); + let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); //prune only mesh node gs.send_graft_prune( @@ -5031,8 +5022,7 @@ fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() { Multiaddr::empty(), Some(PeerKind::Floodsub), ); - 
let _p2 = - add_peer_with_addr_and_kind(&mut gs, &topics, true, false, Multiaddr::empty(), None); + let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, true, false, Multiaddr::empty(), None); gs.heartbeat(); From cb4da08e8cbdea4b28b9f6f1ce1c86b5f5b64270 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Wed, 28 Sep 2022 14:50:11 +1000 Subject: [PATCH 6/7] Enforce clippy across the entire workspace --- .cargo/config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index df790f03322..aff4bee9fbe 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,3 @@ [alias] # Temporary solution to have clippy config in a single place until https://github.com/rust-lang/rust-clippy/blob/master/doc/roadmap-2021.md#lintstoml-configuration is shipped. -custom-clippy = "clippy --all-features --all-targets -- -A clippy::type_complexity -A clippy::pedantic -D warnings" +custom-clippy = "clippy --workspace --all-features --all-targets -- -A clippy::type_complexity -A clippy::pedantic -D warnings" From 2d2937a3e5d1f6ef8f3c5745673dd17c39285e40 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Fri, 30 Sep 2022 16:05:53 +1000 Subject: [PATCH 7/7] Fix bad diff --- protocols/gossipsub/src/behaviour/tests.rs | 2 +- protocols/gossipsub/src/peer_score/tests.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 1540a59175d..71f4aae9b50 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -3343,7 +3343,7 @@ fn test_scoring_p3() { first_message_deliveries_weight: 0.0, //deactivate first time deliveries mesh_message_deliveries_weight: -2.0, mesh_message_deliveries_decay: 0.9, - mesh_message_deliveries_cap: 0.0, + mesh_message_deliveries_cap: 10.0, mesh_message_deliveries_threshold: 5.0, mesh_message_deliveries_activation: Duration::from_secs(1), mesh_message_deliveries_window: Duration::from_millis(100), diff --git a/protocols/gossipsub/src/peer_score/tests.rs b/protocols/gossipsub/src/peer_score/tests.rs index 1c47eeeb183..c457ffe0f70 100644 --- a/protocols/gossipsub/src/peer_score/tests.rs +++ b/protocols/gossipsub/src/peer_score/tests.rs @@ -228,7 +228,7 @@ fn test_score_first_message_deliveries_cap() { let topic_params = TopicScoreParams { topic_weight: 1.0, first_message_deliveries_weight: 1.0, - first_message_deliveries_decay: 1.0, + first_message_deliveries_decay: 1.0, // test without decay first_message_deliveries_cap: 50.0, time_in_mesh_weight: 0.0, ..Default::default() @@ -274,7 +274,7 @@ fn test_score_first_message_deliveries_decay() { let topic_params = TopicScoreParams { topic_weight: 1.0, first_message_deliveries_weight: 1.0, - first_message_deliveries_decay: 0.9, + first_message_deliveries_decay: 0.9, // decay 10% per decay interval first_message_deliveries_cap: 2000.0, time_in_mesh_weight: 0.0, ..Default::default()