diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index cc61927fd64..c24cff2e6e9 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -62,6 +62,8 @@ jobs:
       - name: Check if we compile without any features activated
         run: cargo build --package ${{ matrix.crate }} --no-default-features
 
+      - run: cargo clean
+
       - name: Check if crate has been released
         id: check-released
         run: |
diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md
index 53e9233919a..21535bbf96c 100644
--- a/protocols/kad/CHANGELOG.md
+++ b/protocols/kad/CHANGELOG.md
@@ -1,8 +1,10 @@
 ## 0.43.2 - unreleased
 
 - Export pub enum `RoutingUpdate`. See [PR 3739].
+- Deprecate `handler`, `kbucket`, `protocol`, `record` modules to make them private. See [PR 3738].
 
 [PR 3739]: https://github.com/libp2p/rust-libp2p/pull/3739
+[PR 3738]: https://github.com/libp2p/rust-libp2p/pull/3738
 
 ## 0.43.1
 
diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs
index 8d1ee716973..b34c5e428f7 100644
--- a/protocols/kad/src/behaviour.rs
+++ b/protocols/kad/src/behaviour.rs
@@ -23,15 +23,15 @@
 mod test;
 
 use crate::addresses::Addresses;
-use crate::handler::{
+use crate::handler_priv::{
     KademliaHandler, KademliaHandlerConfig, KademliaHandlerEvent, KademliaHandlerIn,
     KademliaRequestId,
 };
 use crate::jobs::*;
-use crate::kbucket::{self, Distance, KBucketsTable, NodeStatus};
+use crate::kbucket_priv::{self, Distance, KBucketsTable, NodeStatus};
 use crate::protocol::{KadConnectionType, KadPeer, KademliaProtocolConfig};
 use crate::query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState};
-use crate::record::{
+use crate::record_priv::{
     self,
     store::{self, RecordStore},
     ProviderRecord, Record,
@@ -66,7 +66,7 @@ pub use crate::query::QueryStats;
 /// Kademlia protocol.
 pub struct Kademlia<TStore> {
     /// The Kademlia routing table.
-    kbuckets: KBucketsTable<kbucket::Key<PeerId>, Addresses>,
+    kbuckets: KBucketsTable<kbucket_priv::Key<PeerId>, Addresses>,
 
     /// The k-bucket insertion strategy.
     kbucket_inserts: KademliaBucketInserts,
@@ -148,7 +148,7 @@ pub enum KademliaBucketInserts {
 /// This can be used for e.g. signature verification or validating
 /// the accompanying [`Key`].
 ///
-/// [`Key`]: crate::record::Key
+/// [`Key`]: crate::record_priv::Key
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub enum KademliaStoreInserts {
     /// Whenever a (provider) record is received,
@@ -417,7 +417,7 @@ where
 
     /// Creates a new `Kademlia` network behaviour with the given configuration.
     pub fn with_config(id: PeerId, store: TStore, config: KademliaConfig) -> Self {
-        let local_key = kbucket::Key::from(id);
+        let local_key = kbucket_priv::Key::from(id);
 
         let put_record_job = config
             .record_replication_interval
@@ -518,9 +518,9 @@ where
     /// If the routing table has been updated as a result of this operation,
     /// a [`KademliaEvent::RoutingUpdated`] event is emitted.
pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) -> RoutingUpdate { - let key = kbucket::Key::from(*peer); + let key = kbucket_priv::Key::from(*peer); match self.kbuckets.entry(&key) { - kbucket::Entry::Present(mut entry, _) => { + kbucket_priv::Entry::Present(mut entry, _) => { if entry.value().insert(address) { self.queued_events.push_back(ToSwarm::GenerateEvent( KademliaEvent::RoutingUpdated { @@ -538,11 +538,11 @@ where } RoutingUpdate::Success } - kbucket::Entry::Pending(mut entry, _) => { + kbucket_priv::Entry::Pending(mut entry, _) => { entry.value().insert(address); RoutingUpdate::Pending } - kbucket::Entry::Absent(entry) => { + kbucket_priv::Entry::Absent(entry) => { let addresses = Addresses::new(address); let status = if self.connected_peers.contains(peer) { NodeStatus::Connected @@ -550,7 +550,7 @@ where NodeStatus::Disconnected }; match entry.insert(addresses.clone(), status) { - kbucket::InsertResult::Inserted => { + kbucket_priv::InsertResult::Inserted => { self.queued_events.push_back(ToSwarm::GenerateEvent( KademliaEvent::RoutingUpdated { peer: *peer, @@ -566,11 +566,11 @@ where )); RoutingUpdate::Success } - kbucket::InsertResult::Full => { + kbucket_priv::InsertResult::Full => { debug!("Bucket full. Peer not added to routing table: {}", peer); RoutingUpdate::Failed } - kbucket::InsertResult::Pending { disconnected } => { + kbucket_priv::InsertResult::Pending { disconnected } => { self.queued_events.push_back(ToSwarm::Dial { opts: DialOpts::peer_id(disconnected.into_preimage()).build(), }); @@ -578,7 +578,7 @@ where } } } - kbucket::Entry::SelfEntry => RoutingUpdate::Failed, + kbucket_priv::Entry::SelfEntry => RoutingUpdate::Failed, } } @@ -596,24 +596,24 @@ where &mut self, peer: &PeerId, address: &Multiaddr, - ) -> Option, Addresses>> { - let key = kbucket::Key::from(*peer); + ) -> Option, Addresses>> { + let key = kbucket_priv::Key::from(*peer); match self.kbuckets.entry(&key) { - kbucket::Entry::Present(mut entry, _) => { + kbucket_priv::Entry::Present(mut entry, _) => { if entry.value().remove(address).is_err() { Some(entry.remove()) // it is the last address, thus remove the peer. } else { None } } - kbucket::Entry::Pending(mut entry, _) => { + kbucket_priv::Entry::Pending(mut entry, _) => { if entry.value().remove(address).is_err() { Some(entry.remove()) // it is the last address, thus remove the peer. } else { None } } - kbucket::Entry::Absent(..) | kbucket::Entry::SelfEntry => None, + kbucket_priv::Entry::Absent(..) | kbucket_priv::Entry::SelfEntry => None, } } @@ -624,19 +624,20 @@ where pub fn remove_peer( &mut self, peer: &PeerId, - ) -> Option, Addresses>> { - let key = kbucket::Key::from(*peer); + ) -> Option, Addresses>> { + let key = kbucket_priv::Key::from(*peer); match self.kbuckets.entry(&key) { - kbucket::Entry::Present(entry, _) => Some(entry.remove()), - kbucket::Entry::Pending(entry, _) => Some(entry.remove()), - kbucket::Entry::Absent(..) | kbucket::Entry::SelfEntry => None, + kbucket_priv::Entry::Present(entry, _) => Some(entry.remove()), + kbucket_priv::Entry::Pending(entry, _) => Some(entry.remove()), + kbucket_priv::Entry::Absent(..) | kbucket_priv::Entry::SelfEntry => None, } } /// Returns an iterator over all non-empty buckets in the routing table. 
pub fn kbuckets( &mut self, - ) -> impl Iterator, Addresses>> { + ) -> impl Iterator, Addresses>> + { self.kbuckets.iter().filter(|b| !b.is_empty()) } @@ -646,9 +647,9 @@ where pub fn kbucket( &mut self, key: K, - ) -> Option, Addresses>> + ) -> Option, Addresses>> where - K: Into> + Clone, + K: Into> + Clone, { self.kbuckets.bucket(&key.into()) } @@ -659,15 +660,16 @@ where /// [`KademliaEvent::OutboundQueryCompleted{QueryResult::GetClosestPeers}`]. pub fn get_closest_peers(&mut self, key: K) -> QueryId where - K: Into> + Into> + Clone, + K: Into> + Into> + Clone, { - let target: kbucket::Key = key.clone().into(); + let target: kbucket_priv::Key = key.clone().into(); let key: Vec = key.into(); let info = QueryInfo::GetClosestPeers { key, step: ProgressStep::first(), }; - let peer_keys: Vec> = self.kbuckets.closest_keys(&target).collect(); + let peer_keys: Vec> = + self.kbuckets.closest_keys(&target).collect(); let inner = QueryInner::new(info); self.queries.add_iter_closest(target, peer_keys, inner) } @@ -675,8 +677,8 @@ where /// Returns closest peers to the given key; takes peers from local routing table only. pub fn get_closest_local_peers<'a, K: Clone>( &'a mut self, - key: &'a kbucket::Key, - ) -> impl Iterator> + 'a { + key: &'a kbucket_priv::Key, + ) -> impl Iterator> + 'a { self.kbuckets.closest_keys(key) } @@ -684,7 +686,7 @@ where /// /// The result of this operation is delivered in a /// [`KademliaEvent::OutboundQueryCompleted{QueryResult::GetRecord}`]. - pub fn get_record(&mut self, key: record::Key) -> QueryId { + pub fn get_record(&mut self, key: record_priv::Key) -> QueryId { let record = if let Some(record) = self.store.get(&key) { if record.is_expired(Instant::now()) { self.store.remove(&key); @@ -701,7 +703,7 @@ where let step = ProgressStep::first(); - let target = kbucket::Key::new(key.clone()); + let target = kbucket_priv::Key::new(key.clone()); let info = if record.is_some() { QueryInfo::GetRecord { key, @@ -767,7 +769,7 @@ where .expires .or_else(|| self.record_ttl.map(|ttl| Instant::now() + ttl)); let quorum = quorum.eval(self.queries.config().replication_factor); - let target = kbucket::Key::new(record.key.clone()); + let target = kbucket_priv::Key::new(record.key.clone()); let peers = self.kbuckets.closest_keys(&target); let context = PutRecordContext::Publish; let info = QueryInfo::PutRecord { @@ -835,7 +837,7 @@ where /// This is a _local_ operation. However, it also has the effect that /// the record will no longer be periodically re-published, allowing the /// record to eventually expire throughout the DHT. - pub fn remove_record(&mut self, key: &record::Key) { + pub fn remove_record(&mut self, key: &record_priv::Key) { if let Some(r) = self.store.get(key) { if r.publisher.as_ref() == Some(self.kbuckets.local_key().preimage()) { self.store.remove(key) @@ -905,7 +907,7 @@ where /// /// The results of the (repeated) provider announcements sent by this node are /// reported via [`KademliaEvent::OutboundQueryCompleted{QueryResult::StartProviding}`]. - pub fn start_providing(&mut self, key: record::Key) -> Result { + pub fn start_providing(&mut self, key: record_priv::Key) -> Result { // Note: We store our own provider records locally without local addresses // to avoid redundant storage and outdated addresses. Instead these are // acquired on demand when returning a `ProviderRecord` for the local node. 
@@ -916,7 +918,7 @@ where local_addrs, ); self.store.add_provider(record)?; - let target = kbucket::Key::new(key.clone()); + let target = kbucket_priv::Key::new(key.clone()); let peers = self.kbuckets.closest_keys(&target); let context = AddProviderContext::Publish; let info = QueryInfo::AddProvider { @@ -933,7 +935,7 @@ where /// /// This is a local operation. The local node will still be considered as a /// provider for the key by other nodes until these provider records expire. - pub fn stop_providing(&mut self, key: &record::Key) { + pub fn stop_providing(&mut self, key: &record_priv::Key) { self.store .remove_provider(key, self.kbuckets.local_key().preimage()); } @@ -942,7 +944,7 @@ where /// /// The result of this operation is delivered in a /// reported via [`KademliaEvent::OutboundQueryCompleted{QueryResult::GetProviders}`]. - pub fn get_providers(&mut self, key: record::Key) -> QueryId { + pub fn get_providers(&mut self, key: record_priv::Key) -> QueryId { let providers: HashSet<_> = self .store .providers(&key) @@ -963,7 +965,7 @@ where }, }; - let target = kbucket::Key::new(key.clone()); + let target = kbucket_priv::Key::new(key.clone()); let peers = self.kbuckets.closest_keys(&target); let inner = QueryInner::new(info); let id = self.queries.add_iter_closest(target.clone(), peers, inner); @@ -1015,7 +1017,7 @@ where /// result. fn find_closest( &mut self, - target: &kbucket::Key, + target: &kbucket_priv::Key, source: &PeerId, ) -> Vec { if target == self.kbuckets.local_key() { @@ -1031,7 +1033,7 @@ where } /// Collects all peers who are known to be providers of the value for a given `Multihash`. - fn provider_peers(&mut self, key: &record::Key, source: &PeerId) -> Vec { + fn provider_peers(&mut self, key: &record_priv::Key, source: &PeerId) -> Vec { let kbuckets = &mut self.kbuckets; let connected = &mut self.connected_peers; let listen_addresses = &self.listen_addresses; @@ -1065,7 +1067,7 @@ where .collect::>(), ) } else { - let key = kbucket::Key::from(node_id); + let key = kbucket_priv::Key::from(node_id); kbuckets .entry(&key) .view() @@ -1088,13 +1090,13 @@ where } /// Starts an iterative `ADD_PROVIDER` query for the given key. - fn start_add_provider(&mut self, key: record::Key, context: AddProviderContext) { + fn start_add_provider(&mut self, key: record_priv::Key, context: AddProviderContext) { let info = QueryInfo::AddProvider { context, key: key.clone(), phase: AddProviderPhase::GetClosestPeers, }; - let target = kbucket::Key::new(key); + let target = kbucket_priv::Key::new(key); let peers = self.kbuckets.closest_keys(&target); let inner = QueryInner::new(info); self.queries.add_iter_closest(target.clone(), peers, inner); @@ -1103,7 +1105,7 @@ where /// Starts an iterative `PUT_VALUE` query for the given record. 
fn start_put_record(&mut self, record: Record, quorum: Quorum, context: PutRecordContext) { let quorum = quorum.eval(self.queries.config().replication_factor); - let target = kbucket::Key::new(record.key.clone()); + let target = kbucket_priv::Key::new(record.key.clone()); let peers = self.kbuckets.closest_keys(&target); let info = QueryInfo::PutRecord { record, @@ -1122,9 +1124,9 @@ where address: Option, new_status: NodeStatus, ) { - let key = kbucket::Key::from(peer); + let key = kbucket_priv::Key::from(peer); match self.kbuckets.entry(&key) { - kbucket::Entry::Present(mut entry, old_status) => { + kbucket_priv::Entry::Present(mut entry, old_status) => { if old_status != new_status { entry.update(new_status) } @@ -1147,7 +1149,7 @@ where } } - kbucket::Entry::Pending(mut entry, old_status) => { + kbucket_priv::Entry::Pending(mut entry, old_status) => { if let Some(address) = address { entry.value().insert(address); } @@ -1156,7 +1158,7 @@ where } } - kbucket::Entry::Absent(entry) => { + kbucket_priv::Entry::Absent(entry) => { // Only connected nodes with a known address are newly inserted. if new_status != NodeStatus::Connected { return; @@ -1175,7 +1177,7 @@ where (Some(a), KademliaBucketInserts::OnConnected) => { let addresses = Addresses::new(a); match entry.insert(addresses.clone(), new_status) { - kbucket::InsertResult::Inserted => { + kbucket_priv::InsertResult::Inserted => { let event = KademliaEvent::RoutingUpdated { peer, is_new_peer: true, @@ -1189,14 +1191,14 @@ where }; self.queued_events.push_back(ToSwarm::GenerateEvent(event)); } - kbucket::InsertResult::Full => { + kbucket_priv::InsertResult::Full => { debug!("Bucket full. Peer not added to routing table: {}", peer); let address = addresses.first().clone(); self.queued_events.push_back(ToSwarm::GenerateEvent( KademliaEvent::RoutablePeer { peer, address }, )); } - kbucket::InsertResult::Pending { disconnected } => { + kbucket_priv::InsertResult::Pending { disconnected } => { let address = addresses.first().clone(); self.queued_events.push_back(ToSwarm::GenerateEvent( KademliaEvent::PendingRoutablePeer { peer, address }, @@ -1259,13 +1261,13 @@ where // Pr(bucket-253) = 1 - (7/8)^16 ~= 0.88 // Pr(bucket-252) = 1 - (15/16)^16 ~= 0.64 // ... - let mut target = kbucket::Key::from(PeerId::random()); + let mut target = kbucket_priv::Key::from(PeerId::random()); for _ in 0..16 { let d = local_key.distance(&target); if b.contains(&d) { break; } - target = kbucket::Key::from(PeerId::random()); + target = kbucket_priv::Key::from(PeerId::random()); } target }) @@ -1426,7 +1428,7 @@ where get_closest_peers_stats, }, } => { - let mk_result = |key: record::Key| { + let mk_result = |key: record_priv::Key| { if success.len() >= quorum.get() { Ok(PutRecordOk { key }) } else { @@ -1630,7 +1632,7 @@ where // number of nodes between the local node and the closest node to the key // (beyond the replication factor). This ensures avoiding over-caching // outside of the k closest nodes to a key. - let target = kbucket::Key::new(record.key.clone()); + let target = kbucket_priv::Key::new(record.key.clone()); let num_between = self.kbuckets.count_nodes_between(&target); let k = self.queries.config().replication_factor.get(); let num_beyond_k = (usize::max(k, num_between) - k) as u32; @@ -1725,7 +1727,7 @@ where } /// Processes a provider record received from a peer. 
- fn provider_received(&mut self, key: record::Key, provider: KadPeer) { + fn provider_received(&mut self, key: record_priv::Key, provider: KadPeer) { if &provider.node_id != self.kbuckets.local_key().preimage() { let record = ProviderRecord { key, @@ -1760,7 +1762,7 @@ where } fn address_failed(&mut self, peer_id: PeerId, address: &Multiaddr) { - let key = kbucket::Key::from(peer_id); + let key = kbucket_priv::Key::from(peer_id); if let Some(addrs) = self.kbuckets.entry(&key).value() { // TODO: Ideally, the address should only be removed if the error can @@ -1849,7 +1851,7 @@ where let (old, new) = (old.get_remote_address(), new.get_remote_address()); // Update routing table. - if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::from(peer)).value() { + if let Some(addrs) = self.kbuckets.entry(&kbucket_priv::Key::from(peer)).value() { if addrs.replace(old, new) { debug!( "Address '{}' replaced with '{}' for peer '{}'.", @@ -2022,9 +2024,9 @@ where // We should order addresses from decreasing likelyhood of connectivity, so start with // the addresses of that peer in the k-buckets. - let key = kbucket::Key::from(peer_id); + let key = kbucket_priv::Key::from(peer_id); let mut peer_addrs = - if let kbucket::Entry::Present(mut entry, _) = self.kbuckets.entry(&key) { + if let kbucket_priv::Entry::Present(mut entry, _) = self.kbuckets.entry(&key) { let addrs = entry.value().iter().cloned().collect::>(); debug_assert!(!addrs.is_empty(), "Empty peer addresses in routing table."); addrs @@ -2063,7 +2065,7 @@ where } KademliaHandlerEvent::FindNodeReq { key, request_id } => { - let closer_peers = self.find_closest(&kbucket::Key::new(key), &source); + let closer_peers = self.find_closest(&kbucket_priv::Key::new(key), &source); self.queued_events.push_back(ToSwarm::GenerateEvent( KademliaEvent::InboundRequest { @@ -2092,7 +2094,7 @@ where KademliaHandlerEvent::GetProvidersReq { key, request_id } => { let provider_peers = self.provider_peers(&key, &source); - let closer_peers = self.find_closest(&kbucket::Key::new(key), &source); + let closer_peers = self.find_closest(&kbucket_priv::Key::new(key), &source); self.queued_events.push_back(ToSwarm::GenerateEvent( KademliaEvent::InboundRequest { @@ -2188,7 +2190,7 @@ where None => None, }; - let closer_peers = self.find_closest(&kbucket::Key::new(key), &source); + let closer_peers = self.find_closest(&kbucket_priv::Key::new(key), &source); self.queued_events.push_back(ToSwarm::GenerateEvent( KademliaEvent::InboundRequest { @@ -2246,8 +2248,8 @@ where } else { log::trace!("Record with key {:?} not found at {}", key, source); if let KademliaCaching::Enabled { max_peers } = self.caching { - let source_key = kbucket::Key::from(source); - let target_key = kbucket::Key::from(key.clone()); + let source_key = kbucket_priv::Key::from(source); + let target_key = kbucket_priv::Key::from(key.clone()); let distance = source_key.distance(&target_key); cache_candidates.insert(distance, source); if cache_candidates.len() > max_peers as usize { @@ -2352,7 +2354,7 @@ where // Drain applied pending entries from the routing table. if let Some(entry) = self.kbuckets.take_applied_pending() { - let kbucket::Node { key, value } = entry.inserted; + let kbucket_priv::Node { key, value } = entry.inserted; let event = KademliaEvent::RoutingUpdated { bucket_range: self .kbuckets @@ -2667,7 +2669,7 @@ pub enum GetRecordOk { /// If the lookup used a quorum > 1, you may wish to use these /// candidates with [`Kademlia::put_record_to`] after selecting /// one of the returned records. 
- cache_candidates: BTreeMap, + cache_candidates: BTreeMap, }, } @@ -2676,22 +2678,22 @@ pub enum GetRecordOk { pub enum GetRecordError { #[error("the record was not found")] NotFound { - key: record::Key, + key: record_priv::Key, closest_peers: Vec, }, #[error("the quorum failed; needed {quorum} peers")] QuorumFailed { - key: record::Key, + key: record_priv::Key, records: Vec, quorum: NonZeroUsize, }, #[error("the request timed out")] - Timeout { key: record::Key }, + Timeout { key: record_priv::Key }, } impl GetRecordError { /// Gets the key of the record for which the operation failed. - pub fn key(&self) -> &record::Key { + pub fn key(&self) -> &record_priv::Key { match self { GetRecordError::QuorumFailed { key, .. } => key, GetRecordError::Timeout { key, .. } => key, @@ -2701,7 +2703,7 @@ impl GetRecordError { /// Extracts the key of the record for which the operation failed, /// consuming the error. - pub fn into_key(self) -> record::Key { + pub fn into_key(self) -> record_priv::Key { match self { GetRecordError::QuorumFailed { key, .. } => key, GetRecordError::Timeout { key, .. } => key, @@ -2716,7 +2718,7 @@ pub type PutRecordResult = Result; /// The successful result of [`Kademlia::put_record`]. #[derive(Debug, Clone)] pub struct PutRecordOk { - pub key: record::Key, + pub key: record_priv::Key, } /// The error result of [`Kademlia::put_record`]. @@ -2724,14 +2726,14 @@ pub struct PutRecordOk { pub enum PutRecordError { #[error("the quorum failed; needed {quorum} peers")] QuorumFailed { - key: record::Key, + key: record_priv::Key, /// [`PeerId`]s of the peers the record was successfully stored on. success: Vec, quorum: NonZeroUsize, }, #[error("the request timed out")] Timeout { - key: record::Key, + key: record_priv::Key, /// [`PeerId`]s of the peers the record was successfully stored on. success: Vec, quorum: NonZeroUsize, @@ -2740,7 +2742,7 @@ pub enum PutRecordError { impl PutRecordError { /// Gets the key of the record for which the operation failed. - pub fn key(&self) -> &record::Key { + pub fn key(&self) -> &record_priv::Key { match self { PutRecordError::QuorumFailed { key, .. } => key, PutRecordError::Timeout { key, .. } => key, @@ -2749,7 +2751,7 @@ impl PutRecordError { /// Extracts the key of the record for which the operation failed, /// consuming the error. - pub fn into_key(self) -> record::Key { + pub fn into_key(self) -> record_priv::Key { match self { PutRecordError::QuorumFailed { key, .. } => key, PutRecordError::Timeout { key, .. } => key, @@ -2818,7 +2820,7 @@ pub type GetProvidersResult = Result; #[derive(Debug, Clone)] pub enum GetProvidersOk { FoundProviders { - key: record::Key, + key: record_priv::Key, /// The new set of providers discovered. providers: HashSet, }, @@ -2832,14 +2834,14 @@ pub enum GetProvidersOk { pub enum GetProvidersError { #[error("the request timed out")] Timeout { - key: record::Key, + key: record_priv::Key, closest_peers: Vec, }, } impl GetProvidersError { /// Gets the key for which the operation failed. - pub fn key(&self) -> &record::Key { + pub fn key(&self) -> &record_priv::Key { match self { GetProvidersError::Timeout { key, .. } => key, } @@ -2847,7 +2849,7 @@ impl GetProvidersError { /// Extracts the key for which the operation failed, /// consuming the error. - pub fn into_key(self) -> record::Key { + pub fn into_key(self) -> record_priv::Key { match self { GetProvidersError::Timeout { key, .. } => key, } @@ -2860,34 +2862,34 @@ pub type AddProviderResult = Result; /// The successful result of publishing a provider record. 
#[derive(Debug, Clone)] pub struct AddProviderOk { - pub key: record::Key, + pub key: record_priv::Key, } /// The possible errors when publishing a provider record. #[derive(Debug, Clone, Error)] pub enum AddProviderError { #[error("the request timed out")] - Timeout { key: record::Key }, + Timeout { key: record_priv::Key }, } impl AddProviderError { /// Gets the key for which the operation failed. - pub fn key(&self) -> &record::Key { + pub fn key(&self) -> &record_priv::Key { match self { AddProviderError::Timeout { key, .. } => key, } } /// Extracts the key for which the operation failed, - pub fn into_key(self) -> record::Key { + pub fn into_key(self) -> record_priv::Key { match self { AddProviderError::Timeout { key, .. } => key, } } } -impl From, Addresses>> for KadPeer { - fn from(e: kbucket::EntryView, Addresses>) -> KadPeer { +impl From, Addresses>> for KadPeer { + fn from(e: kbucket_priv::EntryView, Addresses>) -> KadPeer { KadPeer { node_id: e.node.key.into_preimage(), multiaddrs: e.node.value.into_vec(), @@ -2963,7 +2965,7 @@ pub enum QueryInfo { /// This is `None` if the initial self-lookup has not /// yet completed and `Some` with an exhausted iterator /// if bootstrapping is complete. - remaining: Option>>, + remaining: Option>>, step: ProgressStep, }, @@ -2978,7 +2980,7 @@ pub enum QueryInfo { /// A (repeated) query initiated by [`Kademlia::get_providers`]. GetProviders { /// The key for which to search for providers. - key: record::Key, + key: record_priv::Key, /// The number of providers found so far. providers_found: usize, /// Current index of events. @@ -2988,7 +2990,7 @@ pub enum QueryInfo { /// A (repeated) query initiated by [`Kademlia::start_providing`]. AddProvider { /// The record key. - key: record::Key, + key: record_priv::Key, /// The current phase of the query. phase: AddProviderPhase, /// The execution context of the query. @@ -3009,14 +3011,14 @@ pub enum QueryInfo { /// A (repeated) query initiated by [`Kademlia::get_record`]. GetRecord { /// The key to look for. - key: record::Key, + key: record_priv::Key, /// Current index of events. step: ProgressStep, /// Did we find at least one record? found_a_record: bool, /// The peers closest to the `key` that were queried but did not return a record, /// i.e. the peers that are candidates for caching the record. - cache_candidates: BTreeMap, + cache_candidates: BTreeMap, }, } diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index 33e48377e0b..3a76e2f92b2 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -22,8 +22,8 @@ use super::*; -use crate::kbucket::Distance; -use crate::record::{store::MemoryStore, Key}; +use crate::kbucket_priv::Distance; +use crate::record_priv::{store::MemoryStore, Key}; use crate::{K_VALUE, SHA_256_MH}; use futures::{executor::block_on, future::poll_fn, prelude::*}; use futures_timer::Delay; @@ -234,10 +234,10 @@ fn bootstrap() { #[test] fn query_iter() { - fn distances(key: &kbucket::Key, peers: Vec) -> Vec { + fn distances(key: &kbucket_priv::Key, peers: Vec) -> Vec { peers .into_iter() - .map(kbucket::Key::from) + .map(kbucket_priv::Key::from) .map(|k| k.distance(key)) .collect() } @@ -253,7 +253,7 @@ fn query_iter() { // Ask the first peer in the list to search a random peer. The search should // propagate forwards through the list of peers. 
let search_target = PeerId::random(); - let search_target_key = kbucket::Key::from(search_target); + let search_target_key = kbucket_priv::Key::from(search_target); let qid = swarms[0].behaviour_mut().get_closest_peers(search_target); match swarms[0].behaviour_mut().query(&qid) { @@ -290,7 +290,7 @@ fn query_iter() { assert_eq!(swarm_ids[i], expected_swarm_id); assert_eq!(swarm.behaviour_mut().queries.size(), 0); assert!(expected_peer_ids.iter().all(|p| ok.peers.contains(p))); - let key = kbucket::Key::new(ok.key); + let key = kbucket_priv::Key::new(ok.key); assert_eq!(expected_distances, distances(&key, ok.peers)); return Poll::Ready(()); } @@ -445,7 +445,7 @@ fn get_record_not_found() { .map(|(_addr, swarm)| swarm) .collect::>(); - let target_key = record::Key::from(random_multihash()); + let target_key = record_priv::Key::from(random_multihash()); let qid = swarms[0].behaviour_mut().get_record(target_key.clone()); block_on(poll_fn(move |ctx| { @@ -653,7 +653,7 @@ fn put_record() { assert_eq!(r.expires, expected.expires); assert_eq!(r.publisher, Some(*swarms[0].local_peer_id())); - let key = kbucket::Key::new(r.key.clone()); + let key = kbucket_priv::Key::new(r.key.clone()); let mut expected = swarms .iter() .skip(1) @@ -661,9 +661,9 @@ fn put_record() { .cloned() .collect::>(); expected.sort_by(|id1, id2| { - kbucket::Key::from(*id1) + kbucket_priv::Key::from(*id1) .distance(&key) - .cmp(&kbucket::Key::from(*id2).distance(&key)) + .cmp(&kbucket_priv::Key::from(*id2).distance(&key)) }); let expected = expected @@ -862,7 +862,7 @@ fn get_record_many() { /// network where X is equal to the configured replication factor. #[test] fn add_provider() { - fn prop(keys: Vec, seed: Seed) { + fn prop(keys: Vec, seed: Seed) { let mut rng = StdRng::from_seed(seed.0); let replication_factor = NonZeroUsize::new(rng.gen_range(1..(K_VALUE.get() / 2) + 1)).unwrap(); @@ -992,11 +992,11 @@ fn add_provider() { .map(Swarm::local_peer_id) .cloned() .collect::>(); - let kbucket_key = kbucket::Key::new(key); + let kbucket_key = kbucket_priv::Key::new(key); expected.sort_by(|id1, id2| { - kbucket::Key::from(*id1) + kbucket_priv::Key::from(*id1) .distance(&kbucket_key) - .cmp(&kbucket::Key::from(*id2).distance(&kbucket_key)) + .cmp(&kbucket_priv::Key::from(*id2).distance(&kbucket_key)) }); let expected = expected @@ -1379,7 +1379,7 @@ fn network_behaviour_on_address_change() { #[test] fn get_providers_single() { - fn prop(key: record::Key) { + fn prop(key: record_priv::Key) { let (_, mut single_swarm) = build_node(); single_swarm .behaviour_mut() @@ -1432,7 +1432,7 @@ fn get_providers_single() { } fn get_providers_limit() { - fn prop(key: record::Key) { + fn prop(key: record_priv::Key) { let mut swarms = build_nodes(3); // Let first peer know of second peer and second peer know of third peer. diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler_priv.rs similarity index 99% rename from protocols/kad/src/handler.rs rename to protocols/kad/src/handler_priv.rs index 22c1253dd1e..7901b7e10a2 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler_priv.rs @@ -22,7 +22,7 @@ use crate::protocol::{ KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, KademliaProtocolConfig, }; -use crate::record::{self, Record}; +use crate::record_priv::{self, Record}; use either::Either; use futures::prelude::*; use futures::stream::SelectAll; @@ -255,7 +255,7 @@ pub enum KademliaHandlerEvent { /// this key. GetProvidersReq { /// The key for which providers are requested. 
- key: record::Key, + key: record_priv::Key, /// Identifier of the request. Needs to be passed back when answering. request_id: KademliaRequestId, }, @@ -281,7 +281,7 @@ pub enum KademliaHandlerEvent { /// The peer announced itself as a provider of a key. AddProvider { /// The key for which the peer is a provider of the associated value. - key: record::Key, + key: record_priv::Key, /// The peer that is the provider of the value for `key`. provider: KadPeer, }, @@ -289,7 +289,7 @@ pub enum KademliaHandlerEvent { /// Request to get a value from the dht records GetRecord { /// Key for which we should look in the dht - key: record::Key, + key: record_priv::Key, /// Identifier of the request. Needs to be passed back when answering. request_id: KademliaRequestId, }, @@ -314,7 +314,7 @@ pub enum KademliaHandlerEvent { /// Response to a request to store a record. PutRecordRes { /// The key of the stored record. - key: record::Key, + key: record_priv::Key, /// The value of the stored record. value: Vec, /// The user data passed to the `PutValue`. @@ -403,7 +403,7 @@ pub enum KademliaHandlerIn { /// this key. GetProvidersReq { /// Identifier being searched. - key: record::Key, + key: record_priv::Key, /// Custom user data. Passed back in the out event when the results arrive. user_data: TUserData, }, @@ -426,7 +426,7 @@ pub enum KademliaHandlerIn { /// succeeded. AddProvider { /// Key for which we should add providers. - key: record::Key, + key: record_priv::Key, /// Known provider for this key. provider: KadPeer, }, @@ -434,7 +434,7 @@ pub enum KademliaHandlerIn { /// Request to retrieve a record from the DHT. GetRecord { /// The key of the record. - key: record::Key, + key: record_priv::Key, /// Custom data. Passed back in the out event when the results arrive. user_data: TUserData, }, @@ -459,7 +459,7 @@ pub enum KademliaHandlerIn { /// Response to a `PutRecord`. PutRecordRes { /// Key of the value that was put. - key: record::Key, + key: record_priv::Key, /// Value that was put. value: Vec, /// Identifier of the request that was made by the remote. diff --git a/protocols/kad/src/jobs.rs b/protocols/kad/src/jobs.rs index 94c4fe43eef..22e3dfc6797 100644 --- a/protocols/kad/src/jobs.rs +++ b/protocols/kad/src/jobs.rs @@ -61,7 +61,7 @@ //! > to the size of all stored records. As a job runs, the records are moved //! > out of the job to the consumer, where they can be dropped after being sent. -use crate::record::{self, store::RecordStore, ProviderRecord, Record}; +use crate::record_priv::{self, store::RecordStore, ProviderRecord, Record}; use futures::prelude::*; use futures_timer::Delay; use instant::Instant; @@ -134,7 +134,7 @@ pub struct PutRecordJob { next_publish: Option, publish_interval: Option, record_ttl: Option, - skipped: HashSet, + skipped: HashSet, inner: PeriodicJob>, } @@ -166,7 +166,7 @@ impl PutRecordJob { /// Adds the key of a record that is ignored on the current or /// next run of the job. 
-    pub fn skip(&mut self, key: record::Key) {
+    pub fn skip(&mut self, key: record_priv::Key) {
         self.skipped.insert(key);
     }
 
@@ -327,7 +327,7 @@ impl AddProviderJob {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::record::store::MemoryStore;
+    use crate::record_priv::store::MemoryStore;
     use futures::{executor::block_on, future::poll_fn};
     use quickcheck::*;
     use rand::Rng;
diff --git a/protocols/kad/src/kbucket.rs b/protocols/kad/src/kbucket_priv.rs
similarity index 100%
rename from protocols/kad/src/kbucket.rs
rename to protocols/kad/src/kbucket_priv.rs
diff --git a/protocols/kad/src/kbucket/bucket.rs b/protocols/kad/src/kbucket_priv/bucket.rs
similarity index 100%
rename from protocols/kad/src/kbucket/bucket.rs
rename to protocols/kad/src/kbucket_priv/bucket.rs
diff --git a/protocols/kad/src/kbucket/entry.rs b/protocols/kad/src/kbucket_priv/entry.rs
similarity index 100%
rename from protocols/kad/src/kbucket/entry.rs
rename to protocols/kad/src/kbucket_priv/entry.rs
diff --git a/protocols/kad/src/kbucket/key.rs b/protocols/kad/src/kbucket_priv/key.rs
similarity index 98%
rename from protocols/kad/src/kbucket/key.rs
rename to protocols/kad/src/kbucket_priv/key.rs
index af2999ebea5..1c48184078a 100644
--- a/protocols/kad/src/kbucket/key.rs
+++ b/protocols/kad/src/kbucket_priv/key.rs
@@ -18,7 +18,7 @@
 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 // DEALINGS IN THE SOFTWARE.
 
-use crate::record;
+use crate::record_priv;
 use libp2p_core::multihash::Multihash;
 use libp2p_identity::PeerId;
 use sha2::digest::generic_array::{typenum::U32, GenericArray};
@@ -113,8 +113,8 @@ impl From<Vec<u8>> for Key<Vec<u8>> {
     }
 }
 
-impl From<record::Key> for Key<record::Key> {
-    fn from(k: record::Key) -> Self {
+impl From<record_priv::Key> for Key<record_priv::Key> {
+    fn from(k: record_priv::Key) -> Self {
         Key::new(k)
     }
 }
diff --git a/protocols/kad/src/lib.rs b/protocols/kad/src/lib.rs
index a2174d90991..36f2c74bb41 100644
--- a/protocols/kad/src/lib.rs
+++ b/protocols/kad/src/lib.rs
@@ -38,10 +38,37 @@
 #![allow(dead_code)]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
 
-pub mod handler;
-pub mod kbucket;
-pub mod protocol;
-pub mod record;
+mod handler_priv;
+#[deprecated(
+    note = "The `handler` module will be made private in the future and should not be depended on."
+)]
+pub mod handler {
+    pub use super::handler_priv::*;
+}
+
+mod kbucket_priv;
+#[deprecated(
+    note = "The `kbucket` module will be made private in the future and should not be depended on."
+)]
+pub mod kbucket {
+    pub use super::kbucket_priv::*;
+}
+
+mod protocol_priv;
+#[deprecated(
+    note = "The `protocol` module will be made private in the future and should not be depended on."
+)]
+pub mod protocol {
+    pub use super::protocol_priv::*;
+}
+
+mod record_priv;
+#[deprecated(
+    note = "The `record` module will be made private in the future and should not be depended on."
+)]
+pub mod record {
+    pub use super::record_priv::*;
+}
 
 mod addresses;
 mod behaviour;
@@ -69,9 +96,10 @@ pub use behaviour::{
     Kademlia, KademliaBucketInserts, KademliaCaching, KademliaConfig, KademliaEvent,
     KademliaStoreInserts, ProgressStep, Quorum,
 };
+pub use kbucket_priv::{EntryView, KBucketRef, Key as KBucketKey};
 pub use protocol::KadConnectionType;
 pub use query::QueryId;
-pub use record::{store, ProviderRecord, Record};
+pub use record_priv::{store, Key as RecordKey, ProviderRecord, Record};
 
 use std::num::NonZeroUsize;
 
diff --git a/protocols/kad/src/protocol.rs b/protocols/kad/src/protocol_priv.rs
similarity index 98%
rename from protocols/kad/src/protocol.rs
rename to protocols/kad/src/protocol_priv.rs
index 969d189325b..3f7f9d99443 100644
--- a/protocols/kad/src/protocol.rs
+++ b/protocols/kad/src/protocol_priv.rs
@@ -27,7 +27,7 @@
 //! is used to send messages to remote peers.
 
 use crate::proto;
-use crate::record::{self, Record};
+use crate::record_priv::{self, Record};
 use asynchronous_codec::Framed;
 use bytes::BytesMut;
 use codec::UviBytes;
@@ -291,13 +291,13 @@ pub enum KadRequestMsg {
     /// this key.
     GetProviders {
         /// Identifier being searched.
-        key: record::Key,
+        key: record_priv::Key,
     },
 
     /// Indicates that this list of providers is known for this key.
     AddProvider {
         /// Key for which we should add providers.
-        key: record::Key,
+        key: record_priv::Key,
         /// Known provider for this key.
         provider: KadPeer,
     },
@@ -305,7 +305,7 @@ pub enum KadRequestMsg {
     /// Request to get a value from the dht records.
     GetValue {
         /// The key we are searching for.
-        key: record::Key,
+        key: record_priv::Key,
     },
 
     /// Request to put a value into the dht records.
@@ -343,7 +343,7 @@ pub enum KadResponseMsg {
     /// Response to a `PutValue`.
     PutValue {
         /// The key of the record.
-        key: record::Key,
+        key: record_priv::Key,
         /// Value of the record.
value: Vec, }, @@ -447,11 +447,11 @@ fn proto_to_req_msg(message: proto::Message) -> Result Ok(KadRequestMsg::PutValue { record }) } proto::MessageType::GET_VALUE => Ok(KadRequestMsg::GetValue { - key: record::Key::from(message.key), + key: record_priv::Key::from(message.key), }), proto::MessageType::FIND_NODE => Ok(KadRequestMsg::FindNode { key: message.key }), proto::MessageType::GET_PROVIDERS => Ok(KadRequestMsg::GetProviders { - key: record::Key::from(message.key), + key: record_priv::Key::from(message.key), }), proto::MessageType::ADD_PROVIDER => { // TODO: for now we don't parse the peer properly, so it is possible that we get @@ -463,7 +463,7 @@ fn proto_to_req_msg(message: proto::Message) -> Result .find_map(|peer| KadPeer::try_from(peer).ok()); if let Some(provider) = provider { - let key = record::Key::from(message.key); + let key = record_priv::Key::from(message.key); Ok(KadRequestMsg::AddProvider { key, provider }) } else { Err(invalid_data("AddProvider message with no valid peer.")) @@ -527,7 +527,7 @@ fn proto_to_resp_msg(message: proto::Message) -> Result { - let key = record::Key::from(message.key); + let key = record_priv::Key::from(message.key); let rec = message .record .ok_or_else(|| invalid_data("received PutValue message with no record"))?; @@ -545,7 +545,7 @@ fn proto_to_resp_msg(message: proto::Message) -> Result Result { - let key = record::Key::from(record.key); + let key = record_priv::Key::from(record.key); let value = record.value; let publisher = if !record.publisher.is_empty() { diff --git a/protocols/kad/src/query.rs b/protocols/kad/src/query.rs index e30235c9682..ab29c812b01 100644 --- a/protocols/kad/src/query.rs +++ b/protocols/kad/src/query.rs @@ -26,7 +26,7 @@ use peers::closest::{ use peers::fixed::FixedPeersIter; use peers::PeersIterState; -use crate::kbucket::{Key, KeyBytes}; +use crate::kbucket_priv::{Key, KeyBytes}; use crate::{ALPHA_VALUE, K_VALUE}; use either::Either; use fnv::FnvHashMap; diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs index a6611c1b962..11486796c29 100644 --- a/protocols/kad/src/query/peers/closest.rs +++ b/protocols/kad/src/query/peers/closest.rs @@ -20,7 +20,7 @@ use super::*; -use crate::kbucket::{Distance, Key, KeyBytes}; +use crate::kbucket_priv::{Distance, Key, KeyBytes}; use crate::{ALPHA_VALUE, K_VALUE}; use instant::Instant; use libp2p_identity::PeerId; diff --git a/protocols/kad/src/query/peers/closest/disjoint.rs b/protocols/kad/src/query/peers/closest/disjoint.rs index 61c9225745b..22a6051f3de 100644 --- a/protocols/kad/src/query/peers/closest/disjoint.rs +++ b/protocols/kad/src/query/peers/closest/disjoint.rs @@ -19,7 +19,7 @@ // DEALINGS IN THE SOFTWARE. 
use super::*; -use crate::kbucket::{Key, KeyBytes}; +use crate::kbucket_priv::{Key, KeyBytes}; use instant::Instant; use libp2p_identity::PeerId; use std::{ diff --git a/protocols/kad/src/record.rs b/protocols/kad/src/record_priv.rs similarity index 100% rename from protocols/kad/src/record.rs rename to protocols/kad/src/record_priv.rs diff --git a/protocols/kad/src/record/store.rs b/protocols/kad/src/record_priv/store.rs similarity index 100% rename from protocols/kad/src/record/store.rs rename to protocols/kad/src/record_priv/store.rs diff --git a/protocols/kad/src/record/store/memory.rs b/protocols/kad/src/record_priv/store/memory.rs similarity index 95% rename from protocols/kad/src/record/store/memory.rs rename to protocols/kad/src/record_priv/store/memory.rs index 1d4caab3bd7..40ad4405873 100644 --- a/protocols/kad/src/record/store/memory.rs +++ b/protocols/kad/src/record_priv/store/memory.rs @@ -20,7 +20,7 @@ use super::*; -use crate::kbucket; +use crate::kbucket_priv; use libp2p_identity::PeerId; use smallvec::SmallVec; use std::borrow::Cow; @@ -30,7 +30,7 @@ use std::iter; /// In-memory implementation of a `RecordStore`. pub struct MemoryStore { /// The identity of the peer owning the store. - local_key: kbucket::Key, + local_key: kbucket_priv::Key, /// The configuration of the store. config: MemoryStoreConfig, /// The stored (regular) records. @@ -79,7 +79,7 @@ impl MemoryStore { /// Creates a new `MemoryRecordStore` with the given configuration. pub fn with_config(local_id: PeerId, config: MemoryStoreConfig) -> Self { MemoryStore { - local_key: kbucket::Key::from(local_id), + local_key: kbucket_priv::Key::from(local_id), config, records: HashMap::default(), provided: HashSet::default(), @@ -160,10 +160,10 @@ impl RecordStore for MemoryStore { } else { // It is a new provider record for that key. let local_key = self.local_key.clone(); - let key = kbucket::Key::new(record.key.clone()); - let provider = kbucket::Key::from(record.provider); + let key = kbucket_priv::Key::new(record.key.clone()); + let provider = kbucket_priv::Key::from(record.provider); if let Some(i) = providers.iter().position(|p| { - let pk = kbucket::Key::from(p.provider); + let pk = kbucket_priv::Key::from(p.provider); provider.distance(&key) < pk.distance(&key) }) { // Insert the new provider. @@ -225,8 +225,8 @@ mod tests { Multihash::wrap(SHA_256_MH, &rand::thread_rng().gen::<[u8; 32]>()).unwrap() } - fn distance(r: &ProviderRecord) -> kbucket::Distance { - kbucket::Key::new(r.key.clone()).distance(&kbucket::Key::from(r.provider)) + fn distance(r: &ProviderRecord) -> kbucket_priv::Distance { + kbucket_priv::Key::new(r.key.clone()).distance(&kbucket_priv::Key::from(r.provider)) } #[test] @@ -255,7 +255,7 @@ mod tests { #[test] fn providers_ordered_by_distance_to_key() { - fn prop(providers: Vec>) -> bool { + fn prop(providers: Vec>) -> bool { let mut store = MemoryStore::new(PeerId::random()); let key = Key::from(random_multihash());
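Taken together, the `lib.rs` changes above keep the old module paths compiling behind a deprecation warning while exposing the types most callers need from the crate root, notably `RecordKey` and `KBucketKey`. Below is a minimal migration sketch for downstream code, assuming the crate is consumed as `libp2p_kad`; the helper functions are purely illustrative and not part of this PR.

```rust
use libp2p_identity::PeerId;
// Old paths still resolve but now emit deprecation warnings:
//   use libp2p_kad::record::Key;
//   use libp2p_kad::kbucket::Key as KBucketKey;
// New crate-root re-exports added in this PR:
use libp2p_kad::{KBucketKey, RecordKey};

// `RecordKey` is the former `record::Key`, so existing conversions such as
// `From<Vec<u8>>` keep working unchanged.
fn as_record_key(bytes: Vec<u8>) -> RecordKey {
    RecordKey::from(bytes)
}

// `KBucketKey` is the former `kbucket::Key`; XOR-distance comparisons are unchanged.
fn as_dht_key(peer: PeerId) -> KBucketKey<PeerId> {
    KBucketKey::from(peer)
}
```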
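Code that cannot migrate immediately keeps building: `#[deprecated]` on a re-exporting `pub mod` only warns when the old path is spelled out, and that warning can be scoped with the standard `#[allow(deprecated)]` attribute. A small transitional sketch, with a hypothetical function, assuming the same `From<Vec<u8>>` conversion as above:

```rust
// Still names the deprecated `record` module path; the attribute keeps builds
// that deny warnings from failing while the migration is in progress.
#[allow(deprecated)]
fn legacy_record_key(bytes: Vec<u8>) -> libp2p_kad::record::Key {
    libp2p_kad::record::Key::from(bytes)
}
```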