From fd4cb438a6d23fa2f25e3b2cd0286b4d274e4484 Mon Sep 17 00:00:00 2001
From: Aleksandr Borzunov
Date: Tue, 18 Jul 2023 01:59:10 +0000
Subject: [PATCH] Update config.show_route param

---
 src/petals/client/routing/sequence_manager.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/petals/client/routing/sequence_manager.py b/src/petals/client/routing/sequence_manager.py
index 7f785e71d..7f7adc3f3 100644
--- a/src/petals/client/routing/sequence_manager.py
+++ b/src/petals/client/routing/sequence_manager.py
@@ -36,7 +36,7 @@ class SequenceManagerConfig:
     dht_prefix: Optional[str] = None  # a prefix for all dht keys that correspond to this model (default: model name)
     daemon_startup_timeout: int = 60  # timeout for the libp2p daemon connecting to initial peers
 
-    show_route: str = "inference"  # show chosen route through servers. one of ["no", "inference", "always"]
+    show_route: Union[str, bool] = "inference"  # show chosen route through servers. one of [False, "inference", True]
     allowed_servers: Optional[Collection[Union[PeerID, str]]] = None  # if defined, send requests only to these servers
     use_server_to_server: bool = True  # Use direct server-to-server communication
 
@@ -167,7 +167,7 @@ def make_sequence(
         else:
             raise RuntimeError(f"Unexpected mode {mode}")
 
-        if self.config.show_route == "always" or (mode == "min_latency" and self.config.show_route == "inference"):
+        if self.config.show_route is True or (mode == "min_latency" and self.config.show_route == "inference"):
             route_repr = " => ".join(
                 [f"{span.start}:{span.end} via …{str(span.peer_id)[-6:]}" for span in span_sequence]
             )
@@ -198,7 +198,8 @@ def _make_sequence_with_min_latency(
 
         path = dijkstar.find_path(graph, "start", "end")
         logger.debug(f"Path info: {path}")
-        logger.debug(f"Expected speed: {1 / path.total_cost:.1f} steps/sec")
+        if start_index == 0 and end_index == len(self):
+            logger.debug(f"Expected speed: {1 / path.total_cost:.1f} steps/sec")
 
         span_sequence = []
         for peer_id, block_idx in path.nodes[1:-1]: