From a5bd07186abf5b726b5a7b2e15d9dec5bc139d54 Mon Sep 17 00:00:00 2001 From: Yuhong Wen Date: Fri, 9 Aug 2024 16:08:17 -0400 Subject: [PATCH 01/26] app_opt scan changes. (#2781) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- docs/programming_guide/component_configuration.rst | 2 +- docs/programming_guide/execution_api_type/executor.rst | 2 +- docs/resources/init_weights_1_config_fed_server.json | 2 +- .../app/config/config_fed_client.json | 6 +++--- .../app/config/config_fed_server.json | 10 +++++----- .../app/config/config_fed_server.conf | 2 +- .../job/app/config/config_fed_server.json | 2 +- .../job_multi_gpu/app/config/config_fed_client.json | 2 +- .../job_multi_gpu/app/config/config_fed_server.json | 2 +- 9 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/programming_guide/component_configuration.rst b/docs/programming_guide/component_configuration.rst index a257316eb2..6fcbd59073 100644 --- a/docs/programming_guide/component_configuration.rst +++ b/docs/programming_guide/component_configuration.rst @@ -139,7 +139,7 @@ For example: { "id": "shareable_generator", - "name": "PTFedOptModelShareableGenerator", + "path": "nvflare.app_opt.pt.fedopt.PTFedOptModelShareableGenerator", "args": { "device": "cpu", "source_model": "model", diff --git a/docs/programming_guide/execution_api_type/executor.rst b/docs/programming_guide/execution_api_type/executor.rst index 45d4bea8d3..2c50c69245 100644 --- a/docs/programming_guide/execution_api_type/executor.rst +++ b/docs/programming_guide/execution_api_type/executor.rst @@ -93,7 +93,7 @@ processes to use. "local_epochs": 5, "steps_aggregation": 0, "model_reader_writer": { - "name": "PTModelReaderWriter" + "path": "nvflare.app_opt.pt.model_reader_writer.PTModelReaderWriter" } } } diff --git a/docs/resources/init_weights_1_config_fed_server.json b/docs/resources/init_weights_1_config_fed_server.json index a4b8708bf1..5a9703e3a9 100644 --- a/docs/resources/init_weights_1_config_fed_server.json +++ b/docs/resources/init_weights_1_config_fed_server.json @@ -8,7 +8,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor" + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor" }, { "id": "shareable_generator", diff --git a/integration/monai/examples/spleen_ct_segmentation_local/jobs/spleen_ct_segmentation_he/app/config/config_fed_client.json b/integration/monai/examples/spleen_ct_segmentation_local/jobs/spleen_ct_segmentation_he/app/config/config_fed_client.json index 1ba2acd211..357aebeafd 100644 --- a/integration/monai/examples/spleen_ct_segmentation_local/jobs/spleen_ct_segmentation_he/app/config/config_fed_client.json +++ b/integration/monai/examples/spleen_ct_segmentation_local/jobs/spleen_ct_segmentation_he/app/config/config_fed_client.json @@ -21,7 +21,7 @@ "tasks": ["train"], "filters":[ { - "name": "HEModelEncryptor", + "path": "nvflare.app_opt.he.model_encryptor.HEModelEncryptor", "args": { "weigh_by_local_iter": true } @@ -32,7 +32,7 @@ "tasks": ["submit_model"], "filters":[ { - "name": "HEModelEncryptor", + "path": "nvflare.app_opt.he.model_encryptor.HEModelEncryptor", "args": { "weigh_by_local_iter": false } @@ -45,7 +45,7 @@ "tasks": ["train", "validate"], "filters":[ { - "name": "HEModelDecryptor", + "path": "nvflare.app_opt.he.model_encryptor.HEModelDecryptor", "args": { } } diff --git a/integration/monai/examples/spleen_ct_segmentation_local/jobs/spleen_ct_segmentation_he/app/config/config_fed_server.json 
b/integration/monai/examples/spleen_ct_segmentation_local/jobs/spleen_ct_segmentation_he/app/config/config_fed_server.json index 1e1aa3e0e1..03f2b67976 100644 --- a/integration/monai/examples/spleen_ct_segmentation_local/jobs/spleen_ct_segmentation_he/app/config/config_fed_server.json +++ b/integration/monai/examples/spleen_ct_segmentation_local/jobs/spleen_ct_segmentation_he/app/config/config_fed_server.json @@ -9,7 +9,7 @@ "components": [ { "id": "serialize_filter", - "name": "HEModelSerializeFilter", + "path": "nvflare.app_opt.he.model_serialize_filter.HEModelSerializeFilter", "args": {} }, { @@ -22,12 +22,12 @@ }, { "id": "shareable_generator", - "name": "HEModelShareableGenerator", + "path": "nvflare.app_opt.he.model_sharable_generator.HEModelShareableGenerator", "args": {} }, { "id": "aggregator", - "name": "HEInTimeAccumulateWeightedAggregator", + "path": "nvflare.app_opt.he.intime_accumulate_model_aggregator.HEInTimeAccumulateWeightedAggregator", "args": { "weigh_by_local_iter": false } @@ -39,7 +39,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } @@ -68,7 +68,7 @@ }, { "id": "cross_site_model_eval", - "name": "HECrossSiteModelEval", + "path": "nvflare.app_opt.he.cross_site_model_eval.HECrossSiteModelEval", "args": { "model_locator_id": "model_locator", "submit_model_timeout": 600, diff --git a/integration/monai/examples/spleen_ct_segmentation_local/jobs/spleen_ct_segmentation_local/app/config/config_fed_server.conf b/integration/monai/examples/spleen_ct_segmentation_local/jobs/spleen_ct_segmentation_local/app/config/config_fed_server.conf index 43650a1a9d..62842a706f 100644 --- a/integration/monai/examples/spleen_ct_segmentation_local/jobs/spleen_ct_segmentation_local/app/config/config_fed_server.conf +++ b/integration/monai/examples/spleen_ct_segmentation_local/jobs/spleen_ct_segmentation_local/app/config/config_fed_server.conf @@ -33,7 +33,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } diff --git a/integration/monai/examples/spleen_ct_segmentation_sim/job/app/config/config_fed_server.json b/integration/monai/examples/spleen_ct_segmentation_sim/job/app/config/config_fed_server.json index 581e3a8c26..5ad75d3803 100644 --- a/integration/monai/examples/spleen_ct_segmentation_sim/job/app/config/config_fed_server.json +++ b/integration/monai/examples/spleen_ct_segmentation_sim/job/app/config/config_fed_server.json @@ -33,7 +33,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } diff --git a/integration/monai/examples/spleen_ct_segmentation_sim/job_multi_gpu/app/config/config_fed_client.json b/integration/monai/examples/spleen_ct_segmentation_sim/job_multi_gpu/app/config/config_fed_client.json index 86a2d86f6b..c542fd4810 100644 --- a/integration/monai/examples/spleen_ct_segmentation_sim/job_multi_gpu/app/config/config_fed_client.json +++ b/integration/monai/examples/spleen_ct_segmentation_sim/job_multi_gpu/app/config/config_fed_client.json @@ -8,7 +8,7 @@ ], "executor": { "id": "executor", - "name": "PTMultiProcessExecutor", + "path": "nvflare.app_opt.pt.multi_process_executor.PTMultiProcessExecutor", "args": { "executor_id": "client_algo_executor", "num_of_processes": 2, diff --git 
a/integration/monai/examples/spleen_ct_segmentation_sim/job_multi_gpu/app/config/config_fed_server.json b/integration/monai/examples/spleen_ct_segmentation_sim/job_multi_gpu/app/config/config_fed_server.json index bbc8a5c23d..9e9fc7328d 100644 --- a/integration/monai/examples/spleen_ct_segmentation_sim/job_multi_gpu/app/config/config_fed_server.json +++ b/integration/monai/examples/spleen_ct_segmentation_sim/job_multi_gpu/app/config/config_fed_server.json @@ -33,7 +33,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } From 387c470568f7afc5e883dd6196e6e0283a7b3f95 Mon Sep 17 00:00:00 2001 From: Zhihong Zhang <100308595+nvidianz@users.noreply.github.com> Date: Fri, 9 Aug 2024 16:43:14 -0400 Subject: [PATCH 02/26] Added error handling for XGB_CONFIGURED event (#2780) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Updated FOBS readme to add DatumManager, added agrpcs as secure scheme * Added error handling in XGB_CONFIGURED event handler * Fixed formatting errors * Removed some redundant log entries * Addressed PR comments --------- Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- .../adaptors/grpc_client_adaptor.py | 8 ++--- .../xgboost/histogram_based_v2/defs.py | 32 +++++++++++++------ .../xgboost/histogram_based_v2/executor.py | 12 ++++--- .../histogram_based_v2/sec/client_handler.py | 30 ++++++++++------- nvflare/fuel/f3/streaming/byte_receiver.py | 2 +- 5 files changed, 54 insertions(+), 30 deletions(-) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py index 28573d4e53..393c0c0966 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py @@ -49,7 +49,7 @@ def initialize(self, fl_ctx: FLContext): self._workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT) run_number = fl_ctx.get_prop(FLContextKey.CURRENT_RUN) self._run_dir = self._workspace.get_run_dir(run_number) - self.engine = engine + self.engine = fl_ctx.get_engine() def _start_client(self, server_addr: str, fl_ctx: FLContext): """Start the XGB client runner in a separate thread or separate process based on config. 
@@ -212,10 +212,10 @@ def _check_duplicate_seq(self, op: str, rank: int, seq: int): with self._lock: event = self._pending_req.get((rank, seq), None) if event: - self.log_info(fl_ctx, f"Duplicate seq {op=} {rank=} {seq=}, wait till original req is done") + self.logger.info(f"Duplicate seq {op=} {rank=} {seq=}, wait till original req is done") event.wait(DUPLICATE_REQ_MAX_HOLD_TIME) time.sleep(1) # To ensure the first request is returned first - self.log_info(fl_ctx, f"Duplicate seq {op=} {rank=} {seq=} returned with empty buffer") + self.logger.info(f"Duplicate seq {op=} {rank=} {seq=} returned with empty buffer") return True with self._lock: @@ -231,4 +231,4 @@ def _finish_pending_req(self, op: str, rank: int, seq: int): event.set() del self._pending_req[(rank, seq)] - self.log_info(fl_ctx, f"Request seq {op=} {rank=} {seq=} finished processing") + self.logger.info(f"Request seq {op=} {rank=} {seq=} finished processing") diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py index e392d6877a..469f689392 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py @@ -88,6 +88,7 @@ class Constant: PARAM_KEY_REQUEST = "xgb.request" PARAM_KEY_EVENT = "xgb.event" PARAM_KEY_TRAINING_MODE = "xgb.training_mode" + PARAM_KEY_CONFIG_ERROR = "xgb.config_error" RUNNER_CTX_SERVER_ADDR = "server_addr" RUNNER_CTX_PORT = "port" @@ -128,16 +129,29 @@ class SplitMode: COL = 1 +class TrainingMode: + # Non-secure mode + H = "h" + HORIZONTAL = "horizontal" + V = "v" + VERTICAL = "vertical" + # Secure mode + HS = "hs" + HORIZONTAL_SECURE = "horizontal_secure" + VS = "VS" + VERTICAL_SECURE = "vertical_secure" + + # Mapping of text training mode to split mode TRAINING_MODE_MAPPING = { - "h": SplitMode.ROW, - "horizontal": SplitMode.ROW, - "v": SplitMode.COL, - "vertical": SplitMode.COL, - "hs": SplitMode.ROW, - "horizontal_secure": SplitMode.ROW, - "vs": SplitMode.COL, - "vertical_secure": SplitMode.COL, + TrainingMode.H: SplitMode.ROW, + TrainingMode.HORIZONTAL: SplitMode.ROW, + TrainingMode.V: SplitMode.COL, + TrainingMode.VERTICAL: SplitMode.COL, + TrainingMode.HS: SplitMode.ROW, + TrainingMode.HORIZONTAL_SECURE: SplitMode.ROW, + TrainingMode.VS: SplitMode.COL, + TrainingMode.VERTICAL_SECURE: SplitMode.COL, } -SECURE_TRAINING_MODES = {"hs", "horizontal_secure", "vs", "vertical_secure"} +SECURE_TRAINING_MODES = {TrainingMode.HS, TrainingMode.HORIZONTAL_SECURE, TrainingMode.VS, TrainingMode.VERTICAL_SECURE} diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/executor.py b/nvflare/app_opt/xgboost/histogram_based_v2/executor.py index 769779656b..14008f6bfc 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/executor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/executor.py @@ -14,7 +14,7 @@ from nvflare.apis.event_type import EventType from nvflare.apis.executor import Executor -from nvflare.apis.fl_constant import FLContextKey, ReturnCode +from nvflare.apis.fl_constant import FLContextKey, ReservedKey, ReturnCode from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable, make_reply from nvflare.apis.signal import Signal @@ -91,7 +91,6 @@ def handle_event(self, event_type: str, fl_ctx: FLContext): self.abort_signal.trigger(True) def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable: - engine = fl_ctx.get_engine() if task_name == self.configure_task_name: # there are two important config 
params for the client: # the rank assigned to the client; @@ -123,8 +122,13 @@ def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort shareable, fl_ctx, ) - engine.fire_event(Constant.EVENT_XGB_JOB_CONFIGURED, fl_ctx) - return make_reply(ReturnCode.OK) + self.fire_event(Constant.EVENT_XGB_JOB_CONFIGURED, fl_ctx) + config_error = fl_ctx.get_prop(Constant.PARAM_KEY_CONFIG_ERROR, None) + if not config_error: + return make_reply(ReturnCode.OK) + else: + self.log_error(fl_ctx, f"Config error: {config_error}") + return make_reply(ReturnCode.SERVICE_UNAVAILABLE, {ReservedKey.EXCEPTIONS: config_error}) elif task_name == self.start_task_name: # start adaptor try: diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py index 017fc8ae33..0f90c9b22c 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py @@ -16,10 +16,11 @@ from nvflare.apis.event_type import EventType from nvflare.apis.fl_component import FLComponent +from nvflare.apis.fl_constant import FLContextKey from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable from nvflare.app_opt.xgboost.histogram_based_v2.aggr import Aggregator -from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant +from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant, TrainingMode from nvflare.app_opt.xgboost.histogram_based_v2.sec.dam import DamDecoder from nvflare.app_opt.xgboost.histogram_based_v2.sec.data_converter import FeatureAggregationResult from nvflare.app_opt.xgboost.histogram_based_v2.sec.partial_he.adder import Adder @@ -49,8 +50,10 @@ from nvflare.app_opt.he.homomorphic_encrypt import load_tenseal_context_from_workspace tenseal_imported = True -except Exception: + tenseal_error = None +except Exception as ex: tenseal_imported = False + tenseal_error = f"Import error: {ex}" class ClientSecurityHandler(SecurityHandler): @@ -402,22 +405,25 @@ def _process_after_all_gather_v_horizontal(self, fl_ctx: FLContext): fl_ctx.set_prop(key=Constant.PARAM_KEY_RCV_BUF, value=result, private=True, sticky=False) def handle_event(self, event_type: str, fl_ctx: FLContext): + global tenseal_error if event_type == Constant.EVENT_XGB_JOB_CONFIGURED: - training_mode = fl_ctx.get_prop(Constant.PARAM_KEY_TRAINING_MODE) - if training_mode in {"vertical_secure", "vs"} and ipcl_imported: + task_data = fl_ctx.get_prop(FLContextKey.TASK_DATA) + training_mode = task_data.get(Constant.CONF_KEY_TRAINING_MODE) + if training_mode in {TrainingMode.VS, TrainingMode.VERTICAL_SECURE} and ipcl_imported: self.public_key, self.private_key = generate_keys(self.key_length) self.encryptor = Encryptor(self.public_key, self.num_workers) self.decrypter = Decrypter(self.private_key, self.num_workers) self.adder = Adder(self.num_workers) - - try: - if tenseal_imported: + elif training_mode in {TrainingMode.HS, TrainingMode.HORIZONTAL_SECURE}: + if not tenseal_imported: + fl_ctx.set_prop(Constant.PARAM_KEY_CONFIG_ERROR, tenseal_error, private=True, sticky=False) + return + try: self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx) - else: - self.debug(fl_ctx, "Tenseal module not loaded, horizontal secure XGBoost is not supported") - except Exception as ex: - self.error(fl_ctx, f"Can't load tenseal context, horizontal secure XGBoost is not supported: {ex}") - self.tenseal_context = None + except 
Exception as err: + tenseal_error = f"Can't load tenseal context: {err}" + self.tenseal_context = None + fl_ctx.set_prop(Constant.PARAM_KEY_CONFIG_ERROR, tenseal_error, private=True, sticky=False) elif event_type == EventType.END_RUN: self.tenseal_context = None else: diff --git a/nvflare/fuel/f3/streaming/byte_receiver.py b/nvflare/fuel/f3/streaming/byte_receiver.py index 4fe0de924e..08a815ca38 100644 --- a/nvflare/fuel/f3/streaming/byte_receiver.py +++ b/nvflare/fuel/f3/streaming/byte_receiver.py @@ -173,7 +173,7 @@ def stop_task(self, task: RxTask, error: StreamError = None, notify=True): if error: if task.headers: - optional = task.headers.get(MessageHeaderKey.OPTIONAL, False) + optional = task.headers.get(StreamHeaderKey.OPTIONAL, False) else: optional = False From 564d959591d66083c669acf146dc47a1ac981075 Mon Sep 17 00:00:00 2001 From: Sean Yang Date: Mon, 12 Aug 2024 10:26:54 -0700 Subject: [PATCH 03/26] fix for if torch and tensorflow are both installed (#2775) --- nvflare/job_config/fed_job.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/nvflare/job_config/fed_job.py b/nvflare/job_config/fed_job.py index 4cfd0306b3..4cf5c1d630 100644 --- a/nvflare/job_config/fed_job.py +++ b/nvflare/job_config/fed_job.py @@ -260,19 +260,17 @@ def to( else: # TODO: handle other persistors added_model = False # Check different models framework types and add corresponding persistor - if torch_ok: - if isinstance(obj, nn.Module): # if model, create a PT persistor - component = PTFileModelPersistor(model=obj) - self._deploy_map[target].app.add_component("persistor", component) - - component = PTFileModelLocator(pt_persistor_id="persistor") - self._deploy_map[target].app.add_component("model_locator", component) - added_model = True - elif tf_ok: - if isinstance(obj, tf.keras.Model): # if model, create a TF persistor - component = TFModelPersistor(model=obj) - self._deploy_map[target].app.add_component("persistor", component) - added_model = True + if torch_ok and isinstance(obj, nn.Module): # if model, create a PT persistor + component = PTFileModelPersistor(model=obj) + self._deploy_map[target].app.add_component("persistor", component) + + component = PTFileModelLocator(pt_persistor_id="persistor") + self._deploy_map[target].app.add_component("model_locator", component) + added_model = True + elif tf_ok and isinstance(obj, tf.keras.Model): # if model, create a TF persistor + component = TFModelPersistor(model=obj) + self._deploy_map[target].app.add_component("persistor", component) + added_model = True if not added_model: # if it wasn't a model, add as component self._deploy_map[target].add_component(obj, id) From 9aba299ea8dfde52c7412da006990f539d580235 Mon Sep 17 00:00:00 2001 From: Sean Yang Date: Mon, 12 Aug 2024 10:45:52 -0700 Subject: [PATCH 04/26] Add FedJobAPI documentation (#2718) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add JobAPI docs * 2.5 misc doc updates * address comments --------- Co-authored-by: Yuan-Ting Hsieh (謝沅廷) Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> --- docs/getting_started.rst | 4 + docs/programming_guide.rst | 1 + docs/programming_guide/execution_api_type.rst | 3 +- .../execution_api_type/client_api.rst | 65 +++-- docs/programming_guide/fed_job_api.rst | 252 ++++++++++++++++++ docs/publications_and_talks.rst | 1 + docs/real_world_fl/cloud_deployment.rst | 5 + docs/resources/Dockerfile | 4 +- nvflare/job_config/fed_job.py | 42 ++- 
tests/unit_test/job_config/fed_job_test.py | 11 +- 10 files changed, 341 insertions(+), 47 deletions(-) create mode 100644 docs/programming_guide/fed_job_api.rst diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 171a5e6c8c..6d6a150eff 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -175,6 +175,10 @@ Using any text editor to edit the Dockerfile and paste the following: .. literalinclude:: resources/Dockerfile :language: dockerfile +.. note:: + + For nvflare version 2.3 set PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:23.02-py3 + We can then build the new container by running docker build in the directory containing this Dockerfile, for example tagging it nvflare-pt: diff --git a/docs/programming_guide.rst b/docs/programming_guide.rst index 28e8b7992b..1053f84270 100644 --- a/docs/programming_guide.rst +++ b/docs/programming_guide.rst @@ -35,6 +35,7 @@ Please refer to :ref:`application` for more details. .. toctree:: :maxdepth: 1 + programming_guide/fed_job_api programming_guide/workflows_and_controllers programming_guide/execution_api_type programming_guide/fl_model diff --git a/docs/programming_guide/execution_api_type.rst b/docs/programming_guide/execution_api_type.rst index 77baf7806a..9f39978307 100644 --- a/docs/programming_guide/execution_api_type.rst +++ b/docs/programming_guide/execution_api_type.rst @@ -35,7 +35,8 @@ The :ref:`client_api` provides the most straightforward way to write FL code, and can easily be used to convert centralized code with minimal code changes. The Client API uses the :class:`FLModel` object for data transfer and supports common tasks such as train, validate, and submit_model. -Additionally, options for using decorators or PyTorch Lightning are also available. +Options for using decorators or PyTorch Lightning are also available. +For Client API executors, the in-process and external-process executors are provided for different use cases. We recommend users start with the Client API, and to consider the other types for more specific cases as required. diff --git a/docs/programming_guide/execution_api_type/client_api.rst b/docs/programming_guide/execution_api_type/client_api.rst index e5ed5cb7f0..dee674477c 100644 --- a/docs/programming_guide/execution_api_type/client_api.rst +++ b/docs/programming_guide/execution_api_type/client_api.rst @@ -167,20 +167,26 @@ Client API communication patterns We offer various implementations of Client APIs tailored to different scenarios, each linked with distinct communication patterns. -Broadly, we present in-process and sub-process executors. The in-process executor, slated for release in NVFlare 2.5.0, -entails both training scripts and client executor operating within the same process. The training scripts will be launched once -at the event of START_RUN. The training scripts keep on running till the END_RUN event. Communication between them occurs -through an in-memory databus. +In-process Client API +--------------------- -On the other hand, the LauncherExecutor employs a sub-process to execute training scripts, leading to the client executor -and training scripts residing in separate processes. The "launch_once" option is provided to the SubprocessLauncher to control +The in-process executor entails both the training script and client executor operating within the same process. +The training script will be launched once at the event of START_RUN and will keep on running till the END_RUN event. +Communication between them occurs through an efficient in-memory databus. 
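For orientation, a minimal sketch of such a training script is shown below (illustrative only, not part of this change; ``local_train`` and ``evaluate`` are user-code placeholders, and the ``nvflare.client`` calls ``init``, ``is_running``, ``receive``, ``send``, and ``FLModel`` are assumed to be the standard Client API entry points):

.. code-block:: python

    import nvflare.client as flare

    def local_train(params):
        # placeholder: run local training starting from the received global weights
        return params

    def evaluate(params):
        # placeholder: compute a local metric for the received global weights
        return 0.0

    flare.init()  # initialize the Client API for this training script

    while flare.is_running():
        input_model = flare.receive()               # FLModel sent by the server
        accuracy = evaluate(input_model.params)     # validate the received global model
        new_params = local_train(input_model.params)
        # send the updated weights (and metric) back via the in-memory databus or pipe
        flare.send(flare.FLModel(params=new_params, metrics={"accuracy": accuracy}))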
+ +When the training process involves either a single GPU or no GPUs, and the training script doesn't integrate third-party +training systems, the in-process executor is preferable (when available). + +Sub-process Client API +---------------------- + +On the other hand, the LauncherExecutor employs the SubprocessLauncher to use a sub-process to execute the training script. This results in the client executor +and training script residing in separate processes. The "launch_once" option is provided to the SubprocessLauncher to control whether to launch the external script everytime when getting the task from server, or just launch the script once at the event of START_RUN and keeps running till the END_RUN event. Communication between them is facilitated by either CellPipe (default) or FilePipe. -When the training process involves either a single GPU or no GPUs, and the training script doesn't integrate third-party -training systems, the in-process executor is preferable (when available). For scenarios involving multi-GPU training or -the utilization of external training infrastructure, opting for the Launcher executor might be more suitable. +For scenarios involving multi-GPU training or the utilization of external training infrastructure, opting for the Launcher executor might be more suitable. Choice of different Pipes @@ -203,34 +209,35 @@ Configuration Different configurations are available for each type of executor. -Definition lists: - in-process executor configuration - .. literalinclude:: ../../../job_templates/sag_pt_in_proc/config_fed_client.conf +--------------------------------- +This configuration specifically caters to PyTorch applications, providing serialization and deserialization +(aka Decomposers) for commonly used PyTorch objects. For non-PyTorch applications, the generic +:class:`InProcessClientAPIExecutor` can be employed. + +.. literalinclude:: ../../../job_templates/sag_pt_in_proc/config_fed_client.conf - This configuration specifically caters to PyTorch applications, providing serialization and deserialization - (aka Decomposers) for commonly used PyTorch objects. For non-PyTorch applications, the generic - ``InProcessClientAPIExecutor`` can be employed. subprocess launcher Executor configuration - In the config_fed_client in the FLARE app, in order to launch the training script we use the - :class:`SubprocessLauncher` component. - The defined ``script`` is invoked, and ``launch_once`` can be set to either - launch once for the whole job (launch_once = True), or launch a process for each task received from the server (launch_once = False) +------------------------------------------ +In the config_fed_client in the FLARE app, in order to launch the training script we use the +:class:`SubprocessLauncher` component. +The defined ``script`` is invoked, and ``launch_once`` can be set to either +launch once for the whole job (launch_once = True), or launch a process for each task received from the server (launch_once = False) - ``launch_once`` dictates how many times the training scripts are invoked during the overall training process. - When set to False, the executor essentially invokes ``python .py`` every round of training. - Typically, launch_once is set to True. +``launch_once`` dictates how many times the training scripts are invoked during the overall training process. +When set to False, the executor essentially invokes ``python .py`` every round of training. +Typically, launch_once is set to True. 
- A corresponding :class:`LauncherExecutor` - is used as the executor to handle the tasks and perform the data exchange using the pipe. - For the Pipe component we provide implementations of :class:`FilePipe` - and :class:`CellPipe`. +A corresponding :class:`ClientAPILauncherExecutor` +is used as the executor to handle the tasks and perform the data exchange using the pipe. +For the Pipe component we provide implementations of :class:`FilePipe` +and :class:`CellPipe`. - .. literalinclude:: ../../../job_templates/sag_pt/config_fed_client.conf +.. literalinclude:: ../../../job_templates/sag_pt/config_fed_client.conf - For example configurations, take a look at the :github_nvflare_link:`job_templates ` - directory for templates using the launcher and Client API. +For example configurations, take a look at the :github_nvflare_link:`job_templates ` +directory for templates using the launcher and Client API. .. note:: In that case that the user does not need to launch the process and instead diff --git a/docs/programming_guide/fed_job_api.rst b/docs/programming_guide/fed_job_api.rst new file mode 100644 index 0000000000..2754003d15 --- /dev/null +++ b/docs/programming_guide/fed_job_api.rst @@ -0,0 +1,252 @@ +.. _fed_job_api: + +########## +FedJob API +########## + +The FLARE :class:`FedJob` API allows users to Pythonically define and create job configurations. + +Core Concepts +============= + +* Use the :func:`to` routine to assign objects (e.g. controllers, executor, models, filters, components etc.) to the server or clients. +* Export the job to a configuration with :func:`export_job`. +* Run the job in the simulator with :func:`simulator_run`. + +Table overview of the :class:`FedJob` API: + +.. list-table:: FedJob + :widths: 25 35 50 + :header-rows: 1 + + * - API + - Description + - API Doc Link + * - to + - Assign object to target. + - :func:`to` + * - to_server + - Assign object to server. + - :func:`to_server` + * - to_clients + - Assign object to all clients. + - :func:`to_clients` + * - as_id + - Return generated uuid of object. Object will be added as component if referenced. + - :func:`as_id` + * - simulator_run + - Run the job with the simulator. + - :func:`simulator_run` + * - export_job + - Export the job configuration. + - :func:`export_job` + + +Here is an example of how to create a simple cifar10_fedavg job using the :class:`FedJob` API. +We assign a FedAvg controller and the initial PyTorch model to the server, and assign a ScriptExecutor for our training script to the clients. +Then we use the simulator to run the job: + +.. code-block:: python + + from src.net import Net + + from nvflare import FedAvg, FedJob, ScriptExecutor + + if __name__ == "__main__": + n_clients = 2 + num_rounds = 2 + train_script = "src/cifar10_fl.py" + + job = FedJob(name="cifar10_fedavg") + + # Define the controller workflow and send to server + controller = FedAvg( + num_clients=n_clients, + num_rounds=num_rounds, + ) + job.to_server(controller) + + # Define the initial global model and send to server + job.to_server(Net()) + + # Send executor to all clients + executor = ScriptExecutor( + task_script_path=train_script, task_script_args="" # f"--batch_size 32 --data_path /tmp/data/site-{i}" + ) + job.to_clients(executor) + + # job.export_job("/tmp/nvflare/jobs/job_config") + job.simulator_run("/tmp/nvflare/jobs/workdir", n_clients=n_clients) + + +Initializing the FedJob +======================= + +Initialize the :class:`FedJob` object with the following arguments: + +* ``name`` (str): for job name. 
+* ``min_clients`` (int): required for the job, will be set in the ``meta.json``.
+* ``mandatory_clients`` (List[str]): to run the job, will be set in the ``meta.json``.
+* ``key_metric`` (str): the metric used for global model selection, will be used by the preconfigured :class:`IntimeModelSelector`.
+
+Example:
+
+.. code-block:: python
+
+    job = FedJob(name="cifar10_fedavg", min_clients=2, mandatory_clients=["site-1", "site-2"], key_metric="accuracy")
+
+Assigning objects with :func:`to`
+=======================================================================
+
+Assign objects with :func:`to` for a specific ``target``,
+:func:`to_server` for the server, and
+:func:`to_clients` for all the clients.
+
+These functions have the following parameters which are used depending on the type of object:
+
+* ``obj`` (any): The object to be assigned. The obj will be given a default id if none is provided based on its type.
+* ``target`` (str): (For :func:`to`) The target location of the object. Can be “server” or a client name, e.g. “site-1”.
+* ``tasks`` (List[str]): If object is an Executor or Filter, optional list of tasks that should be handled. Defaults to None. If None, all tasks will be handled using [*].
+* ``gpu`` (int | List[int]): GPU index or list of GPU indices used for simulating the run on that target.
+* ``filter_type`` (FilterType): The type of filter used. Either FilterType.TASK_RESULT or FilterType.TASK_DATA.
+* ``id`` (int): Optional user-defined id for the object. Defaults to None and ID will automatically be assigned.
+
+.. note::
+
+    In order for the FedJob to use the values of arguments passed into the ``obj``, the arguments must be set as instance variables of the same name (or prefixed with ``_``) in the constructor.
+
+Below we cover in-depth how different types of objects are handled when using :func:`to`:
+
+Controller
+----------
+
+If the object is a :class:`Controller` sent to the server, the controller is added to the server app workflows.
+
+* If the ``key_metric`` is defined in the FedJob (see initialization), an :class:`IntimeModelSelector` widget will be added for best model selection.
+* A :class:`ValidationJsonGenerator` is automatically added for creating json validation results.
+* If PyTorch and TensorBoard are supported, then :class:`TBAnalyticsReceiver` is automatically added to receive analytics data to save to TensorBoard. Other types of receivers can be added as components with :func:`to`.
+
+Example:
+
+.. code-block:: python
+
+    controller = FedAvg(
+        num_clients=n_clients,
+        num_rounds=num_rounds,
+    )
+    job.to(controller, "server")
+
+If the object is a :class:`Controller` sent to a client, the controller is added to the client app components as a client-side controller.
+The controller can then be used by the :class:`ClientControllerExecutor`.
+
+
+Executor
+--------
+
+If the object is an :class:`Executor`, it must be sent to a client. The executor is added to the client app executors.
+
+* The ``tasks`` parameter specifies the tasks that the executor is defined to handle.
+* The ``gpu`` parameter specifies which gpus to use for simulating the run on the target.
+* If the object is a :class:`ScriptExecutor`, the task_script_path will be added to the external scripts to be included in the custom directory.
+* If the object is a :class:`ScriptLauncherExecutor`, the launch_script will be launched in a subprocess.
+  Corresponding :class:`SubprocessLauncher`, :class:`CellPipe`, :class:`MetricRelay`, and :class:`ExternalConfigurator` components will be automatically configured.
+* The :class:`ConvertToFedEvent` widget is automatically added to convert local events to federated events.
+
+Example:
+
+.. code-block:: python
+
+    executor = ScriptExecutor(task_script_path="src/cifar10_fl.py", task_script_args="")
+    job.to(executor, "site-1", tasks=["train"], gpu=0)
+
+
+Script (str)
+------------
+
+If the object is a str, it is treated as an external script and will be included in the custom directory.
+
+Example:
+
+.. code-block:: python
+
+    job.to("src/cifar10_fl.py", "site-1")
+
+
+Filter
+------
+
+If the object is a :class:`Filter`, users must specify the ``filter_type``
+as either FilterType.TASK_RESULT (flow from executor to controller) or FilterType.TASK_DATA (flow from controller to executor).
+
+The filter will be added to task_data_filters and task_result_filters accordingly and be applied to the specified ``tasks``.
+
+Example:
+
+.. code-block:: python
+
+    pp_filter = PercentilePrivacy(percentile=10, gamma=0.01)
+    job.to(pp_filter, "site-1", tasks=["train"], filter_type=FilterType.TASK_RESULT)
+
+
+Model
+-----
+
+If the object is a common model type, a corresponding persistor will automatically be configured with the model.
+
+For PyTorch models (``torch.nn.Module``) we add a :class:`PTFileModelPersistor` and
+:class:`PTFileModelLocator`, and for TensorFlow models (``tf.keras.Model``) we add a :class:`TFModelPersistor`.
+
+Example:
+
+.. code-block:: python
+
+    job.to(Net(), "server")
+
+For unsupported models, the model and persistor can be added as components.
+
+
+Components
+----------
+
+For any object that does not fall under any of the previous types, it is added as a component with ``id``.
+The ``id`` can be either specified as a parameter, or it will be automatically assigned. Components may reference other components by id.
+
+If an id generated by :func:`as_id` is referenced by another added object, the referenced object will also be added as a component.
+In the example below, comp2 is assigned to the server. Since comp1 was referenced in comp2 with :func:`as_id`, comp1 will also be added as a component to the server.
+
+Example:
+
+.. code-block:: python
+
+    comp1 = Component1()
+    comp2 = Component2(sub_component_id=job.as_id(comp1))
+    job.to(comp2, "server")
+
+
+Running the Job
+===============
+
+Simulator
+---------
+
+Run the FedJob with the simulator using :func:`simulator_run`, with the ``workspace``, ``n_clients``, and ``threads`` arguments.
+(Note: only set ``n_clients`` if you have not specified clients using :func:`to`)
+
+Example:
+
+.. code-block:: python
+
+    job.simulator_run(workspace="/tmp/nvflare/jobs/workdir", n_clients=2, threads=2)
+
+
+Export Configuration
+--------------------
+We can export the job configuration with :func:`export_job` to the ``job_root`` directory.
+
+Example:
+
+.. code-block:: python
+
+    job.export_job(job_root="/tmp/nvflare/jobs/job_config")
+
+Examples
+========
+
+To see examples of how the FedJob API can be used for different applications, refer to the :github_nvflare_link:`Getting Started ` examples.
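For orientation, the pieces above can be combined into one script roughly as follows (illustrative sketch only, not part of this change; the site names, script path, and filter settings are placeholder values, and the import locations of ``FilterType`` and ``PercentilePrivacy`` are assumed):

.. code-block:: python

    from src.net import Net

    from nvflare import FedAvg, FedJob, FilterType, ScriptExecutor
    from nvflare.app_common.filters import PercentilePrivacy

    job = FedJob(name="cifar10_fedavg", min_clients=2, key_metric="accuracy")

    # server side: workflow and initial global model
    job.to_server(FedAvg(num_clients=2, num_rounds=2))
    job.to_server(Net())

    # client side: one executor per site, plus a result filter on site-1
    for i in range(2):
        executor = ScriptExecutor(task_script_path="src/cifar10_fl.py", task_script_args="")
        job.to(executor, f"site-{i + 1}", tasks=["train"], gpu=0)

    pp_filter = PercentilePrivacy(percentile=10, gamma=0.01)
    job.to(pp_filter, "site-1", tasks=["train"], filter_type=FilterType.TASK_RESULT)

    # export the configuration and/or run it in the simulator
    job.export_job("/tmp/nvflare/jobs/job_config")
    job.simulator_run("/tmp/nvflare/jobs/workdir")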
diff --git a/docs/publications_and_talks.rst b/docs/publications_and_talks.rst index bd700723b2..422512da72 100644 --- a/docs/publications_and_talks.rst +++ b/docs/publications_and_talks.rst @@ -21,6 +21,7 @@ Publications: 2023 Publications: 2022 ------------------ +* **2022-11** `Federated Learning with Azure Machine Learning `__ (Video) * **2022-10** `Auto-FedRL: Federated Hyperparameter Optimization for Multi-institutional Medical Image Segmentation `__ (`ECCV 2022 `__) * **2022-10** `Joint Multi Organ and Tumor Segmentation from Partial Labels Using Federated Learning `__ (`DeCaF @ MICCAI 2022 `__) * **2022-10** `Split-U-Net: Preventing Data Leakage in Split Learning for Collaborative Multi-modal Brain Tumor Segmentation `__ (`DeCaF @ MICCAI 2022 `__) diff --git a/docs/real_world_fl/cloud_deployment.rst b/docs/real_world_fl/cloud_deployment.rst index 8ee2f6d4ec..3d438628e4 100644 --- a/docs/real_world_fl/cloud_deployment.rst +++ b/docs/real_world_fl/cloud_deployment.rst @@ -219,6 +219,11 @@ The configuration file provided is formatted as follows: EC2_TYPE=t2.small REGION=us-west-2 +.. note:: + + For the AWS AMIs, we recommend the following images for each version of Ubuntu: + 20.04:ami-04bad3c587fe60d89, 22.04:ami-03c983f9003cb9cd1, 24.04:ami-0406d1fdd021121cd + Deploy FL Client in the Cloud ============================= As an organization admin for an FL project, you are responsible for setting up your FL Client system. You will receive a Client startup kit either from email, sftp diff --git a/docs/resources/Dockerfile b/docs/resources/Dockerfile index c2c992a651..baefd870d8 100644 --- a/docs/resources/Dockerfile +++ b/docs/resources/Dockerfile @@ -1,7 +1,7 @@ -ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:23.02-py3 +ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:24.03-py3 FROM ${PYTORCH_IMAGE} -ARG NVF_VERSION=2.3 +ARG NVF_VERSION=2.4 ENV NVF_BRANCH=${NVF_VERSION} RUN python3 -m pip install -U pip diff --git a/nvflare/job_config/fed_job.py b/nvflare/job_config/fed_job.py index 4cf5c1d630..5fe038a788 100644 --- a/nvflare/job_config/fed_job.py +++ b/nvflare/job_config/fed_job.py @@ -14,7 +14,7 @@ import os.path import re import uuid -from typing import Any, List, Union +from typing import Any, List, Optional, Union from nvflare.apis.executor import Executor from nvflare.apis.filter import Filter @@ -164,7 +164,13 @@ def _create_server_app(self): class FedJob: - def __init__(self, name="fed_job", min_clients=1, mandatory_clients=None, key_metric="accuracy") -> None: + def __init__( + self, + name: str = "fed_job", + min_clients: int = 1, + mandatory_clients: Optional[List[str]] = None, + key_metric: str = "accuracy", + ) -> None: """FedJob allows users to generate job configurations in a Pythonic way. The `to()` routine allows users to send different components to either the server or clients. @@ -199,9 +205,9 @@ def to( """assign an object to a target (server or clients). Args: - obj: The object to be assigned. The obj will be given a default `id` if non is provided based on its type. - target: The target location of th object. Can be "server" or a client name, e.g. "site-1". - tasks: In case object is an `Executor`, optional list of tasks the executor should handle. + obj: The object to be assigned. The obj will be given a default `id` if none is provided based on its type. + target: The target location of the object. Can be "server" or a client name, e.g. "site-1". + tasks: In case object is an `Executor` or `Filter`, optional list of tasks that should be handled. Defaults to `None`. 
If `None`, all tasks will be handled using `[*]`. gpu: GPU index or list of GPU indices used for simulating the run on that target. filter_type: The type of filter used. Either `FilterType.TASK_RESULT` or `FilterType.TASK_DATA`. @@ -213,10 +219,21 @@ def to( self._validate_target(target) if isinstance(obj, Controller): - if target not in self._deploy_map: - self._deploy_map[target] = ControllerApp(key_metric=self.key_metric) - self._deploy_map[target].add_controller(obj, id) + if target != "server": # add client-side controllers as components + if target not in self._deploy_map: + raise ValueError( + f"{target} doesn't have an `Executor`. Deploy one first before adding client-side controllers!" + ) + self._deploy_map[target].add_component(obj, id) + else: + if target not in self._deploy_map: + self._deploy_map[target] = ControllerApp(key_metric=self.key_metric) + self._deploy_map[target].add_controller(obj, id) elif isinstance(obj, Executor): + if target == "server": + raise ValueError( + f"`Executor` must be assigned to a client, but tried to assign `Executor` {obj} to 'server'!" + ) if target not in self._deploy_map: self._deploy_map[target] = ExecutorApp() if isinstance(obj, ScriptExecutor): @@ -324,7 +341,8 @@ def to_clients( self.to(obj=obj, target=ALL_SITES, tasks=tasks, filter_type=filter_type, id=id) - def as_id(self, obj: Any): + def as_id(self, obj: Any) -> str: + """Generate and return uuid for `obj`. If this id is referenced by another added object, this `obj` will also be added as a component.""" id = str(uuid.uuid4()) self._components[id] = obj return id @@ -393,11 +411,13 @@ def _set_all_apps(self): self._deployed = True - def export_job(self, job_root): + def export_job(self, job_root: str): + """Export job config to `job_root` directory with name `self.job_name`.""" self._set_all_apps() self.job.generate_job_config(job_root) - def simulator_run(self, workspace, n_clients: int = None, threads: int = None): + def simulator_run(self, workspace: str, n_clients: int = None, threads: int = None): + """Run the job with the simulator with the `workspace` using `n_clients` and `threads`.""" self._set_all_apps() if ALL_SITES in self.clients and not n_clients: diff --git a/tests/unit_test/job_config/fed_job_test.py b/tests/unit_test/job_config/fed_job_test.py index ffcd739730..e1feb9558d 100644 --- a/tests/unit_test/job_config/fed_job_test.py +++ b/tests/unit_test/job_config/fed_job_test.py @@ -15,19 +15,22 @@ import pytest from nvflare import FedAvg +from nvflare.app_common.abstract.model_learner import ModelLearner +from nvflare.app_common.executors.model_learner_executor import ModelLearnerExecutor from nvflare.job_config.fed_job import FedJob class TestFedJob: def test_validate_targets(self): job = FedJob() - component = FedAvg() + controller = FedAvg() + executor = ModelLearnerExecutor(learner_id=job.as_id(ModelLearner())) - job.to(component, "server") - job.to(component, "site-1") + job.to(controller, "server") + job.to(executor, "site-1") with pytest.raises(Exception): - job.to(component, "site-/1", gpu=0) + job.to(executor, "site-/1", gpu=0) def test_non_empty_target(self): job = FedJob() From a91b1d4f99c2299276e3fc5e5b80d5b4e00fa0a7 Mon Sep 17 00:00:00 2001 From: Yuhong Wen Date: Mon, 12 Aug 2024 15:29:50 -0400 Subject: [PATCH 05/26] fixed the cross validation wrong config for swarm_script_executor_cifar10. 
(#2778) --- examples/getting_started/pt/swarm_script_executor_cifar10.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/getting_started/pt/swarm_script_executor_cifar10.py b/examples/getting_started/pt/swarm_script_executor_cifar10.py index db0092d83e..7e490e6698 100644 --- a/examples/getting_started/pt/swarm_script_executor_cifar10.py +++ b/examples/getting_started/pt/swarm_script_executor_cifar10.py @@ -44,7 +44,7 @@ job.to(Net(), "server") for i in range(n_clients): - executor = ScriptExecutor(task_script_path=train_script) + executor = ScriptExecutor(task_script_path=train_script, evaluate_task_name="validate") job.to(executor, f"site-{i}", gpu=0, tasks=["train", "validate", "submit_model"]) # In swarm learning, each client acts also as an aggregator From 6a7e145a47eaa4f8c7b60c64c2afe51275f5f18f Mon Sep 17 00:00:00 2001 From: Yuhong Wen Date: Mon, 12 Aug 2024 15:59:16 -0400 Subject: [PATCH 06/26] Fixed the mgpu simulator workspace change error (#2770) * Fixed the mgpu simulator workspace change error. * codestyle fix. * Changed back the workspace.get_client_custom_dir(), fixed the sub_worker_process app_custom_folder. * Add the app_custom_folder in a proper way. --- .../private/fed/app/client/sub_worker_process.py | 4 ---- .../private/fed/app/simulator/simulator_runner.py | 14 ++++++++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/nvflare/private/fed/app/client/sub_worker_process.py b/nvflare/private/fed/app/client/sub_worker_process.py index 0ecfe94430..aeb6d297aa 100644 --- a/nvflare/private/fed/app/client/sub_worker_process.py +++ b/nvflare/private/fed/app/client/sub_worker_process.py @@ -18,7 +18,6 @@ import copy import logging import os -import sys import threading import time @@ -310,9 +309,6 @@ def stop(self): def main(args): workspace = Workspace(args.workspace, args.client_name) - app_custom_folder = workspace.get_client_custom_dir() - if os.path.isdir(app_custom_folder) and app_custom_folder not in sys.path: - sys.path.append(app_custom_folder) configure_logging(workspace) fobs_initialize(workspace=workspace, job_id=args.job_id) diff --git a/nvflare/private/fed/app/simulator/simulator_runner.py b/nvflare/private/fed/app/simulator/simulator_runner.py index 3952ea5db5..a7f38138fb 100644 --- a/nvflare/private/fed/app/simulator/simulator_runner.py +++ b/nvflare/private/fed/app/simulator/simulator_runner.py @@ -355,9 +355,11 @@ def _deploy_apps(self, job_name, data_bytes, meta, log_config_file_path): app = os.path.join(temp_job_folder, app_name) shutil.copytree(app, app_root) - job_meta_file = os.path.join(self.simulator_root, "server", WorkspaceConstants.JOB_META_FILE) - with open(job_meta_file, "w") as f: - json.dump(meta, f, indent=4) + job_meta_file = os.path.join( + self.simulator_root, p, SimulatorConstants.JOB_NAME, WorkspaceConstants.JOB_META_FILE + ) + with open(job_meta_file, "w") as f: + json.dump(meta, f, indent=4) def split_clients(self, clients: [], gpus: []): split_clients = [] @@ -668,6 +670,10 @@ def do_one_task(self, client, num_of_threads, gpu, lock, timeout=60.0, task_name name=ConfigVarName.DECOMPOSER_MODULE, conf=SystemConfigs.RESOURCES_CONF ) + app_custom_folder = Workspace(root_dir=client_workspace, site_name="mgh").get_app_custom_dir( + SimulatorConstants.JOB_NAME + ) + command = ( sys.executable + " -m nvflare.private.fed.app.simulator.simulator_worker -o " @@ -696,7 +702,7 @@ def do_one_task(self, client, num_of_threads, gpu, lock, timeout=60.0, task_name if gpu: command += " --gpu " + str(gpu) new_env = 
os.environ.copy() - new_env["PYTHONPATH"] = os.pathsep.join(self._get_new_sys_path()) + new_env["PYTHONPATH"] = os.pathsep.join(self._get_new_sys_path()) + os.pathsep + app_custom_folder _ = subprocess.Popen(shlex.split(command, True), preexec_fn=os.setsid, env=new_env) From 622f2053094872167a16d31aa95a6c9626e024e0 Mon Sep 17 00:00:00 2001 From: Ziyue Xu Date: Mon, 12 Aug 2024 20:49:11 -0400 Subject: [PATCH 07/26] Update Secure XGBoost example w.r.t. XGBoost's code changes (#2686) * Initial commit for xgboost-secure * Initial commit for xgboost-secure * Change model output path * Change data mode * Most basic xgboost process for coding * Most basic xgboost process for coding * Most basic xgboost process for coding * Most basic xgboost process for coding * First prototype for secure vertical pipeline * Phase 1 concludes * add seal pipeline in C++ * experiment will more tree depth to ensure correct node behavior * experiment will more tree depth to ensure correct node behavior * update secureboost eval bench * set header to none for sample alignment * config processor interface from python * simplify data preparation, add horizontal testing codes * remove redundants * horizontal exps * update scripts * update test scripts * add feature tests * update to align all outputs' format * remove conflict * reorganize * format * add flare jobs * add readme and experiment results * update secure xgboost example to align with new xgboost branch * update secure xgboost example to align with new xgboost branch * add gpu scripts * modify split for gpu exp * modify split for gpu exp * refine readme with Yuanting's inputs * update gpu scripts * update gpu scripts * update gpu script * data preparation minor update * consolidate all testing scripts * update readme and standalone scripts * format update * format update * minor refinements --- examples/advanced/xgboost_secure/README.md | 60 +++++-- .../advanced/xgboost_secure/requirements.txt | 9 + ...n_training_fl.sh => run_training_flare.sh} | 2 +- .../xgboost_secure/run_training_local.sh | 13 -- .../xgboost_secure/run_training_standalone.sh | 30 ++++ .../train_local/train_hori_base.py | 161 ----------------- .../train_local/train_hori_secure.py | 161 ----------------- .../train_local/train_vert_secure.py | 164 ------------------ .../train_base.py | 59 +++++-- .../train_federated.py} | 102 +++++++---- .../utils/prepare_data_horizontal.py | 6 +- 11 files changed, 202 insertions(+), 565 deletions(-) create mode 100644 examples/advanced/xgboost_secure/requirements.txt rename examples/advanced/xgboost_secure/{run_training_fl.sh => run_training_flare.sh} (99%) delete mode 100755 examples/advanced/xgboost_secure/run_training_local.sh create mode 100755 examples/advanced/xgboost_secure/run_training_standalone.sh delete mode 100644 examples/advanced/xgboost_secure/train_local/train_hori_base.py delete mode 100644 examples/advanced/xgboost_secure/train_local/train_hori_secure.py delete mode 100644 examples/advanced/xgboost_secure/train_local/train_vert_secure.py rename examples/advanced/xgboost_secure/{train_local => train_standalone}/train_base.py (68%) rename examples/advanced/xgboost_secure/{train_local/train_vert_base.py => train_standalone/train_federated.py} (62%) diff --git a/examples/advanced/xgboost_secure/README.md b/examples/advanced/xgboost_secure/README.md index daab8ffdb0..be939d358c 100644 --- a/examples/advanced/xgboost_secure/README.md +++ b/examples/advanced/xgboost_secure/README.md @@ -9,6 +9,12 @@ In this example, we further extend the existing 
horizontal and vertical federate In the following, we illustrate both *horizontal* and *vertical* federated XGBoost, *without* and *with* homomorphic encryption. Please refer to our [documentation]() for more details on the pipeline design and the encryption logic. +## Installation +To be able to run all the examples, please install the requirements first. +``` +pip install -r requirements.txt +``` + ## Data Preparation ### Download and Store Data To run the examples, we first download the dataset from this [link](https://www.kaggle.com/datasets/mlg-ulb/creditcardfraud), which is a single `.csv` file. @@ -43,16 +49,44 @@ For more details regarding federated XGBoost and the interface-plugin design, pl To run all experiments, we provide a script for all settings. ``` -bash run_training_local.sh +bash run_training_standalone.sh ``` This will cover baseline centralized training, local FL with and without secure feature. -From the results, we can have three observations: +## Run Federated Experiments with NVFlare +Next, we run the federated XGBoost training without and with homomorphic encryption using NVFlare. +We run the NVFlare jobs using simulator with: +``` +bash run_training_fl.sh +``` +The running time of each job depends mainly on the encryption workload. + +## Results +Comparing the AUC results with centralized baseline, we have four observations: 1. The performance of the model trained with homomorphic encryption is identical to its counterpart without encryption. -2. Vertical federated learnings have identical performance as the centralized baseline. -3. Horizontal federated learnings have performance slightly different from the centralized baseline. This is because under horizontal FL, the local histogram quantiles are based on the local data distribution, which may not be the same as the global distribution. +2. Vertical federated learning (both secure and non-secure) have identical performance as the centralized baseline. +3. Horizontal federated learning (both secure and non-secure) have performance slightly different from the centralized baseline. This is because under horizontal FL, the local histogram quantiles are based on the local data distribution, which may not be the same as the global distribution. +4. GPU leads to different results compared to CPU, which is expected as the GPU involves some data conversions. + +Below are sample results for CPU training: + +The AUC of vertical learning (both secure and non-secure): +``` +[0] eval-auc:0.90515 train-auc:0.92747 +[1] eval-auc:0.90516 train-auc:0.92748 +[2] eval-auc:0.90518 train-auc:0.92749 +``` +The AUC of horizontal learning (both secure and non-secure): +``` +[0] eval-auc:0.89789 train-auc:0.92732 +[1] eval-auc:0.89791 train-auc:0.92733 +[2] eval-auc:0.89791 train-auc:0.92733 +``` -Upon closer inspection over the tree models (under `/tmp/nvflare/xgb_exp`), we can observe that the tree structures are identical between the baseline and the vertical FL models, while different for horizontal models. Further, the secure vertical FL produces different tree records at different parties - because each party holds different feature subsets: +Comparing the tree models with centralized baseline, we have the following observations: +1. Vertical federated learning (non-secure) has exactly the same tree model as the centralized baseline. +2. 
Vertical federated learning (secure) has the same tree structures as the centralized baseline, however, it produces produces different tree records at different parties - because each party holds different feature subsets, as illustrated below. +3. Horizontal federated learning (both secure and non-secure) have different tree models from the centralized baseline. | ![Tree Structures](./figs/tree.base.png) | |:-------------------------------------------------:| @@ -68,17 +102,5 @@ In this case we can notice that Party 0 holds Feature 7 and 10, Party 1 holds Fe By combining the feature splits at all parties, the tree structures will be identical to the centralized baseline model. -## Run Federated Experiments with NVFlare -Next, we run the federated XGBoost training without and with homomorphic encryption using NVFlare. This time, instead of using the `mock` plugin, we use the real encryption plugins to perform homomorphic encryption. -We run the NVFlare jobs with: -``` -bash run_training_fl.sh -``` -The running time of each job depends mainly on the encryption workload. - - - - -To add: -- link to the documentation -- FL job results and time comparison, specify the computation environment \ No newline at end of file +## Different Encryption Plugins +We can switch to different plugins for encryption/decryption in federated xgboost. The plugin information is specified in `xgb.collective.CommunicatorContext`. \ No newline at end of file diff --git a/examples/advanced/xgboost_secure/requirements.txt b/examples/advanced/xgboost_secure/requirements.txt new file mode 100644 index 0000000000..c9f1320544 --- /dev/null +++ b/examples/advanced/xgboost_secure/requirements.txt @@ -0,0 +1,9 @@ +nvflare~=2.5.0rc +ipcl_python @ git+https://github.com/intel/pailliercryptolib_python.git@development +xgboost @ https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/vertical-federated-learning/xgboost-2.1.0.dev0%2Bde4013fc733648dfe5c2c803a13e2782056e00a2-py3-none-manylinux_2_28_x86_64.whl +pandas +scikit-learn +shap +matplotlib +tensorboard +tenseal \ No newline at end of file diff --git a/examples/advanced/xgboost_secure/run_training_fl.sh b/examples/advanced/xgboost_secure/run_training_flare.sh similarity index 99% rename from examples/advanced/xgboost_secure/run_training_fl.sh rename to examples/advanced/xgboost_secure/run_training_flare.sh index 9002884772..12070115cc 100755 --- a/examples/advanced/xgboost_secure/run_training_fl.sh +++ b/examples/advanced/xgboost_secure/run_training_flare.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash WORKSPACE_ROOT="/tmp/nvflare/xgb_workspaces" -n=3 +n=2 echo "Training horizontal" nvflare simulator jobs/xgb_hori -w ${WORKSPACE_ROOT}/workspace_hori -n ${n} -t ${n} diff --git a/examples/advanced/xgboost_secure/run_training_local.sh b/examples/advanced/xgboost_secure/run_training_local.sh deleted file mode 100755 index f34392a1d8..0000000000 --- a/examples/advanced/xgboost_secure/run_training_local.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash -mkdir /tmp/nvflare/xgb_exp - -echo "Training baseline" -python3 ./train_local/train_base.py 3 -echo "Training horizontal" -python3 ./train_local/train_hori_base.py 3 -echo "Training secure horizontal" -python3 ./train_local/train_hori_secure.py 3 -echo "Training vertical" -python3 ./train_local/train_vert_base.py 3 -echo "Training secure vertical" -python3 ./train_local/train_vert_secure.py 3 \ No newline at end of file diff --git a/examples/advanced/xgboost_secure/run_training_standalone.sh 
b/examples/advanced/xgboost_secure/run_training_standalone.sh new file mode 100755 index 0000000000..c1b0eb5af0 --- /dev/null +++ b/examples/advanced/xgboost_secure/run_training_standalone.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +directory="/tmp/nvflare/xgb_exp" +if [ ! -e "$directory" ]; then + mkdir -p "$directory" + echo "Directory created: $directory" +else + echo "Directory already exists: $directory" +fi + +echo "Training baseline CPU" +python3 ./train_standalone/train_base.py --out_path "/tmp/nvflare/xgb_exp/base_cpu" --gpu 0 +echo "Training baseline GPU" +python3 ./train_standalone/train_base.py --out_path "/tmp/nvflare/xgb_exp/base_gpu" --gpu 1 +echo "Training horizontal CPU non-encrypted" +python3 ./train_standalone/train_federated.py --data_train_root "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" --out_path "/tmp/nvflare/xgb_exp/hori_cpu_non_enc" --vert 0 --gpu 0 --enc 0 +echo "Training horizontal CPU encrypted" +python3 ./train_standalone/train_federated.py --data_train_root "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" --out_path "/tmp/nvflare/xgb_exp/hori_cpu_enc" --vert 0 --gpu 0 --enc 1 +echo "Training horizontal GPU non-encrypted" +python3 ./train_standalone/train_federated.py --data_train_root "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" --out_path "/tmp/nvflare/xgb_exp/hori_gpu_non_enc" --vert 0 --gpu 1 --enc 0 +echo "Training horizontal GPU encrypted" +python3 ./train_standalone/train_federated.py --data_train_root "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" --out_path "/tmp/nvflare/xgb_exp/hori_gpu_enc" --vert 0 --gpu 1 --enc 1 +echo "Training vertical CPU non-encrypted" +python3 ./train_standalone/train_federated.py --data_train_root "/tmp/nvflare/xgb_dataset/vertical_xgb_data" --out_path "/tmp/nvflare/xgb_exp/vert_cpu_non_enc" --vert 1 --gpu 0 --enc 0 +echo "Training vertical CPU encrypted" +python3 ./train_standalone/train_federated.py --data_train_root "/tmp/nvflare/xgb_dataset/vertical_xgb_data" --out_path "/tmp/nvflare/xgb_exp/vert_cpu_enc" --vert 1 --gpu 0 --enc 1 +echo "Training vertical GPU non-encrypted" +python3 ./train_standalone/train_federated.py --data_train_root "/tmp/nvflare/xgb_dataset/vertical_xgb_data" --out_path "/tmp/nvflare/xgb_exp/vert_gpu_non_enc" --vert 1 --gpu 1 --enc 0 +echo "Training vertical GPU encrypted" +python3 ./train_standalone/train_federated.py --data_train_root "/tmp/nvflare/xgb_dataset/vertical_xgb_data" --out_path "/tmp/nvflare/xgb_exp/vert_gpu_enc" --vert 1 --gpu 1 --enc 1 \ No newline at end of file diff --git a/examples/advanced/xgboost_secure/train_local/train_hori_base.py b/examples/advanced/xgboost_secure/train_local/train_hori_base.py deleted file mode 100644 index 476d0bf6ac..0000000000 --- a/examples/advanced/xgboost_secure/train_local/train_hori_base.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
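The encryption behavior toggled by the `--enc` flag in the standalone launcher script above, and the plugin switch mentioned in the README's "Different Encryption Plugins" section, both come down to the communicator environment handed to `xgb.collective.CommunicatorContext`. Below is a minimal sketch of that setup for a single worker; the server address, world size, rank, and the `mock` plugin name are illustrative values taken from this example, and it assumes the federated-secure XGBoost build listed in `requirements.txt` plus a federated gRPC server already listening at the given address.

```python
import xgboost as xgb

# Communicator environment for one worker (illustrative values; a real run would
# derive rank/world size from the launcher and point at a live federated server).
communicator_env = {
    "dmlc_communicator": "federated",
    "federated_server_address": "localhost:1111",
    "federated_world_size": 2,
    "federated_rank": 0,
    # Encryption plugin selection; pass an empty dict {} to train without encryption.
    "federated_plugin": {"name": "mock"},
}

with xgb.collective.CommunicatorContext(**communicator_env):
    # Row-split (horizontal) CSV with the label in column 0; paths are placeholders.
    dtrain = xgb.DMatrix("/tmp/nvflare/xgb_dataset/horizontal_xgb_data/site-1/train.csv?format=csv&label_column=0")
    dvalid = xgb.DMatrix("/tmp/nvflare/xgb_dataset/horizontal_xgb_data/site-1/valid.csv?format=csv&label_column=0")
    bst = xgb.train(
        {"max_depth": 3, "eta": 0.1, "objective": "binary:logistic", "eval_metric": "auc", "tree_method": "hist"},
        dtrain,
        num_boost_round=3,
        evals=[(dvalid, "eval"), (dtrain, "train")],
    )
    bst.save_model("/tmp/nvflare/xgb_exp/model.json")
```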
- -import multiprocessing -import sys -import time - -import matplotlib.pyplot as plt -import pandas as pd -import shap -import xgboost as xgb -import xgboost.federated - -PRINT_SAMPLE = False -DATASET_ROOT = "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" -TEST_DATA_PATH = "/tmp/nvflare/xgb_dataset/test.csv" -OUTPUT_ROOT = "/tmp/nvflare/xgb_exp" - - -def load_test_data(data_path: str): - df = pd.read_csv(data_path) - # Split to feature and label - X = df.iloc[:, 1:] - y = df.iloc[:, 0] - return X, y - - -def run_server(port: int, world_size: int) -> None: - xgboost.federated.run_federated_server(port, world_size) - - -def run_worker(port: int, world_size: int, rank: int) -> None: - communicator_env = { - "xgboost_communicator": "federated", - "federated_server_address": f"localhost:{port}", - "federated_world_size": world_size, - "federated_rank": rank, - "plugin_name": "mock", - "loader_params": {"LIBRARY_PATH": "/tmp"}, - "proc_params": {"": ""}, - } - - # Always call this before using distributed module - with xgb.collective.CommunicatorContext(**communicator_env): - # Specify file path, rank 0 as the label owner, others as the feature owner - train_path = f"{DATASET_ROOT}/site-{rank + 1}/train.csv" - valid_path = f"{DATASET_ROOT}/site-{rank + 1}/valid.csv" - - # Load file directly to tell the match from loading with DMatrix - df_train = pd.read_csv(train_path, header=None) - if PRINT_SAMPLE: - # print number of rows and columns for each worker - print(f"Direct load: rank={rank}, nrow={df_train.shape[0]}, ncol={df_train.shape[1]}") - # print one sample row of the data - print(f"Direct load: rank={rank}, one sample row of the data: \n {df_train.iloc[0]}") - - # Load file, file will not be sharded in federated mode. - label = "&label_column=0" - # for Vertical XGBoost, read from csv with label_column and set data_split_mode to 1 for column mode - dtrain = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=0) - dvalid = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=0) - - if PRINT_SAMPLE: - # print number of rows and columns for each worker - print(f"DMatrix: rank={rank}, nrow={dtrain.num_row()}, ncol={dtrain.num_col()}") - # print one sample row of the data - data_sample = dtrain.get_data()[0] - print(f"DMatrix: rank={rank}, one sample row of the data: \n {data_sample}") - - # Specify parameters via map, definition are same as c++ version - param = { - "max_depth": 3, - "eta": 0.1, - "objective": "binary:logistic", - "eval_metric": "auc", - "tree_method": "hist", - "nthread": 1, - } - - # Specify validations set to watch performance - watchlist = [(dvalid, "eval"), (dtrain, "train")] - num_round = 3 - - # Run training, all the features in training API is available. 
- bst = xgb.train(param, dtrain, num_round, evals=watchlist) - - # Save the model - rank = xgb.collective.get_rank() - bst.save_model(f"{OUTPUT_ROOT}/model.hori.base.{rank}.json") - xgb.collective.communicator_print("Finished training\n") - - # save feature importance score to file - score = bst.get_score(importance_type="gain") - with open(f"{OUTPUT_ROOT}/feat_importance.hori.base.{rank}.txt", "w") as f: - for key in score: - f.write(f"{key}: {score[key]}\n") - - # Load test data - X_test, y_test = load_test_data(TEST_DATA_PATH) - # construct xgboost DMatrix - dmat_test = xgb.DMatrix(X_test, label=y_test) - - # Explain the model - explainer = shap.TreeExplainer(bst) - explanation = explainer(dmat_test) - - # save the beeswarm plot to png file - shap.plots.beeswarm(explanation, show=False) - img = plt.gcf() - img.savefig(f"{OUTPUT_ROOT}/shap.hori.base.{rank}.png") - - # dump tree and save to text file - dump = bst.get_dump() - with open(f"{OUTPUT_ROOT}/tree_dump.hori.base.{rank}.txt", "w") as f: - for tree in dump: - f.write(tree) - - # plot tree and save to png file - xgb.plot_tree(bst, num_trees=0, rankdir="LR") - fig = plt.gcf() - fig.set_size_inches(18, 5) - plt.savefig(f"{OUTPUT_ROOT}/tree.hori.base.{rank}.png", dpi=100) - - # export tree to dataframe - tree_df = bst.trees_to_dataframe() - tree_df.to_csv(f"{OUTPUT_ROOT}/tree_df.hori.base.{rank}.csv") - - -def run_federated() -> None: - port = 1111 - world_size = int(sys.argv[1]) - - server = multiprocessing.Process(target=run_server, args=(port, world_size)) - server.start() - time.sleep(1) - if not server.is_alive(): - raise Exception("Error starting Federated Learning server") - - workers = [] - for rank in range(world_size): - worker = multiprocessing.Process(target=run_worker, args=(port, world_size, rank)) - workers.append(worker) - worker.start() - for worker in workers: - worker.join() - server.terminate() - - -if __name__ == "__main__": - run_federated() diff --git a/examples/advanced/xgboost_secure/train_local/train_hori_secure.py b/examples/advanced/xgboost_secure/train_local/train_hori_secure.py deleted file mode 100644 index 647f0427ab..0000000000 --- a/examples/advanced/xgboost_secure/train_local/train_hori_secure.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import multiprocessing -import sys -import time - -import matplotlib.pyplot as plt -import pandas as pd -import shap -import xgboost as xgb -import xgboost.federated - -PRINT_SAMPLE = False -DATASET_ROOT = "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" -TEST_DATA_PATH = "/tmp/nvflare/xgb_dataset/test.csv" -OUTPUT_ROOT = "/tmp/nvflare/xgb_exp" - - -def load_test_data(data_path: str): - df = pd.read_csv(data_path) - # Split to feature and label - X = df.iloc[:, 1:] - y = df.iloc[:, 0] - return X, y - - -def run_server(port: int, world_size: int) -> None: - xgboost.federated.run_federated_server(port, world_size) - - -def run_worker(port: int, world_size: int, rank: int) -> None: - communicator_env = { - "xgboost_communicator": "federated", - "federated_server_address": f"localhost:{port}", - "federated_world_size": world_size, - "federated_rank": rank, - "plugin_name": "mock", - "loader_params": {"LIBRARY_PATH": "/tmp"}, - "proc_params": {"": ""}, - } - - # Always call this before using distributed module - with xgb.collective.CommunicatorContext(**communicator_env): - # Specify file path, rank 0 as the label owner, others as the feature owner - train_path = f"{DATASET_ROOT}/site-{rank + 1}/train.csv" - valid_path = f"{DATASET_ROOT}/site-{rank + 1}/valid.csv" - - # Load file directly to tell the match from loading with DMatrix - df_train = pd.read_csv(train_path, header=None) - if PRINT_SAMPLE: - # print number of rows and columns for each worker - print(f"Direct load: rank={rank}, nrow={df_train.shape[0]}, ncol={df_train.shape[1]}") - # print one sample row of the data - print(f"Direct load: rank={rank}, one sample row of the data: \n {df_train.iloc[0]}") - - # Load file, file will not be sharded in federated mode. - label = "&label_column=0" - # for Vertical XGBoost, read from csv with label_column and set data_split_mode to 1 for column mode - dtrain = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=3) - dvalid = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=3) - - if PRINT_SAMPLE: - # print number of rows and columns for each worker - print(f"DMatrix: rank={rank}, nrow={dtrain.num_row()}, ncol={dtrain.num_col()}") - # print one sample row of the data - data_sample = dtrain.get_data()[0] - print(f"DMatrix: rank={rank}, one sample row of the data: \n {data_sample}") - - # Specify parameters via map, definition are same as c++ version - param = { - "max_depth": 3, - "eta": 0.1, - "objective": "binary:logistic", - "eval_metric": "auc", - "tree_method": "hist", - "nthread": 1, - } - - # Specify validations set to watch performance - watchlist = [(dvalid, "eval"), (dtrain, "train")] - num_round = 3 - - # Run training, all the features in training API is available. 
- bst = xgb.train(param, dtrain, num_round, evals=watchlist) - - # Save the model - rank = xgb.collective.get_rank() - bst.save_model(f"{OUTPUT_ROOT}/model.hori.secure.{rank}.json") - xgb.collective.communicator_print("Finished training\n") - - # save feature importance score to file - score = bst.get_score(importance_type="gain") - with open(f"{OUTPUT_ROOT}/feat_importance.secure.base.{rank}.txt", "w") as f: - for key in score: - f.write(f"{key}: {score[key]}\n") - - # Load test data - X_test, y_test = load_test_data(TEST_DATA_PATH) - # construct xgboost DMatrix - dmat_test = xgb.DMatrix(X_test, label=y_test) - - # Explain the model - explainer = shap.TreeExplainer(bst) - explanation = explainer(dmat_test) - - # save the beeswarm plot to png file - shap.plots.beeswarm(explanation, show=False) - img = plt.gcf() - img.savefig(f"{OUTPUT_ROOT}/shap.hori.secure.{rank}.png") - - # dump tree and save to text file - dump = bst.get_dump() - with open(f"{OUTPUT_ROOT}/tree_dump.hori.secure.{rank}.txt", "w") as f: - for tree in dump: - f.write(tree) - - # plot tree and save to png file - xgb.plot_tree(bst, num_trees=0, rankdir="LR") - fig = plt.gcf() - fig.set_size_inches(18, 5) - plt.savefig(f"{OUTPUT_ROOT}/tree.hori.secure.{rank}.png", dpi=100) - - # export tree to dataframe - tree_df = bst.trees_to_dataframe() - tree_df.to_csv(f"{OUTPUT_ROOT}/tree_df.hori.secure.{rank}.csv") - - -def run_federated() -> None: - port = 2222 - world_size = int(sys.argv[1]) - - server = multiprocessing.Process(target=run_server, args=(port, world_size)) - server.start() - time.sleep(1) - if not server.is_alive(): - raise Exception("Error starting Federated Learning server") - - workers = [] - for rank in range(world_size): - worker = multiprocessing.Process(target=run_worker, args=(port, world_size, rank)) - workers.append(worker) - worker.start() - for worker in workers: - worker.join() - server.terminate() - - -if __name__ == "__main__": - run_federated() diff --git a/examples/advanced/xgboost_secure/train_local/train_vert_secure.py b/examples/advanced/xgboost_secure/train_local/train_vert_secure.py deleted file mode 100644 index 10e0d4382e..0000000000 --- a/examples/advanced/xgboost_secure/train_local/train_vert_secure.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import multiprocessing -import sys -import time - -import matplotlib.pyplot as plt -import pandas as pd -import shap -import xgboost as xgb -import xgboost.federated - -PRINT_SAMPLE = False -DATASET_ROOT = "/tmp/nvflare/xgb_dataset/vertical_xgb_data" -TEST_DATA_PATH = "/tmp/nvflare/xgb_dataset/test.csv" -OUTPUT_ROOT = "/tmp/nvflare/xgb_exp" - - -def load_test_data(data_path: str): - df = pd.read_csv(data_path) - # Split to feature and label - X = df.iloc[:, 1:] - y = df.iloc[:, 0] - return X, y - - -def run_server(port: int, world_size: int) -> None: - xgboost.federated.run_federated_server(port, world_size) - - -def run_worker(port: int, world_size: int, rank: int) -> None: - communicator_env = { - "xgboost_communicator": "federated", - "federated_server_address": f"localhost:{port}", - "federated_world_size": world_size, - "federated_rank": rank, - "plugin_name": "mock", - "loader_params": {"LIBRARY_PATH": "/tmp"}, - "proc_params": {"": ""}, - } - - # Always call this before using distributed module - with xgb.collective.CommunicatorContext(**communicator_env): - # Specify file path, rank 0 as the label owner, others as the feature owner - train_path = f"{DATASET_ROOT}/site-{rank + 1}/train.csv" - valid_path = f"{DATASET_ROOT}/site-{rank + 1}/valid.csv" - - # Load file directly to tell the match from loading with DMatrix - df_train = pd.read_csv(train_path, header=None) - if PRINT_SAMPLE: - # print number of rows and columns for each worker - print(f"Direct load: rank={rank}, nrow={df_train.shape[0]}, ncol={df_train.shape[1]}") - # print one sample row of the data - print(f"Direct load: rank={rank}, one sample row of the data: \n {df_train.iloc[0]}") - - # Load file, file will not be sharded in federated mode. - if rank == 0: - label = "&label_column=0" - else: - label = "" - # for Vertical XGBoost, read from csv with label_column and set data_split_mode to 1 for column mode - dtrain = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=2) - dvalid = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=2) - - if PRINT_SAMPLE: - # print number of rows and columns for each worker - print(f"DMatrix: rank={rank}, nrow={dtrain.num_row()}, ncol={dtrain.num_col()}") - # print one sample row of the data - data_sample = dtrain.get_data()[0] - print(f"DMatrix: rank={rank}, one sample row of the data: \n {data_sample}") - - # Specify parameters via map, definition are same as c++ version - param = { - "max_depth": 3, - "eta": 0.1, - "objective": "binary:logistic", - "eval_metric": "auc", - "tree_method": "hist", - "nthread": 1, - } - - # Specify validations set to watch performance - watchlist = [(dvalid, "eval"), (dtrain, "train")] - num_round = 3 - - # Run training, all the features in training API is available. - bst = xgb.train(param, dtrain, num_round, evals=watchlist) - - # Save the model, every rank's model is different. 
- rank = xgb.collective.get_rank() - bst.save_model(f"{OUTPUT_ROOT}/model.vert.secure.{rank}.json") - xgb.collective.communicator_print("Finished training\n") - - # save feature importance score to file - score = bst.get_score(importance_type="gain") - with open(f"{OUTPUT_ROOT}/feat_importance.vert.secure.{rank}.txt", "w") as f: - for key in score: - f.write(f"{key}: {score[key]}\n") - - # Load test data - X_test, y_test = load_test_data(TEST_DATA_PATH) - # construct xgboost DMatrix - dmat_test = xgb.DMatrix(X_test, label=y_test) - - # Explain the model - explainer = shap.TreeExplainer(bst) - explanation = explainer(dmat_test) - - # save the beeswarm plot to png file - shap.plots.beeswarm(explanation, show=False) - img = plt.gcf() - img.savefig(f"{OUTPUT_ROOT}/shap.vert.secure.{rank}.png") - - # dump tree and save to text file - dump = bst.get_dump() - with open(f"{OUTPUT_ROOT}/tree_dump.vert.secure.{rank}.txt", "w") as f: - for tree in dump: - f.write(tree) - - # plot tree and save to png file - xgb.plot_tree(bst, num_trees=0, rankdir="LR") - fig = plt.gcf() - fig.set_size_inches(18, 5) - plt.savefig(f"{OUTPUT_ROOT}/tree.vert.secure.{rank}.png", dpi=100) - - # export tree to dataframe - tree_df = bst.trees_to_dataframe() - tree_df.to_csv(f"{OUTPUT_ROOT}/tree_df.vert.secure.{rank}.csv") - - -def run_federated() -> None: - port = 4444 - world_size = int(sys.argv[1]) - - server = multiprocessing.Process(target=run_server, args=(port, world_size)) - server.start() - time.sleep(1) - if not server.is_alive(): - raise Exception("Error starting Federated Learning server") - - workers = [] - for rank in range(world_size): - worker = multiprocessing.Process(target=run_worker, args=(port, world_size, rank)) - workers.append(worker) - worker.start() - for worker in workers: - worker.join() - server.terminate() - - -if __name__ == "__main__": - run_federated() diff --git a/examples/advanced/xgboost_secure/train_local/train_base.py b/examples/advanced/xgboost_secure/train_standalone/train_base.py similarity index 68% rename from examples/advanced/xgboost_secure/train_local/train_base.py rename to examples/advanced/xgboost_secure/train_standalone/train_base.py index 2a0cad4179..58db56b94c 100644 --- a/examples/advanced/xgboost_secure/train_local/train_base.py +++ b/examples/advanced/xgboost_secure/train_standalone/train_base.py @@ -12,15 +12,36 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import argparse +import os + import matplotlib.pyplot as plt import pandas as pd import shap import xgboost as xgb PRINT_SAMPLE = False -DATASET_ROOT = "/tmp/nvflare/xgb_dataset/base_xgb_data" -TEST_DATA_PATH = "/tmp/nvflare/xgb_dataset/test.csv" -OUTPUT_ROOT = "/tmp/nvflare/xgb_exp" + + +def train_base_args_parser(): + parser = argparse.ArgumentParser(description="Train baseline XGBoost model") + parser.add_argument("--gpu", type=int, default=0, help="Whether to use gpu for training, 0 for cpu, 1 for gpu") + parser.add_argument( + "--data_train_root", + type=str, + default="/tmp/nvflare/xgb_dataset/base_xgb_data", + help="Path to training data folder", + ) + parser.add_argument( + "--data_test_file", type=str, default="/tmp/nvflare/xgb_dataset/test.csv", help="Path to testing data file" + ) + parser.add_argument( + "--out_path", + type=str, + default="/tmp/nvflare/xgboost_secure/train_standalone/base", + help="Output path for the data split file", + ) + return parser def load_test_data(data_path: str): @@ -31,10 +52,15 @@ def load_test_data(data_path: str): return X, y -def run_training() -> None: +def main(): + parser = train_base_args_parser() + args = parser.parse_args() + if not os.path.exists(args.out_path): + os.makedirs(args.out_path) + # Specify file path, rank 0 as the label owner, others as the feature owner - train_path = f"{DATASET_ROOT}/train.csv" - valid_path = f"{DATASET_ROOT}/valid.csv" + train_path = f"{args.data_train_root}/train.csv" + valid_path = f"{args.data_train_root}/valid.csv" # Load file directly to tell the match from loading with DMatrix df_train = pd.read_csv(train_path, header=None) @@ -58,12 +84,17 @@ def run_training() -> None: print(f"DMatrix: one sample row of the data: \n {data_sample}") # Specify parameters via map, definition are same as c++ version + if args.gpu: + device = "cuda:0" + else: + device = "cpu" param = { "max_depth": 3, "eta": 0.1, "objective": "binary:logistic", "eval_metric": "auc", "tree_method": "hist", + "device": device, "nthread": 1, } @@ -75,17 +106,17 @@ def run_training() -> None: bst = xgb.train(param, dtrain, num_round, evals=watchlist) # Save the model - bst.save_model(f"{OUTPUT_ROOT}/model.base.json") + bst.save_model(f"{args.out_path}/model.base.json") xgb.collective.communicator_print("Finished training\n") # save feature importance score to file score = bst.get_score(importance_type="gain") - with open(f"{OUTPUT_ROOT}/feat_importance.base.txt", "w") as f: + with open(f"{args.out_path}/feat_importance.base.txt", "w") as f: for key in score: f.write(f"{key}: {score[key]}\n") # Load test data - X_test, y_test = load_test_data(TEST_DATA_PATH) + X_test, y_test = load_test_data(args.data_test_file) # construct xgboost DMatrix dmat_test = xgb.DMatrix(X_test, label=y_test) @@ -96,11 +127,11 @@ def run_training() -> None: # save the beeswarm plot to png file shap.plots.beeswarm(explanation, show=False) img = plt.gcf() - img.savefig(f"{OUTPUT_ROOT}/shap.base.png") + img.savefig(f"{args.out_path}/shap.base.png") # dump tree and save to text file dump = bst.get_dump() - with open(f"{OUTPUT_ROOT}/tree_dump.base.txt", "w") as f: + with open(f"{args.out_path}/tree_dump.base.txt", "w") as f: for tree in dump: f.write(tree) @@ -108,12 +139,12 @@ def run_training() -> None: xgb.plot_tree(bst, num_trees=0, rankdir="LR") fig = plt.gcf() fig.set_size_inches(18, 5) - plt.savefig(f"{OUTPUT_ROOT}/tree.base.png", dpi=100) + plt.savefig(f"{args.out_path}/tree.base.png", dpi=100) # export tree to dataframe tree_df = bst.trees_to_dataframe() - 
tree_df.to_csv(f"{OUTPUT_ROOT}/tree_df.base.csv") + tree_df.to_csv(f"{args.out_path}/tree_df.base.csv") if __name__ == "__main__": - run_training() + main() diff --git a/examples/advanced/xgboost_secure/train_local/train_vert_base.py b/examples/advanced/xgboost_secure/train_standalone/train_federated.py similarity index 62% rename from examples/advanced/xgboost_secure/train_local/train_vert_base.py rename to examples/advanced/xgboost_secure/train_standalone/train_federated.py index 22a3de3da3..069ae34890 100644 --- a/examples/advanced/xgboost_secure/train_local/train_vert_base.py +++ b/examples/advanced/xgboost_secure/train_standalone/train_federated.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import argparse import multiprocessing -import sys +import os import time import matplotlib.pyplot as plt @@ -23,9 +24,34 @@ import xgboost.federated PRINT_SAMPLE = False -DATASET_ROOT = "/tmp/nvflare/xgb_dataset/vertical_xgb_data" -TEST_DATA_PATH = "/tmp/nvflare/xgb_dataset/test.csv" -OUTPUT_ROOT = "/tmp/nvflare/xgb_exp" + + +def train_federated_args_parser(): + parser = argparse.ArgumentParser(description="Train federated XGBoost model") + parser.add_argument("--world_size", type=int, default=3, help="Total number of clients") + parser.add_argument("--gpu", type=int, default=0, help="Whether to use gpu for training, 0 for cpu, 1 for gpu") + parser.add_argument( + "--vert", type=int, default=0, help="Horizontal or vertical training, 0 for horizontal, 1 for vertical" + ) + parser.add_argument( + "--enc", type=int, default=0, help="Whether to use encryption plugin, 0 for non-encrypted, 1 for encrypted" + ) + parser.add_argument( + "--data_train_root", + type=str, + default="/tmp/nvflare/xgb_dataset/base_xgb_data", + help="Path to training data folder", + ) + parser.add_argument( + "--data_test_file", type=str, default="/tmp/nvflare/xgb_dataset/test.csv", help="Path to testing data file" + ) + parser.add_argument( + "--out_path", + type=str, + default="/tmp/nvflare/xgboost_secure/train_standalone/federated", + help="Output path for the data split file", + ) + return parser def load_test_data(data_path: str): @@ -40,22 +66,24 @@ def run_server(port: int, world_size: int) -> None: xgboost.federated.run_federated_server(port, world_size) -def run_worker(port: int, world_size: int, rank: int) -> None: +def run_worker(port: int, world_size: int, rank: int, args) -> None: + if args.enc: + plugin = {"name": "mock"} + else: + plugin = {} communicator_env = { - "xgboost_communicator": "federated", + "dmlc_communicator": "federated", "federated_server_address": f"localhost:{port}", "federated_world_size": world_size, "federated_rank": rank, - "plugin_name": "mock", - "loader_params": {"LIBRARY_PATH": "/tmp"}, - "proc_params": {"": ""}, + "federated_plugin": plugin, } # Always call this before using distributed module with xgb.collective.CommunicatorContext(**communicator_env): # Specify file path, rank 0 as the label owner, others as the feature owner - train_path = f"{DATASET_ROOT}/site-{rank + 1}/train.csv" - valid_path = f"{DATASET_ROOT}/site-{rank + 1}/valid.csv" + train_path = f"{args.data_train_root}/site-{rank + 1}/train.csv" + valid_path = f"{args.data_train_root}/site-{rank + 1}/valid.csv" # Load file directly to tell the match from loading with DMatrix df_train = pd.read_csv(train_path, header=None) @@ -66,13 +94,17 @@ def run_worker(port: int, world_size: int, rank: int) -> None: print(f"Direct load: rank={rank}, one 
sample row of the data: \n {df_train.iloc[0]}") # Load file, file will not be sharded in federated mode. - if rank == 0: - label = "&label_column=0" + if args.vert: + split_mode = 1 + if rank == 0: + label = "&label_column=0" + else: + label = "" else: - label = "" - # for Vertical XGBoost, read from csv with label_column and set data_split_mode to 1 for column mode - dtrain = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=1) - dvalid = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=1) + split_mode = 0 + label = "&label_column=0" + dtrain = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=split_mode) + dvalid = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=split_mode) if PRINT_SAMPLE: # print number of rows and columns for each worker @@ -82,12 +114,17 @@ def run_worker(port: int, world_size: int, rank: int) -> None: print(f"DMatrix: rank={rank}, one sample row of the data: \n {data_sample}") # Specify parameters via map, definition are same as c++ version + if args.gpu: + device = "cuda:0" + else: + device = "cpu" param = { "max_depth": 3, "eta": 0.1, "objective": "binary:logistic", "eval_metric": "auc", "tree_method": "hist", + "device": device, "nthread": 1, } @@ -100,17 +137,17 @@ def run_worker(port: int, world_size: int, rank: int) -> None: # Save the model rank = xgb.collective.get_rank() - bst.save_model(f"{OUTPUT_ROOT}/model.vert.base.{rank}.json") + bst.save_model(f"{args.out_path}/model.{rank}.json") xgb.collective.communicator_print("Finished training\n") # save feature importance score to file score = bst.get_score(importance_type="gain") - with open(f"{OUTPUT_ROOT}/feat_importance.vert.base.{rank}.txt", "w") as f: + with open(f"{args.out_path}/feat_importance.{rank}.txt", "w") as f: for key in score: f.write(f"{key}: {score[key]}\n") # Load test data - X_test, y_test = load_test_data(TEST_DATA_PATH) + X_test, y_test = load_test_data(args.data_test_file) # construct xgboost DMatrix dmat_test = xgb.DMatrix(X_test, label=y_test) @@ -121,11 +158,11 @@ def run_worker(port: int, world_size: int, rank: int) -> None: # save the beeswarm plot to png file shap.plots.beeswarm(explanation, show=False) img = plt.gcf() - img.savefig(f"{OUTPUT_ROOT}/shap.vert.base.{rank}.png") + img.savefig(f"{args.out_path}/shap.{rank}.png") # dump tree and save to text file dump = bst.get_dump() - with open(f"{OUTPUT_ROOT}/tree_dump.vert.base.{rank}.txt", "w") as f: + with open(f"{args.out_path}/tree_dump.{rank}.txt", "w") as f: for tree in dump: f.write(tree) @@ -133,18 +170,23 @@ def run_worker(port: int, world_size: int, rank: int) -> None: xgb.plot_tree(bst, num_trees=0, rankdir="LR") fig = plt.gcf() fig.set_size_inches(18, 5) - plt.savefig(f"{OUTPUT_ROOT}/tree.vert.base.{rank}.png", dpi=100) + plt.savefig(f"{args.out_path}/tree.{rank}.png", dpi=100) # export tree to dataframe tree_df = bst.trees_to_dataframe() - tree_df.to_csv(f"{OUTPUT_ROOT}/tree_df.vert.base.{rank}.csv") + tree_df.to_csv(f"{args.out_path}/tree_df.{rank}.csv") + +def main(): + parser = train_federated_args_parser() + args = parser.parse_args() + if not os.path.exists(args.out_path): + os.makedirs(args.out_path) -def run_federated() -> None: - port = 3333 - world_size = int(sys.argv[1]) + port = 1111 + world_size = args.world_size - server = multiprocessing.Process(target=run_server, args=(port, world_size)) + server = multiprocessing.Process(target=run_server, args=(world_size, port)) server.start() time.sleep(1) if not server.is_alive(): @@ -152,7 +194,7 @@ def 
run_federated() -> None: workers = [] for rank in range(world_size): - worker = multiprocessing.Process(target=run_worker, args=(port, world_size, rank)) + worker = multiprocessing.Process(target=run_worker, args=(port, world_size, rank, args)) workers.append(worker) worker.start() for worker in workers: @@ -161,4 +203,4 @@ def run_federated() -> None: if __name__ == "__main__": - run_federated() + main() diff --git a/examples/advanced/xgboost_secure/utils/prepare_data_horizontal.py b/examples/advanced/xgboost_secure/utils/prepare_data_horizontal.py index 0aedd85032..e62a3a69c8 100644 --- a/examples/advanced/xgboost_secure/utils/prepare_data_horizontal.py +++ b/examples/advanced/xgboost_secure/utils/prepare_data_horizontal.py @@ -70,8 +70,10 @@ def main(): df_valid = df.iloc[int(0.8 * rows_total) :, :] for site in range(args.site_num): - # sort df_train by feature 2 - # df_train = df_train.sort_values(by=2) + # sort df_train by an arbitrary feature, 7, + # creating distribution shift between sites + # to illustrate the horizontal split quantile difference + df_train = df_train.sort_values(by=7) row_start = sum(site_row_size[:site]) row_end = sum(site_row_size[: site + 1]) From 138387e518ecab6b1ff5d543dddf98d60556cc97 Mon Sep 17 00:00:00 2001 From: Yuhong Wen Date: Tue, 13 Aug 2024 15:34:02 -0400 Subject: [PATCH 08/26] Improve the kill children processes (#2789) * use process.kill() to kill the children processes. * removed the sig argument. * removed no use import. --- nvflare/private/fed/app/utils.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nvflare/private/fed/app/utils.py b/nvflare/private/fed/app/utils.py index d3b0c0ae82..b68c5e0c5e 100644 --- a/nvflare/private/fed/app/utils.py +++ b/nvflare/private/fed/app/utils.py @@ -13,7 +13,6 @@ # limitations under the License. 
import os -import signal import sys import threading import time @@ -49,14 +48,14 @@ def check_parent_alive(parent_pid, stop_event: threading.Event): time.sleep(1) -def kill_child_processes(parent_pid, sig=signal.SIGTERM): +def kill_child_processes(parent_pid): try: parent = psutil.Process(parent_pid) except psutil.NoSuchProcess: return children = parent.children(recursive=True) for process in children: - process.send_signal(sig) + process.kill() def create_admin_server(fl_server: FederatedServer, server_conf=None, args=None, secure_train=False): From 73b75c92fbed6ae3cfcc4dfb39ce450cda12544d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Tue, 13 Aug 2024 14:55:18 -0700 Subject: [PATCH 09/26] Add back metric callback and fix examples based on new xgboost version (#2787) --- .readthedocs.yaml | 4 +- build_doc.sh | 2 +- examples/advanced/vertical_xgboost/README.md | 2 +- .../code/vertical_xgb/vertical_data_loader.py | 2 +- .../vertical_xgboost/requirements.txt | 10 ++- .../base/app/config/config_fed_server.json | 3 - .../base_v2/app/config/config_fed_client.json | 14 +--- .../base_v2/app/config/config_fed_server.json | 16 +++- .../base_v2/app/custom/higgs_data_loader.py | 77 +++++++++++++++++++ .../histogram-based/jobs/base_v2/meta.json | 10 +++ .../xgboost/histogram-based/requirements.txt | 8 +- .../advanced/xgboost/prepare_job_config.sh | 4 +- .../app/config/config_fed_client.json | 3 +- .../app/config/config_fed_server.json | 5 -- .../app/config/config_fed_client.json | 2 +- .../app/config/config_fed_server.json | 6 -- .../xgboost/tree-based/requirements.txt | 8 +- .../xgboost/utils/prepare_job_config.py | 46 ++++++----- .../vertical_xgb/config_fed_client.conf | 6 +- nvflare/app_opt/xgboost/constant.py | 26 +++++++ nvflare/app_opt/xgboost/data_loader.py | 6 +- .../xgboost/histogram_based/controller.py | 2 +- .../xgboost/histogram_based/executor.py | 11 +-- .../histogram_based_v2/adaptors/adaptor.py | 25 +++--- .../adaptors/grpc_client_adaptor.py | 33 +++++++- .../adaptors/grpc_server_adaptor.py | 34 +++++++- .../adaptors/xgb_adaptor.py | 76 ++++++++++-------- .../xgboost/histogram_based_v2/controller.py | 3 +- .../xgboost/histogram_based_v2/defs.py | 15 +--- .../histogram_based_v2/fed_controller.py | 4 +- .../histogram_based_v2/fed_executor.py | 8 +- .../xgboost/histogram_based_v2/grpc_client.py | 3 +- .../xgboost/histogram_based_v2/grpc_server.py | 5 +- .../mock/mock_controller.py | 2 + .../histogram_based_v2/secure_data_loader.py | 4 +- .../app_opt/xgboost/histogram_based_v2/tb.py | 36 --------- .../standalone_job/xgb_histogram_examples.yml | 40 ++++++++++ .../histrogram_based_v2/adaptors/__init__.py | 13 ++++ .../adaptors/adaptor_test.py | 38 +++++++++ .../adaptors/xgb_adaptor_test.py | 48 ++++++++++++ 40 files changed, 472 insertions(+), 188 deletions(-) create mode 100644 examples/advanced/xgboost/histogram-based/jobs/base_v2/app/custom/higgs_data_loader.py create mode 100644 examples/advanced/xgboost/histogram-based/jobs/base_v2/meta.json create mode 100644 nvflare/app_opt/xgboost/constant.py delete mode 100644 nvflare/app_opt/xgboost/histogram_based_v2/tb.py create mode 100644 tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/__init__.py create mode 100644 tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/adaptor_test.py create mode 100644 tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/xgb_adaptor_test.py diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 41450770c5..2ce1d0e0e6 100644 
--- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -9,7 +9,7 @@ version: 2 build: os: ubuntu-22.04 tools: - python: "3.8" + python: "3.10" # Build documentation in the docs/ directory with Sphinx sphinx: @@ -26,6 +26,6 @@ sphinx: python: install: - method: pip - path: .[doc] + path: .[dev] # system_packages: true diff --git a/build_doc.sh b/build_doc.sh index e91c1a5331..384b6d1fa6 100755 --- a/build_doc.sh +++ b/build_doc.sh @@ -49,7 +49,7 @@ function clean_docs() { } function build_html_docs() { - pip install -e .[doc] + pip install -e .[dev] sphinx-apidoc --module-first -f -o docs/apidocs/ nvflare "*poc" "*private" sphinx-build -b html docs docs/_build } diff --git a/examples/advanced/vertical_xgboost/README.md b/examples/advanced/vertical_xgboost/README.md index 88171d9ff9..6a6709e11f 100644 --- a/examples/advanced/vertical_xgboost/README.md +++ b/examples/advanced/vertical_xgboost/README.md @@ -89,7 +89,7 @@ The model will be saved to `test.model.json`. ## Results Model accuracy can be visualized in tensorboard: ``` -tensorboard --logdir /tmp/nvflare/vertical_xgb/simulate_job/tb_events +tensorboard --logdir /tmp/nvflare/vertical_xgb/server/simulate_job/tb_events ``` An example training (pink) and validation (orange) AUC graph from running vertical XGBoost on HIGGS: diff --git a/examples/advanced/vertical_xgboost/code/vertical_xgb/vertical_data_loader.py b/examples/advanced/vertical_xgboost/code/vertical_xgb/vertical_data_loader.py index bf0d23ce92..096d428d2d 100644 --- a/examples/advanced/vertical_xgboost/code/vertical_xgb/vertical_data_loader.py +++ b/examples/advanced/vertical_xgboost/code/vertical_xgb/vertical_data_loader.py @@ -62,7 +62,7 @@ def __init__(self, data_split_path, psi_path, id_col, label_owner, train_proport self.label_owner = label_owner self.train_proportion = train_proportion - def load_data(self, client_id: str): + def load_data(self, client_id: str, training_mode: str = ""): client_data_split_path = self.data_split_path.replace("site-x", client_id) client_psi_path = self.psi_path.replace("site-x", client_id) diff --git a/examples/advanced/vertical_xgboost/requirements.txt b/examples/advanced/vertical_xgboost/requirements.txt index a9a1d31eda..9d69e42f2c 100644 --- a/examples/advanced/vertical_xgboost/requirements.txt +++ b/examples/advanced/vertical_xgboost/requirements.txt @@ -1,6 +1,10 @@ -nvflare~=2.4.0rc +nvflare~=2.5.0rc openmined.psi==1.1.1 pandas -tensorboard torch -xgboost>=2.0.0 +tensorboard +# require xgboost 2.2 version, for now need to install a binary build +# "xgboost>=2.2" + +--extra-index-url https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/list.html?prefix=federated-secure/ +xgboost diff --git a/examples/advanced/xgboost/histogram-based/jobs/base/app/config/config_fed_server.json b/examples/advanced/xgboost/histogram-based/jobs/base/app/config/config_fed_server.json index eda52778a7..9814f32e2c 100755 --- a/examples/advanced/xgboost/histogram-based/jobs/base/app/config/config_fed_server.json +++ b/examples/advanced/xgboost/histogram-based/jobs/base/app/config/config_fed_server.json @@ -1,8 +1,5 @@ { "format_version": 2, - "server": { - "heart_beat_timeout": 600 - }, "task_data_filters": [], "task_result_filters": [], "components": [ diff --git a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_client.json b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_client.json index 1dd56f3b26..e436af170b 100755 --- 
a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_client.json +++ b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_client.json @@ -1,6 +1,5 @@ { "format_version": 2, - "num_rounds": 100, "executors": [ { "tasks": [ @@ -8,19 +7,10 @@ ], "executor": { "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.executor.FedXGBHistogramExecutor", + "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", "args": { "data_loader_id": "dataloader", - "metrics_writer_id": "metrics_writer", - "early_stopping_rounds": 2, - "xgb_params": { - "max_depth": 8, - "eta": 0.1, - "objective": "binary:logistic", - "eval_metric": "auc", - "tree_method": "hist", - "nthread": 16 - } + "metrics_writer_id": "metrics_writer" } } } diff --git a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_server.json b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_server.json index 6ed5edf3ac..5ce8b11ddc 100755 --- a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_server.json +++ b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_server.json @@ -15,9 +15,21 @@ "workflows": [ { "id": "xgb_controller", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.controller.XGBFedController", + "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_controller.XGBFedController", "args": { - "num_rounds": "{num_rounds}" + "num_rounds": "{num_rounds}", + "training_mode": "horizontal", + "xgb_params": { + "max_depth": 8, + "eta": 0.1, + "objective": "binary:logistic", + "eval_metric": "auc", + "tree_method": "hist", + "nthread": 16 + }, + "xgb_options": { + "early_stopping_rounds": 2 + } } } ] diff --git a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/custom/higgs_data_loader.py b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/custom/higgs_data_loader.py new file mode 100644 index 0000000000..d97f459600 --- /dev/null +++ b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/custom/higgs_data_loader.py @@ -0,0 +1,77 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import pandas as pd +import xgboost as xgb + +from nvflare.app_opt.xgboost.data_loader import XGBDataLoader + + +def _read_higgs_with_pandas(data_path, start: int, end: int): + data_size = end - start + data = pd.read_csv(data_path, header=None, skiprows=start, nrows=data_size) + data_num = data.shape[0] + + # split to feature and label + x = data.iloc[:, 1:].copy() + y = data.iloc[:, 0].copy() + + return x, y, data_num + + +class HIGGSDataLoader(XGBDataLoader): + def __init__(self, data_split_filename): + """Reads HIGGS dataset and return XGB data matrix. 
+ + Args: + data_split_filename: file name to data splits + """ + self.data_split_filename = data_split_filename + + def load_data(self, client_id: str, training_mode: str = ""): + with open(self.data_split_filename, "r") as file: + data_split = json.load(file) + + data_path = data_split["data_path"] + data_index = data_split["data_index"] + + # check if site_id and "valid" in the mapping dict + if client_id not in data_index.keys(): + raise ValueError( + f"Data does not contain Client {client_id} split", + ) + + if "valid" not in data_index.keys(): + raise ValueError( + "Data does not contain Validation split", + ) + + site_index = data_index[client_id] + valid_index = data_index["valid"] + + # training + x_train, y_train, total_train_data_num = _read_higgs_with_pandas( + data_path=data_path, start=site_index["start"], end=site_index["end"] + ) + dmat_train = xgb.DMatrix(x_train, label=y_train) + + # validation + x_valid, y_valid, total_valid_data_num = _read_higgs_with_pandas( + data_path=data_path, start=valid_index["start"], end=valid_index["end"] + ) + dmat_valid = xgb.DMatrix(x_valid, label=y_valid) + + return dmat_train, dmat_valid diff --git a/examples/advanced/xgboost/histogram-based/jobs/base_v2/meta.json b/examples/advanced/xgboost/histogram-based/jobs/base_v2/meta.json new file mode 100644 index 0000000000..6d82211a16 --- /dev/null +++ b/examples/advanced/xgboost/histogram-based/jobs/base_v2/meta.json @@ -0,0 +1,10 @@ +{ + "name": "xgboost_histogram_based_v2", + "resource_spec": {}, + "deploy_map": { + "app": [ + "@ALL" + ] + }, + "min_clients": 2 +} diff --git a/examples/advanced/xgboost/histogram-based/requirements.txt b/examples/advanced/xgboost/histogram-based/requirements.txt index 8311f62b9f..b63f038537 100644 --- a/examples/advanced/xgboost/histogram-based/requirements.txt +++ b/examples/advanced/xgboost/histogram-based/requirements.txt @@ -1,6 +1,10 @@ -nvflare~=2.4.0rc +nvflare~=2.5.0rc pandas -xgboost>=2.0.0 scikit-learn torch tensorboard +# require xgboost 2.2 version, for now need to install a binary build +# "xgboost>=2.2" + +--extra-index-url https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/list.html?prefix=federated-secure/ +xgboost diff --git a/examples/advanced/xgboost/prepare_job_config.sh b/examples/advanced/xgboost/prepare_job_config.sh index 40096dcf48..f839b46242 100755 --- a/examples/advanced/xgboost/prepare_job_config.sh +++ b/examples/advanced/xgboost/prepare_job_config.sh @@ -2,7 +2,7 @@ TREE_METHOD="hist" prepare_job_config() { - python3 utils/prepare_job_config.py --site_num "$1" --training_mode "$2" --split_method "$3" \ + python3 utils/prepare_job_config.py --site_num "$1" --training_algo "$2" --split_method "$3" \ --lr_mode "$4" --nthread 16 --tree_method "$5" } @@ -21,4 +21,6 @@ prepare_job_config 20 cyclic uniform uniform $TREE_METHOD prepare_job_config 2 histogram uniform uniform $TREE_METHOD prepare_job_config 5 histogram uniform uniform $TREE_METHOD +prepare_job_config 2 histogram_v2 uniform uniform $TREE_METHOD +prepare_job_config 5 histogram_v2 uniform uniform $TREE_METHOD echo "Job configs generated" diff --git a/examples/advanced/xgboost/tree-based/jobs/bagging_base/app/config/config_fed_client.json b/examples/advanced/xgboost/tree-based/jobs/bagging_base/app/config/config_fed_client.json index fb31f7e059..ef0f19875b 100755 --- a/examples/advanced/xgboost/tree-based/jobs/bagging_base/app/config/config_fed_client.json +++ b/examples/advanced/xgboost/tree-based/jobs/bagging_base/app/config/config_fed_client.json @@ -8,14 +8,13 @@ 
], "executor": { "id": "Executor", - "name": "FedXGBTreeExecutor", + "path": "nvflare.app_opt.xgboost.tree_based.executor.FedXGBTreeExecutor", "args": { "data_loader_id": "dataloader", "training_mode": "bagging", "num_client_bagging": 5, "num_local_parallel_tree": 1, "local_subsample": 1, - "lr_mode": "scaled", "local_model_path": "model.json", "global_model_path": "model_global.json", "learning_rate": 0.1, diff --git a/examples/advanced/xgboost/tree-based/jobs/bagging_base/app/config/config_fed_server.json b/examples/advanced/xgboost/tree-based/jobs/bagging_base/app/config/config_fed_server.json index f35526c721..cc364d7f59 100755 --- a/examples/advanced/xgboost/tree-based/jobs/bagging_base/app/config/config_fed_server.json +++ b/examples/advanced/xgboost/tree-based/jobs/bagging_base/app/config/config_fed_server.json @@ -2,11 +2,6 @@ "format_version": 2, "num_rounds": 101, - "server": { - "heart_beat_timeout": 600, - "task_request_interval": 0.05 - }, - "task_data_filters": [], "task_result_filters": [], diff --git a/examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/config/config_fed_client.json b/examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/config/config_fed_client.json index 6b25f996bb..d63a3ea551 100755 --- a/examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/config/config_fed_client.json +++ b/examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/config/config_fed_client.json @@ -8,7 +8,7 @@ ], "executor": { "id": "Executor", - "name": "FedXGBTreeExecutor", + "path": "nvflare.app_opt.xgboost.tree_based.executor.FedXGBTreeExecutor", "args": { "data_loader_id": "dataloader", "training_mode": "cyclic", diff --git a/examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/config/config_fed_server.json b/examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/config/config_fed_server.json index 8681b1cb2d..93a8e3cf4b 100755 --- a/examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/config/config_fed_server.json +++ b/examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/config/config_fed_server.json @@ -1,12 +1,6 @@ { "format_version": 2, "num_rounds": 20, - - "server": { - "heart_beat_timeout": 600, - "task_request_interval": 0.05 - }, - "task_data_filters": [], "task_result_filters": [], diff --git a/examples/advanced/xgboost/tree-based/requirements.txt b/examples/advanced/xgboost/tree-based/requirements.txt index 96c88f1ec4..b63f038537 100644 --- a/examples/advanced/xgboost/tree-based/requirements.txt +++ b/examples/advanced/xgboost/tree-based/requirements.txt @@ -1,6 +1,10 @@ -nvflare~=2.4.0rc +nvflare~=2.5.0rc pandas -xgboost scikit-learn torch tensorboard +# require xgboost 2.2 version, for now need to install a binary build +# "xgboost>=2.2" + +--extra-index-url https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/list.html?prefix=federated-secure/ +xgboost diff --git a/examples/advanced/xgboost/utils/prepare_job_config.py b/examples/advanced/xgboost/utils/prepare_job_config.py index 86487cba95..e38c88eec8 100644 --- a/examples/advanced/xgboost/utils/prepare_job_config.py +++ b/examples/advanced/xgboost/utils/prepare_job_config.py @@ -23,7 +23,13 @@ SCRIPT_PATH = pathlib.Path(os.path.realpath(__file__)) XGB_EXAMPLE_ROOT = SCRIPT_PATH.parent.parent.absolute() JOB_CONFIGS_ROOT = "jobs" -MODE_ALGO_MAP = {"bagging": "tree-based", "cyclic": "tree-based", "histogram": "histogram-based"} +ALGO_DIR_MAP = { + "bagging": "tree-based", + "cyclic": "tree-based", + "histogram": "histogram-based", + "histogram_v2": "histogram-based", +} 
+BASE_JOB_MAP = {"bagging": "bagging_base", "cyclic": "cyclic_base", "histogram": "base", "histogram_v2": "base_v2"} def job_config_args_parser(): @@ -38,7 +44,7 @@ def job_config_args_parser(): parser.add_argument("--site_name_prefix", type=str, default="site-", help="Site name prefix") parser.add_argument("--round_num", type=int, default=100, help="Total number of training rounds") parser.add_argument( - "--training_mode", type=str, default="bagging", choices=list(MODE_ALGO_MAP.keys()), help="Training mode" + "--training_algo", type=str, default="bagging", choices=list(ALGO_DIR_MAP.keys()), help="Training algorithm" ) parser.add_argument("--split_method", type=str, default="uniform", help="How to split the dataset") parser.add_argument("--lr_mode", type=str, default="uniform", help="Whether to use uniform or scaled shrinkage") @@ -46,6 +52,7 @@ def job_config_args_parser(): parser.add_argument( "--tree_method", type=str, default="hist", help="tree_method for xgboost - use hist for best perf" ) + parser.add_argument("--training_mode", type=str, default="horizontal", help="histogram_v2 training mode") return parser @@ -66,7 +73,7 @@ def _get_job_name(args) -> str: "higgs_" + str(args.site_num) + "_" - + args.training_mode + + args.training_algo + "_" + args.split_method + "_split" @@ -80,13 +87,8 @@ def _get_data_split_name(args, site_name: str) -> str: return os.path.join(args.data_root, f"{args.site_num}_{args.split_method}", f"data_{site_name}.json") -def _get_src_job_dir(training_mode): - base_job_map = { - "bagging": "bagging_base", - "cyclic": "cyclic_base", - "histogram": "base", - } - return XGB_EXAMPLE_ROOT / MODE_ALGO_MAP[training_mode] / JOB_CONFIGS_ROOT / base_job_map[training_mode] +def _get_src_job_dir(training_algo): + return XGB_EXAMPLE_ROOT / ALGO_DIR_MAP[training_algo] / JOB_CONFIGS_ROOT / BASE_JOB_MAP[training_algo] def _gen_deploy_map(num_sites: int, site_name_prefix: str) -> dict: @@ -122,31 +124,35 @@ def _get_lr_scale_from_split_json(data_split: dict): def _update_client_config(config: dict, args, lr_scale, site_name: str): data_split_name = _get_data_split_name(args, site_name) - if args.training_mode == "bagging" or args.training_mode == "cyclic": + if args.training_algo == "bagging" or args.training_algo == "cyclic": # update client config - config["components"][0]["args"]["data_split_filename"] = data_split_name config["executors"][0]["executor"]["args"]["lr_scale"] = lr_scale config["executors"][0]["executor"]["args"]["lr_mode"] = args.lr_mode config["executors"][0]["executor"]["args"]["nthread"] = args.nthread config["executors"][0]["executor"]["args"]["tree_method"] = args.tree_method - config["executors"][0]["executor"]["args"]["training_mode"] = args.training_mode + config["executors"][0]["executor"]["args"]["training_mode"] = args.training_algo num_client_bagging = 1 - if args.training_mode == "bagging": + if args.training_algo == "bagging": num_client_bagging = args.site_num config["executors"][0]["executor"]["args"]["num_client_bagging"] = num_client_bagging - else: + elif args.training_algo == "histogram": config["num_rounds"] = args.round_num - config["components"][0]["args"]["data_split_filename"] = data_split_name config["executors"][0]["executor"]["args"]["xgb_params"]["nthread"] = args.nthread config["executors"][0]["executor"]["args"]["xgb_params"]["tree_method"] = args.tree_method + config["components"][0]["args"]["data_split_filename"] = data_split_name def _update_server_config(config: dict, args): - if args.training_mode == "bagging": + if 
args.training_algo == "bagging": config["num_rounds"] = args.round_num + 1 config["workflows"][0]["args"]["min_clients"] = args.site_num - elif args.training_mode == "cyclic": + elif args.training_algo == "cyclic": config["num_rounds"] = int(args.round_num / args.site_num) + elif args.training_algo == "histogram_v2": + config["num_rounds"] = args.round_num + config["workflows"][0]["args"]["xgb_params"]["nthread"] = args.nthread + config["workflows"][0]["args"]["xgb_params"]["tree_method"] = args.tree_method + config["workflows"][0]["args"]["training_mode"] = args.training_mode def _copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name): @@ -198,10 +204,10 @@ def main(): parser = job_config_args_parser() args = parser.parse_args() job_name = _get_job_name(args) - src_job_path = _get_src_job_dir(args.training_mode) + src_job_path = _get_src_job_dir(args.training_algo) # create a new job - dst_job_path = XGB_EXAMPLE_ROOT / MODE_ALGO_MAP[args.training_mode] / JOB_CONFIGS_ROOT / job_name + dst_job_path = XGB_EXAMPLE_ROOT / ALGO_DIR_MAP[args.training_algo] / JOB_CONFIGS_ROOT / job_name if not os.path.exists(dst_job_path): os.makedirs(dst_job_path) diff --git a/job_templates/vertical_xgb/config_fed_client.conf b/job_templates/vertical_xgb/config_fed_client.conf index 72dff673b9..a4aff7ad94 100644 --- a/job_templates/vertical_xgb/config_fed_client.conf +++ b/job_templates/vertical_xgb/config_fed_client.conf @@ -37,10 +37,10 @@ components = [ id = "dataloader" path = "vertical_data_loader.VerticalDataLoader" args { - # path to the data split for site (for the example we replace site-x with client_id) - data_split_path = "/tmp/nvflare/vertical_xgb_data/site-x/higgs.data.csv" + # path to the data split for site + data_split_path = "/tmp/nvflare/vertical_xgb_data/{SITE_NAME}/higgs.data.csv" # written by FilePSIWriter - psi_path = "/tmp/nvflare/vertical_xgb_psi/simulate_job/site-x/psi/intersection.txt" + psi_path = "/tmp/nvflare/vertical_xgb_psi/{SITE_NAME}/simulate_job/{SITE_NAME}/psi/intersection.txt" # column that intersection is calculated with id_col = "uid" # site that contains the label column diff --git a/nvflare/app_opt/xgboost/constant.py b/nvflare/app_opt/xgboost/constant.py new file mode 100644 index 0000000000..826e311418 --- /dev/null +++ b/nvflare/app_opt/xgboost/constant.py @@ -0,0 +1,26 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
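The `TrainingMode` constants introduced just below, together with the new `training_mode` argument on `XGBDataLoader.load_data()`, let a data loader decide how to build its `DMatrix` for horizontal versus vertical jobs. The following is a minimal sketch of such a loader; the class name and data paths are hypothetical, and mapping vertical modes to a column-wise split is an assumption about typical usage rather than part of this change.

```python
import xgboost as xgb

from nvflare.app_opt.xgboost.constant import TrainingMode
from nvflare.app_opt.xgboost.data_loader import XGBDataLoader


class CSVDataLoader(XGBDataLoader):  # hypothetical loader, not part of this patch
    def __init__(self, data_root: str):
        self.data_root = data_root

    def load_data(self, client_id: str, training_mode: str = TrainingMode.HORIZONTAL):
        vertical = training_mode in (
            TrainingMode.V,
            TrainingMode.VERTICAL,
            TrainingMode.VS,
            TrainingMode.VERTICAL_SECURE,
        )
        # data_split_mode=1 requests a column-wise (vertical) split, 0 a row-wise split
        split_mode = 1 if vertical else 0
        # this sketch assumes the label column lives at this site; in a vertical job only
        # the label-owner site would append "&label_column=0"
        query = "?format=csv&label_column=0"
        dtrain = xgb.DMatrix(f"{self.data_root}/{client_id}/train.csv{query}", data_split_mode=split_mode)
        dvalid = xgb.DMatrix(f"{self.data_root}/{client_id}/valid.csv{query}", data_split_mode=split_mode)
        return dtrain, dvalid
```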
+ + +class TrainingMode: + # Non-secure mode + H = "h" + HORIZONTAL = "horizontal" + V = "v" + VERTICAL = "vertical" + # Secure mode + HS = "hs" + HORIZONTAL_SECURE = "horizontal_secure" + VS = "VS" + VERTICAL_SECURE = "vertical_secure" diff --git a/nvflare/app_opt/xgboost/data_loader.py b/nvflare/app_opt/xgboost/data_loader.py index d9a56552bf..d59a36c4de 100644 --- a/nvflare/app_opt/xgboost/data_loader.py +++ b/nvflare/app_opt/xgboost/data_loader.py @@ -18,10 +18,14 @@ import xgboost as xgb +from .constant import TrainingMode + class XGBDataLoader(ABC): @abstractmethod - def load_data(self, client_id: str) -> Tuple[xgb.DMatrix, xgb.DMatrix]: + def load_data( + self, client_id: str, training_mode: str = TrainingMode.HORIZONTAL + ) -> Tuple[xgb.DMatrix, xgb.DMatrix]: """Loads data for xgboost. Returns: diff --git a/nvflare/app_opt/xgboost/histogram_based/controller.py b/nvflare/app_opt/xgboost/histogram_based/controller.py index 2215f0e059..9ebf8680ae 100644 --- a/nvflare/app_opt/xgboost/histogram_based/controller.py +++ b/nvflare/app_opt/xgboost/histogram_based/controller.py @@ -113,7 +113,7 @@ def start_controller(self, fl_ctx: FLContext): ) else: self._xgb_fl_server = multiprocessing.Process( - target=xgb_federated.run_federated_server, args=(self._port, len(clients)) + target=xgb_federated.run_federated_server, args=(len(clients), self._port) ) self._xgb_fl_server.start() self._started = True diff --git a/nvflare/app_opt/xgboost/histogram_based/executor.py b/nvflare/app_opt/xgboost/histogram_based/executor.py index d7056f1e1f..f48b1775ca 100644 --- a/nvflare/app_opt/xgboost/histogram_based/executor.py +++ b/nvflare/app_opt/xgboost/histogram_based/executor.py @@ -43,7 +43,7 @@ def __init__( xgb_params: The Booster parameters. This dict is passed to `xgboost.train()` as the argument `params`. It contains all the Booster parameters. 
Please refer to XGBoost documentation for details: - https://xgboost.readthedocs.io/en/stable/python/python_api.html#module-xgboost.training + https://xgboost.readthedocs.io/en/stable/parameter.html """ self.num_rounds = num_rounds self.early_stopping_rounds = early_stopping_rounds @@ -243,9 +243,10 @@ def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) - self.world_size = world_size if self.use_gpus: - # mapping each rank to a GPU (can set to cuda:0 if simulating with only one gpu) - self.log_info(fl_ctx, f"Training with GPU {self.rank}") - self.xgb_params["device"] = f"cuda:{self.rank}" + # mapping each rank to the first GPU if not set + device = self.xgb_params.get("device", "cuda:0") + self.log_info(fl_ctx, f"Training with GPU {device}") + self.xgb_params["device"] = device self.log_info(fl_ctx, f"Using xgb params: {self.xgb_params}") params = XGBoostParams( @@ -259,7 +260,7 @@ def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) - self.log_info(fl_ctx, f"server address is {self._server_address}") communicator_env = { - "xgboost_communicator": "federated", + "dmlc_communicator": "federated", "federated_server_address": f"{self._server_address}:{xgb_fl_server_port}", "federated_world_size": self.world_size, "federated_rank": self.rank, diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/adaptor.py index 024fa0ea7b..4550e2e51e 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/adaptor.py @@ -18,6 +18,7 @@ import threading import time from abc import ABC, abstractmethod +from typing import Tuple from xgboost.core import XGBoostError @@ -84,19 +85,15 @@ def start(self, ctx: dict): class AppAdaptor(ABC, FLComponent): - """ - AppAdaptors are used to integrate FLARE with App Target (Server or Client) in run time. - - For example, an XGB server could be run as a gRPC server process, or be run as part of the FLARE's FL server - process. Similarly, an XGB client could be run as a gRPC client process, or be run as part of the - FLARE's FL client process. - - Each type of XGB Target requires an appropriate adaptor to integrate it with FLARE's XGB Controller or Executor. - - The XGBAdaptor class defines commonly required methods for all adaptor implementations. - """ + """AppAdaptors are used to integrate FLARE with App Target (Server or Client) in run time.""" def __init__(self, app_name: str, in_process: bool): + """Constructor of AppAdaptor. + + Args: + app_name (str): The name of the application. + in_process (bool): Whether to call the `AppRunner.run()` in the same process or not. + """ FLComponent.__init__(self) self.abort_signal = None self.app_runner = None @@ -111,7 +108,7 @@ def set_runner(self, runner: AppRunner): separate process). Args: - runner: the runner to be set + runner (AppRunner): the runner to be set Returns: None @@ -187,7 +184,7 @@ def configure(self, config: dict, fl_ctx: FLContext): pass @abstractmethod - def _is_stopped(self) -> (bool, int): + def _is_stopped(self) -> Tuple[bool, int]: """Called by the adaptor's monitor to know whether the target is stopped. Note that this method is not called by XGB Controller/Executor. 
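For illustration, a concrete adaptor that launches its target as a subprocess could satisfy this contract roughly as follows (a sketch only; `self._process` is a hypothetical attribute, not part of AppAdaptor):

    from typing import Tuple

    def _is_stopped(self) -> Tuple[bool, int]:
        # report (stopped, exit_code) to the adaptor's monitor
        rc = self._process.poll()  # None while the target process is still running
        if rc is None:
            return False, 0
        return True, rc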
@@ -277,7 +274,7 @@ def stop_runner(self): if p: p.kill() - def is_runner_stopped(self) -> (bool, int): + def is_runner_stopped(self) -> Tuple[bool, int]: if self.in_process: if self.starter: if self.starter.stopped: diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py index 393c0c0966..acf7850da2 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py @@ -11,8 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import threading import time +from typing import Tuple import grpc @@ -30,7 +32,34 @@ class GrpcClientAdaptor(XGBClientAdaptor, FederatedServicer): + """Implementation of XGBClientAdaptor that uses an internal `GrpcServer`. + + The `GrpcClientAdaptor` class serves as an interface between the XGBoost + federated client and federated server components. + It employs its `XGBRunner` to initiate an XGBoost federated gRPC client + and utilizes an internal `GrpcServer` to forward client requests/responses. + + The communication flow is as follows: + 1. XGBoost federated gRPC client talks to `GrpcClientAdaptor`, which + encapsulates a `GrpcServer`. + Requests are then forwarded to `GrpcServerAdaptor`, which internally + manages a `GrpcClient` responsible for interacting with the XGBoost + federated gRPC server. + 2. XGBoost federated gRPC server talks to `GrpcServerAdaptor`, which + encapsulates a `GrpcClient`. + Responses are then forwarded to `GrpcClientAdaptor`, which internally + manages a `GrpcServer` responsible for interacting with the XGBoost + federated gRPC client. + """ + def __init__(self, int_server_grpc_options=None, in_process=True, per_msg_timeout=10.0, tx_timeout=100.0): + """Constructor method to initialize the object. + + Args: + int_server_grpc_options: An optional list of key-value pairs (`channel_arguments` + in gRPC Core runtime) to configure the gRPC channel of internal `GrpcServer`. + in_process (bool): Specifies whether to start the `XGBRunner` in the same process or not. 
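A note on the expected shape of the gRPC options: gRPC Core channel_arguments are (name, value) tuples, so a caller would pass something like the following (values shown purely as an illustration, not defaults):

    int_server_grpc_options = [
        ("grpc.max_send_message_length", 1073741824),
        ("grpc.max_receive_message_length", 1073741824),
    ]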
+ """ XGBClientAdaptor.__init__(self, in_process, per_msg_timeout, tx_timeout) self.int_server_grpc_options = int_server_grpc_options self.in_process = in_process @@ -80,7 +109,7 @@ def _stop_client(self): self._training_stopped = True self.stop_runner() - def _is_stopped(self) -> (bool, int): + def _is_stopped(self) -> Tuple[bool, int]: runner_stopped, ec = self.is_runner_stopped() if runner_stopped: return runner_stopped, ec @@ -100,7 +129,7 @@ def start(self, fl_ctx: FLContext): raise RuntimeError("failed to get a port for XGB server") self.internal_server_addr = f"127.0.0.1:{port}" self.log_info(fl_ctx, f"Start internal server at {self.internal_server_addr}") - self.internal_xgb_server = GrpcServer(self.internal_server_addr, 10, self.int_server_grpc_options, self) + self.internal_xgb_server = GrpcServer(self.internal_server_addr, 10, self, self.int_server_grpc_options) self.internal_xgb_server.start(no_blocking=True) self.log_info(fl_ctx, f"Started internal server at {self.internal_server_addr}") self._start_client(self.internal_server_addr, fl_ctx) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_server_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_server_adaptor.py index 389893f986..2fb4b6229e 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_server_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_server_adaptor.py @@ -11,6 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from typing import Tuple + import nvflare.app_opt.xgboost.histogram_based_v2.proto.federated_pb2 as pb2 from nvflare.apis.fl_context import FLContext from nvflare.app_opt.xgboost.histogram_based_v2.adaptors.xgb_adaptor import XGBServerAdaptor @@ -20,12 +23,41 @@ class GrpcServerAdaptor(XGBServerAdaptor): + """Implementation of XGBServerAdaptor that uses an internal `GrpcClient`. + + The `GrpcServerAdaptor` class serves as an interface between the XGBoost + federated client and federated server components. + It employs its `XGBRunner` to initiate an XGBoost federated gRPC server + and utilizes an internal `GrpcClient` to forward client requests/responses. + + The communication flow is as follows: + 1. XGBoost federated gRPC client talks to `GrpcClientAdaptor`, which + encapsulates a `GrpcServer`. + Requests are then forwarded to `GrpcServerAdaptor`, which internally + manages a `GrpcClient` responsible for interacting with the XGBoost + federated gRPC server. + 2. XGBoost federated gRPC server talks to `GrpcServerAdaptor`, which + encapsulates a `GrpcClient`. + Responses are then forwarded to `GrpcClientAdaptor`, which internally + manages a `GrpcServer` responsible for interacting with the XGBoost + federated gRPC client. + """ + def __init__( self, int_client_grpc_options=None, xgb_server_ready_timeout=Constant.XGB_SERVER_READY_TIMEOUT, in_process=True, ): + """Constructor method to initialize the object. + + Args: + int_client_grpc_options: An optional list of key-value pairs (`channel_arguments` + in gRPC Core runtime) to configure the gRPC channel of internal `GrpcClient`. + in_process (bool): Specifies whether to call the `AppRunner.run()` in the same process or not. + xgb_server_ready_timeout (float): Duration for which the internal `GrpcClient` + should wait for the XGBoost gRPC server before timing out. 
+ """ XGBServerAdaptor.__init__(self, in_process) self.int_client_grpc_options = int_client_grpc_options self.xgb_server_ready_timeout = xgb_server_ready_timeout @@ -47,7 +79,7 @@ def _stop_server(self): self._server_stopped = True self.stop_runner() - def _is_stopped(self) -> (bool, int): + def _is_stopped(self) -> Tuple[bool, int]: runner_stopped, ec = self.is_runner_stopped() if runner_stopped: return runner_stopped, ec diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py index 918b9df45d..3cada9ae89 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py @@ -11,7 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from abc import abstractmethod + +from abc import ABC, abstractmethod +from typing import Tuple from nvflare.apis.fl_constant import ReturnCode from nvflare.apis.fl_context import FLContext @@ -27,11 +29,17 @@ class XGBServerAdaptor(AppAdaptor): - """ - XGBServerAdaptor specifies commonly required methods for server adaptor implementations. + """XGBServerAdaptor specifies commonly required methods for server adaptor implementations. + + For example, an XGB server could be run as a gRPC server process, or be run as part of the FLARE's FL server + process. Similarly, an XGB client could be run as a gRPC client process, or be run as part of the + FLARE's FL client process. + + Each type of XGB Target requires an appropriate adaptor to integrate it with FLARE's XGB Controller or Executor. + """ - def __init__(self, in_process): + def __init__(self, in_process: bool): AppAdaptor.__init__(self, XGB_APP_NAME, in_process) self.world_size = None @@ -126,13 +134,17 @@ def broadcast(self, rank: int, seq: int, root: int, send_buf: bytes, fl_ctx: FLC pass -class XGBClientAdaptor(AppAdaptor): - """ - XGBClientAdaptor specifies commonly required methods for client adaptor implementations. - """ +class XGBClientAdaptor(AppAdaptor, ABC): + """XGBClientAdaptor specifies commonly required methods for client adaptor implementations.""" - def __init__(self, in_process, per_msg_timeout: float, tx_timeout: float): - """Constructor of XGBClientAdaptor""" + def __init__(self, in_process: bool, per_msg_timeout: float, tx_timeout: float): + """Constructor of XGBClientAdaptor. + + Args: + in_process (bool): + per_msg_timeout (float): Number of seconds to wait for each message before timing out. + tx_timeout (float): Timeout for the entire transaction. 
+ """ AppAdaptor.__init__(self, XGB_APP_NAME, in_process) self.engine = None self.stopped = False @@ -145,14 +157,21 @@ def __init__(self, in_process, per_msg_timeout: float, tx_timeout: float): self.per_msg_timeout = per_msg_timeout self.tx_timeout = tx_timeout - def start(self, fl_ctx: FLContext): - pass + def _check_rank(self, ranks: dict, site_name: str): + if ranks is None or not isinstance(ranks, dict): + raise RuntimeError(f"{Constant.CONF_KEY_CLIENT_RANKS} is not configured.") - def stop(self, fl_ctx: FLContext): - pass + ws = len(ranks) + if ws == 0: + raise RuntimeError(f"{Constant.CONF_KEY_CLIENT_RANKS} length is 0.") + self.world_size = ws - def _is_stopped(self) -> (bool, int): - pass + rank = ranks.get(site_name, None) + if rank is None: + raise RuntimeError(f"rank is not configured ({site_name})") + + check_non_negative_int(f"{Constant.CONF_KEY_CLIENT_RANKS}[{site_name}]", rank) + self.rank = rank def configure(self, config: dict, fl_ctx: FLContext): """Called by XGB Executor to configure the target. @@ -166,18 +185,9 @@ def configure(self, config: dict, fl_ctx: FLContext): Returns: None """ - ranks = config.get(Constant.CONF_KEY_CLIENT_RANKS) - ws = len(ranks) - if not ws: - raise RuntimeError("world_size is not configured") - self.world_size = ws - - me = fl_ctx.get_identity_name() - rank = ranks.get(me) - if rank is None: - raise RuntimeError("rank is not configured") - check_non_negative_int(Constant.CONF_KEY_RANK, rank) - self.rank = rank + ranks = config.get(Constant.CONF_KEY_CLIENT_RANKS, None) + site_name = fl_ctx.get_identity_name() + self._check_rank(ranks, site_name) num_rounds = config.get(Constant.CONF_KEY_NUM_ROUNDS) if num_rounds is None or num_rounds <= 0: @@ -197,7 +207,7 @@ def configure(self, config: dict, fl_ctx: FLContext): self.xgb_options = config.get(Constant.CONF_KEY_XGB_OPTIONS, {}) - def _send_request(self, op: str, req: Shareable) -> (bytes, Shareable): + def _send_request(self, op: str, req: Shareable) -> Tuple[bytes, Shareable]: """Send XGB operation request to the FL server via FLARE message. Args: @@ -234,7 +244,7 @@ def _send_request(self, op: str, req: Shareable) -> (bytes, Shareable): else: raise RuntimeError(f"invalid reply for op {op}: expect Shareable but got {type(reply)}") - def _send_all_gather(self, rank: int, seq: int, send_buf: bytes) -> (bytes, Shareable): + def _send_all_gather(self, rank: int, seq: int, send_buf: bytes) -> Tuple[bytes, Shareable]: """This method is called by a concrete client adaptor to send Allgather operation to the server. Args: @@ -251,7 +261,7 @@ def _send_all_gather(self, rank: int, seq: int, send_buf: bytes) -> (bytes, Shar req[Constant.PARAM_KEY_SEND_BUF] = send_buf return self._send_request(Constant.OP_ALL_GATHER, req) - def _send_all_gather_v(self, rank: int, seq: int, send_buf: bytes, headers=None) -> (bytes, Shareable): + def _send_all_gather_v(self, rank: int, seq: int, send_buf: bytes, headers=None) -> Tuple[bytes, Shareable]: req = Shareable() self._add_headers(req, headers) req[Constant.PARAM_KEY_RANK] = rank @@ -259,7 +269,7 @@ def _send_all_gather_v(self, rank: int, seq: int, send_buf: bytes, headers=None) req[Constant.PARAM_KEY_SEND_BUF] = send_buf return self._send_request(Constant.OP_ALL_GATHER_V, req) - def _do_all_gather_v(self, rank: int, seq: int, send_buf: bytes) -> (bytes, Shareable): + def _do_all_gather_v(self, rank: int, seq: int, send_buf: bytes) -> Tuple[bytes, Shareable]: """This method is called by a concrete client adaptor to send AllgatherV operation to the server. 
Args: @@ -312,7 +322,7 @@ def _send_all_reduce( req[Constant.PARAM_KEY_SEND_BUF] = send_buf return self._send_request(Constant.OP_ALL_REDUCE, req) - def _send_broadcast(self, rank: int, seq: int, root: int, send_buf: bytes, headers=None) -> (bytes, Shareable): + def _send_broadcast(self, rank: int, seq: int, root: int, send_buf: bytes, headers=None) -> Tuple[bytes, Shareable]: req = Shareable() self._add_headers(req, headers) req[Constant.PARAM_KEY_RANK] = rank diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py index f2ab7e6f95..6303148335 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py @@ -13,6 +13,7 @@ # limitations under the License. import threading import time +from typing import Optional from nvflare.apis.client import Client from nvflare.apis.controller_spec import ClientTask, Task @@ -60,7 +61,7 @@ def __init__( num_rounds: int, training_mode: str, xgb_params: dict, - xgb_options: dict, + xgb_options: Optional[dict] = None, configure_task_name=Constant.CONFIG_TASK_NAME, configure_task_timeout=Constant.CONFIG_TASK_TIMEOUT, start_task_name=Constant.START_TASK_NAME, diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py index 469f689392..3b71d59ffb 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from nvflare.app_opt.xgboost.constant import TrainingMode from nvflare.fuel.f3.drivers.net_utils import MAX_FRAME_SIZE @@ -23,7 +24,6 @@ class Constant: # keys of adaptor config parameters CONF_KEY_CLIENT_RANKS = "client_ranks" - CONF_KEY_RANK = "rank" CONF_KEY_WORLD_SIZE = "world_size" CONF_KEY_NUM_ROUNDS = "num_rounds" CONF_KEY_TRAINING_MODE = "training_mode" @@ -129,19 +129,6 @@ class SplitMode: COL = 1 -class TrainingMode: - # Non-secure mode - H = "h" - HORIZONTAL = "horizontal" - V = "v" - VERTICAL = "vertical" - # Secure mode - HS = "hs" - HORIZONTAL_SECURE = "horizontal_secure" - VS = "VS" - VERTICAL_SECURE = "vertical_secure" - - # Mapping of text training mode to split mode TRAINING_MODE_MAPPING = { TrainingMode.H: SplitMode.ROW, diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py index a63bcb7061..b0610567d1 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py @@ -38,7 +38,6 @@ def __init__( max_client_op_interval: float = Constant.MAX_CLIENT_OP_INTERVAL, progress_timeout: float = Constant.WORKFLOW_PROGRESS_TIMEOUT, client_ranks=None, - int_client_grpc_options=None, in_process=True, ): XGBController.__init__( @@ -57,7 +56,8 @@ def __init__( progress_timeout=progress_timeout, client_ranks=client_ranks, ) - self.int_client_grpc_options = int_client_grpc_options + # do not let user specify int_client_grpc_options in this version - always use default. 
+ self.int_client_grpc_options = None self.in_process = in_process def get_adaptor(self, fl_ctx: FLContext): diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py b/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py index 8d6aa5dc34..d4c4f72279 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py @@ -25,9 +25,6 @@ class FedXGBHistogramExecutor(XGBExecutor): def __init__( self, data_loader_id: str, - verbose_eval=False, - use_gpus=False, - int_server_grpc_options=None, per_msg_timeout=60.0, tx_timeout=600.0, model_file_name="model.json", @@ -41,9 +38,8 @@ def __init__( tx_timeout=tx_timeout, ) self.data_loader_id = data_loader_id - self.verbose_eval = verbose_eval - self.use_gpus = use_gpus - self.int_server_grpc_options = int_server_grpc_options + # do not let use specify int_server_grpc_options in this version - always use default + self.int_server_grpc_options = None self.model_file_name = model_file_name self.metrics_writer_id = metrics_writer_id self.in_process = in_process diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/grpc_client.py b/nvflare/app_opt/xgboost/histogram_based_v2/grpc_client.py index e733e14db9..ee81e278a4 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/grpc_client.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/grpc_client.py @@ -28,7 +28,8 @@ def __init__(self, server_addr, grpc_options=None): Args: server_addr: address of the gRPC server to connect to - grpc_options: gRPC options for the gRPC client + grpc_options: An optional list of key-value pairs (`channel_arguments` + in gRPC Core runtime) to configure the gRPC channel. """ if not grpc_options: grpc_options = GRPC_DEFAULT_OPTIONS diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/grpc_server.py b/nvflare/app_opt/xgboost/histogram_based_v2/grpc_server.py index 13ba21dc7c..5728439a33 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/grpc_server.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/grpc_server.py @@ -29,14 +29,15 @@ class GrpcServer: """This class implements a gRPC XGB Server that is capable of processing XGB operations.""" - def __init__(self, addr, max_workers: int, grpc_options, servicer): + def __init__(self, addr, max_workers: int, servicer, grpc_options=None): """Constructor Args: addr: the listening address of the server max_workers: max number of workers - grpc_options: gRPC options servicer: the servicer that is capable of processing XGB requests + grpc_options: An optional list of key-value pairs (`channel_arguments` + in gRPC Core runtime) to configure the gRPC channel. 
""" if not grpc_options: grpc_options = GRPC_DEFAULT_OPTIONS diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/mock/mock_controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/mock/mock_controller.py index 5a16ccee3f..8e9e32a9eb 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/mock/mock_controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/mock/mock_controller.py @@ -37,6 +37,8 @@ def __init__( ): XGBController.__init__( self, + training_mode="horizontal", + xgb_params={"max_depth": 3}, adaptor_component_id="", num_rounds=num_rounds, configure_task_name=configure_task_name, diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py b/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py index 6540eb519c..3939bbd41e 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py @@ -31,8 +31,8 @@ def __init__(self, rank: int, folder: str): def load_data(self, client_id: str, training_mode: str): - train_path = f"{self.folder}/site-{self.rank + 1}/train.csv" - valid_path = f"{self.folder}/site-{self.rank + 1}/valid.csv" + train_path = f"{self.folder}/{client_id}/train.csv" + valid_path = f"{self.folder}/{client_id}/valid.csv" if training_mode not in TRAINING_MODE_MAPPING: raise ValueError(f"Invalid training_mode: {training_mode}") diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/tb.py b/nvflare/app_opt/xgboost/histogram_based_v2/tb.py deleted file mode 100644 index 0719d5b57d..0000000000 --- a/nvflare/app_opt/xgboost/histogram_based_v2/tb.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -import xgboost.callback - - -class TensorBoardCallback(xgboost.callback.TrainingCallback): - def __init__(self, app_dir: str, tensorboard): - xgboost.callback.TrainingCallback.__init__(self) - self.train_writer = tensorboard.SummaryWriter(log_dir=os.path.join(app_dir, "train-auc/")) - self.val_writer = tensorboard.SummaryWriter(log_dir=os.path.join(app_dir, "val-auc/")) - - def after_iteration(self, model, epoch: int, evals_log: xgboost.callback.TrainingCallback.EvalsLog): - if not evals_log: - return False - - for data, metric in evals_log.items(): - for metric_name, log in metric.items(): - score = log[-1][0] if isinstance(log[-1], tuple) else log[-1] - if data == "train": - self.train_writer.add_scalar(metric_name, score, epoch) - else: - self.val_writer.add_scalar(metric_name, score, epoch) - return False diff --git a/tests/integration_test/data/test_configs/standalone_job/xgb_histogram_examples.yml b/tests/integration_test/data/test_configs/standalone_job/xgb_histogram_examples.yml index 9d1706b419..d8d2898a20 100644 --- a/tests/integration_test/data/test_configs/standalone_job/xgb_histogram_examples.yml +++ b/tests/integration_test/data/test_configs/standalone_job/xgb_histogram_examples.yml @@ -53,3 +53,43 @@ tests: teardown: - rm -rf ../../examples/advanced/xgboost/histogram-based/jobs/higgs_2_histogram_uniform_split_uniform_lr - rm -rf ../../examples/advanced/xgboost/histogram-based/jobs/higgs_2_histogram_uniform_split_uniform_lr_copy + +- test_name: Test a simplified copy of job higgs_2_histogram_v2_uniform_split_uniform_lr + for xgboost histogram-based V2 example. + event_sequence: + - actions: + - submit_job higgs_2_histogram_v2_uniform_split_uniform_lr + result: + type: job_submit_success + trigger: + data: Server started + type: server_log + - actions: + - ensure_current_job_done + result: + data: + run_finished: true + type: run_state + trigger: + data: + run_finished: true + type: run_state + setup: + - cp ../../examples/advanced/xgboost/histogram-based/requirements.txt + ../../examples/advanced/xgboost/histogram-based/temp_requirements.txt + - sed -i '/nvflare\|jupyter\|notebook/d' ../../examples/advanced/xgboost/histogram-based/temp_requirements.txt + - pip install -r ../../examples/advanced/xgboost/histogram-based/temp_requirements.txt + - python3 ../../examples/advanced/xgboost/utils/prepare_job_config.py + --site_num 2 + --training_mode histogram_v2 + --split_method uniform + --lr_mode uniform + --nthread 16 + --tree_method hist + - python3 convert_to_test_job.py + --job ../../examples/advanced/xgboost/histogram-based/jobs/higgs_2_histogram_v2_uniform_split_uniform_lr + --post _copy + - rm -f ../../examples/advanced/xgboost/histogram-based/temp_requirements.txt + teardown: + - rm -rf ../../examples/advanced/xgboost/histogram-based/jobs/higgs_2_histogram_v2_uniform_split_uniform_lr + - rm -rf ../../examples/advanced/xgboost/histogram-based/jobs/higgs_2_histogram_v2_uniform_split_uniform_lr_copy \ No newline at end of file diff --git a/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/__init__.py b/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/__init__.py new file mode 100644 index 0000000000..d9155f923f --- /dev/null +++ b/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/adaptor_test.py b/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/adaptor_test.py new file mode 100644 index 0000000000..9bf7987f70 --- /dev/null +++ b/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/adaptor_test.py @@ -0,0 +1,38 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest.mock import patch + +from nvflare.apis.signal import Signal +from nvflare.app_opt.xgboost.histogram_based_v2.adaptors.adaptor import AppAdaptor +from nvflare.app_opt.xgboost.histogram_based_v2.runners.xgb_runner import AppRunner + + +@patch.multiple(AppAdaptor, __abstractmethods__=set()) +class TestAppAdaptor: + def test_set_abort_signal(self): + app_adaptor = AppAdaptor("_test", True) + abort_signal = Signal() + app_adaptor.set_abort_signal(abort_signal) + abort_signal.trigger("cool") + assert app_adaptor.abort_signal.triggered + + @patch.multiple(AppRunner, __abstractmethods__=set()) + def test_set_runner(self): + runner = AppRunner() + app_adaptor = AppAdaptor("_test", True) + + app_adaptor.set_runner(runner) + + assert app_adaptor.app_runner == runner diff --git a/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/xgb_adaptor_test.py b/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/xgb_adaptor_test.py new file mode 100644 index 0000000000..ec37a48b01 --- /dev/null +++ b/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/xgb_adaptor_test.py @@ -0,0 +1,48 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from unittest.mock import patch + +from nvflare.apis.fl_constant import ReservedKey +from nvflare.apis.fl_context import FLContext +from nvflare.app_opt.xgboost.histogram_based_v2.adaptors.xgb_adaptor import XGBClientAdaptor, XGBServerAdaptor +from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant + + +@patch.multiple(XGBServerAdaptor, __abstractmethods__=set()) +class TestXGBServerAdaptor: + def test_configure(self): + xgb_adaptor = XGBServerAdaptor(True) + config = {Constant.CONF_KEY_WORLD_SIZE: 66} + ctx = FLContext() + xgb_adaptor.configure(config, ctx) + assert xgb_adaptor.world_size == 66 + + +@patch.multiple(XGBClientAdaptor, __abstractmethods__=set()) +class TestXGBClientAdaptor: + def test_configure(self): + xgb_adaptor = XGBClientAdaptor(True, 1, 10) + config = { + Constant.CONF_KEY_CLIENT_RANKS: {"site-test": 1}, + Constant.CONF_KEY_NUM_ROUNDS: 100, + Constant.CONF_KEY_TRAINING_MODE: "horizontal", + Constant.CONF_KEY_XGB_PARAMS: {"depth": 1}, + } + ctx = FLContext() + ctx.set_prop(ReservedKey.IDENTITY_NAME, "site-test") + xgb_adaptor.configure(config, ctx) + assert xgb_adaptor.world_size == 1 + assert xgb_adaptor.rank == 1 + assert xgb_adaptor.num_rounds == 100 From 47e6fe44cd435e4561dc22be00dcf8db28347a1a Mon Sep 17 00:00:00 2001 From: Yan Cheng <58191769+yanchengnv@users.noreply.github.com> Date: Tue, 13 Aug 2024 18:58:12 -0400 Subject: [PATCH 10/26] add docstring and cmd_data check (#2782) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- nvflare/fuel/flare_api/flare_api.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/nvflare/fuel/flare_api/flare_api.py b/nvflare/fuel/flare_api/flare_api.py index f4fba062cb..fbe73cae0b 100644 --- a/nvflare/fuel/flare_api/flare_api.py +++ b/nvflare/fuel/flare_api/flare_api.py @@ -802,8 +802,27 @@ def _collect_info(self, cmd: str, job_id: str, target_type: str, targets=None) - reply = self._do_command(command, enforce_meta=False) return self._get_dict_data(reply) - def do_app_command(self, job_id: str, topic: str, cmd_data): + def do_app_command(self, job_id: str, topic: str, cmd_data) -> dict: + """Ask a running job to execute an app command + + Args: + job_id: the ID of the running job + topic: topic of the command + cmd_data: the data of the command. Must be JSON serializable. + + Returns: result of the app command + + If the job is not currently running, an exception will occur. User must make sure that the job is running when + calling this method. + + """ command = f"{AdminCommandNames.APP_COMMAND} {job_id} {topic}" + if cmd_data: + # cmd_data must be JSON serializable! 
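            #   for example, a plain dict of JSON types such as {"action": "report", "round": 3}
            #   (illustrative only); objects such as numpy arrays must be converted first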
+ try: + json.dumps(cmd_data) + except Exception as ex: + raise ValueError(f"cmd_data cannot be JSON serialized: {ex}") reply = self._do_command(command, enforce_meta=False, props=cmd_data) return self._get_dict_data(reply) From a3fb1e5e00807b585fb50b459790c27c91402b5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Tue, 13 Aug 2024 15:58:25 -0700 Subject: [PATCH 11/26] Add docstring to reliable message (#2788) --- nvflare/apis/utils/reliable_message.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/nvflare/apis/utils/reliable_message.py b/nvflare/apis/utils/reliable_message.py index bb5e01792c..4e3d40d570 100644 --- a/nvflare/apis/utils/reliable_message.py +++ b/nvflare/apis/utils/reliable_message.py @@ -467,20 +467,23 @@ def send_request( abort_signal: Signal, fl_ctx: FLContext, ) -> Shareable: - """Send a reliable request. + """Send a request reliably. Args: - target: the target cell of this request - topic: topic of the request; - request: the request to be sent - per_msg_timeout: timeout when sending a message - tx_timeout: the timeout of the whole transaction - abort_signal: abort signal - fl_ctx: the FL context + target: The target cell of this request. + topic: The topic of the request. + request: The request to be sent. + per_msg_timeout (float): Number of seconds to wait for each message before timing out. + tx_timeout (float): Timeout for the entire transaction. + abort_signal (Signal): Signal to abort the request. + fl_ctx (FLContext): Context for federated learning. - Returns: reply from the peer. + Returns: + The reply from the peer. - If tx_timeout is not specified or <= per_msg_timeout, the request will be sent only once without retrying. + Note: + If `tx_timeout` is not specified or is less than or equal to `per_msg_timeout`, + the request will be sent only once without retrying. 
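For illustration, a typical invocation (assuming the usual classmethod-style call on ReliableMessage; target, topic, and timeout values are placeholders):

    reply = ReliableMessage.send_request(
        target="server",
        topic="my_topic",
        request=request,            # a Shareable
        per_msg_timeout=10.0,       # seconds allowed per message
        tx_timeout=300.0,           # seconds allowed for the whole transaction
        abort_signal=abort_signal,
        fl_ctx=fl_ctx,
    )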
""" check_positive_number("per_msg_timeout", per_msg_timeout) From 2d731b94a316fa4aff04d383bd2d40ea45ef0742 Mon Sep 17 00:00:00 2001 From: Zhihong Zhang <100308595+nvidianz@users.noreply.github.com> Date: Tue, 13 Aug 2024 20:36:04 -0400 Subject: [PATCH 12/26] Pre-trained Model and training_mode changes (#2793) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Updated FOBS readme to add DatumManager, added agrpcs as secure scheme * Added support for pre-trained model * Changed training_mode to split_mode + secure_training * split_mode => data_split_mode * Format error * Fixed a format error * Addressed PR comments * Fixed format * Changed all xgboost controller/executor to use new XGBoost --------- Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- .../code/vertical_xgb/vertical_data_loader.py | 6 ++-- .../base_v2/app/custom/higgs_data_loader.py | 2 +- .../xgboost/utils/prepare_job_config.py | 3 +- nvflare/app_opt/xgboost/constant.py | 26 -------------- nvflare/app_opt/xgboost/data_loader.py | 6 +--- .../xgboost/histogram_based/controller.py | 3 +- .../xgboost/histogram_based/executor.py | 6 ++-- .../adaptors/grpc_client_adaptor.py | 3 +- .../adaptors/xgb_adaptor.py | 16 ++++++--- .../xgboost/histogram_based_v2/controller.py | 20 ++++++----- .../xgboost/histogram_based_v2/defs.py | 27 ++++---------- .../histogram_based_v2/fed_controller.py | 6 ++-- .../mock/mock_controller.py | 3 +- .../runners/xgb_client_runner.py | 35 ++++++++++++++----- .../histogram_based_v2/sec/client_handler.py | 9 ++--- .../histogram_based_v2/secure_data_loader.py | 15 +++----- .../adaptors/xgb_adaptor_test.py | 3 +- 17 files changed, 87 insertions(+), 102 deletions(-) delete mode 100644 nvflare/app_opt/xgboost/constant.py diff --git a/examples/advanced/vertical_xgboost/code/vertical_xgb/vertical_data_loader.py b/examples/advanced/vertical_xgboost/code/vertical_xgb/vertical_data_loader.py index 096d428d2d..246824d819 100644 --- a/examples/advanced/vertical_xgboost/code/vertical_xgb/vertical_data_loader.py +++ b/examples/advanced/vertical_xgboost/code/vertical_xgb/vertical_data_loader.py @@ -62,7 +62,7 @@ def __init__(self, data_split_path, psi_path, id_col, label_owner, train_proport self.label_owner = label_owner self.train_proportion = train_proportion - def load_data(self, client_id: str, training_mode: str = ""): + def load_data(self, client_id: str, split_mode: int = 1): client_data_split_path = self.data_split_path.replace("site-x", client_id) client_psi_path = self.psi_path.replace("site-x", client_id) @@ -84,7 +84,7 @@ def load_data(self, client_id: str, training_mode: str = ""): label = "" # for Vertical XGBoost, read from csv with label_column and set data_split_mode to 1 for column mode - dtrain = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=1) - dvalid = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=1) + dtrain = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=split_mode) + dvalid = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=split_mode) return dtrain, dvalid diff --git a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/custom/higgs_data_loader.py b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/custom/higgs_data_loader.py index d97f459600..3edb2d7408 100644 --- a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/custom/higgs_data_loader.py +++ b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/custom/higgs_data_loader.py @@ -41,7 +41,7 @@ def __init__(self, 
data_split_filename): """ self.data_split_filename = data_split_filename - def load_data(self, client_id: str, training_mode: str = ""): + def load_data(self, client_id: str, split_mode: int): with open(self.data_split_filename, "r") as file: data_split = json.load(file) diff --git a/examples/advanced/xgboost/utils/prepare_job_config.py b/examples/advanced/xgboost/utils/prepare_job_config.py index e38c88eec8..71b6b650ba 100644 --- a/examples/advanced/xgboost/utils/prepare_job_config.py +++ b/examples/advanced/xgboost/utils/prepare_job_config.py @@ -152,7 +152,8 @@ def _update_server_config(config: dict, args): config["num_rounds"] = args.round_num config["workflows"][0]["args"]["xgb_params"]["nthread"] = args.nthread config["workflows"][0]["args"]["xgb_params"]["tree_method"] = args.tree_method - config["workflows"][0]["args"]["training_mode"] = args.training_mode + config["workflows"][0]["args"]["split_mode"] = args.split_mode + config["workflows"][0]["args"]["secure_training"] = args.secure_training def _copy_custom_files(src_job_path, src_app_name, dst_job_path, dst_app_name): diff --git a/nvflare/app_opt/xgboost/constant.py b/nvflare/app_opt/xgboost/constant.py deleted file mode 100644 index 826e311418..0000000000 --- a/nvflare/app_opt/xgboost/constant.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class TrainingMode: - # Non-secure mode - H = "h" - HORIZONTAL = "horizontal" - V = "v" - VERTICAL = "vertical" - # Secure mode - HS = "hs" - HORIZONTAL_SECURE = "horizontal_secure" - VS = "VS" - VERTICAL_SECURE = "vertical_secure" diff --git a/nvflare/app_opt/xgboost/data_loader.py b/nvflare/app_opt/xgboost/data_loader.py index d59a36c4de..f49d6dc796 100644 --- a/nvflare/app_opt/xgboost/data_loader.py +++ b/nvflare/app_opt/xgboost/data_loader.py @@ -18,14 +18,10 @@ import xgboost as xgb -from .constant import TrainingMode - class XGBDataLoader(ABC): @abstractmethod - def load_data( - self, client_id: str, training_mode: str = TrainingMode.HORIZONTAL - ) -> Tuple[xgb.DMatrix, xgb.DMatrix]: + def load_data(self, client_id: str, split_mode: int) -> Tuple[xgb.DMatrix, xgb.DMatrix]: """Loads data for xgboost. 
Returns: diff --git a/nvflare/app_opt/xgboost/histogram_based/controller.py b/nvflare/app_opt/xgboost/histogram_based/controller.py index 9ebf8680ae..67be563613 100644 --- a/nvflare/app_opt/xgboost/histogram_based/controller.py +++ b/nvflare/app_opt/xgboost/histogram_based/controller.py @@ -107,9 +107,10 @@ def start_controller(self, fl_ctx: FLContext): if not self._get_certificates(fl_ctx): self.log_error(fl_ctx, "Can't get required certificates for XGB FL server in secure mode.") return + self.log_info(fl_ctx, "Running XGB FL server in secure mode.") self._xgb_fl_server = multiprocessing.Process( target=xgb_federated.run_federated_server, - args=(self._port, len(clients), self._server_key_path, self._server_cert_path, self._ca_cert_path), + args=(len(clients), self._port, self._server_key_path, self._server_cert_path, self._ca_cert_path), ) else: self._xgb_fl_server = multiprocessing.Process( diff --git a/nvflare/app_opt/xgboost/histogram_based/executor.py b/nvflare/app_opt/xgboost/histogram_based/executor.py index f48b1775ca..8336d31aba 100644 --- a/nvflare/app_opt/xgboost/histogram_based/executor.py +++ b/nvflare/app_opt/xgboost/histogram_based/executor.py @@ -269,9 +269,9 @@ def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) - if not self._get_certificates(fl_ctx): return make_reply(ReturnCode.ERROR) - communicator_env["federated_server_cert"] = self._ca_cert_path - communicator_env["federated_client_key"] = self._client_key_path - communicator_env["federated_client_cert"] = self._client_cert_path + communicator_env["federated_server_cert_path"] = self._ca_cert_path + communicator_env["federated_client_key_path"] = self._client_key_path + communicator_env["federated_client_cert_path"] = self._client_cert_path try: with xgb.collective.CommunicatorContext(**communicator_env): diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py index acf7850da2..c4819fea1b 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py @@ -98,7 +98,8 @@ class since the self object contains a sender that contains a Core Cell which ca Constant.RUNNER_CTX_SERVER_ADDR: server_addr, Constant.RUNNER_CTX_RANK: self.rank, Constant.RUNNER_CTX_NUM_ROUNDS: self.num_rounds, - Constant.RUNNER_CTX_TRAINING_MODE: self.training_mode, + Constant.RUNNER_CTX_SPLIT_MODE: self.split_mode, + Constant.RUNNER_CTX_SECURE_TRAINING: self.secure_training, Constant.RUNNER_CTX_XGB_PARAMS: self.xgb_params, Constant.RUNNER_CTX_XGB_OPTIONS: self.xgb_options, Constant.RUNNER_CTX_MODEL_DIR: self._run_dir, diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py index 3cada9ae89..c77827c472 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py @@ -150,7 +150,8 @@ def __init__(self, in_process: bool, per_msg_timeout: float, tx_timeout: float): self.stopped = False self.rank = None self.num_rounds = None - self.training_mode = None + self.split_mode = None + self.secure_training = None self.xgb_params = None self.xgb_options = None self.world_size = None @@ -196,10 +197,15 @@ def configure(self, config: dict, fl_ctx: FLContext): check_positive_int(Constant.CONF_KEY_NUM_ROUNDS, num_rounds) self.num_rounds = 
num_rounds - self.training_mode = config.get(Constant.CONF_KEY_TRAINING_MODE) - if self.training_mode is None: - raise RuntimeError("training_mode is not configured") - fl_ctx.set_prop(key=Constant.PARAM_KEY_TRAINING_MODE, value=self.training_mode, private=True, sticky=True) + self.split_mode = config.get(Constant.CONF_KEY_SPLIT_MODE) + if self.split_mode is None: + raise RuntimeError("split_mode is not configured") + fl_ctx.set_prop(key=Constant.PARAM_KEY_SPLIT_MODE, value=self.split_mode, private=True, sticky=True) + + self.secure_training = config.get(Constant.CONF_KEY_SECURE_TRAINING) + if self.secure_training is None: + raise RuntimeError("secure_training is not configured") + fl_ctx.set_prop(key=Constant.PARAM_KEY_SECURE_TRAINING, value=self.secure_training, private=True, sticky=True) self.xgb_params = config.get(Constant.CONF_KEY_XGB_PARAMS) if not self.xgb_params: diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py index 6303148335..048c1573cf 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py @@ -27,7 +27,7 @@ from nvflare.fuel.utils.validation_utils import check_number_range, check_object_type, check_positive_number, check_str from nvflare.security.logging import secure_format_exception -from .defs import TRAINING_MODE_MAPPING, Constant +from .defs import Constant class ClientStatus: @@ -59,7 +59,8 @@ def __init__( self, adaptor_component_id: str, num_rounds: int, - training_mode: str, + split_mode: int, + secure_training: bool, xgb_params: dict, xgb_options: Optional[dict] = None, configure_task_name=Constant.CONFIG_TASK_NAME, @@ -80,7 +81,8 @@ def __init__( Args: adaptor_component_id - the component ID of server target adaptor num_rounds - number of rounds - training_mode - Split mode (horizontal, vertical, horizontal_secure, vertical_secure) + split_mode - 0 for horizontal/row-split, 1 for vertical/column-split + secure_training - If true, secure training is enabled xgb_params - The params argument for train method xgb_options - All other arguments for train method are passed through this dictionary configure_task_name - name of the config task @@ -100,7 +102,8 @@ def __init__( Controller.__init__(self) self.adaptor_component_id = adaptor_component_id self.num_rounds = num_rounds - self.training_mode = training_mode.lower() + self.split_mode = split_mode + self.secure_training = secure_training self.xgb_params = xgb_params self.xgb_options = xgb_options self.configure_task_name = configure_task_name @@ -118,10 +121,8 @@ def __init__( self.client_statuses = {} # client name => ClientStatus self.abort_signal = None - check_str("training_mode", training_mode) - valid_mode = TRAINING_MODE_MAPPING.keys() - if training_mode not in valid_mode: - raise ValueError(f"training_mode must be one of following values: {valid_mode}") + if split_mode not in {0, 1}: + raise ValueError("split_mode must be either 0 or 1") if not self.xgb_params: raise ValueError("xgb_params can't be empty") @@ -462,7 +463,8 @@ def _configure_clients(self, abort_signal: Signal, fl_ctx: FLContext): shareable[Constant.CONF_KEY_CLIENT_RANKS] = self.client_ranks shareable[Constant.CONF_KEY_NUM_ROUNDS] = self.num_rounds - shareable[Constant.CONF_KEY_TRAINING_MODE] = self.training_mode + shareable[Constant.CONF_KEY_SPLIT_MODE] = self.split_mode + shareable[Constant.CONF_KEY_SECURE_TRAINING] = self.secure_training shareable[Constant.CONF_KEY_XGB_PARAMS] = 
self.xgb_params shareable[Constant.CONF_KEY_XGB_OPTIONS] = self.xgb_options diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py index 3b71d59ffb..a08b40e28a 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from nvflare.app_opt.xgboost.constant import TrainingMode from nvflare.fuel.f3.drivers.net_utils import MAX_FRAME_SIZE @@ -26,12 +25,13 @@ class Constant: CONF_KEY_CLIENT_RANKS = "client_ranks" CONF_KEY_WORLD_SIZE = "world_size" CONF_KEY_NUM_ROUNDS = "num_rounds" - CONF_KEY_TRAINING_MODE = "training_mode" + CONF_KEY_SPLIT_MODE = "split_mode" + CONF_KEY_SECURE_TRAINING = "secure_training" CONF_KEY_XGB_PARAMS = "xgb_params" CONF_KEY_XGB_OPTIONS = "xgb_options" # default component config values - CONFIG_TASK_TIMEOUT = 20 + CONFIG_TASK_TIMEOUT = 60 START_TASK_TIMEOUT = 10 XGB_SERVER_READY_TIMEOUT = 10.0 @@ -87,14 +87,16 @@ class Constant: PARAM_KEY_REPLY = "xgb.reply" PARAM_KEY_REQUEST = "xgb.request" PARAM_KEY_EVENT = "xgb.event" - PARAM_KEY_TRAINING_MODE = "xgb.training_mode" + PARAM_KEY_SPLIT_MODE = "xgb.split_mode" + PARAM_KEY_SECURE_TRAINING = "xgb.secure_training" PARAM_KEY_CONFIG_ERROR = "xgb.config_error" RUNNER_CTX_SERVER_ADDR = "server_addr" RUNNER_CTX_PORT = "port" RUNNER_CTX_CLIENT_NAME = "client_name" RUNNER_CTX_NUM_ROUNDS = "num_rounds" - RUNNER_CTX_TRAINING_MODE = "training_mode" + RUNNER_CTX_SPLIT_MODE = "split_mode" + RUNNER_CTX_SECURE_TRAINING = "secure_training" RUNNER_CTX_XGB_PARAMS = "xgb_params" RUNNER_CTX_XGB_OPTIONS = "xgb_options" RUNNER_CTX_WORLD_SIZE = "world_size" @@ -127,18 +129,3 @@ class Constant: class SplitMode: ROW = 0 COL = 1 - - -# Mapping of text training mode to split mode -TRAINING_MODE_MAPPING = { - TrainingMode.H: SplitMode.ROW, - TrainingMode.HORIZONTAL: SplitMode.ROW, - TrainingMode.V: SplitMode.COL, - TrainingMode.VERTICAL: SplitMode.COL, - TrainingMode.HS: SplitMode.ROW, - TrainingMode.HORIZONTAL_SECURE: SplitMode.ROW, - TrainingMode.VS: SplitMode.COL, - TrainingMode.VERTICAL_SECURE: SplitMode.COL, -} - -SECURE_TRAINING_MODES = {TrainingMode.HS, TrainingMode.HORIZONTAL_SECURE, TrainingMode.VS, TrainingMode.VERTICAL_SECURE} diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py index b0610567d1..2d6a8cf875 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py @@ -27,7 +27,8 @@ class XGBFedController(XGBController): def __init__( self, num_rounds: int, - training_mode: str, + split_mode: int, + secure_training: bool, xgb_params: dict, xgb_options: Optional[dict] = None, configure_task_name=Constant.CONFIG_TASK_NAME, @@ -44,7 +45,8 @@ def __init__( self, adaptor_component_id="", num_rounds=num_rounds, - training_mode=training_mode, + split_mode=split_mode, + secure_training=secure_training, xgb_params=xgb_params, xgb_options=xgb_options, configure_task_name=configure_task_name, diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/mock/mock_controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/mock/mock_controller.py index 8e9e32a9eb..ea81a4a1ee 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/mock/mock_controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/mock/mock_controller.py @@ -37,7 +37,8 @@ def 
__init__( ): XGBController.__init__( self, - training_mode="horizontal", + split_mode=0, + secure_training=False, xgb_params={"max_depth": 3}, adaptor_component_id="", num_rounds=num_rounds, diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py index 768f2152e6..b9490467c5 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py @@ -19,11 +19,11 @@ from xgboost import callback from nvflare.apis.fl_component import FLComponent -from nvflare.apis.fl_constant import SystemConfigs +from nvflare.apis.fl_constant import FLContextKey, SystemConfigs from nvflare.apis.fl_context import FLContext from nvflare.app_common.tracking.log_writer import LogWriter from nvflare.app_opt.xgboost.data_loader import XGBDataLoader -from nvflare.app_opt.xgboost.histogram_based_v2.defs import SECURE_TRAINING_MODES, Constant +from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant from nvflare.app_opt.xgboost.histogram_based_v2.runners.xgb_runner import AppRunner from nvflare.app_opt.xgboost.metrics_cb import MetricsCallback from nvflare.fuel.utils.config_service import ConfigService @@ -33,6 +33,7 @@ PLUGIN_PARAM_KEY = "federated_plugin" PLUGIN_KEY_NAME = "name" PLUGIN_KEY_PATH = "path" +MODEL_FILE_NAME = "model.json" class XGBClientRunner(AppRunner, FLComponent): @@ -46,12 +47,14 @@ def __init__( self.model_file_name = model_file_name self.data_loader_id = data_loader_id self.logger = get_logger(self) + self.fl_ctx = None self._client_name = None self._rank = None self._world_size = None self._num_rounds = None - self._training_mode = None + self._split_mode = None + self._secure_training = None self._xgb_params = None self._xgb_options = None self._server_addr = None @@ -62,6 +65,7 @@ def __init__( self._metrics_writer = None def initialize(self, fl_ctx: FLContext): + self.fl_ctx = fl_ctx engine = fl_ctx.get_engine() self._data_loader = engine.get_component(self.data_loader_id) if not isinstance(self._data_loader, XGBDataLoader): @@ -95,6 +99,17 @@ def _xgb_train(self, num_rounds, xgb_params: dict, xgb_options: dict, train_data early_stopping_rounds = xgb_options.get("early_stopping_rounds", 0) verbose_eval = xgb_options.get("verbose_eval", False) + # Check for pre-trained model + job_id = self.fl_ctx.get_prop(FLContextKey.CURRENT_JOB_ID) + workspace = self.fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT) + custom_dir = workspace.get_app_custom_dir(job_id) + model_file = os.path.join(custom_dir, MODEL_FILE_NAME) + if os.path.isfile(model_file): + self.logger.info(f"Pre-trained model is used: {model_file}") + xgb_model = model_file + else: + xgb_model = None + # Run training, all the features in training API is available. 
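        # Note (illustration): xgboost.train() accepts xgb_model as an optional starting
        # point - a path to a saved model (or a Booster) continues training from that
        # model, while None starts from scratch. To ship a pre-trained model with the
        # job, save it as custom/model.json inside the app folder, e.g.
        #     booster.save_model("jobs/<job_name>/app/custom/model.json")  # path illustrative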
bst = xgb.train( xgb_params, @@ -104,6 +119,7 @@ def _xgb_train(self, num_rounds, xgb_params: dict, xgb_options: dict, train_data early_stopping_rounds=early_stopping_rounds, verbose_eval=verbose_eval, callbacks=callbacks, + xgb_model=xgb_model, ) return bst @@ -112,7 +128,8 @@ def run(self, ctx: dict): self._rank = ctx.get(Constant.RUNNER_CTX_RANK) self._world_size = ctx.get(Constant.RUNNER_CTX_WORLD_SIZE) self._num_rounds = ctx.get(Constant.RUNNER_CTX_NUM_ROUNDS) - self._training_mode = ctx.get(Constant.RUNNER_CTX_TRAINING_MODE) + self._split_mode = ctx.get(Constant.RUNNER_CTX_SPLIT_MODE) + self._secure_training = ctx.get(Constant.RUNNER_CTX_SECURE_TRAINING) self._xgb_params = ctx.get(Constant.RUNNER_CTX_XGB_PARAMS) self._xgb_options = ctx.get(Constant.RUNNER_CTX_XGB_OPTIONS) self._server_addr = ctx.get(Constant.RUNNER_CTX_SERVER_ADDR) @@ -125,8 +142,10 @@ def run(self, ctx: dict): self._xgb_params["device"] = f"cuda:{self._rank}" self.logger.info( - f"XGB training_mode: {self._training_mode} " f"params: {self._xgb_params} XGB options: {self._xgb_options}" + f"XGB split_mode: {self._split_mode} secure_training: {self._secure_training} " + f"params: {self._xgb_params} XGB options: {self._xgb_options}" ) + self.logger.info(f"server address is {self._server_addr}") communicator_env = { @@ -136,7 +155,7 @@ def run(self, ctx: dict): "federated_rank": self._rank, } - if self._training_mode not in SECURE_TRAINING_MODES: + if not self._secure_training: self.logger.info("XGBoost non-secure training") else: xgb_plugin_name = ConfigService.get_str_var( @@ -166,13 +185,11 @@ def run(self, ctx: dict): lib_name = f"lib{xgb_plugin_params[PLUGIN_KEY_NAME]}.{lib_ext}" xgb_plugin_params[PLUGIN_KEY_PATH] = str(get_package_root() / "libs" / lib_name) - self.logger.info(f"XGBoost secure training: {self._training_mode} Params: {xgb_plugin_params}") - communicator_env[PLUGIN_PARAM_KEY] = xgb_plugin_params with xgb.collective.CommunicatorContext(**communicator_env): # Load the data. 
Dmatrix must be created with column split mode in CommunicatorContext for vertical FL - train_data, val_data = self._data_loader.load_data(self._client_name, self._training_mode) + train_data, val_data = self._data_loader.load_data(self._client_name, self._split_mode) bst = self._xgb_train(self._num_rounds, self._xgb_params, self._xgb_options, train_data, val_data) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py index 0f90c9b22c..56396f434a 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py @@ -20,7 +20,7 @@ from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable from nvflare.app_opt.xgboost.histogram_based_v2.aggr import Aggregator -from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant, TrainingMode +from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant, SplitMode from nvflare.app_opt.xgboost.histogram_based_v2.sec.dam import DamDecoder from nvflare.app_opt.xgboost.histogram_based_v2.sec.data_converter import FeatureAggregationResult from nvflare.app_opt.xgboost.histogram_based_v2.sec.partial_he.adder import Adder @@ -408,13 +408,14 @@ def handle_event(self, event_type: str, fl_ctx: FLContext): global tenseal_error if event_type == Constant.EVENT_XGB_JOB_CONFIGURED: task_data = fl_ctx.get_prop(FLContextKey.TASK_DATA) - training_mode = task_data.get(Constant.CONF_KEY_TRAINING_MODE) - if training_mode in {TrainingMode.VS, TrainingMode.VERTICAL_SECURE} and ipcl_imported: + split_mode = task_data.get(Constant.CONF_KEY_SPLIT_MODE) + secure_training = task_data.get(Constant.CONF_KEY_SECURE_TRAINING) + if secure_training and split_mode == SplitMode.COL and ipcl_imported: self.public_key, self.private_key = generate_keys(self.key_length) self.encryptor = Encryptor(self.public_key, self.num_workers) self.decrypter = Decrypter(self.private_key, self.num_workers) self.adder = Adder(self.num_workers) - elif training_mode in {TrainingMode.HS, TrainingMode.HORIZONTAL_SECURE}: + elif secure_training and split_mode == SplitMode.ROW: if not tenseal_imported: fl_ctx.set_prop(Constant.PARAM_KEY_CONFIG_ERROR, tenseal_error, private=True, sticky=False) return diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py b/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py index 3939bbd41e..f5514e950f 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py @@ -15,7 +15,7 @@ import xgboost as xgb from nvflare.app_opt.xgboost.data_loader import XGBDataLoader -from nvflare.app_opt.xgboost.histogram_based_v2.defs import TRAINING_MODE_MAPPING, SplitMode +from nvflare.app_opt.xgboost.histogram_based_v2.defs import SplitMode class SecureDataLoader(XGBDataLoader): @@ -29,22 +29,17 @@ def __init__(self, rank: int, folder: str): self.rank = rank self.folder = folder - def load_data(self, client_id: str, training_mode: str): + def load_data(self, client_id: str, split_mode: int): train_path = f"{self.folder}/{client_id}/train.csv" valid_path = f"{self.folder}/{client_id}/valid.csv" - if training_mode not in TRAINING_MODE_MAPPING: - raise ValueError(f"Invalid training_mode: {training_mode}") - - data_split_mode = TRAINING_MODE_MAPPING[training_mode] - - if self.rank == 0 or data_split_mode == SplitMode.ROW: + if self.rank == 0 or split_mode == 
SplitMode.ROW: label = "&label_column=0" else: label = "" - train_data = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=data_split_mode) - valid_data = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=data_split_mode) + train_data = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=split_mode) + valid_data = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=split_mode) return train_data, valid_data diff --git a/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/xgb_adaptor_test.py b/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/xgb_adaptor_test.py index ec37a48b01..6de75c5052 100644 --- a/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/xgb_adaptor_test.py +++ b/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/xgb_adaptor_test.py @@ -37,7 +37,8 @@ def test_configure(self): config = { Constant.CONF_KEY_CLIENT_RANKS: {"site-test": 1}, Constant.CONF_KEY_NUM_ROUNDS: 100, - Constant.CONF_KEY_TRAINING_MODE: "horizontal", + Constant.CONF_KEY_SPLIT_MODE: 0, + Constant.CONF_KEY_SECURE_TRAINING: False, Constant.CONF_KEY_XGB_PARAMS: {"depth": 1}, } ctx = FLContext() From 35830dd76f96a7681603894c32723b4717f64c0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Tue, 13 Aug 2024 18:40:56 -0700 Subject: [PATCH 13/26] Update xgboost example and ci (#2794) --- .../jobs/base_v2/app/config/config_fed_server.json | 3 ++- examples/advanced/xgboost/utils/prepare_job_config.py | 3 ++- .../test_configs/standalone_job/xgb_histogram_examples.yml | 4 ++-- .../data/test_configs/standalone_job/xgb_tree_examples.yml | 4 ++-- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_server.json b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_server.json index 5ce8b11ddc..d8c19cddfa 100755 --- a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_server.json +++ b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_server.json @@ -18,7 +18,8 @@ "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_controller.XGBFedController", "args": { "num_rounds": "{num_rounds}", - "training_mode": "horizontal", + "split_mode": 0, + "secure_training": false, "xgb_params": { "max_depth": 8, "eta": 0.1, diff --git a/examples/advanced/xgboost/utils/prepare_job_config.py b/examples/advanced/xgboost/utils/prepare_job_config.py index 71b6b650ba..1f267a8754 100644 --- a/examples/advanced/xgboost/utils/prepare_job_config.py +++ b/examples/advanced/xgboost/utils/prepare_job_config.py @@ -52,7 +52,8 @@ def job_config_args_parser(): parser.add_argument( "--tree_method", type=str, default="hist", help="tree_method for xgboost - use hist for best perf" ) - parser.add_argument("--training_mode", type=str, default="horizontal", help="histogram_v2 training mode") + parser.add_argument("--split_mode", type=int, default=0, help="dataset split mode, 0 or 1") + parser.add_argument("--secure_training", type=bool, default=False, help="histogram_v2 secure training or not") return parser diff --git a/tests/integration_test/data/test_configs/standalone_job/xgb_histogram_examples.yml b/tests/integration_test/data/test_configs/standalone_job/xgb_histogram_examples.yml index d8d2898a20..28a3b24617 100644 --- a/tests/integration_test/data/test_configs/standalone_job/xgb_histogram_examples.yml +++ 
b/tests/integration_test/data/test_configs/standalone_job/xgb_histogram_examples.yml @@ -41,7 +41,7 @@ tests: --out_path "/tmp/nvflare/xgboost_higgs_dataset/2_uniform" - python3 ../../examples/advanced/xgboost/utils/prepare_job_config.py --site_num 2 - --training_mode histogram + --training_algo histogram --split_method uniform --lr_mode uniform --nthread 16 @@ -81,7 +81,7 @@ tests: - pip install -r ../../examples/advanced/xgboost/histogram-based/temp_requirements.txt - python3 ../../examples/advanced/xgboost/utils/prepare_job_config.py --site_num 2 - --training_mode histogram_v2 + --training_algo histogram_v2 --split_method uniform --lr_mode uniform --nthread 16 diff --git a/tests/integration_test/data/test_configs/standalone_job/xgb_tree_examples.yml b/tests/integration_test/data/test_configs/standalone_job/xgb_tree_examples.yml index 83eed7e1e3..b0abd1a6d3 100644 --- a/tests/integration_test/data/test_configs/standalone_job/xgb_tree_examples.yml +++ b/tests/integration_test/data/test_configs/standalone_job/xgb_tree_examples.yml @@ -41,7 +41,7 @@ tests: --out_path "/tmp/nvflare/xgboost_higgs_dataset/5_uniform" - python3 ../../examples/advanced/xgboost/utils/prepare_job_config.py --site_num 5 - --training_mode cyclic + --training_algo cyclic --split_method uniform --lr_mode uniform --nthread 16 @@ -75,7 +75,7 @@ tests: run_finished: true type: run_state setup: - - python3 ../../examples/advanced/xgboost/utils/prepare_job_config.py --site_num 5 --training_mode bagging + - python3 ../../examples/advanced/xgboost/utils/prepare_job_config.py --site_num 5 --training_algo bagging --split_method uniform --lr_mode uniform --nthread 16 --tree_method hist - python3 convert_to_test_job.py --job ../../examples/advanced/xgboost/tree-based/jobs/higgs_5_bagging_uniform_split_uniform_lr From f388c122ad8f43e280759e7fc0fb1e67610fab53 Mon Sep 17 00:00:00 2001 From: Yan Cheng <58191769+yanchengnv@users.noreply.github.com> Date: Tue, 13 Aug 2024 21:41:24 -0400 Subject: [PATCH 14/26] [2.5] Update flower CLI (#2792) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * update flower cli * update flwr hello-world job (#9) * update flwr hello-world job * add license header * update readme --------- Co-authored-by: Holger Roth <6304754+holgerroth@users.noreply.github.com> Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- examples/hello-world/hello-flower/README.md | 24 ++++++++----- .../app/config/config_fed_client.json | 4 +-- .../app/config/config_fed_server.json | 4 +-- .../app/custom/flwr_pt/__init__.py | 14 ++++++++ .../app/custom/{ => flwr_pt}/client.py | 16 +++------ .../app/custom/{ => flwr_pt}/server.py | 32 +++++++---------- .../app/custom/{ => flwr_pt}/task.py | 0 .../hello-flwr-pt/app/custom/pyproject.toml | 34 +++++++++++++++++++ .../hello-world/hello-flower/requirements.txt | 4 --- nvflare/app_opt/flower/applet.py | 25 ++++---------- nvflare/app_opt/flower/controller.py | 4 --- nvflare/app_opt/flower/executor.py | 4 +-- 12 files changed, 90 insertions(+), 75 deletions(-) create mode 100644 examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/flwr_pt/__init__.py rename examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/{ => flwr_pt}/client.py (82%) rename examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/{ => flwr_pt}/server.py (80%) rename examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/{ => flwr_pt}/task.py (100%) create mode 100644 examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/pyproject.toml delete 
mode 100644 examples/hello-world/hello-flower/requirements.txt diff --git a/examples/hello-world/hello-flower/README.md b/examples/hello-world/hello-flower/README.md index a0c303a832..628a7fc2ad 100644 --- a/examples/hello-world/hello-flower/README.md +++ b/examples/hello-world/hello-flower/README.md @@ -6,18 +6,26 @@ In this example, we run 2 Flower clients and Flower Server in parallel using NVF To run Flower code in NVFlare, we created a job, including an app with the following custom folder content ```bash -$ tree jobs/hello-flwr-pt -. -├── client.py # <-- contains `ClientApp` -├── server.py # <-- contains `ServerApp` -├── task.py # <-- task-specific code (model, data) +$ tree jobs/hello-flwr-pt/app/custom + +├── flwr_pt +│ ├── client.py # <-- contains `ClientApp` +│ ├── __init__.py # <-- to register the python module +│ ├── server.py # <-- contains `ServerApp` +│ └── task.py # <-- task-specific code (model, data) +└── pyproject.toml # <-- Flower project file ``` -Note, this code is directly copied from Flower's [app-pytorch](https://github.com/adap/flower/tree/main/examples/app-pytorch) example. +Note, this code is adapted from Flower's [app-pytorch](https://github.com/adap/flower/tree/main/examples/app-pytorch) example. ## Install dependencies -To run this job with NVFlare, we first need to install the dependencies. +If you haven't already, we recommend creating a virtual environment. +```bash +python3 -m venv nvflare_flwr +source nvflare_flwr/bin/activate +``` +To run a job with NVFlare, we first need to install its dependencies. ```bash -pip install -r requirements.txt +pip install ./jobs/hello-flwr-pt/app/custom ``` ## Run a simulation diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_client.json b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_client.json index e1e74ade3f..76c9ac716c 100644 --- a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_client.json +++ b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_client.json @@ -5,9 +5,7 @@ "tasks": ["*"], "executor": { "path": "nvflare.app_opt.flower.executor.FlowerExecutor", - "args": { - "client_app": "client:app" - } + "args": {} } } ], diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_server.json b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_server.json index dfb4cab82d..8570885e4e 100644 --- a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_server.json +++ b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_server.json @@ -8,9 +8,7 @@ { "id": "ctl", "path": "nvflare.app_opt.flower.controller.FlowerController", - "args": { - "server_app": "server:app" - } + "args": {} } ] } \ No newline at end of file diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/flwr_pt/__init__.py b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/flwr_pt/__init__.py new file mode 100644 index 0000000000..37b06a7242 --- /dev/null +++ b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/flwr_pt/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""flwr_pt.""" diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/client.py b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/flwr_pt/client.py similarity index 82% rename from examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/client.py rename to examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/flwr_pt/client.py index 9e674b27c3..205db46544 100644 --- a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/client.py +++ b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/flwr_pt/client.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. from flwr.client import ClientApp, NumPyClient -from task import DEVICE, Net, get_weights, load_data, set_weights, test, train +from flwr.common import Context + +from .task import DEVICE, Net, get_weights, load_data, set_weights, test, train # Load model and data (simple CNN, CIFAR-10) net = Net().to(DEVICE) @@ -32,7 +34,7 @@ def evaluate(self, parameters, config): return loss, len(testloader.dataset), {"accuracy": accuracy} -def client_fn(cid: str): +def client_fn(context: Context): """Create and return an instance of Flower `Client`.""" return FlowerClient().to_client() @@ -41,13 +43,3 @@ def client_fn(cid: str): app = ClientApp( client_fn=client_fn, ) - - -# Legacy mode -if __name__ == "__main__": - from flwr.client import start_client - - start_client( - server_address="127.0.0.1:8080", - client=FlowerClient().to_client(), - ) diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/server.py b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/flwr_pt/server.py similarity index 80% rename from examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/server.py rename to examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/flwr_pt/server.py index 8083a6b802..d93c453355 100644 --- a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/server.py +++ b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/flwr_pt/server.py @@ -13,10 +13,11 @@ # limitations under the License. 
from typing import List, Tuple -from flwr.common import Metrics, ndarrays_to_parameters -from flwr.server import ServerApp, ServerConfig +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig from flwr.server.strategy import FedAvg -from task import Net, get_weights + +from .task import Net, get_weights # Define metric aggregation function @@ -53,23 +54,16 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: ) -# Define config -config = ServerConfig(num_rounds=3) - - # Flower ServerApp -app = ServerApp( - config=config, - strategy=strategy, -) +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + # Define config + config = ServerConfig(num_rounds=num_rounds) + return ServerAppComponents(strategy=strategy, config=config) -# Legacy mode -if __name__ == "__main__": - from flwr.server import start_server - start_server( - server_address="0.0.0.0:8080", - config=config, - strategy=strategy, - ) +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/task.py b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/flwr_pt/task.py similarity index 100% rename from examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/task.py rename to examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/flwr_pt/task.py diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/pyproject.toml b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/pyproject.toml new file mode 100644 index 0000000000..8624601c1b --- /dev/null +++ b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/pyproject.toml @@ -0,0 +1,34 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "flwr_pt" +version = "1.0.0" +description = "" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.11.0,<2.0", + "nvflare~=2.5.0rc", + "torch==2.2.1", + "torchvision==0.17.1", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "nvidia" + +[tool.flwr.app.components] +serverapp = "flwr_pt.server:app" +clientapp = "flwr_pt.client:app" + +[tool.flwr.app.config] +num-server-rounds = 3 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 2 diff --git a/examples/hello-world/hello-flower/requirements.txt b/examples/hello-world/hello-flower/requirements.txt deleted file mode 100644 index 1d8990f84a..0000000000 --- a/examples/hello-world/hello-flower/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -nvflare~=2.5.0rc -flwr[simulation]>=1.8.0 -torch==2.2.1 -torchvision==0.17.1 diff --git a/nvflare/app_opt/flower/applet.py b/nvflare/app_opt/flower/applet.py index e3b8bcd5e2..f34361bab5 100644 --- a/nvflare/app_opt/flower/applet.py +++ b/nvflare/app_opt/flower/applet.py @@ -26,17 +26,9 @@ class FlowerClientApplet(CLIApplet): - def __init__( - self, - client_app: str, - ): - """Constructor of FlowerClientApplet, which extends CLIApplet. - - Args: - client_app: the client app specification of the Flower app - """ + def __init__(self): + """Constructor of FlowerClientApplet, which extends CLIApplet.""" CLIApplet.__init__(self) - self.client_app = client_app def get_command(self, ctx: dict) -> CommandDescriptor: """Implementation of the get_command method required by the super class CLIApplet. 
@@ -64,7 +56,7 @@ def get_command(self, ctx: dict) -> CommandDescriptor: job_id = fl_ctx.get_job_id() custom_dir = ws.get_app_custom_dir(job_id) app_dir = ws.get_app_dir(job_id) - cmd = f"flower-client-app --insecure --grpc-adapter --superlink {addr} --dir {custom_dir} {self.client_app}" + cmd = f"flower-supernode --insecure --grpc-adapter --superlink {addr} {custom_dir}" # use app_dir as the cwd for flower's client app. # this is necessary for client_api to be used with the flower client app for metrics logging @@ -76,7 +68,6 @@ def get_command(self, ctx: dict) -> CommandDescriptor: class FlowerServerApplet(Applet): def __init__( self, - server_app: str, database: str, superlink_ready_timeout: float, server_app_args: list = None, @@ -84,7 +75,6 @@ def __init__( """Constructor of FlowerServerApplet. Args: - server_app: Flower's server app specification database: database spec to be used by the server app superlink_ready_timeout: how long to wait for the superlink process to become ready server_app_args: an optional list that contains additional command args passed to flower server app @@ -92,7 +82,6 @@ def __init__( Applet.__init__(self) self._app_process_mgr = None self._superlink_process_mgr = None - self.server_app = server_app self.database = database self.superlink_ready_timeout = superlink_ready_timeout self.server_app_args = server_app_args @@ -148,8 +137,8 @@ def start(self, app_ctx: dict): db_arg = f"--database {self.database}" superlink_cmd = ( - f"flower-superlink --insecure {db_arg} " - f"--fleet-api-address {server_addr} --fleet-api-type grpc-adapter " + f"flower-superlink --insecure --fleet-api-type grpc-adapter {db_arg} " + f"--fleet-api-address {server_addr} " f"--driver-api-address {driver_addr}" ) @@ -175,9 +164,7 @@ def start(self, app_ctx: dict): if self.server_app_args: args_str = " ".join(self.server_app_args) - app_cmd = ( - f"flower-server-app --insecure --superlink {driver_addr} --dir {custom_dir} {args_str} {self.server_app}" - ) + app_cmd = f"flower-server-app --insecure --superlink {driver_addr} {args_str} {custom_dir}" cmd_desc = CommandDescriptor( cmd=app_cmd, log_file_name="server_app_log.txt", diff --git a/nvflare/app_opt/flower/controller.py b/nvflare/app_opt/flower/controller.py index 69498fc794..3739038258 100644 --- a/nvflare/app_opt/flower/controller.py +++ b/nvflare/app_opt/flower/controller.py @@ -26,7 +26,6 @@ class FlowerController(TieController): def __init__( self, num_rounds=1, - server_app: str = "server:app", database: str = "", server_app_args: list = None, superlink_ready_timeout: float = 10.0, @@ -43,7 +42,6 @@ def __init__( Args: num_rounds: number of rounds. Not used in this version. 
- server_app: the server app specification for Flower server app database: database name server_app_args: additional server app CLI args superlink_ready_timeout: how long to wait for the superlink to become ready before starting server app @@ -73,7 +71,6 @@ def __init__( check_object_type("server_app_args", server_app_args, list) self.num_rounds = num_rounds - self.server_app = server_app self.database = database self.server_app_args = server_app_args self.superlink_ready_timeout = superlink_ready_timeout @@ -86,7 +83,6 @@ def get_connector(self, fl_ctx: FLContext): def get_applet(self, fl_ctx: FLContext): return FlowerServerApplet( - server_app=self.server_app, database=self.database, superlink_ready_timeout=self.superlink_ready_timeout, server_app_args=self.server_app_args, diff --git a/nvflare/app_opt/flower/executor.py b/nvflare/app_opt/flower/executor.py index f11e8ee00f..2ee1d89e5a 100644 --- a/nvflare/app_opt/flower/executor.py +++ b/nvflare/app_opt/flower/executor.py @@ -22,7 +22,6 @@ class FlowerExecutor(TieExecutor): def __init__( self, - client_app: str = "client:app", start_task_name=Constant.START_TASK_NAME, configure_task_name=Constant.CONFIG_TASK_NAME, per_msg_timeout=10.0, @@ -40,7 +39,6 @@ def __init__( self.tx_timeout = tx_timeout self.client_shutdown_timeout = client_shutdown_timeout self.num_rounds = None - self.client_app = client_app def get_connector(self, fl_ctx: FLContext): return GrpcClientConnector( @@ -50,7 +48,7 @@ def get_connector(self, fl_ctx: FLContext): ) def get_applet(self, fl_ctx: FLContext): - return FlowerClientApplet(self.client_app) + return FlowerClientApplet() def configure(self, config: dict, fl_ctx: FLContext): self.num_rounds = config.get(Constant.CONF_KEY_NUM_ROUNDS) From 82b287495be53944863a8dde29fb5d0f4e7feaab Mon Sep 17 00:00:00 2001 From: Ziyue Xu Date: Wed, 14 Aug 2024 15:38:44 -0400 Subject: [PATCH 15/26] replace comet with tensorboard (#2798) --- research/fed-bn/README.md | 12 ++++++------ research/fed-bn/figs/loss.jpeg | Bin 29518 -> 0 bytes research/fed-bn/figs/loss.png | Bin 0 -> 18800 bytes research/fed-bn/requirements.txt | 2 +- research/fed-bn/src/fedbn_cifar10.py | 19 ++++++++++--------- 5 files changed, 17 insertions(+), 16 deletions(-) delete mode 100644 research/fed-bn/figs/loss.jpeg create mode 100644 research/fed-bn/figs/loss.png diff --git a/research/fed-bn/README.md b/research/fed-bn/README.md index 5e541d2235..4796cea225 100644 --- a/research/fed-bn/README.md +++ b/research/fed-bn/README.md @@ -39,7 +39,10 @@ Download the necessary datasets by running: ``` # Run FedBN on different data splits - +We first set the job template path +```commandline +nvflare config -jt ../../job_templates +``` We will use the in-process client API, we choose the sag_pt job template and run the following command to create the job: ``` ./create_job.sh @@ -53,12 +56,9 @@ Run the FedBN simulation with the following command: ``` ## Visualizing Results -To visualize training losses, we use the [Comet ML](https://www.comet.com/site/). -Below is an example of the loss visualization output: -![FedBN Loss Results](./figs/loss.jpeg) +With tensorboard, below is an example of the loss for the two sites: +![FedBN Loss Results](./figs/loss.png) -> **Note**: To use Comet ML experiment tracking system, you need to get Comet API key to get access. -> Alternatively, you can use Tensorboard or MLfow. ## Citation If you find the code and dataset useful, please cite our paper. 
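The TensorBoard curves mentioned in the README above come from NVFlare's client-side `SummaryWriter` (the actual change is in the `fedbn_cifar10.py` diff below). The following is only a minimal sketch of that logging pattern, assuming it runs inside an NVFlare job: the loop, loss values, and tag name are placeholders, while `flare.init()`, `SummaryWriter()`, and `add_scalar()` are the calls this patch itself uses.

```python
# Minimal sketch (placeholder loop and values, not the example's real training code):
# stream scalar metrics from an NVFlare client so they can be viewed in TensorBoard.
import nvflare.client as flare
from nvflare.client.tracking import SummaryWriter

flare.init()                  # join the NVFlare client runtime
writer = SummaryWriter()      # routes metrics through NVFlare's experiment tracking

for step in range(100):       # stand-in for the real local training loop
    loss = 1.0 / (step + 1)   # placeholder loss value
    writer.add_scalar(tag="loss", scalar=loss, global_step=step)
```

Assuming the job is configured with a TensorBoard analytics receiver (as the sag_pt job template used above typically is), the logged scalars can then be inspected by pointing `tensorboard --logdir` at the simulation workspace.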
diff --git a/research/fed-bn/figs/loss.jpeg b/research/fed-bn/figs/loss.jpeg
deleted file mode 100644
index 3fb646bcbed483ff78b7467bf3d57cf9ac148adc..0000000000000000000000000000000000000000
Binary files a/research/fed-bn/figs/loss.jpeg and /dev/null differ
diff --git a/research/fed-bn/figs/loss.png b/research/fed-bn/figs/loss.png
new file mode 100644
index 0000000000000000000000000000000000000000..c4875ef835f52dd35d4b18f340d3637d08d283b0
Binary files /dev/null and b/research/fed-bn/figs/loss.png differ
diff --git a/research/fed-bn/requirements.txt b/research/fed-bn/requirements.txt
index eb6efcb233..3bbfea441b 100644
--- a/research/fed-bn/requirements.txt
+++ b/research/fed-bn/requirements.txt
@@ -1,4 +1,4 @@
 nvflare~=2.4.0rc
 torch
 torchvision
-comet_ml
+tensorboard
diff --git a/research/fed-bn/src/fedbn_cifar10.py b/research/fed-bn/src/fedbn_cifar10.py
index e0747941f1..07530442d0 100644
--- a/research/fed-bn/src/fedbn_cifar10.py
+++ b/research/fed-bn/src/fedbn_cifar10.py
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# (optional) metrics
-import comet_ml
 import torch
 import torch.nn as nn
 import torch.optim as optim
@@ -24,13 +22,16 @@
 # (1) import nvflare client API
 import nvflare.client as flare
 
-# (optional) set a fix place so we don't need to download everytime
+# (optional) metrics
+from nvflare.client.tracking import SummaryWriter
+
+# (optional) set a fix place for data storage
+# so we don't need to download everytime
 DATASET_PATH = "/tmp/nvflare/data"
+
 # (optional) We change to use GPU to speed things up.
# if you want to use CPU, change DEVICE="cpu" DEVICE = "cuda:0" -# input your own comet ml account API key -COMET_API_KEY = "" # key function for FedBN @@ -56,9 +57,7 @@ def main(): # (2) initializes NVFlare client API flare.init() - comet_ml.init() - exp = comet_ml.Experiment(project_name="fedbn_cifar10", api_key=COMET_API_KEY) - + summary_writer = SummaryWriter() while flare.is_running(): # (3) receives FLModel from NVFlare input_model = flare.receive() @@ -75,6 +74,7 @@ def main(): # (optional) calculate total steps steps = epochs * len(trainloader) for epoch in range(epochs): # loop over the dataset multiple times + running_loss = 0.0 for i, data in enumerate(trainloader, 0): # get the inputs; data is a list of [inputs, labels] @@ -96,7 +96,7 @@ def main(): print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}") global_step = input_model.current_round * steps + epoch * len(trainloader) + i - exp.log_metrics({"loss": running_loss}, step=global_step) + summary_writer.add_scalar(tag="loss", scalar=running_loss, global_step=global_step) running_loss = 0.0 print("Finished Training") @@ -131,6 +131,7 @@ def evaluate(input_weights): # (6) evaluate on received model for model selection accuracy = evaluate(input_model.params) + summary_writer.add_scalar(tag="global_model_accuracy", scalar=accuracy, global_step=input_model.current_round) # (7) construct trained FL model output_model = flare.FLModel( params=net.cpu().state_dict(), From eb7f650933b5e941907bcca3193ae71203d2aa9f Mon Sep 17 00:00:00 2001 From: Yuhong Wen Date: Wed, 14 Aug 2024 16:24:17 -0400 Subject: [PATCH 16/26] more app_opt scan example changes. (#2797) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- .../jobs/fedavg/app/config/config_fed_server.json | 2 +- .../brats_central/app/config/config_fed_server.json | 2 +- .../brats_fedavg/app/config/config_fed_server.json | 2 +- .../app/config/config_fed_server.json | 2 +- .../cifar10_fedavg_he/config/config_fed_client.json | 6 +++--- .../cifar10_fedavg_he/config/config_fed_server.json | 12 ++++++------ .../config/config_fed_server.json | 4 ++-- .../cifar10_central/config/config_fed_server.json | 4 ++-- .../cifar10_fedavg/config/config_fed_server.json | 4 ++-- .../cifar10_fedopt/config/config_fed_server.json | 6 +++--- .../cifar10_fedprox/config/config_fed_server.json | 4 ++-- .../cifar10_scaffold/config/config_fed_server.json | 4 ++-- .../app/config/config_fed_server.conf | 4 ++-- .../app/config/config_fed_server.json | 4 ++-- .../jobs/bert_ncbi/app/config/config_fed_server.json | 2 +- .../jobs/gpt2_ncbi/app/config/config_fed_server.json | 2 +- .../app/config/config_fed_server.json | 2 +- .../prostate_ditto/app/config/config_fed_server.json | 2 +- .../app/config/config_fed_server.json | 2 +- .../app/config/config_fed_server.json | 2 +- .../app/config/config_fed_server.json | 2 +- .../prostate_ditto/app/config/config_fed_server.json | 2 +- .../app/config/config_fed_server.json | 2 +- .../app/config/config_fed_server.json | 2 +- .../config/config_fed_client_multiprocess.json | 2 +- .../templates/server/config/config_fed_server.json | 2 +- .../config/config_fed_client_multiprocess.json | 2 +- .../templates/server/config/config_fed_server.json | 2 +- .../cifar10_autofedrl/config/config_fed_server.json | 4 ++-- .../condist/server/config/config_fed_server.json | 4 ++-- .../fedce_prostate/app/config/config_fed_server.json | 2 +- .../server/config/config_fed_server.json | 4 ++-- 
.../pt_use_name/app/config/config_fed_server.json | 2 +- 33 files changed, 52 insertions(+), 52 deletions(-) diff --git a/examples/advanced/bionemo/task_fitting/jobs/fedavg/app/config/config_fed_server.json b/examples/advanced/bionemo/task_fitting/jobs/fedavg/app/config/config_fed_server.json index 99ce96ab24..9dcf40dbcd 100644 --- a/examples/advanced/bionemo/task_fitting/jobs/fedavg/app/config/config_fed_server.json +++ b/examples/advanced/bionemo/task_fitting/jobs/fedavg/app/config/config_fed_server.json @@ -32,7 +32,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } diff --git a/examples/advanced/brats18/configs/brats_central/app/config/config_fed_server.json b/examples/advanced/brats18/configs/brats_central/app/config/config_fed_server.json index 5e665512b2..c586f9bfc1 100644 --- a/examples/advanced/brats18/configs/brats_central/app/config/config_fed_server.json +++ b/examples/advanced/brats18/configs/brats_central/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "monai.networks.nets.segresnet.SegResNet", diff --git a/examples/advanced/brats18/configs/brats_fedavg/app/config/config_fed_server.json b/examples/advanced/brats18/configs/brats_fedavg/app/config/config_fed_server.json index 55fe8c6363..694ee740c2 100644 --- a/examples/advanced/brats18/configs/brats_fedavg/app/config/config_fed_server.json +++ b/examples/advanced/brats18/configs/brats_fedavg/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "monai.networks.nets.segresnet.SegResNet", diff --git a/examples/advanced/brats18/configs/brats_fedavg_dp/app/config/config_fed_server.json b/examples/advanced/brats18/configs/brats_fedavg_dp/app/config/config_fed_server.json index 55fe8c6363..694ee740c2 100644 --- a/examples/advanced/brats18/configs/brats_fedavg_dp/app/config/config_fed_server.json +++ b/examples/advanced/brats18/configs/brats_fedavg_dp/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "monai.networks.nets.segresnet.SegResNet", diff --git a/examples/advanced/cifar10/cifar10-real-world/jobs/cifar10_fedavg_he/cifar10_fedavg_he/config/config_fed_client.json b/examples/advanced/cifar10/cifar10-real-world/jobs/cifar10_fedavg_he/cifar10_fedavg_he/config/config_fed_client.json index 64a665dcd8..4ea41a53b3 100644 --- a/examples/advanced/cifar10/cifar10-real-world/jobs/cifar10_fedavg_he/cifar10_fedavg_he/config/config_fed_client.json +++ b/examples/advanced/cifar10/cifar10-real-world/jobs/cifar10_fedavg_he/cifar10_fedavg_he/config/config_fed_client.json @@ -23,7 +23,7 @@ "tasks": ["train"], "filters":[ { - "name": "HEModelEncryptor", + "path": "nvflare.app_opt.he.model_encryptor.HEModelEncryptor", "args": { "weigh_by_local_iter": true } @@ -34,7 +34,7 @@ "tasks": ["submit_model"], "filters":[ { - "name": "HEModelEncryptor", + "path": "nvflare.app_opt.he.model_encryptor.HEModelEncryptor", "args": { "weigh_by_local_iter": false } @@ -47,7 +47,7 @@ "tasks": ["train", "validate"], 
"filters":[ { - "name": "HEModelDecryptor", + "path": "nvflare.app_opt.he.model_encryptor.HEModelEncryptor", "args": { } } diff --git a/examples/advanced/cifar10/cifar10-real-world/jobs/cifar10_fedavg_he/cifar10_fedavg_he/config/config_fed_server.json b/examples/advanced/cifar10/cifar10-real-world/jobs/cifar10_fedavg_he/cifar10_fedavg_he/config/config_fed_server.json index fecf33d7f8..64d83501af 100644 --- a/examples/advanced/cifar10/cifar10-real-world/jobs/cifar10_fedavg_he/cifar10_fedavg_he/config/config_fed_server.json +++ b/examples/advanced/cifar10/cifar10-real-world/jobs/cifar10_fedavg_he/cifar10_fedavg_he/config/config_fed_server.json @@ -24,13 +24,13 @@ }, { "id": "serialize_filter", - "name": "HEModelSerializeFilter", + "path": "nvflare.app_opt.he.model_serialize_filter.HEModelSerializeFilter", "args": { } }, { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "pt.networks.cifar10_nets.ModerateCNN", @@ -41,12 +41,12 @@ }, { "id": "shareable_generator", - "name": "HEModelShareableGenerator", + "path": "nvflare.app_opt.he.model_sharable_generator.HEModelShareableGenerator", "args": {} }, { "id": "aggregator", - "name": "HEInTimeAccumulateWeightedAggregator", + "path": "nvflare.app_opt.he.intime_accumulate_model_aggregator.HEInTimeAccumulateWeightedAggregator", "args": { "weigh_by_local_iter": false } @@ -58,7 +58,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } @@ -87,7 +87,7 @@ }, { "id": "cross_site_model_eval", - "name": "HECrossSiteModelEval", + "path": "nvflare.app_opt.he.cross_site_model_eval.HECrossSiteModelEval", "args": { "model_locator_id": "model_locator", "submit_model_timeout": 600, diff --git a/examples/advanced/cifar10/cifar10-real-world/jobs/cifar10_fedavg_stream_tb/cifar10_fedavg_stream_tb/config/config_fed_server.json b/examples/advanced/cifar10/cifar10-real-world/jobs/cifar10_fedavg_stream_tb/cifar10_fedavg_stream_tb/config/config_fed_server.json index 40eea9af66..41792cb7a1 100644 --- a/examples/advanced/cifar10/cifar10-real-world/jobs/cifar10_fedavg_stream_tb/cifar10_fedavg_stream_tb/config/config_fed_server.json +++ b/examples/advanced/cifar10/cifar10-real-world/jobs/cifar10_fedavg_stream_tb/cifar10_fedavg_stream_tb/config/config_fed_server.json @@ -21,7 +21,7 @@ }, { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "pt.networks.cifar10_nets.ModerateCNN", @@ -46,7 +46,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } diff --git a/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_central/cifar10_central/config/config_fed_server.json b/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_central/cifar10_central/config/config_fed_server.json index 97ee51162c..08506cf818 100644 --- a/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_central/cifar10_central/config/config_fed_server.json +++ b/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_central/cifar10_central/config/config_fed_server.json @@ -12,7 +12,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": 
"pt.networks.cifar10_nets.ModerateCNN", @@ -27,7 +27,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } diff --git a/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_fedavg/cifar10_fedavg/config/config_fed_server.json b/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_fedavg/cifar10_fedavg/config/config_fed_server.json index 31829d5451..9c85cc8374 100644 --- a/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_fedavg/cifar10_fedavg/config/config_fed_server.json +++ b/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_fedavg/cifar10_fedavg/config/config_fed_server.json @@ -21,7 +21,7 @@ }, { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "pt.networks.cifar10_nets.ModerateCNN", @@ -36,7 +36,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } diff --git a/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_fedopt/cifar10_fedopt/config/config_fed_server.json b/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_fedopt/cifar10_fedopt/config/config_fed_server.json index eef7dff18c..8a88d9f388 100644 --- a/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_fedopt/cifar10_fedopt/config/config_fed_server.json +++ b/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_fedopt/cifar10_fedopt/config/config_fed_server.json @@ -29,14 +29,14 @@ }, { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": "model" } }, { "id": "shareable_generator", - "name": "PTFedOptModelShareableGenerator", + "path": "nvflare.app_opt.pt.fedopt.PTFedOptModelShareableGenerator", "args": { "device": "cpu", "source_model": "model", @@ -71,7 +71,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } diff --git a/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_fedprox/cifar10_fedprox/config/config_fed_server.json b/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_fedprox/cifar10_fedprox/config/config_fed_server.json index be5ed1e3ab..ab7762f368 100644 --- a/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_fedprox/cifar10_fedprox/config/config_fed_server.json +++ b/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_fedprox/cifar10_fedprox/config/config_fed_server.json @@ -24,7 +24,7 @@ }, { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "pt.networks.cifar10_nets.ModerateCNN", @@ -39,7 +39,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } diff --git a/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_scaffold/cifar10_scaffold/config/config_fed_server.json b/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_scaffold/cifar10_scaffold/config/config_fed_server.json index 1ab9a10d36..3af5c7d895 100644 --- a/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_scaffold/cifar10_scaffold/config/config_fed_server.json +++ b/examples/advanced/cifar10/cifar10-sim/jobs/cifar10_scaffold/cifar10_scaffold/config/config_fed_server.json @@ 
-21,7 +21,7 @@ }, { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "pt.networks.cifar10_nets.ModerateCNN", @@ -36,7 +36,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } diff --git a/examples/advanced/experiment-tracking/mlflow/jobs/hello-pt-tb-mlflow/app/config/config_fed_server.conf b/examples/advanced/experiment-tracking/mlflow/jobs/hello-pt-tb-mlflow/app/config/config_fed_server.conf index 3dceed2149..9f9932eb76 100644 --- a/examples/advanced/experiment-tracking/mlflow/jobs/hello-pt-tb-mlflow/app/config/config_fed_server.conf +++ b/examples/advanced/experiment-tracking/mlflow/jobs/hello-pt-tb-mlflow/app/config/config_fed_server.conf @@ -9,7 +9,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "pt.simple_network.SimpleNetwork" @@ -30,7 +30,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } diff --git a/examples/advanced/experiment-tracking/tensorboard/jobs/tensorboard-streaming/app/config/config_fed_server.json b/examples/advanced/experiment-tracking/tensorboard/jobs/tensorboard-streaming/app/config/config_fed_server.json index bce92f7627..a16deff3ac 100644 --- a/examples/advanced/experiment-tracking/tensorboard/jobs/tensorboard-streaming/app/config/config_fed_server.json +++ b/examples/advanced/experiment-tracking/tensorboard/jobs/tensorboard-streaming/app/config/config_fed_server.json @@ -9,7 +9,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "pt.simple_network.SimpleNetwork", @@ -31,7 +31,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", "args": { "pt_persistor_id": "persistor" } diff --git a/examples/advanced/nlp-ner/jobs/bert_ncbi/app/config/config_fed_server.json b/examples/advanced/nlp-ner/jobs/bert_ncbi/app/config/config_fed_server.json index 2e30eb7c77..63a91fb1c7 100644 --- a/examples/advanced/nlp-ner/jobs/bert_ncbi/app/config/config_fed_server.json +++ b/examples/advanced/nlp-ner/jobs/bert_ncbi/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "custom.models.nlp_models.BertModel", diff --git a/examples/advanced/nlp-ner/jobs/gpt2_ncbi/app/config/config_fed_server.json b/examples/advanced/nlp-ner/jobs/gpt2_ncbi/app/config/config_fed_server.json index 99722aadd6..4cb7d0da53 100644 --- a/examples/advanced/nlp-ner/jobs/gpt2_ncbi/app/config/config_fed_server.json +++ b/examples/advanced/nlp-ner/jobs/gpt2_ncbi/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "custom.models.nlp_models.GPTModel", diff --git a/examples/advanced/prostate/prostate_2D/job_configs/prostate_central/app/config/config_fed_server.json 
b/examples/advanced/prostate/prostate_2D/job_configs/prostate_central/app/config/config_fed_server.json index ce838849e2..a48a948ad5 100644 --- a/examples/advanced/prostate/prostate_2D/job_configs/prostate_central/app/config/config_fed_server.json +++ b/examples/advanced/prostate/prostate_2D/job_configs/prostate_central/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "monai.networks.nets.unet.UNet", diff --git a/examples/advanced/prostate/prostate_2D/job_configs/prostate_ditto/app/config/config_fed_server.json b/examples/advanced/prostate/prostate_2D/job_configs/prostate_ditto/app/config/config_fed_server.json index 11b103cbf7..69ff02937d 100644 --- a/examples/advanced/prostate/prostate_2D/job_configs/prostate_ditto/app/config/config_fed_server.json +++ b/examples/advanced/prostate/prostate_2D/job_configs/prostate_ditto/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "monai.networks.nets.unet.UNet", diff --git a/examples/advanced/prostate/prostate_2D/job_configs/prostate_fedavg/app/config/config_fed_server.json b/examples/advanced/prostate/prostate_2D/job_configs/prostate_fedavg/app/config/config_fed_server.json index 11b103cbf7..69ff02937d 100644 --- a/examples/advanced/prostate/prostate_2D/job_configs/prostate_fedavg/app/config/config_fed_server.json +++ b/examples/advanced/prostate/prostate_2D/job_configs/prostate_fedavg/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "monai.networks.nets.unet.UNet", diff --git a/examples/advanced/prostate/prostate_2D/job_configs/prostate_fedprox/app/config/config_fed_server.json b/examples/advanced/prostate/prostate_2D/job_configs/prostate_fedprox/app/config/config_fed_server.json index 11b103cbf7..69ff02937d 100644 --- a/examples/advanced/prostate/prostate_2D/job_configs/prostate_fedprox/app/config/config_fed_server.json +++ b/examples/advanced/prostate/prostate_2D/job_configs/prostate_fedprox/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "monai.networks.nets.unet.UNet", diff --git a/examples/advanced/prostate/prostate_3D/job_configs/prostate_central/app/config/config_fed_server.json b/examples/advanced/prostate/prostate_3D/job_configs/prostate_central/app/config/config_fed_server.json index c5ae4c7695..7c77429fd6 100644 --- a/examples/advanced/prostate/prostate_3D/job_configs/prostate_central/app/config/config_fed_server.json +++ b/examples/advanced/prostate/prostate_3D/job_configs/prostate_central/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "monai.networks.nets.unet.UNet", diff --git a/examples/advanced/prostate/prostate_3D/job_configs/prostate_ditto/app/config/config_fed_server.json 
b/examples/advanced/prostate/prostate_3D/job_configs/prostate_ditto/app/config/config_fed_server.json index e941dec770..95bb5d3a14 100644 --- a/examples/advanced/prostate/prostate_3D/job_configs/prostate_ditto/app/config/config_fed_server.json +++ b/examples/advanced/prostate/prostate_3D/job_configs/prostate_ditto/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "monai.networks.nets.unet.UNet", diff --git a/examples/advanced/prostate/prostate_3D/job_configs/prostate_fedavg/app/config/config_fed_server.json b/examples/advanced/prostate/prostate_3D/job_configs/prostate_fedavg/app/config/config_fed_server.json index e941dec770..95bb5d3a14 100644 --- a/examples/advanced/prostate/prostate_3D/job_configs/prostate_fedavg/app/config/config_fed_server.json +++ b/examples/advanced/prostate/prostate_3D/job_configs/prostate_fedavg/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "monai.networks.nets.unet.UNet", diff --git a/examples/advanced/prostate/prostate_3D/job_configs/prostate_fedprox/app/config/config_fed_server.json b/examples/advanced/prostate/prostate_3D/job_configs/prostate_fedprox/app/config/config_fed_server.json index e941dec770..95bb5d3a14 100644 --- a/examples/advanced/prostate/prostate_3D/job_configs/prostate_fedprox/app/config/config_fed_server.json +++ b/examples/advanced/prostate/prostate_3D/job_configs/prostate_fedprox/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "monai.networks.nets.unet.UNet", diff --git a/integration/nemo/examples/prompt_learning/jobs/templates/client/config/config_fed_client_multiprocess.json b/integration/nemo/examples/prompt_learning/jobs/templates/client/config/config_fed_client_multiprocess.json index 119bb08ec9..18a203159f 100644 --- a/integration/nemo/examples/prompt_learning/jobs/templates/client/config/config_fed_client_multiprocess.json +++ b/integration/nemo/examples/prompt_learning/jobs/templates/client/config/config_fed_client_multiprocess.json @@ -14,7 +14,7 @@ ], "executor": { "id": "Executor", - "name": "PTMultiProcessExecutor", + "path": "nvflare.app_opt.pt.multi_process_executor.PTMultiProcessExecutor", "args": { "executor_id": "learner_executor", "num_of_processes": "{devices}", diff --git a/integration/nemo/examples/prompt_learning/jobs/templates/server/config/config_fed_server.json b/integration/nemo/examples/prompt_learning/jobs/templates/server/config/config_fed_server.json index 2d1ebc1c5d..8d80c895f2 100644 --- a/integration/nemo/examples/prompt_learning/jobs/templates/server/config/config_fed_server.json +++ b/integration/nemo/examples/prompt_learning/jobs/templates/server/config/config_fed_server.json @@ -13,7 +13,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "nemo_nvflare.ServerPromptEncoder", diff --git a/integration/nemo/examples/supervised_fine_tuning/jobs/templates/client/config/config_fed_client_multiprocess.json 
b/integration/nemo/examples/supervised_fine_tuning/jobs/templates/client/config/config_fed_client_multiprocess.json index 602bbc951c..7009e216f6 100644 --- a/integration/nemo/examples/supervised_fine_tuning/jobs/templates/client/config/config_fed_client_multiprocess.json +++ b/integration/nemo/examples/supervised_fine_tuning/jobs/templates/client/config/config_fed_client_multiprocess.json @@ -13,7 +13,7 @@ ], "executor": { "id": "Executor", - "name": "PTMultiProcessExecutor", + "path": "nvflare.app_opt.pt.multi_process_executor.PTMultiProcessExecutor", "args": { "executor_id": "learner_executor", "num_of_processes": "{devices}", diff --git a/integration/nemo/examples/supervised_fine_tuning/jobs/templates/server/config/config_fed_server.json b/integration/nemo/examples/supervised_fine_tuning/jobs/templates/server/config/config_fed_server.json index 909c901fbf..82d84f40f9 100755 --- a/integration/nemo/examples/supervised_fine_tuning/jobs/templates/server/config/config_fed_server.json +++ b/integration/nemo/examples/supervised_fine_tuning/jobs/templates/server/config/config_fed_server.json @@ -12,7 +12,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "nemo_nvflare.ServerSFTModel", diff --git a/research/auto-fed-rl/jobs/cifar10_autofedrl/cifar10_autofedrl/config/config_fed_server.json b/research/auto-fed-rl/jobs/cifar10_autofedrl/cifar10_autofedrl/config/config_fed_server.json index 147cef9b84..99111bacd3 100644 --- a/research/auto-fed-rl/jobs/cifar10_autofedrl/cifar10_autofedrl/config/config_fed_server.json +++ b/research/auto-fed-rl/jobs/cifar10_autofedrl/cifar10_autofedrl/config/config_fed_server.json @@ -26,7 +26,7 @@ }, { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": "model" } @@ -115,7 +115,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "pt_persistor_id": "persistor" } diff --git a/research/condist-fl/jobs/condist/server/config/config_fed_server.json b/research/condist-fl/jobs/condist/server/config/config_fed_server.json index 7ab1dc5e45..a0ea60c9b2 100644 --- a/research/condist-fl/jobs/condist/server/config/config_fed_server.json +++ b/research/condist-fl/jobs/condist/server/config/config_fed_server.json @@ -13,7 +13,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": "model", "source_ckpt_file_full_name": null @@ -35,7 +35,7 @@ }, { "id": "shareable_generator", - "name": "PTFedOptModelShareableGenerator", + "path": "nvflare.app_opt.pt.fedopt.PTFedOptModelShareableGenerator", "args": { "device": "cpu", "source_model": "model", diff --git a/research/fed-ce/jobs/fedce_prostate/app/config/config_fed_server.json b/research/fed-ce/jobs/fedce_prostate/app/config/config_fed_server.json index b8c619cb74..d4007ce954 100644 --- a/research/fed-ce/jobs/fedce_prostate/app/config/config_fed_server.json +++ b/research/fed-ce/jobs/fedce_prostate/app/config/config_fed_server.json @@ -10,7 +10,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "networks.unet.UNet", diff --git 
a/research/quantifying-data-leakage/jobs/app_template/server/config/config_fed_server.json b/research/quantifying-data-leakage/jobs/app_template/server/config/config_fed_server.json index e9f8418c87..a9b0d0adac 100644 --- a/research/quantifying-data-leakage/jobs/app_template/server/config/config_fed_server.json +++ b/research/quantifying-data-leakage/jobs/app_template/server/config/config_fed_server.json @@ -12,7 +12,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "monai.networks.nets.torchvision_fc.TorchVisionFCModel", @@ -42,7 +42,7 @@ }, { "id": "model_locator", - "name": "PTFileModelLocator", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "pt_persistor_id": "persistor" } diff --git a/tests/integration_test/data/apps/pt_use_name/app/config/config_fed_server.json b/tests/integration_test/data/apps/pt_use_name/app/config/config_fed_server.json index 62954c688f..4422abf263 100644 --- a/tests/integration_test/data/apps/pt_use_name/app/config/config_fed_server.json +++ b/tests/integration_test/data/apps/pt_use_name/app/config/config_fed_server.json @@ -9,7 +9,7 @@ "components": [ { "id": "persistor", - "name": "PTFileModelPersistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { "model": { "path": "simple_network.SimpleNetwork" From faf10d0635fb1db3240a102aa491629df5c41376 Mon Sep 17 00:00:00 2001 From: nvkevlu <55759229+nvkevlu@users.noreply.github.com> Date: Wed, 14 Aug 2024 21:24:10 -0400 Subject: [PATCH 17/26] Add first version of release notes (#2800) * add first version of release notes * revise release notes --- docs/release_notes/flare_250.rst | 145 +++++++++++++++++++++++++++++++ docs/whats_new.rst | 3 +- 2 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 docs/release_notes/flare_250.rst diff --git a/docs/release_notes/flare_250.rst b/docs/release_notes/flare_250.rst new file mode 100644 index 0000000000..f407a3b89e --- /dev/null +++ b/docs/release_notes/flare_250.rst @@ -0,0 +1,145 @@ +************************** +What's New in FLARE v2.5.0 +************************** + +User Experience Improvements +============================ +NVFlare 2.5.0 offers several new sets of APIs that allows for end-to-end ease of use that can greatly improve researcher and data +scientists' experience working with FLARE. The new API covers client, server and job construction with end-to-end pythonic user experience. + +Model Controller API +-------------------- +The new Model Controller API greatly simplifies the experience of developing new federated learning workflows. Users can simply subclass +the ModelController to develop new workflows. The new API doesn't require users to know the details of NVFlare constructs except for FLModel +class, where it is simply a data structure that contains model weights, optimization parameters and metadata. + +You can easily construct a new workflow with basic python code, and when ready, the send_and_wait() communication function is all you need for +communication between clients and server. + +Client API +---------- +We introduced another :ref:`client_api` implementation, +:class:`InProcessClientAPIExecutor`. +This has the same interface and syntax of the previous Client API using +:class:`SubprocessLauncher`, except all communication is in memory. 
+ +Using this in-process client API, we build a :class:`ScriptExecutor `, +which is directly used in the new Job API. + +Compared with SubProcessLauncherClientAPI, the in-process client API offers better efficiency and is easier to configure. All +the operations are carried out within the memory space of the executor. + +SubProcessLauncherClientAPI can still be used for cases where a separate training process is required. + +Job API +------- +The new Job API, or :ref:`fed_job_api`, combined with the Client API and Model Controller API, gives users an end-to-end Pythonic +user experience. The job configuration, which had to be written by hand prior to this release, can now be generated automatically, so the +user doesn't need to edit the configuration files manually. + +We provide many examples to demonstrate the power of the new Job API, making it very easy to experiment with new federated +learning algorithms or create new applications. + +Flower Integration +================== +Integration between NVFlare and the `Flower `_ framework aims to let researchers leverage +the strengths of both frameworks by enabling Flower projects to run seamlessly on top of NVFlare. With this integration, applications written with the Flower framework can run in the FLARE runtime +environment without any modification. This initial integration streamlines the process and +ensures smooth interoperability between the two platforms, enhancing the overall efficiency and accessibility of FL applications. +Please find details `here `__. A hello-world example is available +:github_nvflare_link:`here `. + +Secure XGBoost +============== +The latest features from XGBoost introduced support for secure federated learning via homomorphic encryption. For vertical federated +XGBoost learning, the gradients of each sample are protected by encryption such that the label information +will not be leaked to unintended parties, while for horizontal federated XGBoost learning, the local gradient histograms will not be +learnt by the central aggregation server. + +With our encryption plugins working with XGBoost, NVFlare now supports all secure federated schemes for XGBoost model training, with +both CPU and GPU. + +TensorFlow support +================== +With community contributions, we added FedOpt, FedProx, and Scaffold algorithms using TensorFlow to create parity with PyTorch. You +can find them :github_nvflare_link:`here `. + +FOBS Auto Registration +====================== +FOBS, the secure mechanism NVFlare uses for message serialization and deserialization, is enhanced with new auto registration features. +These changes reduce the number of decomposers that users have to register. The changes are: + + - Auto registering of decomposers on deserialization. The decomposer class is stored in the serialized data and the decomposers are + registered automatically when deserializing. If a component only receives serialized data and doesn't perform serialization itself, + registering decomposers is no longer needed. + - Data Class decomposer auto registering on serialization. If a decomposer is not found for a class, FOBS will try to treat the class + as a Data Class and register a DataClassDecomposer for it. This works in most cases, but not all.
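+
+As a minimal illustration of the intended behavior (this snippet is not taken from the NVFlare codebase; the
+``TrainingStats`` data class is a made-up example), a plain Python data class can now round-trip through FOBS without any
+manual decomposer registration:
+
+.. code-block:: python
+
+    from dataclasses import dataclass
+
+    from nvflare.fuel.utils import fobs
+
+
+    @dataclass
+    class TrainingStats:
+        rounds: int
+        accuracy: float
+
+
+    # Serialization: no decomposer is registered for TrainingStats, so FOBS treats it
+    # as a Data Class and auto-registers a DataClassDecomposer for it.
+    payload = fobs.dumps(TrainingStats(rounds=5, accuracy=0.87))
+
+    # Deserialization: the decomposer class recorded in the serialized data is
+    # auto-registered on the receiving side, so no manual registration is needed there either.
+    stats = fobs.loads(payload)
+    print(stats)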
+ + +New Examples +============ +Secure Federated Kaplan-Meier Analysis +-------------------------------------- +The :github_nvflare_link:`Secure Federated Kaplan-Meier Analysis via Time-Binning and Homomorphic Encryption example ` +illustrates two features: + + - How to perform Kaplan-Meier survival analysis in a federated setting, without and with secure features via time-binning and Homomorphic Encryption (HE). + - How to use the FLARE ModelController API to construct a workflow to facilitate HE in simulator mode. + + +Federated Logistic Regression with NR optimization +-------------------------------------------------- +The :github_nvflare_link:`Federated Logistic Regression with Second-Order Newton-Raphson optimization example ` +shows how to implement federated binary classification via logistic regression with second-order Newton-Raphson optimization. + +BioNeMo example for Drug Discovery +---------------------------------- +`BioNeMo `_ is NVIDIA's generative AI platform for drug discovery. +We included several examples of running BioNeMo in a federated learning environment using NVFlare: + + - The :github_nvflare_link:`task fitting example ` includes a notebook that + shows how to obtain protein-learned representations in the form of embeddings using the ESM-1nv pre-trained model. The + model is trained with NVIDIA's BioNeMo framework for Large Language Model training and inference. + - The :github_nvflare_link:`downstream example ` shows three different downstream + tasks for fine-tuning a BioNeMo ESM-style model. + +Hierarchical Federated Statistics +--------------------------------- +:github_nvflare_link:`Hierarchical Federated Statistics ` is helpful when there +are multiple organizations involved. For example, in medical device applications, device usage statistics can be +viewed from the points of view of the device, the device-hosting site, and the hospital or manufacturer. +Manufacturers would like to see the usage stats of their product (device) across different sites and hospitals, while hospitals +may want to see overall stats of devices, including different products from different manufacturers. In such cases, hierarchical +federated statistics are very helpful. + +FedAvg Early Stopping Example +------------------------------ +The `FedAvg Early Stopping example `_ demonstrates that, with the new server-side model +controller API, it is easy to change the control conditions and adjust workflows with a few lines of Python code. + +TensorFlow Algorithms & Examples +-------------------------------- +FedOpt, FedProx, and Scaffold implementations for TensorFlow. + +FedBN: Federated Learning on Non-IID Features via Local Batch Normalization +--------------------------------------------------------------------------- +The `FedBN example `_ showcases a federated learning algorithm designed +to address the feature shift problem when aggregating models across different data distributions. + +FedBN uses local batch normalization to alleviate the feature shift before averaging models. The resulting scheme +outperforms both classical FedAvg and FedProx in extensive experiments, and these empirical results are supported by a +convergence analysis showing that, in a simplified setting, FedBN has a faster convergence rate than FedAvg.
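+
+As a minimal sketch of the core FedBN idea on the client side (assuming batch normalization parameters can be identified
+by a name pattern such as ``"bn"``; the actual example may organize this differently):
+
+.. code-block:: python
+
+    import torch.nn as nn
+
+
+    def load_global_weights_keep_local_bn(net: nn.Module, global_params: dict) -> None:
+        """Update a local model with global weights while keeping the local batch-norm parameters."""
+        local_state = net.state_dict()
+        # Drop batch-norm entries from the received global model so that the
+        # locally learned normalization statistics are not overwritten.
+        non_bn_params = {k: v for k, v in global_params.items() if "bn" not in k}
+        local_state.update(non_bn_params)
+        net.load_state_dict(local_state)
+
+The rest of the training loop can stay the same as in a standard FedAvg client.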
+ + +End-to-end Federated XGBoost examples +------------------------------------- +In `this example `__, +we try to show that end-to-end process of feature engineering, pre-processing and training in federated settings. You +can use FLARE to perform federated ETL and then training. + +Developer Tutorial Page +======================= +To let users quickly learn Federated Learning with FLARE, we developed a `tutorial web page `_ with +both code and video to interactively learn how to convert and run FL in a few minutes. We also +created a tutorial catalog to help you easily search and find the examples you are interested in. diff --git a/docs/whats_new.rst b/docs/whats_new.rst index 1153edc716..845d108182 100644 --- a/docs/whats_new.rst +++ b/docs/whats_new.rst @@ -4,7 +4,7 @@ What's New ########## -.. include:: release_notes/flare_240.rst +.. include:: release_notes/flare_250.rst ************************** Previous Releases of FLARE @@ -13,6 +13,7 @@ Previous Releases of FLARE .. toctree:: :maxdepth: 1 + release_notes/flare_240 release_notes/flare_230 release_notes/flare_220 release_notes/flare_210 From 9fb0993865aa7f2ab7dc175f5e73de8e5eecbd02 Mon Sep 17 00:00:00 2001 From: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Date: Thu, 15 Aug 2024 12:11:46 -0700 Subject: [PATCH 18/26] FIX hard-coded sp_end_point in POC (#2795) --- nvflare/tool/poc/poc_commands.py | 19 ++++++++++++++++++- tests/unit_test/lighter/poc_commands_test.py | 14 +++++++++++++- 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/nvflare/tool/poc/poc_commands.py b/nvflare/tool/poc/poc_commands.py index f38394e7ab..9026fa8491 100644 --- a/nvflare/tool/poc/poc_commands.py +++ b/nvflare/tool/poc/poc_commands.py @@ -249,6 +249,22 @@ def get_fl_client_names(project_config: OrderedDict) -> List[str]: return client_names +def replace_server_with_localhost(sp_end_point: str) -> str: + """ + :param sp_end_point:(str) example: server1:8002:8003 + :return: localhost:: + """ + parts = sp_end_point.split(":") + if len(parts) != 3: + raise ValueError("Input must be in the format 'server:port1:port2'") + for p in parts: + if not p: + raise ValueError("Input must be in the format 'server:port1:port2', each part can not be empty") + + parts[0] = "localhost" + return ":".join(parts) + + def prepare_builders(project_dict: OrderedDict) -> List: builders = list() for b in project_dict.get("builders"): @@ -257,7 +273,8 @@ def prepare_builders(project_dict: OrderedDict) -> List: if b.get("path") == "nvflare.lighter.impl.static_file.StaticFileBuilder": path = "nvflare.lighter.impl.local_static_file.LocalStaticFileBuilder" - args["overseer_agent"]["args"]["sp_end_point"] = "localhost:8002:8003" + sp_end_point = args["overseer_agent"]["args"]["sp_end_point"] + args["overseer_agent"]["args"]["sp_end_point"] = replace_server_with_localhost(sp_end_point) elif b.get("path") == "nvflare.lighter.impl.cert.CertBuilder": path = "nvflare.lighter.impl.local_cert.LocalCertBuilder" diff --git a/tests/unit_test/lighter/poc_commands_test.py b/tests/unit_test/lighter/poc_commands_test.py index 0843c7b1e3..f6a0cafb9a 100644 --- a/tests/unit_test/lighter/poc_commands_test.py +++ b/tests/unit_test/lighter/poc_commands_test.py @@ -24,6 +24,7 @@ get_service_command, get_service_config, prepare_builders, + replace_server_with_localhost, update_clients, ) from nvflare.tool.poc.service_constants import FlareServiceConstants as SC @@ -187,7 +188,6 @@ def test_prepare_builders(self): } project_config = collections.OrderedDict(project_config) - 
builders = prepare_builders(project_config) assert len(builders) == 2 for c in builders: @@ -227,3 +227,15 @@ def test_get_packages_config(self): assert "server1" == global_config[SC.FLARE_SERVER] assert "admin@nvidia.com" == global_config[SC.FLARE_PROJ_ADMIN] assert ["client-1", "client-2"] == global_config[SC.FLARE_CLIENTS] + + def test_replace_server_with_localhost(self): + + assert "localhost:8002:8003" == replace_server_with_localhost("server:8002:8003") + + with pytest.raises(ValueError): + assert "localhost:8002:8003" == replace_server_with_localhost("server:8002") + + with pytest.raises( + ValueError, match="Input must be in the format 'server:port1:port2', each part can not be empty" + ): + assert "localhost:8002:8003" == replace_server_with_localhost("server:8002:") From 77ef92d55cc8243e25fce76e7a6f3eaca0e56406 Mon Sep 17 00:00:00 2001 From: nvkevlu <55759229+nvkevlu@users.noreply.github.com> Date: Thu, 15 Aug 2024 20:26:24 -0400 Subject: [PATCH 19/26] Add hello examples with new APIs (#2785) * add hello examples with new APIs * move and reorganize hello-examples to keep old ones in CI * remove prepare data for hello tf * update wording * update dates * update wording * update wording * remove note * add information about dataset --------- Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> --- docs/example_applications_algorithms.rst | 9 +- docs/examples/hello_cross_site_eval.rst | 166 +++++++++ docs/examples/hello_fedavg_numpy.rst | 191 +++++++++++ docs/examples/hello_pt.rst | 243 ------------- docs/examples/hello_pt_job_api.rst | 263 ++++++++++++++ docs/examples/hello_tf_job_api.rst | 223 ++++++++++++ docs/examples/hello_world_examples.rst | 6 +- .../hello-world/hello-cross-val/README.md | 25 ++ .../app/config/config_fed_client.conf | 88 +++++ .../app/config/config_fed_server.conf | 103 ++++++ .../jobs/hello-cross-val/app/custom/net.py | 37 ++ .../jobs/hello-cross-val/app/custom/train.py | 201 +++++++++++ .../jobs/hello-cross-val/meta.conf | 10 + .../requirements.txt | 0 examples/hello-world/hello-cyclic/README.md | 32 +- .../cyclic_script_executor_hello-cyclic.py | 48 +++ .../hello-cyclic/src/hello-cyclic_fl.py | 95 ++++++ .../hello-world/hello-cyclic/src/tf_net.py | 25 ++ .../hello-world/hello-fedavg-numpy/README.md | 36 ++ .../fedavg_script_executor_hello-numpy.py | 40 +++ .../hello-fedavg-numpy_flare_api.ipynb} | 16 +- .../hello-fedavg-numpy_getting_started.ipynb | 320 ++++++++++++++++++ .../hello-fedavg-numpy/requirements.txt | 1 + .../hello-fedavg-numpy/src/hello-numpy_fl.py | 75 ++++ .../hello-world/hello-numpy-sag/README.md | 32 -- examples/hello-world/hello-pt/README.md | 96 +----- .../fedavg_script_executor_hello-pt.py | 44 +++ examples/hello-world/hello-pt/prepare_data.sh | 3 - .../hello-pt/src/hello-pt_cifar10_fl.py | 95 ++++++ .../hello-pt/src/simple_network.py | 37 ++ .../{hello-tf2 => hello-tf}/README.md | 48 ++- .../fedavg_script_executor_hello-tf.py | 44 +++ .../{hello-tf2 => hello-tf}/requirements.txt | 0 .../hello-world/hello-tf/src/hello-tf_fl.py | 95 ++++++ examples/hello-world/hello-tf/src/tf_net.py | 25 ++ .../hello-world/hello-tf2/prepare_data.sh | 1 - .../standalone_job/hello_numpy_examples.yml | 38 --- .../hello_numpy_previous_examples.yml | 45 +++ .../standalone_job/hello_pt_examples.yml | 4 +- .../standalone_job/hello_tf_examples.yml | 6 +- .../app/config/config_fed_client.json | 0 .../app/config/config_fed_server.json | 0 .../hello-cyclic/app/custom/__init__.py | 0 .../app/custom/tf2_model_persistor.py | 0 
.../hello-cyclic/app/custom/tf2_net.py | 0 .../hello-cyclic/app/custom/trainer.py | 0 .../previous_jobs}/hello-cyclic/meta.json | 0 .../app/config/config_fed_client.json | 26 ++ .../app/config/config_fed_server.json | 73 ++++ .../hello-numpy-cross-val/meta.json | 10 + .../app/config/config_fed_client.json | 0 .../app/config/config_fed_server.json | 0 .../previous_jobs}/hello-numpy-sag/meta.json | 0 .../app/config/config_fed_client.json | 0 .../app/config/config_fed_server.json | 0 .../hello-pt/app/custom/cifar10trainer.py | 0 .../hello-pt/app/custom/cifar10validator.py | 0 .../hello-pt/app/custom/pt_constants.py | 0 .../hello-pt/app/custom/pt_model_locator.py | 0 .../hello-pt/app/custom/simple_network.py | 0 .../hello-pt/app/custom/test_custom.py | 0 .../previous_jobs}/hello-pt/meta.json | 0 .../app/config/config_fed_client.json | 0 .../app/config/config_fed_server.json | 0 .../hello-tf2/app/custom/__init__.py | 0 .../hello-tf2/app/custom/filter.py | 0 .../app/custom/tf2_model_persistor.py | 0 .../hello-tf2/app/custom/tf2_net.py | 0 .../hello-tf2/app/custom/trainer.py | 0 .../previous_jobs}/hello-tf2/meta.json | 0 tests/integration_test/test_configs.yml | 1 + 71 files changed, 2504 insertions(+), 472 deletions(-) create mode 100644 docs/examples/hello_cross_site_eval.rst create mode 100644 docs/examples/hello_fedavg_numpy.rst delete mode 100644 docs/examples/hello_pt.rst create mode 100644 docs/examples/hello_pt_job_api.rst create mode 100644 docs/examples/hello_tf_job_api.rst create mode 100644 examples/hello-world/hello-cross-val/README.md create mode 100644 examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/config/config_fed_client.conf create mode 100644 examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/config/config_fed_server.conf create mode 100644 examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/custom/net.py create mode 100644 examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/custom/train.py create mode 100644 examples/hello-world/hello-cross-val/jobs/hello-cross-val/meta.conf rename examples/hello-world/{hello-numpy-sag => hello-cross-val}/requirements.txt (100%) create mode 100644 examples/hello-world/hello-cyclic/cyclic_script_executor_hello-cyclic.py create mode 100644 examples/hello-world/hello-cyclic/src/hello-cyclic_fl.py create mode 100644 examples/hello-world/hello-cyclic/src/tf_net.py create mode 100644 examples/hello-world/hello-fedavg-numpy/README.md create mode 100644 examples/hello-world/hello-fedavg-numpy/fedavg_script_executor_hello-numpy.py rename examples/hello-world/{hello-numpy-sag/hello_numpy_sag.ipynb => hello-fedavg-numpy/hello-fedavg-numpy_flare_api.ipynb} (92%) create mode 100644 examples/hello-world/hello-fedavg-numpy/hello-fedavg-numpy_getting_started.ipynb create mode 100644 examples/hello-world/hello-fedavg-numpy/requirements.txt create mode 100644 examples/hello-world/hello-fedavg-numpy/src/hello-numpy_fl.py delete mode 100644 examples/hello-world/hello-numpy-sag/README.md create mode 100644 examples/hello-world/hello-pt/fedavg_script_executor_hello-pt.py delete mode 100755 examples/hello-world/hello-pt/prepare_data.sh create mode 100644 examples/hello-world/hello-pt/src/hello-pt_cifar10_fl.py create mode 100644 examples/hello-world/hello-pt/src/simple_network.py rename examples/hello-world/{hello-tf2 => hello-tf}/README.md (65%) create mode 100644 examples/hello-world/hello-tf/fedavg_script_executor_hello-tf.py rename examples/hello-world/{hello-tf2 => hello-tf}/requirements.txt (100%) create mode 
100644 examples/hello-world/hello-tf/src/hello-tf_fl.py create mode 100644 examples/hello-world/hello-tf/src/tf_net.py delete mode 100755 examples/hello-world/hello-tf2/prepare_data.sh create mode 100644 tests/integration_test/data/test_configs/standalone_job/hello_numpy_previous_examples.yml rename {examples/hello-world/hello-cyclic/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-cyclic/app/config/config_fed_client.json (100%) rename {examples/hello-world/hello-cyclic/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-cyclic/app/config/config_fed_server.json (100%) rename {examples/hello-world/hello-cyclic/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-cyclic/app/custom/__init__.py (100%) rename {examples/hello-world/hello-cyclic/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-cyclic/app/custom/tf2_model_persistor.py (100%) rename {examples/hello-world/hello-cyclic/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-cyclic/app/custom/tf2_net.py (100%) rename {examples/hello-world/hello-cyclic/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-cyclic/app/custom/trainer.py (100%) rename {examples/hello-world/hello-cyclic/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-cyclic/meta.json (100%) create mode 100755 tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-cross-val/app/config/config_fed_client.json create mode 100755 tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-cross-val/app/config/config_fed_server.json create mode 100644 tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-cross-val/meta.json rename {examples/hello-world/hello-numpy-sag/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-numpy-sag/app/config/config_fed_client.json (100%) rename {examples/hello-world/hello-numpy-sag/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-numpy-sag/app/config/config_fed_server.json (100%) rename {examples/hello-world/hello-numpy-sag/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-numpy-sag/meta.json (100%) rename {examples/hello-world/hello-pt/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-pt/app/config/config_fed_client.json (100%) rename {examples/hello-world/hello-pt/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-pt/app/config/config_fed_server.json (100%) rename {examples/hello-world/hello-pt/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-pt/app/custom/cifar10trainer.py (100%) rename {examples/hello-world/hello-pt/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-pt/app/custom/cifar10validator.py (100%) rename {examples/hello-world/hello-pt/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-pt/app/custom/pt_constants.py (100%) rename {examples/hello-world/hello-pt/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-pt/app/custom/pt_model_locator.py (100%) rename {examples/hello-world/hello-pt/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-pt/app/custom/simple_network.py (100%) rename 
{examples/hello-world/hello-pt/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-pt/app/custom/test_custom.py (100%) rename {examples/hello-world/hello-pt/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-pt/meta.json (100%) rename {examples/hello-world/hello-tf2/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-tf2/app/config/config_fed_client.json (100%) rename {examples/hello-world/hello-tf2/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-tf2/app/config/config_fed_server.json (100%) rename {examples/hello-world/hello-tf2/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-tf2/app/custom/__init__.py (100%) rename {examples/hello-world/hello-tf2/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-tf2/app/custom/filter.py (100%) rename {examples/hello-world/hello-tf2/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-tf2/app/custom/tf2_model_persistor.py (100%) rename {examples/hello-world/hello-tf2/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-tf2/app/custom/tf2_net.py (100%) rename {examples/hello-world/hello-tf2/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-tf2/app/custom/trainer.py (100%) rename {examples/hello-world/hello-tf2/jobs => tests/integration_test/data/test_configs/standalone_job/previous_jobs}/hello-tf2/meta.json (100%) diff --git a/docs/example_applications_algorithms.rst b/docs/example_applications_algorithms.rst index 3b07038e97..52e0b65e2b 100644 --- a/docs/example_applications_algorithms.rst +++ b/docs/example_applications_algorithms.rst @@ -24,8 +24,8 @@ Can be run from the :github_nvflare_link:`hello_world notebook ` - Example using the Scatter And Gather (SAG) workflow with a Numpy trainer - * :ref:`Hello Cross-Site Validation ` - Example using the Cross Site Model Eval workflow with a Numpy trainer, also demonstrates running cross site validation using the previous training results. + * :ref:`Hello FedAvg with NumPy ` - Example using the FedAvg workflow with a NumPy trainer + * :ref:`Hello Cross-Site Validation ` - Example using the Cross Site Eval workflow, also demonstrates running cross site validation using the previous training results. * :github_nvflare_link:`Hello Cyclic Weight Transfer (GitHub) ` - Example using the CyclicController workflow to implement `Cyclic Weight Transfer `_ with TensorFlow as the deep learning training framework * :github_nvflare_link:`Swarm Learning ` - Example using Swarm Learning and Client-Controlled Cross-site Evaluation workflows. * :github_nvflare_link:`Client-Controlled Cyclic Weight Transfer ` - Example using Client-Controlled Cyclic workflow using Client API. @@ -33,9 +33,8 @@ Can be run from the :github_nvflare_link:`hello_world notebook ` - Example image classifier using FedAvg and PyTorch as the deep learning training framework - * :ref:`Hello TensorFlow ` - Example image classifier using FedAvg and TensorFlow as the deep learning training frameworks - + * :ref:`Hello PyTorch ` - Example image classifier using FedAvg and PyTorch as the deep learning training framework + * :ref:`Hello TensorFlow ` - Example image classifier using FedAvg and TensorFlow as the deep learning training frameworks 2. 
Step-By-Step Example Series diff --git a/docs/examples/hello_cross_site_eval.rst b/docs/examples/hello_cross_site_eval.rst new file mode 100644 index 0000000000..685f6491b6 --- /dev/null +++ b/docs/examples/hello_cross_site_eval.rst @@ -0,0 +1,166 @@ +.. _hello_cross_val: + +Hello Cross-Site Validation +=========================== + +Before You Start +---------------- + +Before jumping into this guide, make sure you have an environment +with `NVIDIA FLARE `_ installed. + +You can follow :ref:`getting_started` on the general concept of setting up a +Python virtual environment (the recommended environment) and how to install NVIDIA FLARE. + +Prerequisite +------------- + +This example introduces :class:`CrossSiteEval` and builds +on the :doc:`Hello PyTorch ` example +based on the :class:`FedAvg` workflow. + +Introduction +------------- +In this exercise, you will learn how to use NVIDIA FLARE to perform cross site validation +after training. + +The training process is similar to the train script uesd in the :doc:`Hello PyTorch ` example. This example does not +use the Job API to construct the job but instead has the job in the ``jobs`` folder of the example so you can see the server and +client configurations. + +The setup of this exercise consists of one **server** and two **clients**. +The server side model starts with the default weights when the model is loaded with :class:`PTFileModelPersistor`. + +Cross site validation consists of the following steps: + + - The :class:`CrossSiteEval` workflow + gets the client models with the ``submit_model`` task. + - The ``validate`` task is broadcast to the all participating clients with the model shareable containing the model data, + and results from the ``validate`` task are saved. + +During this exercise, we will see how NVIDIA FLARE takes care of most of the above steps with little work from the user. +We will be working with the ``hello-cross-val`` application in the examples folder. +Custom FL applications can contain the folders: + + #. **custom**: contains the custom components including our training script (``train.py``, ``net.py``) + #. **config**: contains client and server configurations (``config_fed_client.conf``, ``config_fed_server.conf``) + #. **resources**: can optionally contain the logger config (``log.config``) + +Let's get started. First clone the repo, if you haven't already: + +.. code-block:: shell + + $ git clone https://github.com/NVIDIA/NVFlare.git + +Remember to activate your NVIDIA FLARE Python virtual environment from the installation guide. + +Ensure PyTorch and torchvision are installed: + +.. code-block:: shell + + (nvflare-env) $ python3 -m pip install torch torchvision + +Now that you have all your dependencies installed, let's take a look at the job. + + +Training +-------------------------------- + +In the :doc:`Hello PyTorch ` example, we implemented the setup and the training script in ``hello-pt_cifar10_fl.py``. +In this example, we start from the same basic setup and training script but extend it to process the ``validate`` and ``submit_model`` tasks to +work with the :class:`CrossSiteEval` +workflow to get the client models. + +Note that the server also produces a global model. +The :class:`CrossSiteEval` +workflow submits the server model for evaluation after the client models. + +Implementing the Validator +-------------------------- + +The code for processing the ``validate`` task during +the :class:`CrossSiteEval` workflow is added to the +``while flare.is_running():`` loop in the training script. 
+ +.. code-block:: python + + elif flare.is_evaluate(): + accuracy = evaluate(input_model.params) + print(f"({client_id}) accuracy: {accuracy}") + flare.send(flare.FLModel(metrics={"accuracy": accuracy})) + +It handles the ``validate`` task by performing calling the ``evaluate()`` method we have added to the ``train.py`` training script in our custom folder. + +Application Configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Inside the config folder there are two files, ``config_fed_client.json`` and ``config_fed_server.json``. + +.. literalinclude:: ../../examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/config/config_fed_server.conf + :language: conf + :linenos: + :caption: config_fed_server.conf + +The server now has a second workflow, :class:`CrossSiteEval`, configured after Scatter and +Gather (:class:`ScatterAndGather` is an implementation of the :class:`FedAvg` workflow). + + +.. literalinclude:: ../../examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/config/config_fed_client.conf + :language: conf + :linenos: + :caption: config_fed_client.conf + +The client configuration now uses the Executor :class:`PTClientAPILauncherExecutor` +configured to launch the train script ``train.py`` with :class:`SubprocessLauncher`. +The "train", "validate", and "submit_model" tasks have been configured for the added to the ``PTClientAPILauncherExecutor`` Executor to +work with the :class:`CrossSiteEval` workflow. + +Cross site validation! +---------------------- + +To run the application, you can use a POC environment or a real provisioned environment and use the FLARE Console or the FLARE API to submit the job, +or you can run quickly run it with the FLARE Simulator with the following command: + +.. code-block:: shell + + (nvflare-env) $ nvflare simulator -w /tmp/nvflare/ -n 2 -t 1 examples/hello-world/hello-cross-val/jobs/hello-cross-val + +During the first phase, the model will be trained. + +During the second phase, cross site validation will happen. + +The workflow on the client will change to :class:`CrossSiteEval` +as it enters this second phase. + +During cross site evaluation, every client validates other clients' models and server models (if present). +This can produce a lot of results. All the results will be kept in the job's workspace when it is completed. + +Understanding the Output +^^^^^^^^^^^^^^^^^^^^^^^^ + +After running the job, you should begin to see outputs tracking the progress of the FL run. +As each client finishes training, it will start the cross site validation process. +During this you'll see several important outputs the track the progress of cross site validation. + +The server shows the log of each client requesting models, the models it sends and the results received. +Since the server could be responding to many clients at the same time, it may +require careful examination to make proper sense of events from the jumbled logs. + + +.. include:: access_result.rst + +.. note:: + You could see the cross-site validation results + at ``[DOWNLOAD_DIR]/[JOB_ID]/workspace/cross_site_val/cross_val_results.json`` + +The full source code for this exercise can be found in +:github_nvflare_link:`examples/hello-world/hello-numpy-cross-val `. 
+ +Previous Versions of Hello Cross-Site Validation +------------------------------------------------ + + - `hello-numpy-cross-val for 2.0 `_ + - `hello-numpy-cross-val for 2.1 `_ + - `hello-numpy-cross-val for 2.2 `_ + - `hello-numpy-cross-val for 2.3 `_ + - `hello-numpy-cross-val for 2.4 `_ diff --git a/docs/examples/hello_fedavg_numpy.rst b/docs/examples/hello_fedavg_numpy.rst new file mode 100644 index 0000000000..cf39a1f6d3 --- /dev/null +++ b/docs/examples/hello_fedavg_numpy.rst @@ -0,0 +1,191 @@ +.. _hello_fedavg_w_numpy: + +Hello FedAvg with NumPy +======================= + +Before You Start +---------------- + +Before jumping into this guide, make sure you have an environment with +`NVIDIA FLARE `_ installed. + +You can follow :ref:`getting_started` on the general concept of setting up a +Python virtual environment (the recommended environment) and how to install NVIDIA FLARE. + + +Introduction +------------- + +This tutorial is meant solely to demonstrate how the NVIDIA FLARE system works, without introducing any actual deep +learning concepts. + +Through this exercise, you will learn how to use NVIDIA FLARE with numpy to perform basic +computations across two clients with the included :class:`FedAvg` workflow, +which sends the model to the clients then aggregates the results that come back. + +Due to the simplified weights, you will be able to clearly see and understand +the results of the FL aggregation and the model persistor process. + +The setup of this exercise consists of one **server** and two **clients**. +The model is set to the starting weights ``[[1, 2, 3], [4, 5, 6], [7, 8, 9]]``. + +The following steps compose one cycle of weight updates, called a **round**: + + #. Clients are responsible for adding a delta to the weights to calculate new weights for the model. + #. These updates are then sent to the server which will aggregate them to produce a model with new weights. + #. Finally, the server sends this updated version of the model back to each client, so the clients can continue to calculate the next model weights in future rounds. + +For this exercise, we will be working with the ``hello-fedavg-numpy`` application in the examples folder. + +Let's get started. First clone the repo, if you haven't already: + +.. code-block:: shell + + $ git clone https://github.com/NVIDIA/NVFlare.git + +Remember to activate your NVIDIA FLARE Python virtual environment from the installation guide. +Ensure numpy is installed. + +.. code-block:: shell + + (nvflare-env) $ python3 -m pip install numpy + +Now that you have all your dependencies installed, let's look into the ``fedavg_script_executor_hello-numpy.py`` script which +builds the job with the Job API. + + +NVIDIA FLARE Job API +-------------------- + +The ``fedavg_script_executor_hello-numpy.py`` script builds the job with the Job API. The following sections are the key lines to focus on: + +Define a FedJob +^^^^^^^^^^^^^^^^ +:class:`FedJob` allows you to generate job configurations in a Pythonic way. It is initialized with the +name for the job, which will also be used as the directory name if the job is exported. + +.. code-block:: python + + from nvflare import FedAvg, FedJob, ScriptExecutor + + job = FedJob(name="hello-fedavg-numpy") + +Define the Controller Workflow +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Define the controller workflow and send to server. We use :class:`FedAvg` and specify the number of +clients and rounds, then use the :func:`to` routine to send the component to the server for the job. + +.. 
code-block:: python + + n_clients = 2 + num_rounds = 3 + + controller = FedAvg( + num_clients=n_clients, + num_rounds=num_rounds, + ) + job.to(controller, "server") + +Add Clients +^^^^^^^^^^^^ +Next, we can use the :class:`ScriptExecutor` and send it to each of the +clients to run our training script. We will examine the training script ``hello-numpy_fl.py`` in the next main section. + +The :func:`to` routine sends the component to the specified client for the job. Here, our clients +are named "site-0" and "site-1" and we are using the same training script for both. + +.. code-block:: python + + from nvflare.client.config import ExchangeFormat + + train_script = "src/hello-numpy_fl.py" + + for i in range(n_clients): + executor = ScriptExecutor( + task_script_path=train_script, task_script_args="", params_exchange_format=ExchangeFormat.NUMPY + ) + job.to(executor, f"site-{i}") + + +Optionally Export the Job or Run in Simulator +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +With all the components needed for the job, you can export the job to a directory with :func:`export` +if you want to look at what is built and configured for each client. You can use the exported job to submit it to a real NVFlare deployment +using the :ref:`FLARE Console ` or :ref:`flare_api`. + +.. code-block:: python + + job.export_job("/tmp/nvflare/jobs/job_config") + +This is optional if you just want to run the job in a simulator environment directly, as :class:`FedJob` has +a :func:`simulator_run` function. + +.. code-block:: python + + job.simulator_run("/tmp/nvflare/jobs/workdir") + +The results are saved in the specified directory provided as an argument to the :func:`simulator_run` function. + + +NVIDIA FLARE Client Training Script +------------------------------------ +The training script ``hello-numpy_fl.py`` is the main script that will be run on the clients. It contains print statements to +help you follow the output while the FL system is running. + +On the client side, the training workflow is as follows: + + 1. Receive the model from the FL server (for this example we initialize the model in the client code to the numpy array [[1, 2, 3], [4, 5, 6], [7, 8, 9]] if the model params are empty). + 2. Perform training on the received global model and calculate metrics. + 3. Send the new model back to the FL server. + +Using NVFlare's Client API, there are three essential methods to help achieve this workflow: + + - `init()`: Initializes NVFlare Client API environment. + - `receive()`: Receives model from the FL server. + - `send()`: Sends the model to the FL server. + +The following code snippet highlights how these methods are used in the training script: + +.. code-block:: python + + import nvflare.client as flare + + flare.init() # 1. Initializes NVFlare Client API environment. + input_model = flare.receive() # 2. Receives model from the FL server. + params = input_model.params # 3. Obtain the required information from the received model. + + # original local training code + new_params = train(params) + + output_model = flare.FLModel(params=new_params) # 4. Put the results in a new `FLModel` + flare.send(output_model) # 5. Sends the model to the FL server. + +This has been simplified to ignore dealing with data formats to focus on the NVFlare Client API, but you can find the full training +script ``hello-numpy_fl.py`` in the ``src`` directory of :github_nvflare_link:`examples/hello-world/hello-fedavg-numpy `. 
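+
+To make the ``train(params)`` placeholder above concrete: for this toy example, a local training step can be as
+simple as adding a constant delta to every weight (an illustrative sketch only, assuming ``params`` is a dict of
+NumPy arrays as in this example; the actual ``hello-numpy_fl.py`` may differ in details):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def train(params, delta=1.0):
+        # "training" here just adds a constant delta to every weight so the
+        # effect of each FedAvg round is easy to follow in the logs
+        return {key: np.asarray(value) + delta for key, value in params.items()}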
+ + +Running the Job API Script +--------------------------- +Now that you have a good understanding of the training script, you can run the job with the ``fedavg_script_executor_hello-numpy.py`` script: + +.. code-block:: shell + + (nvflare-env) $ python3 fedavg_script_executor_hello-numpy.py + +This will run the job in a simulator environment and you should be able to see the output as the job proceeds to completion. + +You've successfully run your first numpy federated learning system. + +You now have a decent grasp of the main FL concepts, and are ready to start exploring how NVIDIA FLARE can be applied to many other tasks. + +The full application for this exercise can be found in +:github_nvflare_link:`examples/hello-world/hello-fedavg-numpy `. + +Previous Versions of this Example (previously Hello Scatter and Gather) +----------------------------------------------------------------------- + + - `hello-numpy-sag for 2.0 `_ + - `hello-numpy-sag for 2.1 `_ + - `hello-numpy-sag for 2.2 `_ + - `hello-numpy-sag for 2.3 `_ + - `hello-numpy-sag for 2.4 `_ diff --git a/docs/examples/hello_pt.rst b/docs/examples/hello_pt.rst deleted file mode 100644 index 371f9c9817..0000000000 --- a/docs/examples/hello_pt.rst +++ /dev/null @@ -1,243 +0,0 @@ -.. _hello_pt: - -Hello PyTorch -============= - -Before You Start ----------------- - -Feel free to refer to the :doc:`detailed documentation <../programming_guide>` at any point -to learn more about the specifics of `NVIDIA FLARE `_. - -Make sure you have an environment with NVIDIA FLARE installed. - -You can follow :ref:`getting_started` on the general concept of setting up a -Python virtual environment (the recommended environment) and how to install NVIDIA FLARE. - - -Introduction -------------- - -Through this exercise, you will integrate NVIDIA FLARE with the popular -deep learning framework `PyTorch `_ and learn how to use NVIDIA FLARE to train a convolutional -network with the CIFAR10 dataset using the included Scatter and Gather workflow. - -The setup of this exercise consists of one **server** and two **clients**. - -The following steps compose one cycle of weight updates, called a **round**: - - #. Clients are responsible for generating individual weight-updates for the model using their own CIFAR10 dataset. - #. These updates are then sent to the server which will aggregate them to produce a model with new weights. - #. Finally, the server sends this updated version of the model back to each client. - -For this exercise, we will be working with the ``hello-pt`` application in the examples folder. -Custom FL applications can contain the folders: - - #. **custom**: contains the custom components (``simple_network.py``, ``cifar10trainer.py``) - #. **config**: contains client and server configurations (``config_fed_client.json``, ``config_fed_server.json``) - #. **resources**: contains the logger config (``log.config``) - -Now that you have a rough idea of what is going on, let's get started. First clone the repo: - -.. code-block:: shell - - $ git clone https://github.com/NVIDIA/NVFlare.git - -Now remember to activate your NVIDIA FLARE Python virtual environment from the installation guide. - -Since you will use PyTorch and torchvision for this exercise, let's go ahead and install both libraries: - -.. code-block:: shell - - (nvflare-env) $ python3 -m pip install torch torchvision - - -.. note:: - - There is a pending fix related to Pillow, PyTorch==1.9 and Numpy. 
If you see exception related to - ``enumerate(self.train_loader)``, downgrade your Pillow to 8.2.0. - - .. code-block:: shell - - (nvflare-env) $ python3 -m pip install torch torchvision Pillow==8.2.0 - -If you would like to go ahead and run the exercise now, you can skip directly to :ref:`hands-on`. - -NVIDIA FLARE Client -------------------- - -Neural Network -^^^^^^^^^^^^^^^ - -With all the required dependencies installed, you are ready to run a Federated Learning -with two clients and one server. The training procedure and network -architecture are modified from -`Training a Classifier `_. - - -Let's see what an extremely simplified CIFAR10 training looks like: - -.. literalinclude:: ../../examples/hello-world/hello-pt/jobs/hello-pt/app/custom/simple_network.py - :language: python - :caption: simple_network.py - -This ``SimpleNetwork`` class is your convolutional neural network to train with the CIFAR10 dataset. -This is not related to NVIDIA FLARE, so we implement it in a file called ``simple_network.py``. - -Dataset & Setup -^^^^^^^^^^^^^^^^ - -Now implement the custom class ``Cifar10Trainer`` as an NVIDIA FLARE Executor in a file -called ``cifar10trainer.py``. - -In a real FL experiment, each client would have their own dataset used for their local training. -For simplicity's sake, you can download the same CIFAR10 dataset from the Internet via torchvision's datasets module. -Additionally, you need to set up the optimizer, loss function and transform to process the data. -You can think of all of this code as part of your local training loop, as every deep learning training has a similar setup. - -Since you will encapsulate every training-related step in the ``Cifar10Trainer`` class, -let's put this preparation stage into the ``__init__`` method: - -.. literalinclude:: ../../examples/hello-world/hello-pt/jobs/hello-pt/app/custom/cifar10trainer.py - :language: python - - -Local Train -^^^^^^^^^^^ - -Now that you have your network and dataset setup, in the ``Cifar10Trainer`` class. -Let's also implement a local training loop in a method called ``local_train``: - -.. literalinclude:: ../../examples/hello-world/hello-pt/jobs/hello-pt/app/custom/cifar10trainer.py - :language: python - :pyobject: Cifar10Trainer._local_train - - -.. note:: - - Everything up to this point is completely independent of NVIDIA FLARE. It is just purely a PyTorch - deep learning exercise. You will now build the NVIDIA FLARE application based on this PyTorch code. - - -Integrate NVIDIA FLARE with Local Train -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -NVIDIA FLARE makes it easy to integrate your local train code into the NVIDIA FLARE API. - -The simplest way to do this is to subclass the ``Executor`` class and -implement one method ``execute``, which is called every time the client receives -an updated model from the server with the task "train" (the server will broadcast the "train" task in the Scatter and -Gather workflow we will configure below). -We can then call our local train inside the ``execute`` method. - -.. note:: - - The ``execute`` method inside the ``Executor`` class is where all of the client side computation occurs. - In these exercises, we update the weights by training on a local dataset, however, it is important to remember that NVIDIA FLARE is not restricted to just deep learning. - The type of data passed between the server and the clients, and the computations that the clients perform can be anything, as long as all of the FL Components agree on the same format. 
- -Take a look at the following code: - -.. literalinclude:: ../../examples/hello-world/hello-pt/jobs/hello-pt/app/custom/cifar10trainer.py - :language: python - :pyobject: Cifar10Trainer.execute - -The concept of ``Shareable`` is described in :ref:`shareable `. -Essentially, every NVIDIA FLARE client receives the model weights from the server in ``shareable`` format. -It is then passed into the ``execute`` method, and returns a new ``shareable`` back to the server. -The data is managed by using DXO (see :ref:`data_exchange_object` for details). - -Thus, the first thing is to retrieve the model weights delivered by server via ``shareable``, and this can be seen in -the first part of the code block above before ``local_train`` is called. - -We then perform a local train so the client's model is trained with its own dataset. - -After finishing the local train, the train method builds a new ``shareable`` with newly-trained weights -and metadata and returns it back to the NVIDIA FLARE server for aggregation. - -There is additional logic to handle the "submit_model" task, but that is for the CrossSiteModelEval workflow, -so we will be addressing that in a later example. - -FLContext -^^^^^^^^^ - -The ``FLContext`` is used to set and retrieve FL related information among the FL components via ``set_prop()`` and -``get_prop()`` as well as get services provided by the underlying infrastructure. You can find more details in the -:ref:`documentation `. - -NVIDIA FLARE Server & Application ---------------------------------- - -In this exercise, you can use the default settings, which leverage NVIDIA FLARE built-in components for NVIDIA FLARE server. - -These built-in components are commonly used in most deep learning scenarios. - -However, you are encouraged to build your own components to fully customize NVIDIA FLARE to meet your environment, - which we will demonstrate in the following exercises. - - -Application Configuration -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Inside the config folder there are two files, ``config_fed_client.json`` and ``config_fed_server.json``. - -.. literalinclude:: ../../examples/hello-world/hello-pt/jobs/hello-pt/app/config/config_fed_client.json - :language: json - :linenos: - :caption: config_fed_client.json - -Take a look at line 8. - -This is the ``Cifar10Trainer`` you just implemented. - -The NVIDIA FLARE client loads this application configuration and picks your implementation. - -You can easily change it to another class so your NVIDIA FLARE client has different training logic. - -The tasks "train" and "submit_model" have been configured to work with the ``Cifar10Trainer`` Executor. -The "validate" task for ``Cifar10Validator`` and the "submit_model" task are used for the ``CrossSiteModelEval`` workflow, -so we will be addressing that in a later example. - - -.. literalinclude:: ../../examples/hello-world/hello-pt/jobs/hello-pt/app/config/config_fed_server.json - :language: json - :linenos: - :caption: config_fed_server.json - -The server application configuration, like said before, leverages NVIDIA FLARE built-in components. -Remember, you are encouraged to change them to your own classes whenever you have different application logic. - -Note that on line 12, ``persistor`` points to ``PTFileModelPersistor``. -NVIDIA FLARE provides a built-in PyTorch implementation for a model persistor, -however for other frameworks/libraries, you will have to implement your own. 
- -The Scatter and Gather workflow is implemented by :class:`ScatterAndGather` -and is configured to make use of the components with id "aggregator", "persistor", and "shareable_generator". -The workflow code is all open source now, so feel free to study and use it as inspiration -to write your own workflows to support your needs. - -.. _hands-on: - -Train the Model, Federated! ---------------------------- - -.. |ExampleApp| replace:: hello-pt -.. include:: run_fl_system.rst - -.. include:: access_result.rst - -.. include:: shutdown_fl_system.rst - -Congratulations! -You've successfully built and run your first federated learning system. - -The full source code for this exercise can be found in -:github_nvflare_link:`examples/hello-world/hello-pt `. - -Previous Versions of Hello PyTorch ----------------------------------- - - - `hello-pt for 2.0 `_ - - `hello-pt for 2.1 `_ - - `hello-pt for 2.2 `_ - - `hello-pt for 2.3 `_ diff --git a/docs/examples/hello_pt_job_api.rst b/docs/examples/hello_pt_job_api.rst new file mode 100644 index 0000000000..751162ff7f --- /dev/null +++ b/docs/examples/hello_pt_job_api.rst @@ -0,0 +1,263 @@ +.. _hello_pt_job_api: + +Hello PyTorch with Job API +========================== + +Before You Start +---------------- + +Feel free to refer to the :doc:`detailed documentation <../programming_guide>` at any point +to learn more about the specifics of `NVIDIA FLARE `_. + +We recommend you first finish the :doc:`Hello FedAvg with NumPy ` exercise since it introduces the +federated learning concepts of `NVIDIA FLARE `_. + +Make sure you have an environment with NVIDIA FLARE installed. + +You can follow :ref:`getting_started` on the general concept of setting up a +Python virtual environment (the recommended environment) and how to install NVIDIA FLARE. + +Introduction +------------- + +Through this exercise, you will integrate NVIDIA FLARE with the popular +deep learning framework `PyTorch `_ and learn how to use NVIDIA FLARE to train a convolutional +network with the CIFAR10 dataset using the included :class:`FedAvg` workflow. + +The setup of this exercise consists of one **server** and two **clients**. + +The following steps compose one cycle of weight updates, called a **round**: + + #. Clients are responsible for generating individual weight-updates for the model using their own CIFAR10 dataset. + #. These updates are then sent to the server which will aggregate them to produce a model with new weights. + #. Finally, the server sends this updated version of the model back to each client. + +For this exercise, we will be working with the ``hello-pt`` application in the examples folder. + +Let's get started. First clone the repo: + +.. code-block:: shell + + $ git clone https://github.com/NVIDIA/NVFlare.git + +Remember to activate your NVIDIA FLARE Python virtual environment from the installation guide. + +Since you will use PyTorch and torchvision for this exercise, let's go ahead and install both libraries: + +.. code-block:: shell + + (nvflare-env) $ python3 -m pip install torch torchvision + +If you would like to go ahead and run the exercise now, you can run the ``fedavg_script_executor_hello-pt.py`` script which +builds the job with the Job API and runs the job with the FLARE Simulator. + +NVIDIA FLARE Job API +-------------------- + +The ``fedavg_script_executor_hello-pt.py`` script for this hello-pt example is very similar to the ``fedavg_script_executor_hello-numpy.py`` script +for the :doc:`Hello FedAvg with NumPy ` exercise. 
Other than changes to the names of the job and client script, the only difference +is a line to define the initial global model for the server: + +.. code-block:: python + + # Define the initial global model and send to server + job.to(SimpleNetwork(), "server") + + +NVIDIA FLARE Client Training Script +------------------------------------ +The training script for this example, ``hello-pt_cifar10_fl.py``, is the main script that will be run on the clients. It contains the PyTorch specific +logic for training. + +Neural Network +^^^^^^^^^^^^^^^ + +The training procedure and network architecture are modified from +`Training a Classifier `_. + +Let's see what an extremely simplified CIFAR10 training looks like: + +.. literalinclude:: ../../examples/hello-world/hello-pt/src/simple_network.py + :language: python + :caption: simple_network.py + +This ``SimpleNetwork`` class is your convolutional neural network to train with the CIFAR10 dataset. +This is not related to NVIDIA FLARE, so we implement it in a file called ``simple_network.py``. + +Dataset & Setup +^^^^^^^^^^^^^^^^ + +In a real FL experiment, each client would have their own dataset used for their local training. +You can download the CIFAR10 dataset from the Internet via torchvision's datasets module, so for simplicity's sake, this is +the dataset we will be using on each client. +Additionally, you need to set up the optimizer, loss function and transform to process the data. +You can think of all of this code as part of your local training loop, as every deep learning training has a similar setup. + +In the ``hello-pt_cifar10_fl.py`` script, we take care of all of this setup before the ``flare.init()``. + +Local Train +^^^^^^^^^^^ + +Now with the network and dataset setup, let's also implement the local training loop with the NVFlare's Client API: + +.. code-block:: python + + flare.init() + + summary_writer = SummaryWriter() + while flare.is_running(): + input_model = flare.receive() + + model.load_state_dict(input_model.params) + + steps = epochs * len(train_loader) + for epoch in range(epochs): + running_loss = 0.0 + for i, batch in enumerate(train_loader): + images, labels = batch[0].to(device), batch[1].to(device) + optimizer.zero_grad() + + predictions = model(images) + cost = loss(predictions, labels) + cost.backward() + optimizer.step() + + running_loss += cost.cpu().detach().numpy() / images.size()[0] + + output_model = flare.FLModel(params=model.cpu().state_dict(), meta={"NUM_STEPS_CURRENT_ROUND": steps}) + + flare.send(output_model) + + +The code above is simplified from the ``hello-pt_cifar10_fl.py`` script to focus on the three essential methods of the NVFlare's Client API to +achieve the training workflow: + + - `init()`: Initializes NVFlare Client API environment. + - `receive()`: Receives model from the FL server. + - `send()`: Sends the model to the FL server. + +NVIDIA FLARE Server & Application +--------------------------------- +In this example, the server runs :class:`FedAvg` with the default settings. + +If you export the job with the :func:`export` function, you will see the +configurations for the server and each client. The server configuration is ``config_fed_server.json`` in the config folder +in app_server: + +.. 
code-block:: json + + { + "format_version": 2, + "workflows": [ + { + "id": "controller", + "path": "nvflare.app_common.workflows.fedavg.FedAvg", + "args": { + "num_clients": 2, + "num_rounds": 2 + } + } + ], + "components": [ + { + "id": "json_generator", + "path": "nvflare.app_common.widgets.validation_json_generator.ValidationJsonGenerator", + "args": {} + }, + { + "id": "model_selector", + "path": "nvflare.app_common.widgets.intime_model_selector.IntimeModelSelector", + "args": { + "aggregation_weights": {}, + "key_metric": "accuracy" + } + }, + { + "id": "receiver", + "path": "nvflare.app_opt.tracking.tb.tb_receiver.TBAnalyticsReceiver", + "args": { + "events": [ + "fed.analytix_log_stats" + ] + } + }, + { + "id": "persistor", + "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", + "args": { + "model": { + "path": "src.simple_network.SimpleNetwork", + "args": {} + } + } + }, + { + "id": "model_locator", + "path": "nvflare.app_opt.pt.file_model_locator.PTFileModelLocator", + "args": { + "pt_persistor_id": "persistor" + } + } + ], + "task_data_filters": [], + "task_result_filters": [] + } + +This is automatically created by the Job API. The server application configuration leverages NVIDIA FLARE built-in components. + +Note that ``persistor`` points to ``PTFileModelPersistor``. This is automatically configured when the model SimpleNetwork is added +to the server with the :func:`to` function. The Job API detects that the model is a PyTorch model +and automatically configures :class:`PTFileModelPersistor` +and :class:`PTFileModelLocator`. + + +Client Configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The client configuration is ``config_fed_client.json`` in the config folder of each client app folder: + +.. code-block:: json + + { + "format_version": 2, + "executors": [ + { + "tasks": [ + "*" + ], + "executor": { + "path": "nvflare.app_common.executors.script_executor.ScriptExecutor", + "args": { + "task_script_path": "src/hello-pt_cifar10_fl.py" + } + } + } + ], + "components": [ + { + "id": "event_to_fed", + "path": "nvflare.app_common.widgets.convert_to_fed_event.ConvertToFedEvent", + "args": { + "events_to_convert": [ + "analytix_log_stats" + ] + } + } + ], + "task_data_filters": [], + "task_result_filters": [] + } + +The ``task_script_path`` is set to the path of the client training script. + +The full source code for this exercise can be found in +:github_nvflare_link:`examples/hello-world/hello-pt `. + +Previous Versions of Hello PyTorch +---------------------------------- + + - `hello-pt for 2.0 `_ + - `hello-pt for 2.1 `_ + - `hello-pt for 2.2 `_ + - `hello-pt for 2.3 `_ + - `hello-pt for 2.4 `_ diff --git a/docs/examples/hello_tf_job_api.rst b/docs/examples/hello_tf_job_api.rst new file mode 100644 index 0000000000..5b73f4070c --- /dev/null +++ b/docs/examples/hello_tf_job_api.rst @@ -0,0 +1,223 @@ +.. _hello_tf_job_api: + +Hello TensorFlow with Job API +============================== + +Before You Start +---------------- +Feel free to refer to the :doc:`detailed documentation <../programming_guide>` at any point +to learn more about the specifics of `NVIDIA FLARE `_. + +We recommend you first finish the :doc:`Hello FedAvg with NumPy ` exercise since it introduces the +federated learning concepts of `NVIDIA FLARE `_. + +Make sure you have an environment with NVIDIA FLARE installed. + +You can follow :ref:`getting_started` on the general concept of setting up a +Python virtual environment (the recommended environment) and how to install NVIDIA FLARE. 
+ +Here we assume you have already installed NVIDIA FLARE inside a python virtual environment +and have already cloned the repo. + +Introduction +------------- +Through this exercise, you will integrate NVIDIA FLARE with the popular deep learning framework +`TensorFlow `_ and learn how to use NVIDIA FLARE to train a convolutional +network with the MNIST dataset using the :class:`FedAvg` workflow. + +You will also be introduced to some new components and concepts, including filters, aggregators, and event handlers. + +The setup of this exercise consists of one **server** and two **clients**. + +The following steps compose one cycle of weight updates, called a **round**: + + #. Clients are responsible for generating individual weight-updates for the model using their own MNIST dataset. + #. These updates are then sent to the server which will aggregate them to produce a model with new weights. + #. Finally, the server sends this updated version of the model back to each client. + +For this exercise, we will be working with the ``hello-tf`` application in the examples folder. + +Let's get started. Since this task is using TensorFlow, let's go ahead and install the library inside our virtual environment: + +.. code-block:: shell + + (nvflare-env) $ python3 -m pip install tensorflow + +With all the required dependencies installed, you are ready to run a Federated Learning system +with two clients and one server. If you would like to go ahead and run the exercise now, you can run +the ``fedavg_script_executor_hello-tf.py`` script which builds the job with the Job API and runs the +job with the FLARE Simulator. + +NVIDIA FLARE Job API +-------------------- +The ``fedavg_script_executor_hello-tf.py`` script for this hello-tf example is very similar to the ``fedavg_script_executor_hello-numpy.py`` script +for the :doc:`Hello FedAvg with NumPy ` example and also the script for the :doc:`Hello PyTorch ` +example. Other than changes to the names of the job and client script, the only difference is the line to define the initial global model +for the server: + +.. code-block:: python + + # Define the initial global model and send to server + job.to(TFNet(), "server") + + +NVIDIA FLARE Client Training Script +------------------------------------ +The training script for this example, ``hello-tf_fl.py``, is the main script that will be run on the clients. It contains the TensorFlow specific +logic for training. + +Neural Network +^^^^^^^^^^^^^^^ +Let's see what a simplified MNIST network looks like. + +.. literalinclude:: ../../examples/hello-world/hello-tf/src/tf_net.py + :language: python + :lines: 15- + :lineno-start: 15 + :linenos: + :caption: tf_net.py + +This ``TFNet`` class is the convolutional neural network to train with MNIST dataset. +This is not related to NVIDIA FLARE, and it is implemented in a file called ``tf_net.py``. + +Dataset & Setup +^^^^^^^^^^^^^^^^ +Before starting training, you need to set up your dataset. +In this exercise, it is downloaded from the Internet via ``tf.keras``'s datasets module +and split in half to create a separate dataset for each client. Note that this is just for an example since in a real-world scenario, +you will likely have different datasets for each client. + +Additionally, the optimizer and loss function need to be configured. + +All of this happens before the ``while flare.is_running():`` line in ``hello-tf_fl.py``. + +.. 
literalinclude:: ../../examples/hello-world/hello-tf/src/hello-tf_fl.py + :language: python + :lines: 29-57 + :lineno-start: 29 + :linenos: + :caption: hello-tf_fl.py + +Client Local Train +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The client code gets the weights from the input_model received from the server then performs a simple :code:`self.model.fit` +so the client's model is trained with its own dataset: + +.. literalinclude:: ../../examples/hello-world/hello-tf/src/hello-tf_fl.py + :language: python + :lines: 58-91 + :lineno-start: 58 + :linenos: + +After finishing the local training, the newly-trained weights are sent back to the NVIDIA FLARE server in the params of +:mod:`FLModel`. + + +NVIDIA FLARE Server & Application +--------------------------------- +In this example, the server runs :class:`FedAvg` with the default settings. + +If you export the job with the :func:`export` function, you will see the +configurations for the server and each client. The server configuration is ``config_fed_server.json`` in the config folder +in app_server: + +.. code-block:: json + + { + "format_version": 2, + "workflows": [ + { + "id": "controller", + "path": "nvflare.app_common.workflows.fedavg.FedAvg", + "args": { + "num_clients": 2, + "num_rounds": 3 + } + } + ], + "components": [ + { + "id": "json_generator", + "path": "nvflare.app_common.widgets.validation_json_generator.ValidationJsonGenerator", + "args": {} + }, + { + "id": "model_selector", + "path": "nvflare.app_common.widgets.intime_model_selector.IntimeModelSelector", + "args": { + "aggregation_weights": {}, + "key_metric": "accuracy" + } + }, + { + "id": "persistor", + "path": "nvflare.app_opt.tf.model_persistor.TFModelPersistor", + "args": { + "model": { + "path": "src.tf_net.TFNet", + "args": {} + } + } + } + ], + "task_data_filters": [], + "task_result_filters": [] + } + +This is automatically created by the Job API. The server application configuration leverages NVIDIA FLARE built-in components. + +Note that ``persistor`` points to ``TFModelPersistor``. This is automatically configured when the model is added +to the server with the :func:`to` function. The Job API detects that the model is a TensorFlow model +and automatically configures :class:`TFModelPersistor`. + + +Client Configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The client configuration is ``config_fed_client.json`` in the config folder of each client app folder: + +.. code-block:: json + + { + "format_version": 2, + "executors": [ + { + "tasks": [ + "*" + ], + "executor": { + "path": "nvflare.app_common.executors.script_executor.ScriptExecutor", + "args": { + "task_script_path": "src/hello-tf_fl.py" + } + } + } + ], + "components": [ + { + "id": "event_to_fed", + "path": "nvflare.app_common.widgets.convert_to_fed_event.ConvertToFedEvent", + "args": { + "events_to_convert": [ + "analytix_log_stats" + ] + } + } + ], + "task_data_filters": [], + "task_result_filters": [] + } + +The ``task_script_path`` is set to the path of the client training script. + +The full source code for this exercise can be found in +:github_nvflare_link:`examples/hello-tf `. 
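+
+For reference, putting the pieces described above together, the Job API script for this example follows the same
+pattern as the NumPy and PyTorch versions. The sketch below is illustrative only: the job name, client names, and
+executor arguments are assumptions, and the actual ``fedavg_script_executor_hello-tf.py`` may differ slightly:
+
+.. code-block:: python
+
+    from src.tf_net import TFNet
+
+    from nvflare import FedAvg, FedJob, ScriptExecutor
+    from nvflare.client.config import ExchangeFormat
+
+    n_clients = 2
+
+    job = FedJob(name="hello-tf_fedavg")
+
+    # the controller workflow and the initial global model go to the server
+    job.to(FedAvg(num_clients=n_clients, num_rounds=3), "server")
+    job.to(TFNet(), "server")
+
+    # each client runs the training script through a ScriptExecutor
+    for i in range(n_clients):
+        executor = ScriptExecutor(
+            task_script_path="src/hello-tf_fl.py",
+            task_script_args="",
+            params_exchange_format=ExchangeFormat.NUMPY,
+        )
+        job.to(executor, f"site-{i + 1}")
+
+    job.simulator_run("/tmp/nvflare/jobs/workdir")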
+ +Previous Versions of Hello TensorFlow (previously Hello TensorFlow 2) +--------------------------------------------------------------------- + + - `hello-tf2 for 2.0 `_ + - `hello-tf2 for 2.1 `_ + - `hello-tf2 for 2.2 `_ + - `hello-tf2 for 2.3 `_ + - `hello-tf2 for 2.4 `_ diff --git a/docs/examples/hello_world_examples.rst b/docs/examples/hello_world_examples.rst index 780c154340..e4279ff26c 100644 --- a/docs/examples/hello_world_examples.rst +++ b/docs/examples/hello_world_examples.rst @@ -7,9 +7,9 @@ Hello World examples can be run from the :github_nvflare_link:`hello_world noteb Deep Learning to Federated Learning (GitHub) Step-by-Step Examples (GitHub) - hello_scatter_and_gather + hello_fedavg_numpy hello_cross_val Hello Cyclic Weight Transfer (GitHub) - hello_pt - hello_tf2 + hello_pt_job_api + hello_tf_job_api Hello Client Controlled Workflow (GitHub) diff --git a/examples/hello-world/hello-cross-val/README.md b/examples/hello-world/hello-cross-val/README.md new file mode 100644 index 0000000000..0868c189ba --- /dev/null +++ b/examples/hello-world/hello-cross-val/README.md @@ -0,0 +1,25 @@ +# Hello Cross-Site Validation + +The cross-site model evaluation workflow uses the data from clients to run evaluation with the models of other clients. Data is not shared. Rather the collection of models is distributed to each client site to run local validation. The server collects the results of local validation to construct an all-to-all matrix of model performance vs. client dataset. It uses the [CrossSiteEval](https://nvflare.readthedocs.io/en/main/apidocs/nvflare.app_common.workflows.cross_site_eval.html) controller workflow. + +### 1. Install NVIDIA FLARE + +Follow the [Installation](https://nvflare.readthedocs.io/en/main/quickstart.html) instructions. + +### 2. Run the experiment + +Use nvflare simulator to run the example: + +``` +nvflare simulator -w /tmp/nvflare/ -n 2 -t 2 hello-cross-val/jobs/hello-cross-val +``` + +### 3. Access the logs and results + +You can find the running logs and results inside the simulator's workspace/simulate_job + +```bash +$ ls /tmp/nvflare/simulate_job/ +app_server app_site-1 app_site-2 log.txt + +``` diff --git a/examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/config/config_fed_client.conf b/examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/config/config_fed_client.conf new file mode 100644 index 0000000000..83455cf91f --- /dev/null +++ b/examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/config/config_fed_client.conf @@ -0,0 +1,88 @@ +{ + # version of the configuration + format_version = 2 + + # This is the application script which will be invoked. Client can replace this script with user's own training script. + app_script = "train.py" + + # Additional arguments needed by the training code. For example, in lightning, these can be --trainer.batch_size=xxx. + app_config = "" + + # Client Computing Executors. + executors = [ + { + # tasks the executors are defined to handle + tasks = [ + "train", + "validate", + "submit_model" + ] + executor { + id = "Executor" + # This is an executor for pytorch + Client API. The underline data exchange is using Pipe. + path = "nvflare.app_opt.pt.client_api_launcher_executor.PTClientAPILauncherExecutor" + args { + # launcher_id is used to locate the Launcher object in "components" + launcher_id = "launcher" + # pipe_id is used to locate the Pipe object in "components" + pipe_id = "pipe" + # Timeout in seconds for waiting for a heartbeat from the training script. 
Defaults to 30 seconds. + # Please refer to the class docstring for all available arguments + heartbeat_timeout = 60 + # if the transfer_type is FULL, then it will be sent directly + # if the transfer_type is DIFF, then we will calculate the + # difference VS received parameters and send the difference + params_transfer_type = "FULL" + # if train_with_evaluation is true, the executor will expect + # the custom code need to send back both the trained parameters and the evaluation metric + # otherwise only trained parameters are expected + train_with_evaluation = true + # tasks for different modes + train_task_name = "train" + evaluate_task_name = "validate" + submit_model_task_name = "submit_model" + } + } + } + ], + + # this defined an array of task data filters. If provided, it will control the data from server controller to client executor + task_data_filters = [] + + # this defined an array of task result filters. If provided, it will control the result from client executor to server controller + task_result_filters = [] + + components = [ + { + # component id is "launcher" + id = "launcher" + + # the class path of this component + path = "nvflare.app_common.launchers.subprocess_launcher.SubprocessLauncher" + + args { + # the launcher will invoke the script + script = "python3 custom/{app_script} {app_config} " + # if launch_once is true, the SubprocessLauncher will launch once for the whole job + # if launch_once is false, the SubprocessLauncher will launch a process for each task it receives from server + launch_once = true + } + }, + { + id = "pipe" + + path = "nvflare.fuel.utils.pipe.file_pipe.FilePipe" + + args { + # Mode of the endpoint. A pipe has two endpoints. + # An endpoint can be either the one that initiates communication or the one listening. + # PASSIVE is the one listening. + mode = "PASSIVE" + + # root_path: is the directory location of the parameters exchange. + # You can also set it to an absolute path in your system. + root_path = "{WORKSPACE}/{JOB_ID}/{SITE_NAME}" + } + } + ] +} diff --git a/examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/config/config_fed_server.conf b/examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/config/config_fed_server.conf new file mode 100644 index 0000000000..46dda194a1 --- /dev/null +++ b/examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/config/config_fed_server.conf @@ -0,0 +1,103 @@ +{ + # version of the configuration + format_version = 2 + + # task data filter: if filters are provided, the filter will filter the data flow out of server to client. + task_data_filters =[] + + # task result filter: if filters are provided, the filter will filter the result flow out of client to server. + task_result_filters = [] + + # This assumes that there will be a "net.py" file with class name "Net". + # If your model code is not in "net.py" and class name is not "Net", please modify here + model_class_path = "net.Net" + + # workflows: Array of workflows the control the Federated Learning workflow lifecycle. + # One can specify multiple workflows. The NVFLARE will run them in the order specified. + workflows = [ + { + # 1st workflow" + id = "scatter_and_gather" + + # name = ScatterAndGather, path is the class path of the ScatterAndGather controller. + path = "nvflare.app_common.workflows.scatter_and_gather.ScatterAndGather" + args { + # argument of the ScatterAndGather class. + # min number of clients required for ScatterAndGather controller to move to the next round + # during the workflow cycle. 
The controller will wait until the min_clients returned from clients + # before move to the next step. + min_clients = 2 + + # number of global round of the training. + num_rounds = 1 + + # starting round is 0-based + start_round = 0 + + # after received min number of clients' result, + # how much time should we wait further before move to the next step + wait_time_after_min_received = 0 + + # For ScatterAndGather, the server will aggregate the weights based on the client's result. + # the aggregator component id is named here. One can use the this ID to find the corresponding + # aggregator component listed below + aggregator_id = "aggregator" + + # The Scatter and Gather controller use an persistor to load the model and save the model. + # The persistent component can be identified by component ID specified here. + persistor_id = "persistor" + + # Shareable to a communication message, i.e. shared between clients and server. + # Shareable generator is a component that responsible to take the model convert to/from this communication message: Shareable. + # The component can be identified via "shareable_generator_id" + shareable_generator_id = "shareable_generator" + + # train task name: Client will start training once received such task. + train_task_name = "train" + + # train timeout in second. If zero, meaning no timeout. + train_timeout = 0 + } + } + { + # Server-side Cross Site Evaluation Workflow + id = "cross_site_model_eval", + path = "nvflare.app_common.workflows.cross_site_eval.CrossSiteEval", + args { + persistor_id = "persistor" + submit_model_timeout = 600, + validation_timeout = 6000, + } + } + ] + + # List of components used in the server side workflow. + components = [ + { + # This is the persistence component used in above workflow. + # PTFileModelPersistor is a Pytorch persistor which save/read the model to/from file. + + id = "persistor" + path = "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor" + + # the persitor class take model class as argument + # This imply that the model is initialized from the server-side. + # The initialized model will be broadcast to all the clients to start the training. + args.model.path = "{model_class_path}" + }, + { + # This is the generator that convert the model to shareable communication message structure used in workflow + id = "shareable_generator" + path = "nvflare.app_common.shareablegenerators.full_model_shareable_generator.FullModelShareableGenerator" + args = {} + }, + { + # This is the aggregator that perform the weighted average aggregation. + # the aggregation is "in-time", so it doesn't wait for client results, but aggregates as soon as it received the data. + id = "aggregator" + path = "nvflare.app_common.aggregators.intime_accumulate_model_aggregator.InTimeAccumulateWeightedAggregator" + args.expected_data_kind = "WEIGHTS" + }, + ] + +} diff --git a/examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/custom/net.py b/examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/custom/net.py new file mode 100644 index 0000000000..47ac7e9589 --- /dev/null +++ b/examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/custom/net.py @@ -0,0 +1,37 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class Net(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Conv2d(3, 6, 5)
+        self.pool = nn.MaxPool2d(2, 2)
+        self.conv2 = nn.Conv2d(6, 16, 5)
+        self.fc1 = nn.Linear(16 * 5 * 5, 120)
+        self.fc2 = nn.Linear(120, 84)
+        self.fc3 = nn.Linear(84, 10)
+
+    def forward(self, x):
+        x = self.pool(F.relu(self.conv1(x)))
+        x = self.pool(F.relu(self.conv2(x)))
+        x = torch.flatten(x, 1)  # flatten all dimensions except batch
+        x = F.relu(self.fc1(x))
+        x = F.relu(self.fc2(x))
+        x = self.fc3(x)
+        return x
diff --git a/examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/custom/train.py b/examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/custom/train.py
new file mode 100644
index 0000000000..3bc1ebc679
--- /dev/null
+++ b/examples/hello-world/hello-cross-val/jobs/hello-cross-val/app/custom/train.py
@@ -0,0 +1,201 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import torchvision
+import torchvision.transforms as transforms
+from net import Net
+
+# (1) import nvflare client API
+import nvflare.client as flare
+from nvflare.app_common.app_constant import ModelName
+
+# (optional) set a fixed location so we don't need to download every time
+CIFAR10_ROOT = "/tmp/nvflare/data/cifar10"
+
+MODEL_SAVE_PATH_ROOT = "/tmp/nvflare/workdir/cifar10"
+
+# (optional) We change to use GPU to speed things up.
+# if you want to use CPU, change DEVICE="cpu" +DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + +def define_parser(): + parser = argparse.ArgumentParser() + parser.add_argument("--dataset_path", type=str, default=CIFAR10_ROOT, nargs="?") + parser.add_argument("--batch_size", type=int, default=4, nargs="?") + parser.add_argument("--num_workers", type=int, default=1, nargs="?") + parser.add_argument("--local_epochs", type=int, default=2, nargs="?") + parser.add_argument("--model_path", type=str, default=f"{MODEL_SAVE_PATH_ROOT}/cifar_net.pth", nargs="?") + return parser.parse_args() + + +def main(): + # define local parameters + args = define_parser() + + dataset_path = args.dataset_path + batch_size = args.batch_size + num_workers = args.num_workers + local_epochs = args.local_epochs + model_path = args.model_path + + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + trainset = torchvision.datasets.CIFAR10(root=dataset_path, train=True, download=True, transform=transform) + trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers) + + testset = torchvision.datasets.CIFAR10(root=dataset_path, train=False, download=True, transform=transform) + testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers) + + net = Net() + best_accuracy = 0.0 + + # wraps evaluation logic into a method to re-use for + # evaluation on both trained and received model + def evaluate(input_weights): + net = Net() + net.load_state_dict(input_weights) + # (optional) use GPU to speed things up + net.to(DEVICE) + + correct = 0 + total = 0 + # since we're not training, we don't need to calculate the gradients for our outputs + with torch.no_grad(): + for data in testloader: + # (optional) use GPU to speed things up + images, labels = data[0].to(DEVICE), data[1].to(DEVICE) + # calculate outputs by running images through the network + outputs = net(images) + # the class with the highest energy is what we choose as prediction + _, predicted = torch.max(outputs.data, 1) + total += labels.size(0) + correct += (predicted == labels).sum().item() + + return 100 * correct // total + + # (2) initialize NVFlare client API + flare.init() + + # (3) run continously when launch_once=true + while flare.is_running(): + + # (4) receive FLModel from NVFlare + input_model = flare.receive() + client_id = flare.get_site_name() + + model_path = os.path.join(MODEL_SAVE_PATH_ROOT, client_id, "cifar_net.pth") + + # Based on different "task" we will do different things + # for "train" task (flare.is_train()) we use the received model to do training and/or evaluation + # and send back updated model and/or evaluation metrics, if the "train_with_evaluation" is specified as True + # in the config_fed_client we will need to do evaluation and include the evaluation metrics + # for "evaluate" task (flare.is_evaluate()) we use the received model to do evaluation + # and send back the evaluation metrics + # for "submit_model" task (flare.is_submit_model()) we just need to send back the local model + # (5) performing train task on received model + if flare.is_train(): + print(f"({client_id}) current_round={input_model.current_round}, total_rounds={input_model.total_rounds}") + + # (5.1) loads model from NVFlare + net.load_state_dict(input_model.params) + + criterion = nn.CrossEntropyLoss() + optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + + # 
(optional) use GPU to speed things up + net.to(DEVICE) + + if client_id == "site-1": + local_epochs = 1 + else: + local_epochs = 3 + + steps = local_epochs * len(trainloader) + + for epoch in range(local_epochs): # loop over the dataset multiple times + + running_loss = 0.0 + for i, data in enumerate(trainloader, 0): + # get the inputs; data is a list of [inputs, labels] + # (optional) use GPU to speed things up + inputs, labels = data[0].to(DEVICE), data[1].to(DEVICE) + + # zero the parameter gradients + optimizer.zero_grad() + + # forward + backward + optimize + outputs = net(inputs) + loss = criterion(outputs, labels) + loss.backward() + optimizer.step() + + # print statistics + running_loss += loss.item() + if i % 2000 == 1999: # print every 2000 mini-batches + print(f"({client_id}) [{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}") + running_loss = 0.0 + + print(f"({client_id}) Finished Training") + + # (5.2) evaluation on local trained model to save best model + local_accuracy = evaluate(net.state_dict()) + print(f"({client_id}) Evaluating local trained model. Accuracy on the 10000 test images: {local_accuracy}") + if local_accuracy > best_accuracy: + best_accuracy = local_accuracy + torch.save(net.state_dict(), model_path) + + # (5.3) evaluate on received model for model selection + accuracy = evaluate(input_model.params) + print( + f"({client_id}) Evaluating received model for model selection. Accuracy on the 10000 test images: {accuracy}" + ) + + # (5.4) construct trained FL model + output_model = flare.FLModel( + params=net.cpu().state_dict(), + metrics={"accuracy": accuracy}, + meta={"NUM_STEPS_CURRENT_ROUND": steps}, + ) + + # (5.5) send model back to NVFlare + flare.send(output_model) + + # (6) performing evaluate task on received model + elif flare.is_evaluate(): + accuracy = evaluate(input_model.params) + print(f"({client_id}) accuracy: {accuracy}") + flare.send(flare.FLModel(metrics={"accuracy": accuracy})) + + # (7) performing submit_model task to obtain best local model + elif flare.is_submit_model(): + model_name = input_model.meta["submit_model_name"] + if model_name == ModelName.BEST_MODEL: + try: + weights = torch.load(model_path) + net = Net() + net.load_state_dict(weights) + flare.send(flare.FLModel(params=net.cpu().state_dict())) + except Exception as e: + raise ValueError("Unable to load best model") from e + else: + raise ValueError(f"Unknown model_type: {model_name}") + + +if __name__ == "__main__": + main() diff --git a/examples/hello-world/hello-cross-val/jobs/hello-cross-val/meta.conf b/examples/hello-world/hello-cross-val/jobs/hello-cross-val/meta.conf new file mode 100644 index 0000000000..df04ef4ed7 --- /dev/null +++ b/examples/hello-world/hello-cross-val/jobs/hello-cross-val/meta.conf @@ -0,0 +1,10 @@ +{ + name = "hello-cross-val" + resource_spec {} + min_clients = 2 + deploy_map { + app = [ + "@ALL" + ] + } +} diff --git a/examples/hello-world/hello-numpy-sag/requirements.txt b/examples/hello-world/hello-cross-val/requirements.txt similarity index 100% rename from examples/hello-world/hello-numpy-sag/requirements.txt rename to examples/hello-world/hello-cross-val/requirements.txt diff --git a/examples/hello-world/hello-cyclic/README.md b/examples/hello-world/hello-cyclic/README.md index 60c8c632ea..a5e3f748db 100644 --- a/examples/hello-world/hello-cyclic/README.md +++ b/examples/hello-world/hello-cyclic/README.md @@ -1,11 +1,12 @@ # Hello Cyclic Weight Transfer ["Cyclic Weight Transfer"](https://pubmed.ncbi.nlm.nih.gov/29617797/ -) (CWT) is an 
alternative to the scatter-and-gather approach used in [FedAvg](https://arxiv.org/abs/1602.05629). CWT uses the [CyclicController](https://nvflare.readthedocs.io/en/main/apidocs/nvflare.app_common.workflows.cyclic_ctl.html) to pass the model weights from one site to the next for repeated fine-tuning. +) (CWT) is an alternative to [FedAvg](https://arxiv.org/abs/1602.05629). CWT uses the [Cyclic Controller](https://nvflare.readthedocs.io/en/main/apidocs/nvflare.app_common.workflows.cyclic.html) to pass the model weights from one site to the next for repeated fine-tuning. > **_NOTE:_** This example uses the [MNIST](http://yann.lecun.com/exdb/mnist/) handwritten digits dataset and will load its data within the trainer code. -You can follow the [hello_world notebook](../hello_world.ipynb) or the following: +To run this example with the FLARE API, you can follow the [hello_world notebook](../hello_world.ipynb), or you can quickly get +started with the following: ### 1. Install NVIDIA FLARE @@ -25,42 +26,31 @@ Prepare the data first: bash ./prepare_data.sh ``` -Use nvflare simulator to run the hello-examples: +Run the script using the job API to create the job and run it with the simulator: -```bash -nvflare simulator -w /tmp/nvflare/ -n 2 -t 2 ./jobs/hello-cyclic +``` +python3 cyclic_script_executor_hello-cyclic.py ``` ### 3. Access the logs and results -You can find the running logs and results inside the simulator's workspace/simulate_job +You can find the running logs and results inside the simulator's workspace: ```bash -$ ls /tmp/nvflare/simulate_job/ -app_server app_site-1 app_site-2 log.txt - +$ ls /tmp/nvflare/jobs/workdir ``` ### 4. Notes on running with GPUs -For running with GPUs, we recommend using +For running with GPUs, we recommend using the [NVIDIA TensorFlow docker](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tensorflow) -If you choose to run the example using GPUs, it is important to note that, -by default, TensorFlow will attempt to allocate all available GPU memory at the start. +If you choose to run the example using GPUs, it is important to note that by default, TensorFlow will attempt to allocate all available GPU memory at the start. In scenarios where multiple clients are involved, you have a couple of options to address this. One approach is to include specific flags to prevent TensorFlow from allocating all GPU memory. For instance: ```bash -TF_FORCE_GPU_ALLOW_GROWTH=true nvflare simulator -w /tmp/nvflare/ -n 2 -t 2 ./jobs/hello-cyclic -``` - -If you possess more GPUs than clients, -an alternative strategy is to run one client on each GPU. -This can be achieved as illustrated below: - -```bash -TF_FORCE_GPU_ALLOW_GROWTH=true nvflare simulator -w /tmp/nvflare/ -n 2 -gpu 0,1 ./jobs/hello-cyclic +TF_FORCE_GPU_ALLOW_GROWTH=true python3 cyclic_script-executor-hello-cyclic.py ``` diff --git a/examples/hello-world/hello-cyclic/cyclic_script_executor_hello-cyclic.py b/examples/hello-world/hello-cyclic/cyclic_script_executor_hello-cyclic.py new file mode 100644 index 0000000000..28e88cb76e --- /dev/null +++ b/examples/hello-world/hello-cyclic/cyclic_script_executor_hello-cyclic.py @@ -0,0 +1,48 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from src.tf_net import Net + +from nvflare import FedJob, ScriptExecutor +from nvflare.app_common.workflows.cyclic import Cyclic +from nvflare.client.config import ExchangeFormat + +if __name__ == "__main__": + n_clients = 2 + num_rounds = 3 + train_script = "src/hello-cyclic_fl.py" + + job = FedJob(name="hello-tf_cyclic") + + # Define the controller workflow and send to server + controller = Cyclic( + num_clients=n_clients, + num_rounds=num_rounds, + ) + job.to(controller, "server") + + # Define the initial global model and send to server + job.to(Net(), "server") + + # Add clients + for i in range(n_clients): + executor = ScriptExecutor( + task_script_path=train_script, + task_script_args="", # f"--batch_size 32 --data_path /tmp/data/site-{i}" + params_exchange_format=ExchangeFormat.NUMPY, + ) + job.to(executor, f"site-{i+1}", gpu=0) + + # job.export_job("/tmp/nvflare/jobs/job_config") + job.simulator_run("/tmp/nvflare/jobs/workdir") diff --git a/examples/hello-world/hello-cyclic/src/hello-cyclic_fl.py b/examples/hello-world/hello-cyclic/src/hello-cyclic_fl.py new file mode 100644 index 0000000000..3664edb25f --- /dev/null +++ b/examples/hello-world/hello-cyclic/src/hello-cyclic_fl.py @@ -0,0 +1,95 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
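+# Client-side training script for Cyclic Weight Transfer, launched on each site by the ScriptExecutor.
+# Each round: receive the current weights from the previous site, evaluate them on the local MNIST
+# test split, train for one epoch on this site's half of MNIST, then send the updated layer weights
+# back so the Cyclic controller can pass them on to the next site.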
+ +import tensorflow as tf +from tf_net import Net + +import nvflare.client as flare + +WEIGHTS_PATH = "./tf_model.weights.h5" + + +def main(): + model = Net() + model.build(input_shape=(None, 28, 28)) + model.compile( + optimizer="adam", loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"] + ) + model.summary() + + flare.init() + + sys_info = flare.system_info() + print(f"system info is: {sys_info}", flush=True) + + (train_images, train_labels), ( + test_images, + test_labels, + ) = tf.keras.datasets.mnist.load_data() + train_images, test_images = ( + train_images / 255.0, + test_images / 255.0, + ) + + # simulate separate datasets for each client by dividing MNIST dataset in half + client_name = sys_info["site_name"] + if client_name == "site-1": + train_images = train_images[: len(train_images) // 2] + train_labels = train_labels[: len(train_labels) // 2] + test_images = test_images[: len(test_images) // 2] + test_labels = test_labels[: len(test_labels) // 2] + elif client_name == "site-2": + train_images = train_images[len(train_images) // 2 :] + train_labels = train_labels[len(train_labels) // 2 :] + test_images = test_images[len(test_images) // 2 :] + test_labels = test_labels[len(test_labels) // 2 :] + + while flare.is_running(): + input_model = flare.receive() + print(f"current_round={input_model.current_round}") + + sys_info = flare.system_info() + print(f"system info is: {sys_info}") + + for k, v in input_model.params.items(): + model.get_layer(k).set_weights(v) + + _, test_global_acc = model.evaluate(test_images, test_labels, verbose=2) + print( + f"Accuracy of the received model on round {input_model.current_round} on the test images: {test_global_acc * 100} %" + ) + + # training + model.fit(train_images, train_labels, epochs=1, validation_data=(test_images, test_labels)) + + print("Finished Training") + + model.save_weights(WEIGHTS_PATH) + + sys_info = flare.system_info() + print(f"system info is: {sys_info}", flush=True) + print(f"finished round: {input_model.current_round}", flush=True) + + output_model = flare.FLModel( + params={layer.name: layer.get_weights() for layer in model.layers}, + params_type="FULL", + metrics={"accuracy": test_global_acc}, + current_round=input_model.current_round, + ) + + flare.send(output_model) + + +if __name__ == "__main__": + main() diff --git a/examples/hello-world/hello-cyclic/src/tf_net.py b/examples/hello-world/hello-cyclic/src/tf_net.py new file mode 100644 index 0000000000..487db26ea3 --- /dev/null +++ b/examples/hello-world/hello-cyclic/src/tf_net.py @@ -0,0 +1,25 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
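+# Minimal Keras Sequential classifier for 28x28 MNIST digits:
+# Flatten -> Dense(128, relu) -> Dropout(0.2) -> Dense(10) logits (no softmax; the loss is built with from_logits=True).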
+ +from tensorflow.keras import layers, models + + +class Net(models.Sequential): + def __init__(self, input_shape=(None, 28, 28)): + super().__init__() + self._input_shape = input_shape + self.add(layers.Flatten()) + self.add(layers.Dense(128, activation="relu")) + self.add(layers.Dropout(0.2)) + self.add(layers.Dense(10)) diff --git a/examples/hello-world/hello-fedavg-numpy/README.md b/examples/hello-world/hello-fedavg-numpy/README.md new file mode 100644 index 0000000000..380efeb094 --- /dev/null +++ b/examples/hello-world/hello-fedavg-numpy/README.md @@ -0,0 +1,36 @@ +# Hello FedAvg NumPy + +This example showcases Federated Averaging ([FedAvg](https://arxiv.org/abs/1602.05629)) with NumPy. + +> **_NOTE:_** This example uses a NumPy-based trainer and will generate its data within the code. + +You can follow the [Getting Started with NVFlare (NumPy)](hello-fedavg-numpy_getting_started.ipynb) +for a detailed walkthrough of the basic concepts. + +See the [Hello FedAvg with NumPy](https://nvflare.readthedocs.io/en/main/examples/hello_fedavg_numpy.html) example documentation page for details on this +example. + +To run this example with the FLARE API, you can follow the [hello_world notebook](../hello_world.ipynb), or you can quickly get +started with the following: + +### 1. Install NVIDIA FLARE + +Follow the [Installation](https://nvflare.readthedocs.io/en/main/quickstart.html) instructions. + +### 2. Run the experiment + +Run the script using the job API to create the job and run it with the simulator: + +``` +python3 fedavg_script_executor_hello-numpy.py +``` + +### 3. Access the logs and results + +You can find the running logs and results inside the simulator's workspace: + +```bash +$ ls /tmp/nvflare/jobs/workdir/ +``` + +For how to use the FLARE API to run this app, see [this notebook](hello-fedavg-numpy_flare_api.ipynb). diff --git a/examples/hello-world/hello-fedavg-numpy/fedavg_script_executor_hello-numpy.py b/examples/hello-world/hello-fedavg-numpy/fedavg_script_executor_hello-numpy.py new file mode 100644 index 0000000000..587b2b3b38 --- /dev/null +++ b/examples/hello-world/hello-fedavg-numpy/fedavg_script_executor_hello-numpy.py @@ -0,0 +1,40 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
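+# Job script: builds a FedJob with a FedAvg controller on the server and one ScriptExecutor per
+# client running src/hello-numpy_fl.py with the NumPy exchange format, then runs it in the simulator.
+# To submit the same job via the FLARE API instead, uncomment the job.export_job(...) line below and
+# point submit_job() at the exported folder (see hello-fedavg-numpy_flare_api.ipynb).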
+ +from nvflare import FedAvg, FedJob, ScriptExecutor +from nvflare.client.config import ExchangeFormat + +if __name__ == "__main__": + n_clients = 2 + num_rounds = 3 + train_script = "src/hello-numpy_fl.py" + + job = FedJob(name="hello-fedavg-numpy") + + # Define the controller workflow and send to server + controller = FedAvg( + num_clients=n_clients, + num_rounds=num_rounds, + ) + job.to(controller, "server") + + # Add clients + for i in range(n_clients): + executor = ScriptExecutor( + task_script_path=train_script, task_script_args="", params_exchange_format=ExchangeFormat.NUMPY + ) + job.to(executor, f"site-{i+1}") + + # job.export_job("/tmp/nvflare/jobs/job_config") + job.simulator_run("/tmp/nvflare/jobs/workdir") diff --git a/examples/hello-world/hello-numpy-sag/hello_numpy_sag.ipynb b/examples/hello-world/hello-fedavg-numpy/hello-fedavg-numpy_flare_api.ipynb similarity index 92% rename from examples/hello-world/hello-numpy-sag/hello_numpy_sag.ipynb rename to examples/hello-world/hello-fedavg-numpy/hello-fedavg-numpy_flare_api.ipynb index 6a2db8b6bc..06749fad21 100644 --- a/examples/hello-world/hello-numpy-sag/hello_numpy_sag.ipynb +++ b/examples/hello-world/hello-fedavg-numpy/hello-fedavg-numpy_flare_api.ipynb @@ -5,7 +5,7 @@ "id": "e129ede5", "metadata": {}, "source": [ - " # Hello Numpy SAG" + " # Hello Numpy with FLARE API" ] }, { @@ -13,7 +13,7 @@ "id": "9bf7e391", "metadata": {}, "source": [ - "In this notebook, Hello Numpy SAG is run with the FLARE API to execute commands for submitting the job and following along to see the progress." + "In this notebook, Hello Numpy is run with the FLARE API to execute commands for submitting the job and following along to see the progress." ] }, { @@ -84,7 +84,7 @@ "source": [ "### 4. Submit the Job with the FLARE API\n", "\n", - "With a session successfully connected, you can use `submit_job()` to submit your job. You can change `path_to_example_job` to the location of the job you are submitting. If your session is not active, go back to the previous step and connect with a session.\n", + "With a session successfully connected, you can use `submit_job()` to submit your job. You can change `path_to_example_job` to the location of the job you are submitting (make sure you have exported the job with the line containing `job.export_job()` uncommented in [fedavg_script_executor_hello-numpy.py](fedavg_script_executor_hello-numpy.py)). 
If your session is not active, go back to the previous step and connect with a session.\n", "\n", "With POC command, we link the examples to the following directory ``` /tmp/nvflare/poc/example_project/prod_00/admin@nvidia.com/transfer```" ] @@ -110,7 +110,7 @@ }, "outputs": [], "source": [ - "path_to_example_job = \"hello-world/hello-numpy-sag/jobs/hello-numpy-sag\"\n", + "path_to_example_job = \"/tmp/nvflare/jobs/job_config/hello-fedavg-numpy\"\n", "job_id = sess.submit_job(path_to_example_job)\n", "print(job_id + \" was submitted\")" ] @@ -168,14 +168,6 @@ "\n", "sess.close()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "331c0ba2-8abe-47a3-a864-18dcb7489a44", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/examples/hello-world/hello-fedavg-numpy/hello-fedavg-numpy_getting_started.ipynb b/examples/hello-world/hello-fedavg-numpy/hello-fedavg-numpy_getting_started.ipynb new file mode 100644 index 0000000000..78f6413ba4 --- /dev/null +++ b/examples/hello-world/hello-fedavg-numpy/hello-fedavg-numpy_getting_started.ipynb @@ -0,0 +1,320 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "7a5c3d67-a6ea-4f59-84d2-effc3ef016e1", + "metadata": {}, + "source": [ + "# Getting Started with NVFlare (Numpy)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/NVFlare/blob/main/examples/getting_started/pt/nvflare_pt_getting_started.ipynb)\n", + "\n", + "NVFlare is an open-source framework that allows researchers and\n", + "data scientists to seamlessly move their machine learning and deep\n", + "learning workflows into a federated paradigm." + ] + }, + { + "cell_type": "markdown", + "id": "fcf2b4a8-ed42-421d-8898-c0c93f9d8a09", + "metadata": {}, + "source": [ + "## Basic Concepts\n", + "At the heart of NVFlare lies the concept of collaboration through\n", + "\"tasks.\" An FL controller assigns tasks (e.g., training on local data) to one or more FL clients, processes returned\n", + "results (e.g., model weight updates), and may assign additional\n", + "tasks based on these results and other factors (e.g., a pre-configured\n", + "number of training rounds). The clients run executors which can listen for tasks and perform the necessary computations locally, such as model training. This task-based interaction repeats\n", + "until the experiment’s objectives are met. \n", + "\n", + "We can also add data filters (for example, for [homomorphic encryption](https://www.usenix.org/conference/atc20/presentation/zhang-chengliang) or [differential privacy filters](https://arxiv.org/abs/1910.00962)) to the task data or results received or produced by the server or clients.\n", + "\n", + "\"NVIDIA" + ] + }, + { + "cell_type": "markdown", + "id": "907933a8-20fd-4aa7-a3bf-3f5b5829a544", + "metadata": {}, + "source": [ + "## Setup environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33bba668-72ac-4e69-aaed-8d4254f547c0", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install -q -r nvflare~=2.5.0rc" + ] + }, + { + "cell_type": "markdown", + "id": "b68cb248-dc6a-48d1-880d-33c4324d9723", + "metadata": {}, + "source": [ + "## Federated Averaging with NVFlare\n", + "Given the flexible controller and executor concepts, it is easy to implement different computing & communication patterns with NVFlare, such as [FedAvg](https://proceedings.mlr.press/v54/mcmahan17a?ref=https://githubhelp.com). 
\n", + "\n", + "The controller's `run()` routine is responsible for assigning tasks and processing task results from the Executors. " + ] + }, + { + "cell_type": "markdown", + "id": "b2f84fb1-9dd3-4c72-a727-c4614260f02f", + "metadata": {}, + "source": [ + "### Server Code\n", + "First, we use a simple implementation of the [FedAvg](https://proceedings.mlr.press/v54/mcmahan17a?ref=https://githubhelp.com) algorithm with NVFlare. " + ] + }, + { + "cell_type": "markdown", + "id": "d62a13d5-1130-44e6-8818-70e30de401e6", + "metadata": {}, + "source": [ + "```python\n", + "class FedAvg(BaseFedAvg):\n", + " def run(self) -> None:\n", + " self.info(\"Start FedAvg.\")\n", + "\n", + " model = self.load_model()\n", + " model.start_round = self.start_round\n", + " model.total_rounds = self.num_rounds\n", + "\n", + " for self.current_round in range(self.start_round, self.start_round + self.num_rounds):\n", + " self.info(f\"Round {self.current_round} started.\")\n", + " model.current_round = self.current_round\n", + "\n", + " clients = self.sample_clients(self.num_clients)\n", + "\n", + " results = self.send_model_and_wait(targets=clients, data=model)\n", + "\n", + " aggregate_results = self.aggregate(results)\n", + "\n", + " model = self.update_model(model, aggregate_results)\n", + "\n", + " self.save_model(model)\n", + "\n", + " self.info(\"Finished FedAvg.\")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "d24b6476-089a-4e9d-825b-07107bd5d84a", + "metadata": {}, + "source": [ + "### Client Code \n", + "For this numpy example, we just have mock training by adding one to the data, so there are no deep learning concepts and we can focus on the flow of NVFlare." + ] + }, + { + "cell_type": "markdown", + "id": "1c551053-5460-4d83-8578-796074170342", + "metadata": {}, + "source": [ + "On the client side, the training workflow is as follows:\n", + "1. Receive the model from the FL server (for this example we initialize the model in the client code to the numpy array [[1, 2, 3], [4, 5, 6], [7, 8, 9]] if the model params are empty).\n", + "2. Perform local training on the received global model\n", + "and/or evaluate the received global model for model\n", + "selection.\n", + "3. Send the new model back to the FL server." + ] + }, + { + "cell_type": "markdown", + "id": "c02bfc2a-783c-494f-9427-c38f40a2e870", + "metadata": {}, + "source": [ + "Using NVFlare's client API, we can easily adapt machine learning code that was written for centralized training and apply it in a federated scenario.\n", + "For a general use case, there are three essential methods to achieve this using the Client API :\n", + "- `init()`: Initializes NVFlare Client API environment.\n", + "- `receive()`: Receives model from the FL server.\n", + "- `send()`: Sends the model to the FL server." + ] + }, + { + "cell_type": "markdown", + "id": "9115ee07-d848-4a7c-99ad-64e20ab7093c", + "metadata": {}, + "source": [ + "With these simple methods, the developers can use the Client API\n", + "to change their centralized training code to an FL scenario with\n", + "five lines of code changes as shown below.\n", + "\n", + "```python\n", + " import nvflare.client as flare\n", + " \n", + " flare.init() # 1. Initializes NVFlare Client API environment.\n", + " input_model = flare.receive() # 2. Receives model from the FL server.\n", + " params = input_model.params # 3. 
Obtain the required information from the received model.\n", + " \n", + " # original local training code\n", + " new_params = local_train(params)\n", + " \n", + " output_model = flare.FLModel(params=new_params) # 4. Put the results in a new `FLModel`\n", + " flare.send(output_model) # 5. Sends the model to the FL server. \n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "67432f44-4144-4347-8d74-e7f57e065a14", + "metadata": {}, + "source": [ + "The full client training script is saved in a separate file, e.g. [./src/hello-numpy_fl.py](./src/hello-numpy_fl.py)." + ] + }, + { + "cell_type": "markdown", + "id": "5da34414-bac4-4352-8077-ab7ade998eec", + "metadata": {}, + "source": [ + "## Run an NVFlare Job\n", + "Now that we have defined the FedAvg controller to run our workflow on the FL server and our client training script to receive the global models, run local training, and send the results back to the FL server, we can put everything together using NVFlare's Job API." + ] + }, + { + "cell_type": "markdown", + "id": "1b70da5d-ba8b-4e65-b47f-44bb9bddae4d", + "metadata": {}, + "source": [ + "#### 1. Define a FedJob\n", + "The `FedJob` is used to define how controllers and executors are placed within a federated job using the `to(object, target)` routine." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13771bfb-901f-485a-9a23-84db1ccd5fe4", + "metadata": {}, + "outputs": [], + "source": [ + "from nvflare import FedAvg, FedJob, ScriptExecutor\n", + "\n", + "job = FedJob(name=\"hello-fedavg-numpy\")" + ] + }, + { + "cell_type": "markdown", + "id": "9361d9f8-54f3-4363-b3ba-706a7ae3a8e9", + "metadata": {}, + "source": [ + "#### 2. Define the Controller Workflow\n", + "Define the controller workflow and send to server." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6962e6cc-995e-4356-8156-3ceba2c7a249", + "metadata": {}, + "outputs": [], + "source": [ + "n_clients = 2\n", + "num_rounds = 3\n", + "\n", + "controller = FedAvg(\n", + " num_clients=n_clients,\n", + " num_rounds=num_rounds,\n", + ")\n", + "job.to(controller, \"server\")" + ] + }, + { + "cell_type": "markdown", + "id": "548966c2-90bf-47ad-91d2-5c6c22c3c4f0", + "metadata": {}, + "source": [ + "#### 3. Add Clients\n", + "Next, we can use the `ScriptExecutor` and send it to each of the clients to run our training script.\n", + "\n", + "Note that our script could have additional input arguments, such as batch size or data path, but we don't use them here for simplicity.\n", + "We can also specify, which GPU should be used to run this client, which is helpful for simulated environments." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ad5d36fe-9ae5-43c3-80bc-2cdc66bf7a7e", + "metadata": {}, + "outputs": [], + "source": [ + "from nvflare.client.config import ExchangeFormat\n", + "\n", + "train_script = \"src/hello-numpy_fl.py\"\n", + "\n", + "for i in range(n_clients):\n", + " executor = ScriptExecutor(\n", + " task_script_path=train_script, task_script_args=\"\", params_exchange_format=ExchangeFormat.NUMPY\n", + " )\n", + " job.to(executor, f\"site-{i}\")" + ] + }, + { + "cell_type": "markdown", + "id": "113fd6af-85be-4f75-8a8e-4666771252b3", + "metadata": {}, + "source": [ + "That's it!\n", + "\n", + "#### 4. 
Optionally export the job\n", + "Now, we could export the job and submit it to a real NVFlare deployment using the [Admin client](https://nvflare.readthedocs.io/en/main/real_world_fl/operation.html) or [FLARE API](https://nvflare.readthedocs.io/en/main/real_world_fl/flare_api.html). " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99a270bf-c906-425b-b999-2306cb76eb62", + "metadata": {}, + "outputs": [], + "source": [ + "job.export_job(\"/tmp/nvflare/jobs/job_config\")" + ] + }, + { + "cell_type": "markdown", + "id": "9ac3f0a8-06bb-4bea-89d3-4a5fc5b76c63", + "metadata": {}, + "source": [ + "#### 5. Run FL Simulation\n", + "Finally, we can run our FedJob in simulation using NVFlare's [simulator](https://nvflare.readthedocs.io/en/main/user_guide/nvflare_cli/fl_simulator.html) under the hood. The results will be saved in the specified `workdir`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13068ab7-35cf-49e7-91ed-10993049ef0d", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "job.simulator_run(\"/tmp/nvflare/jobs/workdir\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/hello-world/hello-fedavg-numpy/requirements.txt b/examples/hello-world/hello-fedavg-numpy/requirements.txt new file mode 100644 index 0000000000..e4605852b5 --- /dev/null +++ b/examples/hello-world/hello-fedavg-numpy/requirements.txt @@ -0,0 +1 @@ +nvflare~=2.4.0rc diff --git a/examples/hello-world/hello-fedavg-numpy/src/hello-numpy_fl.py b/examples/hello-world/hello-fedavg-numpy/src/hello-numpy_fl.py new file mode 100644 index 0000000000..12fe798741 --- /dev/null +++ b/examples/hello-world/hello-fedavg-numpy/src/hello-numpy_fl.py @@ -0,0 +1,75 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
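+# Client-side script for the NumPy FedAvg example: "training" is mocked by adding 1 to every element
+# of the received array, and "evaluation" simply reports the mean of the received weights.
+# If the server sends empty params in the first round, the client initializes the 3x3 starting array itself.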
+ +import copy + +import numpy as np + +import nvflare.client as flare + + +def train(input_arr): + output_arr = copy.deepcopy(input_arr) + # mock training with plus 1 + return output_arr + 1 + + +def evaluate(input_arr): + # mock evaluation metrics + return np.mean(input_arr) + + +def main(): + flare.init() + + sys_info = flare.system_info() + print(f"system info is: {sys_info}", flush=True) + + while flare.is_running(): + input_model = flare.receive() + print(f"current_round={input_model.current_round}") + print(f"received weights: {input_model.params}") + + sys_info = flare.system_info() + print(f"system info is: {sys_info}") + + if input_model.params == {}: + params = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32) + else: + params = np.array(input_model.params["numpy_key"], dtype=np.float32) + + # training + new_params = train(params) + + # evaluation + metrics = evaluate(params) + + sys_info = flare.system_info() + print(f"system info is: {sys_info}", flush=True) + print(f"finished round: {input_model.current_round}", flush=True) + + print(f"sending weights: {new_params}") + + output_model = flare.FLModel( + params={"numpy_key": new_params}, + params_type="FULL", + metrics={"accuracy": metrics}, + current_round=input_model.current_round, + ) + + flare.send(output_model) + + +if __name__ == "__main__": + main() diff --git a/examples/hello-world/hello-numpy-sag/README.md b/examples/hello-world/hello-numpy-sag/README.md deleted file mode 100644 index a1ca18e092..0000000000 --- a/examples/hello-world/hello-numpy-sag/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Hello Numpy Scatter and Gather - -"[Scatter and Gather](https://nvflare.readthedocs.io/en/main/apidocs/nvflare.app_common.workflows.scatter_and_gather.html)" is the standard workflow to implement Federated Averaging ([FedAvg](https://arxiv.org/abs/1602.05629)). -This workflow follows the hub and spoke model for communicating the global model to each client for local training (i.e., "scattering") and aggregates the result to perform the global model update (i.e., "gathering"). - -> **_NOTE:_** This example uses a Numpy-based trainer and will generate its data within the code. - -You can follow the [hello_world notebook](../hello_world.ipynb) or the following: - -### 1. Install NVIDIA FLARE - -Follow the [Installation](https://nvflare.readthedocs.io/en/main/quickstart.html) instructions. - -### 2. Run the experiment - -Use nvflare simulator to run the hello-examples: - -``` -nvflare simulator -w /tmp/nvflare/hello-numpy-sag -n 2 -t 2 hello-world/hello-numpy-sag/jobs/hello-numpy-sag -``` - -### 3. Access the logs and results - -You can find the running logs and results inside the simulator's workspace/simulate_job - -```bash -$ ls /tmp/nvflare/hello-numpy-sag/simulate_job/ -app_server app_site-1 app_site-2 log.txt model models - -``` - -For how to use the FLARE API to run this app, see [this notebook](hello_numpy_sag.ipynb). diff --git a/examples/hello-world/hello-pt/README.md b/examples/hello-world/hello-pt/README.md index adabac1e53..6795ce2543 100644 --- a/examples/hello-world/hello-pt/README.md +++ b/examples/hello-world/hello-pt/README.md @@ -4,9 +4,16 @@ Example of using [NVIDIA FLARE](https://nvflare.readthedocs.io/en/main/index.htm using federated averaging ([FedAvg](https://arxiv.org/abs/1602.05629)) and [PyTorch](https://pytorch.org/) as the deep learning training framework. 
-> **_NOTE:_** This example uses the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset and will load its data within the trainer code. +> **_NOTE:_** This example uses the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset and will load its data within the client train code. -You can follow the [hello_world notebook](../hello_world.ipynb) or the following: +You can follow the [Getting Started with NVFlare (PyTorch) notebook](../../getting_started/pt/nvflare_pt_getting_started.ipynb) +for a detailed walkthrough of the basic concepts. + +See the [Hello PyTorch](https://nvflare.readthedocs.io/en/main/examples/hello_pt.html) example documentation page for details on this +example. + +To run this example with the FLARE API, you can follow the [hello_world notebook](../hello_world.ipynb), or you can quickly get +started with the following: ### 1. Install NVIDIA FLARE @@ -21,93 +28,16 @@ pip3 install -r requirements.txt ### 2. Run the experiment -Prepare the data first: - -``` -bash ./prepare_data.sh -``` - -Use nvflare simulator to run the hello-examples: +Run the script using the job API to create the job and run it with the simulator: ``` -nvflare simulator -w /tmp/nvflare/ -n 2 -t 2 hello-pt/jobs/hello-pt +python3 fedavg_script_executor_hello-pt.py ``` ### 3. Access the logs and results -You can find the running logs and results inside the simulator's workspace/simulate_job +You can find the running logs and results inside the simulator's workspace: ```bash -$ ls /tmp/nvflare/simulate_job/ -app_server app_site-1 app_site-2 log.txt - -``` - -### 4. Global Model Initialization Approaches - -There are various methods for initializing a global model in federated learning, which can be done either on the FL server or on the client side. The choice of model initialization approach depends on the specific use case of the user. - -When the global model is initialized on the FL server-side, it is then broadcasted to all the FL clients. -The clients can use this initial model as a starting point for training. -The advantage of server-side model initialization is that the model only needs to be initialized once in one location -(the server), and then distributed to all clients, ensuring that all clients have the same initial model. -However, it is important to note that server-side model initialization may present potential security risks -if custom Python code is run on the server. An alternative approach for server-side initialization is to use -a predefined model file as the initialization model. The ScatterAndGather controller is using persistor to reads / init -model from server-side. - -In this example, we are using **client-side** model initialization approach. - -The client-side model initialization avoids server-side custom code as well as without extra setup in the model-file based approach. -client side initialization asks every client to send the initialized model as a pre-task in the workflow, before the training starts. 
-On the server side, once the server receive the initial models from clients, server can choose different strategies to leverage the models -from different clients: -* Select one model randomly among all clients' models, then use it as the global initial model -* Apply aggregation function to generate the global initial model - -In this example,we use _InitializeGlobalWeights_ controller, which have implemented the following strategies ( weight_method) -* Weight_method = "first" , then use the weights reported from the first client; -* weight_method = "client", then only use the weights reported from the specified client. - -If one's use case demands a different strategy, then you can implement a new model initialization controller. - -Looking at the job workflow, we have defined three workflows in config_fed_server.json - * pre_train ( model initialization ) with _InitializeGlobalWeights_ controller - * scatter_and_gather (train) with _ScatterAndGather_ controller - * cross_site_validate (cross validation) with _CrossSiteModelEval_ - +$ ls /tmp/nvflare/jobs/workdir/ ``` - "workflows": [ - { - "id": "pre_train", - "name": "InitializeGlobalWeights", - "args": { - "task_name": "get_weights" - } - }, - { - "id": "scatter_and_gather", - "name": "ScatterAndGather", - "args": { - ... skip arguments ... - } - }, - { - "id": "cross_site_validate", - "name": "CrossSiteModelEval", - "args": { - "model_locator_id": "model_locator" - } - } - ] -``` - -Once the global model is initialized, it is set to the fl_ctx as sticky property and then pass to the -next controller (ScatterAndGather) in the training step. The sticky property allows properties pass cross controllers. -The ScatterAndGather still leverage _persistor_ to load the initial global model, but since there is no model file -or server-side initialized model, the ScatterAndGather then try to load the model from fl_ctx's _"global_model"_ property, -which is initialized from the client-side and set by the previous controller in the workflow. - - - - diff --git a/examples/hello-world/hello-pt/fedavg_script_executor_hello-pt.py b/examples/hello-world/hello-pt/fedavg_script_executor_hello-pt.py new file mode 100644 index 0000000000..32d0acc999 --- /dev/null +++ b/examples/hello-world/hello-pt/fedavg_script_executor_hello-pt.py @@ -0,0 +1,44 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
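+# Job script for the PyTorch example: a FedAvg controller and an initial SimpleNetwork global model
+# are sent to the server, and each client gets a ScriptExecutor that runs src/hello-pt_cifar10_fl.py.
+# The commented-out task_script_args shows where per-site arguments (e.g. batch size, data path) could go.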
+ +from src.simple_network import SimpleNetwork + +from nvflare import FedAvg, FedJob, ScriptExecutor + +if __name__ == "__main__": + n_clients = 2 + num_rounds = 2 + train_script = "src/hello-pt_cifar10_fl.py" + + job = FedJob(name="hello-pt_cifar10_fedavg") + + # Define the controller workflow and send to server + controller = FedAvg( + num_clients=n_clients, + num_rounds=num_rounds, + ) + job.to(controller, "server") + + # Define the initial global model and send to server + job.to(SimpleNetwork(), "server") + + # Add clients + for i in range(n_clients): + executor = ScriptExecutor( + task_script_path=train_script, task_script_args="" # f"--batch_size 32 --data_path /tmp/data/site-{i}" + ) + job.to(executor, f"site-{i+1}", gpu=0) + + # job.export_job("/tmp/nvflare/jobs/job_config") + job.simulator_run("/tmp/nvflare/jobs/workdir") diff --git a/examples/hello-world/hello-pt/prepare_data.sh b/examples/hello-world/hello-pt/prepare_data.sh deleted file mode 100755 index b9bad39f45..0000000000 --- a/examples/hello-world/hello-pt/prepare_data.sh +++ /dev/null @@ -1,3 +0,0 @@ -DATASET_ROOT="~/data" - -python3 -c "import torchvision.datasets as datasets; datasets.CIFAR10(root='${DATASET_ROOT}', train=True, download=True)" diff --git a/examples/hello-world/hello-pt/src/hello-pt_cifar10_fl.py b/examples/hello-world/hello-pt/src/hello-pt_cifar10_fl.py new file mode 100644 index 0000000000..0cf8d706d0 --- /dev/null +++ b/examples/hello-world/hello-pt/src/hello-pt_cifar10_fl.py @@ -0,0 +1,95 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# NOTE: SummaryWriter is instantiated below (summary_writer = SummaryWriter()) but was never imported;
+# assuming the NVFlare client tracking writer is the one intended here, since add_scalar() is called
+# with the scalar= keyword that this writer accepts.
+from nvflare.client.tracking import SummaryWriter
+ +import os + +import torch +from simple_network import SimpleNetwork +from torch import nn +from torch.optim import SGD +from torch.utils.data.dataloader import DataLoader +from torchvision.datasets import CIFAR10 +from torchvision.transforms import Compose, Normalize, ToTensor + +import nvflare.client as flare + +DATASET_PATH = "/tmp/nvflare/data" + + +def main(): + batch_size = 4 + epochs = 5 + lr = 0.01 + model = SimpleNetwork() + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + model.to(device) + loss = nn.CrossEntropyLoss() + optimizer = SGD(model.parameters(), lr=lr, momentum=0.9) + transforms = Compose( + [ + ToTensor(), + Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), + ] + ) + + flare.init() + sys_info = flare.system_info() + client_name = sys_info["site_name"] + + train_dataset = CIFAR10( + root=os.path.join(DATASET_PATH, client_name), transform=transforms, download=True, train=True + ) + train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) + + summary_writer = SummaryWriter() + while flare.is_running(): + input_model = flare.receive() + print(f"current_round={input_model.current_round}") + + model.load_state_dict(input_model.params) + + steps = epochs * len(train_loader) + for epoch in range(epochs): + running_loss = 0.0 + for i, batch in enumerate(train_loader): + images, labels = batch[0].to(device), batch[1].to(device) + optimizer.zero_grad() + + predictions = model(images) + cost = loss(predictions, labels) + cost.backward() + optimizer.step() + + running_loss += cost.cpu().detach().numpy() / images.size()[0] + if i % 3000 == 0: + print(f"Epoch: {epoch}/{epochs}, Iteration: {i}, Loss: {running_loss/3000}") + global_step = input_model.current_round * steps + epoch * len(train_loader) + i + summary_writer.add_scalar(tag="loss_for_each_batch", scalar=running_loss, global_step=global_step) + running_loss = 0.0 + + print("Finished Training") + + PATH = "./cifar_net.pth" + torch.save(model.state_dict(), PATH) + + output_model = flare.FLModel( + params=model.cpu().state_dict(), + meta={"NUM_STEPS_CURRENT_ROUND": steps}, + ) + + flare.send(output_model) + + +if __name__ == "__main__": + main() diff --git a/examples/hello-world/hello-pt/src/simple_network.py b/examples/hello-world/hello-pt/src/simple_network.py new file mode 100644 index 0000000000..609b0b1581 --- /dev/null +++ b/examples/hello-world/hello-pt/src/simple_network.py @@ -0,0 +1,37 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
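+# Small CNN for 32x32x3 CIFAR-10 images: two conv + max-pool blocks followed by three fully
+# connected layers producing 10 class logits.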
+ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class SimpleNetwork(nn.Module): + def __init__(self): + super(SimpleNetwork, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = torch.flatten(x, 1) # flatten all dimensions except batch + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + x = self.fc3(x) + return x diff --git a/examples/hello-world/hello-tf2/README.md b/examples/hello-world/hello-tf/README.md similarity index 65% rename from examples/hello-world/hello-tf2/README.md rename to examples/hello-world/hello-tf/README.md index e348bae7ee..f909715378 100644 --- a/examples/hello-world/hello-tf2/README.md +++ b/examples/hello-world/hello-tf/README.md @@ -6,7 +6,11 @@ and [TensorFlow](https://tensorflow.org/) as the deep learning training framewor > **_NOTE:_** This example uses the [MNIST](http://yann.lecun.com/exdb/mnist/) handwritten digits dataset and will load its data within the trainer code. -You can follow the [hello_world notebook](../hello_world.ipynb) or the following: +See the [Hello TensorFlow](https://nvflare.readthedocs.io/en/main/examples/hello_tf.html) example documentation page for details on this +example. + +To run this example with the FLARE API, you can follow the [hello_world notebook](../hello_world.ipynb), or you can quickly get +started with the following: ### 1. Install NVIDIA FLARE @@ -18,42 +22,34 @@ Install additional requirements: pip3 install tensorflow ``` -#### Notes on running with GPUs +### 2. Run the experiment -For running with GPUs, we recommend using -[NVIDIA TensorFlow docker](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tensorflow) +Run the script using the job API to create the job and run it with the simulator: -If you choose to run the example using GPUs, it is important to note that, -by default, TensorFlow will attempt to allocate all available GPU memory at the start. -In scenarios where multiple clients are involved, you have to prevent TensorFlow from allocating all GPU memory -by setting the following flags. -```bash -TF_FORCE_GPU_ALLOW_GROWTH=true TF_GPU_ALLOCATOR=cuda_malloc_async +``` +python3 fedavg_script_executor_hello-tf.py ``` -If you possess more GPUs than clients, a good strategy is to run one client on each GPU. -This can be achieved using the `-gpu` argument during simulation, e.g., `nvflare simulator -n 2 -gpu 0,1 [job]`. - - -### 2. Run the experiment +### 3. Access the logs and results -Prepare the data first: +You can find the running logs and results inside the simulator's workspace: ```bash -bash ./prepare_data.sh +$ ls /tmp/nvflare/jobs/workdir ``` -Use nvflare simulator to run the hello-examples: - -```bash -TF_FORCE_GPU_ALLOW_GROWTH=true TF_GPU_ALLOCATOR=cuda_malloc_async nvflare simulator -w /tmp/nvflare/ -n 2 -t 2 ./jobs/hello-tf2 -``` +#### Notes on running with GPUs -### 3. Access the logs and results +For running with GPUs, we recommend using +[NVIDIA TensorFlow docker](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tensorflow) -You can find the running logs and results inside the simulator's workspace/simulate_job +If you choose to run the example using GPUs, it is important to note that by default, TensorFlow will attempt to allocate all available GPU memory at the start. 
+In scenarios where multiple clients are involved, you have to prevent TensorFlow from allocating all GPU memory +by setting the following flags. ```bash -$ ls /tmp/nvflare/simulate_job/ -app_server app_site-1 app_site-2 log.txt +TF_FORCE_GPU_ALLOW_GROWTH=true TF_GPU_ALLOCATOR=cuda_malloc_async ``` + +If you possess more GPUs than clients, a good strategy is to run one client on each GPU. +This can be achieved using the `-gpu` argument if using the nvflare simulator command, e.g., `nvflare simulator -n 2 -gpu 0,1 [job]`. diff --git a/examples/hello-world/hello-tf/fedavg_script_executor_hello-tf.py b/examples/hello-world/hello-tf/fedavg_script_executor_hello-tf.py new file mode 100644 index 0000000000..e34c419b83 --- /dev/null +++ b/examples/hello-world/hello-tf/fedavg_script_executor_hello-tf.py @@ -0,0 +1,44 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from src.tf_net import TFNet + +from nvflare import FedAvg, FedJob, ScriptExecutor + +if __name__ == "__main__": + n_clients = 2 + num_rounds = 3 + train_script = "src/hello-tf_fl.py" + + job = FedJob(name="hello-tf_fedavg") + + # Define the controller workflow and send to server + controller = FedAvg( + num_clients=n_clients, + num_rounds=num_rounds, + ) + job.to(controller, "server") + + # Define the initial global model and send to server + job.to(TFNet(), "server") + + # Add clients + for i in range(n_clients): + executor = ScriptExecutor( + task_script_path=train_script, task_script_args="" # f"--batch_size 32 --data_path /tmp/data/site-{i}" + ) + job.to(executor, f"site-{i+1}", gpu=0) + + # job.export_job("/tmp/nvflare/jobs/job_config") + job.simulator_run("/tmp/nvflare/jobs/workdir") diff --git a/examples/hello-world/hello-tf2/requirements.txt b/examples/hello-world/hello-tf/requirements.txt similarity index 100% rename from examples/hello-world/hello-tf2/requirements.txt rename to examples/hello-world/hello-tf/requirements.txt diff --git a/examples/hello-world/hello-tf/src/hello-tf_fl.py b/examples/hello-world/hello-tf/src/hello-tf_fl.py new file mode 100644 index 0000000000..8a820b78be --- /dev/null +++ b/examples/hello-world/hello-tf/src/hello-tf_fl.py @@ -0,0 +1,95 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
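+# Client-side training script for the TensorFlow FedAvg example: each round the site receives the
+# aggregated global weights from the server, evaluates them on its local MNIST test split, trains
+# for one epoch on its half of MNIST, and sends the updated layer weights back for aggregation.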
+ +import tensorflow as tf +from tf_net import TFNet + +import nvflare.client as flare + +WEIGHTS_PATH = "./tf_model.weights.h5" + + +def main(): + flare.init() + + sys_info = flare.system_info() + print(f"system info is: {sys_info}", flush=True) + + model = TFNet() + model.build(input_shape=(None, 28, 28)) + model.compile( + optimizer="adam", loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"] + ) + model.summary() + + (train_images, train_labels), ( + test_images, + test_labels, + ) = tf.keras.datasets.mnist.load_data() + train_images, test_images = ( + train_images / 255.0, + test_images / 255.0, + ) + + # simulate separate datasets for each client by dividing MNIST dataset in half + client_name = sys_info["site_name"] + if client_name == "site-1": + train_images = train_images[: len(train_images) // 2] + train_labels = train_labels[: len(train_labels) // 2] + test_images = test_images[: len(test_images) // 2] + test_labels = test_labels[: len(test_labels) // 2] + elif client_name == "site-2": + train_images = train_images[len(train_images) // 2 :] + train_labels = train_labels[len(train_labels) // 2 :] + test_images = test_images[len(test_images) // 2 :] + test_labels = test_labels[len(test_labels) // 2 :] + + while flare.is_running(): + input_model = flare.receive() + print(f"current_round={input_model.current_round}") + + sys_info = flare.system_info() + print(f"system info is: {sys_info}") + + for k, v in input_model.params.items(): + model.get_layer(k).set_weights(v) + + _, test_global_acc = model.evaluate(test_images, test_labels, verbose=2) + print( + f"Accuracy of the received model on round {input_model.current_round} on the test images: {test_global_acc * 100} %" + ) + + # training + model.fit(train_images, train_labels, epochs=1, validation_data=(test_images, test_labels)) + + print("Finished Training") + + model.save_weights(WEIGHTS_PATH) + + sys_info = flare.system_info() + print(f"system info is: {sys_info}", flush=True) + print(f"finished round: {input_model.current_round}", flush=True) + + output_model = flare.FLModel( + params={layer.name: layer.get_weights() for layer in model.layers}, + params_type="FULL", + metrics={"accuracy": test_global_acc}, + current_round=input_model.current_round, + ) + + flare.send(output_model) + + +if __name__ == "__main__": + main() diff --git a/examples/hello-world/hello-tf/src/tf_net.py b/examples/hello-world/hello-tf/src/tf_net.py new file mode 100644 index 0000000000..9ea8549162 --- /dev/null +++ b/examples/hello-world/hello-tf/src/tf_net.py @@ -0,0 +1,25 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from tensorflow.keras import layers, models + + +class TFNet(models.Sequential): + def __init__(self, input_shape=(None, 28, 28)): + super().__init__() + self._input_shape = input_shape + self.add(layers.Flatten()) + self.add(layers.Dense(128, activation="relu")) + self.add(layers.Dropout(0.2)) + self.add(layers.Dense(10)) diff --git a/examples/hello-world/hello-tf2/prepare_data.sh b/examples/hello-world/hello-tf2/prepare_data.sh deleted file mode 100755 index a9bd5c7741..0000000000 --- a/examples/hello-world/hello-tf2/prepare_data.sh +++ /dev/null @@ -1 +0,0 @@ -python3 -c "from tensorflow.keras.datasets import mnist; mnist_data = mnist.load_data()" \ No newline at end of file diff --git a/tests/integration_test/data/test_configs/standalone_job/hello_numpy_examples.yml b/tests/integration_test/data/test_configs/standalone_job/hello_numpy_examples.yml index f3a87da6d7..588e8591f8 100644 --- a/tests/integration_test/data/test_configs/standalone_job/hello_numpy_examples.yml +++ b/tests/integration_test/data/test_configs/standalone_job/hello_numpy_examples.yml @@ -5,44 +5,6 @@ cleanup: True tests: - - test_name: "run hello-numpy-sag" - event_sequence: - - "trigger": - "type": "server_log" - "data": "Server started" - "actions": [ "submit_job hello-numpy-sag/jobs/hello-numpy-sag" ] - "result": - "type": "job_submit_success" - - "trigger": - "type": "run_state" - "data": { "run_finished": True } - "actions": [ "ensure_current_job_done" ] - "result": - "type": "run_state" - "data": { "run_finished": True } - validators: - - path: tests.integration_test.src.validators.NumpySAGResultValidator - args: { expected_result: [ [ 4, 5, 6 ], [ 7, 8, 9 ], [ 10, 11, 12 ] ] } - - test_name: "run hello-numpy-cross-val" - event_sequence: - - "trigger": - "type": "server_log" - "data": "Server started" - "actions": [ "submit_job hello-numpy-cross-val/jobs/hello-numpy-cross-val" ] - "result": - "type": "job_submit_success" - - "trigger": - "type": "run_state" - "data": { "run_finished": True } - "actions": [ "ensure_current_job_done" ] - "result": - "type": "run_state" - "data": { "run_finished": True } - validators: - - path: tests.integration_test.src.validators.CrossValResultValidator - args: { server_model_names: ["server"] } - - path: tests.integration_test.src.validators.NumpySAGResultValidator - args: { expected_result: [ [ 4, 5, 6 ], [ 7, 8, 9 ], [ 10, 11, 12 ] ] } - test_name: "run hello-ccwf" # TODO: add a result validator for the "models" saved on client site (ccwf) event_sequence: diff --git a/tests/integration_test/data/test_configs/standalone_job/hello_numpy_previous_examples.yml b/tests/integration_test/data/test_configs/standalone_job/hello_numpy_previous_examples.yml new file mode 100644 index 0000000000..a8efb14b22 --- /dev/null +++ b/tests/integration_test/data/test_configs/standalone_job/hello_numpy_previous_examples.yml @@ -0,0 +1,45 @@ +n_servers: 1 +n_clients: 2 +jobs_root_dir: data/test_configs/standalone_job/previous_jobs +cleanup: True + + +tests: + - test_name: "run hello-numpy-sag" + event_sequence: + - "trigger": + "type": "server_log" + "data": "Server started" + "actions": [ "submit_job hello-numpy-sag" ] + "result": + "type": "job_submit_success" + - "trigger": + "type": "run_state" + "data": { "run_finished": True } + "actions": [ "ensure_current_job_done" ] + "result": + "type": "run_state" + "data": { "run_finished": True } + validators: + - path: tests.integration_test.src.validators.NumpySAGResultValidator + args: { expected_result: [ [ 4, 5, 6 ], [ 7, 8, 9 ], [ 10, 11, 12 
] ] } + - test_name: "run hello-numpy-cross-val" + event_sequence: + - "trigger": + "type": "server_log" + "data": "Server started" + "actions": [ "submit_job hello-numpy-cross-val" ] + "result": + "type": "job_submit_success" + - "trigger": + "type": "run_state" + "data": { "run_finished": True } + "actions": [ "ensure_current_job_done" ] + "result": + "type": "run_state" + "data": { "run_finished": True } + validators: + - path: tests.integration_test.src.validators.CrossValResultValidator + args: { server_model_names: ["server"] } + - path: tests.integration_test.src.validators.NumpySAGResultValidator + args: { expected_result: [ [ 4, 5, 6 ], [ 7, 8, 9 ], [ 10, 11, 12 ] ] } diff --git a/tests/integration_test/data/test_configs/standalone_job/hello_pt_examples.yml b/tests/integration_test/data/test_configs/standalone_job/hello_pt_examples.yml index 52af5c9417..df5b2bbdc9 100644 --- a/tests/integration_test/data/test_configs/standalone_job/hello_pt_examples.yml +++ b/tests/integration_test/data/test_configs/standalone_job/hello_pt_examples.yml @@ -1,6 +1,6 @@ n_servers: 1 n_clients: 2 -jobs_root_dir: ../../examples/hello-world +jobs_root_dir: data/test_configs/standalone_job/previous_jobs cleanup: True @@ -10,7 +10,7 @@ tests: - "trigger": "type": "server_log" "data": "Server started" - "actions": [ "submit_job hello-pt/jobs/hello-pt" ] + "actions": [ "submit_job hello-pt" ] "result": "type": "job_submit_success" - "trigger": diff --git a/tests/integration_test/data/test_configs/standalone_job/hello_tf_examples.yml b/tests/integration_test/data/test_configs/standalone_job/hello_tf_examples.yml index 39b1a59c3b..eeb7a853ec 100644 --- a/tests/integration_test/data/test_configs/standalone_job/hello_tf_examples.yml +++ b/tests/integration_test/data/test_configs/standalone_job/hello_tf_examples.yml @@ -1,6 +1,6 @@ n_servers: 1 n_clients: 2 -jobs_root_dir: ../../examples/hello-world +jobs_root_dir: data/test_configs/standalone_job/previous_jobs cleanup: True @@ -10,7 +10,7 @@ tests: - "trigger": "type": "server_log" "data": "Server started" - "actions": [ "submit_job hello-cyclic/jobs/hello-cyclic" ] + "actions": [ "submit_job hello-cyclic" ] "result": "type": "job_submit_success" - "trigger": @@ -31,7 +31,7 @@ tests: - "trigger": "type": "server_log" "data": "Server started" - "actions": [ "submit_job hello-tf2/jobs/hello-tf2" ] + "actions": [ "submit_job hello-tf2" ] "result": "type": "job_submit_success" - "trigger": diff --git a/examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/config/config_fed_client.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/app/config/config_fed_client.json similarity index 100% rename from examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/config/config_fed_client.json rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/app/config/config_fed_client.json diff --git a/examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/config/config_fed_server.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/app/config/config_fed_server.json similarity index 100% rename from examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/config/config_fed_server.json rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/app/config/config_fed_server.json diff --git a/examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/custom/__init__.py 
b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/app/custom/__init__.py similarity index 100% rename from examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/custom/__init__.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/app/custom/__init__.py diff --git a/examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/custom/tf2_model_persistor.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/app/custom/tf2_model_persistor.py similarity index 100% rename from examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/custom/tf2_model_persistor.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/app/custom/tf2_model_persistor.py diff --git a/examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/custom/tf2_net.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/app/custom/tf2_net.py similarity index 100% rename from examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/custom/tf2_net.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/app/custom/tf2_net.py diff --git a/examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/custom/trainer.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/app/custom/trainer.py similarity index 100% rename from examples/hello-world/hello-cyclic/jobs/hello-cyclic/app/custom/trainer.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/app/custom/trainer.py diff --git a/examples/hello-world/hello-cyclic/jobs/hello-cyclic/meta.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/meta.json similarity index 100% rename from examples/hello-world/hello-cyclic/jobs/hello-cyclic/meta.json rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-cyclic/meta.json diff --git a/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-cross-val/app/config/config_fed_client.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-cross-val/app/config/config_fed_client.json new file mode 100755 index 0000000000..21c98c3291 --- /dev/null +++ b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-cross-val/app/config/config_fed_client.json @@ -0,0 +1,26 @@ +{ + "format_version": 2, + "executors": [ + { + "tasks": [ + "train", + "submit_model" + ], + "executor": { + "path": "nvflare.app_common.np.np_trainer.NPTrainer", + "args": {} + } + }, + { + "tasks": [ + "validate" + ], + "executor": { + "path": "nvflare.app_common.np.np_validator.NPValidator" + } + } + ], + "task_result_filters": [], + "task_data_filters": [], + "components": [] +} \ No newline at end of file diff --git a/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-cross-val/app/config/config_fed_server.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-cross-val/app/config/config_fed_server.json new file mode 100755 index 0000000000..8e978fa8b2 --- /dev/null +++ b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-cross-val/app/config/config_fed_server.json @@ -0,0 +1,73 @@ +{ + "format_version": 2, + "server": { + "heart_beat_timeout": 600 + }, + "task_data_filters": [], + "task_result_filters": [], + "components": [ + { + "id": "persistor", + 
"path": "nvflare.app_common.np.np_model_persistor.NPModelPersistor", + "args": {} + }, + { + "id": "shareable_generator", + "path": "nvflare.app_common.shareablegenerators.full_model_shareable_generator.FullModelShareableGenerator", + "args": {} + }, + { + "id": "aggregator", + "path": "nvflare.app_common.aggregators.intime_accumulate_model_aggregator.InTimeAccumulateWeightedAggregator", + "args": { + "expected_data_kind": "WEIGHTS", + "aggregation_weights": { + "site-1": 1.0, + "site-2": 1.0 + } + } + }, + { + "id": "model_locator", + "path": "nvflare.app_common.np.np_model_locator.NPModelLocator", + "args": {} + }, + { + "id": "formatter", + "path": "nvflare.app_common.np.np_formatter.NPFormatter", + "args": {} + }, + { + "id": "json_generator", + "path": "nvflare.app_common.widgets.validation_json_generator.ValidationJsonGenerator", + "args": {} + } + ], + "workflows": [ + { + "id": "scatter_and_gather", + "path": "nvflare.app_common.workflows.scatter_and_gather.ScatterAndGather", + "args": { + "min_clients": 2, + "num_rounds": 3, + "start_round": 0, + "wait_time_after_min_received": 10, + "aggregator_id": "aggregator", + "persistor_id": "persistor", + "shareable_generator_id": "shareable_generator", + "train_task_name": "train", + "train_timeout": 6000 + } + }, + { + "id": "cross_site_model_eval", + "path": "nvflare.app_common.workflows.cross_site_model_eval.CrossSiteModelEval", + "args": { + "model_locator_id": "model_locator", + "submit_model_timeout": 600, + "validation_timeout": 6000, + "cleanup_models": false + } + } + ] +} \ No newline at end of file diff --git a/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-cross-val/meta.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-cross-val/meta.json new file mode 100644 index 0000000000..202f437746 --- /dev/null +++ b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-cross-val/meta.json @@ -0,0 +1,10 @@ +{ + "name": "hello-numpy-cross-val", + "resource_spec": {}, + "min_clients" : 2, + "deploy_map": { + "app": [ + "@ALL" + ] + } +} diff --git a/examples/hello-world/hello-numpy-sag/jobs/hello-numpy-sag/app/config/config_fed_client.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-sag/app/config/config_fed_client.json similarity index 100% rename from examples/hello-world/hello-numpy-sag/jobs/hello-numpy-sag/app/config/config_fed_client.json rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-sag/app/config/config_fed_client.json diff --git a/examples/hello-world/hello-numpy-sag/jobs/hello-numpy-sag/app/config/config_fed_server.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-sag/app/config/config_fed_server.json similarity index 100% rename from examples/hello-world/hello-numpy-sag/jobs/hello-numpy-sag/app/config/config_fed_server.json rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-sag/app/config/config_fed_server.json diff --git a/examples/hello-world/hello-numpy-sag/jobs/hello-numpy-sag/meta.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-sag/meta.json similarity index 100% rename from examples/hello-world/hello-numpy-sag/jobs/hello-numpy-sag/meta.json rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-numpy-sag/meta.json diff --git 
a/examples/hello-world/hello-pt/jobs/hello-pt/app/config/config_fed_client.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/config/config_fed_client.json similarity index 100% rename from examples/hello-world/hello-pt/jobs/hello-pt/app/config/config_fed_client.json rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/config/config_fed_client.json diff --git a/examples/hello-world/hello-pt/jobs/hello-pt/app/config/config_fed_server.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/config/config_fed_server.json similarity index 100% rename from examples/hello-world/hello-pt/jobs/hello-pt/app/config/config_fed_server.json rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/config/config_fed_server.json diff --git a/examples/hello-world/hello-pt/jobs/hello-pt/app/custom/cifar10trainer.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/custom/cifar10trainer.py similarity index 100% rename from examples/hello-world/hello-pt/jobs/hello-pt/app/custom/cifar10trainer.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/custom/cifar10trainer.py diff --git a/examples/hello-world/hello-pt/jobs/hello-pt/app/custom/cifar10validator.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/custom/cifar10validator.py similarity index 100% rename from examples/hello-world/hello-pt/jobs/hello-pt/app/custom/cifar10validator.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/custom/cifar10validator.py diff --git a/examples/hello-world/hello-pt/jobs/hello-pt/app/custom/pt_constants.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/custom/pt_constants.py similarity index 100% rename from examples/hello-world/hello-pt/jobs/hello-pt/app/custom/pt_constants.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/custom/pt_constants.py diff --git a/examples/hello-world/hello-pt/jobs/hello-pt/app/custom/pt_model_locator.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/custom/pt_model_locator.py similarity index 100% rename from examples/hello-world/hello-pt/jobs/hello-pt/app/custom/pt_model_locator.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/custom/pt_model_locator.py diff --git a/examples/hello-world/hello-pt/jobs/hello-pt/app/custom/simple_network.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/custom/simple_network.py similarity index 100% rename from examples/hello-world/hello-pt/jobs/hello-pt/app/custom/simple_network.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/custom/simple_network.py diff --git a/examples/hello-world/hello-pt/jobs/hello-pt/app/custom/test_custom.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/custom/test_custom.py similarity index 100% rename from examples/hello-world/hello-pt/jobs/hello-pt/app/custom/test_custom.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/app/custom/test_custom.py diff --git a/examples/hello-world/hello-pt/jobs/hello-pt/meta.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/meta.json 
similarity index 100% rename from examples/hello-world/hello-pt/jobs/hello-pt/meta.json rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-pt/meta.json diff --git a/examples/hello-world/hello-tf2/jobs/hello-tf2/app/config/config_fed_client.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/config/config_fed_client.json similarity index 100% rename from examples/hello-world/hello-tf2/jobs/hello-tf2/app/config/config_fed_client.json rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/config/config_fed_client.json diff --git a/examples/hello-world/hello-tf2/jobs/hello-tf2/app/config/config_fed_server.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/config/config_fed_server.json similarity index 100% rename from examples/hello-world/hello-tf2/jobs/hello-tf2/app/config/config_fed_server.json rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/config/config_fed_server.json diff --git a/examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/__init__.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/custom/__init__.py similarity index 100% rename from examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/__init__.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/custom/__init__.py diff --git a/examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/filter.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/custom/filter.py similarity index 100% rename from examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/filter.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/custom/filter.py diff --git a/examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/tf2_model_persistor.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/custom/tf2_model_persistor.py similarity index 100% rename from examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/tf2_model_persistor.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/custom/tf2_model_persistor.py diff --git a/examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/tf2_net.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/custom/tf2_net.py similarity index 100% rename from examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/tf2_net.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/custom/tf2_net.py diff --git a/examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/trainer.py b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/custom/trainer.py similarity index 100% rename from examples/hello-world/hello-tf2/jobs/hello-tf2/app/custom/trainer.py rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/app/custom/trainer.py diff --git a/examples/hello-world/hello-tf2/jobs/hello-tf2/meta.json b/tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/meta.json similarity index 100% rename from examples/hello-world/hello-tf2/jobs/hello-tf2/meta.json rename to tests/integration_test/data/test_configs/standalone_job/previous_jobs/hello-tf2/meta.json diff --git a/tests/integration_test/test_configs.yml 
b/tests/integration_test/test_configs.yml index f12b2019c3..537175a319 100644 --- a/tests/integration_test/test_configs.yml +++ b/tests/integration_test/test_configs.yml @@ -17,6 +17,7 @@ test_configs: - ./data/test_configs/ha/fladminapi.yml numpy: - ./data/test_configs/standalone_job/hello_numpy_examples.yml + - ./data/test_configs/standalone_job/hello_numpy_previous_examples.yml - ./data/test_configs/standalone_job/internal_numpy.yml tensorflow: - ./data/test_configs/standalone_job/hello_tf_examples.yml From a5955bf454857593bcd24b89814c31bd84ff211a Mon Sep 17 00:00:00 2001 From: Ziyue Xu Date: Fri, 16 Aug 2024 10:40:35 -0400 Subject: [PATCH 20/26] Update autofedrl example (#2801) * update autofedrl example to make it run correctly * remove redundant import --- .../config/config_fed_server.json | 8 +- .../autofedrl/autofedrl_cifar10_learner.py | 5 +- .../autofedrl/autofedrl_learner_executor.py | 4 + .../src/autofedrl/cifar10_learner.py | 443 ++++++++++++++++++ 4 files changed, 454 insertions(+), 6 deletions(-) create mode 100644 research/auto-fed-rl/src/autofedrl/cifar10_learner.py diff --git a/research/auto-fed-rl/jobs/cifar10_autofedrl/cifar10_autofedrl/config/config_fed_server.json b/research/auto-fed-rl/jobs/cifar10_autofedrl/cifar10_autofedrl/config/config_fed_server.json index 99111bacd3..c851102f55 100644 --- a/research/auto-fed-rl/jobs/cifar10_autofedrl/cifar10_autofedrl/config/config_fed_server.json +++ b/research/auto-fed-rl/jobs/cifar10_autofedrl/cifar10_autofedrl/config/config_fed_server.json @@ -42,7 +42,8 @@ "args": { "lr": 1.0, "momentum": 0.0 - } + }, + "config_type": "dict" } } }, @@ -60,7 +61,8 @@ 0.7, 0.7 ] - } + }, + "config_type": "dict" }, "n_clients": "{min_clients}", "search_lr": true, @@ -117,7 +119,7 @@ "id": "model_locator", "path": "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor", "args": { - "pt_persistor_id": "persistor" + "model": "model" } }, { diff --git a/research/auto-fed-rl/src/autofedrl/autofedrl_cifar10_learner.py b/research/auto-fed-rl/src/autofedrl/autofedrl_cifar10_learner.py index 67c9f1837a..d6cf862a8e 100644 --- a/research/auto-fed-rl/src/autofedrl/autofedrl_cifar10_learner.py +++ b/research/auto-fed-rl/src/autofedrl/autofedrl_cifar10_learner.py @@ -19,7 +19,6 @@ import torch import torch.nn.functional as F import torch.optim as optim -from pt.learners.cifar10_learner import CIFAR10Learner from pt.utils.cifar10_data_utils import CIFAR10_ROOT from pt.utils.cifar10_dataset import CIFAR10_Idx from torchvision import datasets @@ -34,7 +33,7 @@ from nvflare.fuel.utils import fobs from .autofedrl_constants import AutoFedRLConstants - +from .cifar10_learner import CIFAR10Learner class CIFAR10AutoFedRLearner(CIFAR10Learner): # TODO: also support CIFAR10ScaffoldLearner def __init__( @@ -116,7 +115,7 @@ def _create_datasets(self, fl_ctx: FLContext): else: site_idx = None # use whole training dataset if self.central=True - self.log_debug(fl_ctx, msg)(fl_ctx, f"site_idx: {site_idx}") + self.log_debug(fl_ctx, f"site_idx: {site_idx}") # Train set n_img_for_search = self.batch_size * 10 diff --git a/research/auto-fed-rl/src/autofedrl/autofedrl_learner_executor.py b/research/auto-fed-rl/src/autofedrl/autofedrl_learner_executor.py index a193289eda..e33d4704c1 100644 --- a/research/auto-fed-rl/src/autofedrl/autofedrl_learner_executor.py +++ b/research/auto-fed-rl/src/autofedrl/autofedrl_learner_executor.py @@ -53,6 +53,10 @@ def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort """Same as LearnerExecutor.execute() apart 
for additional support for an `validate_for_search_task`.""" self.log_info(fl_ctx, f"Client trainer got task: {task_name}") + if not self.is_initialized: + self.is_initialized = True + self.initialize(fl_ctx) + try: if task_name == self.train_task: return self.train(shareable, fl_ctx, abort_signal) diff --git a/research/auto-fed-rl/src/autofedrl/cifar10_learner.py b/research/auto-fed-rl/src/autofedrl/cifar10_learner.py new file mode 100644 index 0000000000..8b8261723a --- /dev/null +++ b/research/auto-fed-rl/src/autofedrl/cifar10_learner.py @@ -0,0 +1,443 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import os + +import numpy as np +import torch +import torch.optim as optim +from pt.networks.cifar10_nets import ModerateCNN +from pt.utils.cifar10_data_utils import CIFAR10_ROOT +from pt.utils.cifar10_dataset import CIFAR10_Idx +from torch.utils.tensorboard import SummaryWriter +from torchvision import datasets, transforms + +from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable +from nvflare.apis.fl_constant import FLContextKey, ReturnCode +from nvflare.apis.fl_context import FLContext +from nvflare.apis.shareable import ReservedHeaderKey, Shareable, make_reply +from nvflare.apis.signal import Signal +from nvflare.app_common.abstract.learner_spec import Learner +from nvflare.app_common.app_constant import AppConstants, ModelName, ValidateType +from nvflare.app_opt.pt.fedproxloss import PTFedProxLoss + + +class CIFAR10Learner(Learner): # also supports CIFAR10ScaffoldLearner + def __init__( + self, + train_idx_root: str = "./dataset", + aggregation_epochs: int = 1, + lr: float = 1e-2, + fedproxloss_mu: float = 0.0, + central: bool = False, + analytic_sender_id: str = "analytic_sender", + batch_size: int = 64, + num_workers: int = 0, + ): + """Simple CIFAR-10 Trainer. + + Args: + train_idx_root: directory with site training indices for CIFAR-10 data. + aggregation_epochs: the number of training epochs for a round. Defaults to 1. + lr: local learning rate. Float number. Defaults to 1e-2. + fedproxloss_mu: weight for FedProx loss. Float number. Defaults to 0.0 (no FedProx). + central: Bool. Whether to simulate central training. Default False. + analytic_sender_id: id of `AnalyticsSender` if configured as a client component. + If configured, TensorBoard events will be fired. Defaults to "analytic_sender". + batch_size: batch size for training and validation. + num_workers: number of workers for data loaders. + + Returns: + a Shareable with the updated local model after running `execute()` + or the best local model depending on the specified task. 
+ """ + super().__init__() + # trainer init happens at the very beginning, only the basic info regarding the trainer is set here + # the actual run has not started at this point + self.train_idx_root = train_idx_root + self.aggregation_epochs = aggregation_epochs + self.lr = lr + self.fedproxloss_mu = fedproxloss_mu + self.best_acc = 0.0 + self.central = central + self.batch_size = batch_size + self.num_workers = num_workers + + self.writer = None + self.analytic_sender_id = analytic_sender_id + + # Epoch counter + self.epoch_of_start_time = 0 + self.epoch_global = 0 + + # following will be created in initialize() or later + self.app_root = None + self.client_id = None + self.local_model_file = None + self.best_local_model_file = None + self.writer = None + self.device = None + self.model = None + self.optimizer = None + self.criterion = None + self.criterion_prox = None + self.transform_train = None + self.transform_valid = None + self.train_dataset = None + self.valid_dataset = None + self.train_loader = None + self.valid_loader = None + + def initialize(self, parts: dict, fl_ctx: FLContext): + """ + Note: this code assumes a FL simulation setting + Datasets will be initialized in train() and validate() when calling self._create_datasets() + as we need to make sure that the server has already downloaded and split the data. + """ + + # when the run starts, this is where the actual settings get initialized for trainer + + # Set the paths according to fl_ctx + self.app_root = fl_ctx.get_prop(FLContextKey.APP_ROOT) + fl_args = fl_ctx.get_prop(FLContextKey.ARGS) + self.client_id = fl_ctx.get_identity_name() + self.log_info( + fl_ctx, + f"Client {self.client_id} initialized at \n {self.app_root} \n with args: {fl_args}", + ) + + self.local_model_file = os.path.join(self.app_root, "local_model.pt") + self.best_local_model_file = os.path.join(self.app_root, "best_local_model.pt") + + # Select local TensorBoard writer or event-based writer for streaming + self.writer = parts.get(self.analytic_sender_id) # user configured config_fed_client.json for streaming + if not self.writer: # use local TensorBoard writer only + self.writer = SummaryWriter(self.app_root) + + # set the training-related parameters + # can be replaced by a config-style block + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.model = ModerateCNN().to(self.device) + self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9) + self.criterion = torch.nn.CrossEntropyLoss() + if self.fedproxloss_mu > 0: + self.log_info(fl_ctx, f"using FedProx loss with mu {self.fedproxloss_mu}") + self.criterion_prox = PTFedProxLoss(mu=self.fedproxloss_mu) + self.transform_train = transforms.Compose( + [ + transforms.ToTensor(), + transforms.ToPILImage(), + transforms.Pad(4, padding_mode="reflect"), + transforms.RandomCrop(32), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize( + mean=[x / 255.0 for x in [125.3, 123.0, 113.9]], + std=[x / 255.0 for x in [63.0, 62.1, 66.7]], + ), + ] + ) + self.transform_valid = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Normalize( + mean=[x / 255.0 for x in [125.3, 123.0, 113.9]], + std=[x / 255.0 for x in [63.0, 62.1, 66.7]], + ), + ] + ) + + def _create_datasets(self, fl_ctx: FLContext): + """To be called only after Cifar10DataSplitter downloaded the data and computed splits""" + + if self.train_dataset is None or self.train_loader is None: + if not self.central: + # Set datalist, here the path and filename 
are hard-coded, can also be fed as an argument + site_idx_file_name = os.path.join(self.train_idx_root, self.client_id + ".npy") + self.log_info(fl_ctx, f"IndexList Path: {site_idx_file_name}") + if os.path.exists(site_idx_file_name): + self.log_info(fl_ctx, "Loading subset index") + site_idx = np.load(site_idx_file_name).tolist() # TODO: get from fl_ctx/shareable? + else: + self.system_panic(f"No subset index found! File {site_idx_file_name} does not exist!", fl_ctx) + return + self.log_info(fl_ctx, f"Client subset size: {len(site_idx)}") + else: + site_idx = None # use whole training dataset if self.central=True + + self.train_dataset = CIFAR10_Idx( + root=CIFAR10_ROOT, + data_idx=site_idx, + train=True, + download=False, + transform=self.transform_train, + ) + self.train_loader = torch.utils.data.DataLoader( + self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers + ) + + if self.valid_dataset is None or self.valid_loader is None: + self.valid_dataset = datasets.CIFAR10( + root=CIFAR10_ROOT, + train=False, + download=False, + transform=self.transform_valid, + ) + self.valid_loader = torch.utils.data.DataLoader( + self.valid_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers + ) + + def finalize(self, fl_ctx: FLContext): + # collect threads, close files here + pass + + def local_train(self, fl_ctx, train_loader, model_global, abort_signal: Signal, val_freq: int = 0): + for epoch in range(self.aggregation_epochs): + if abort_signal.triggered: + return + self.model.train() + epoch_len = len(train_loader) + self.epoch_global = self.epoch_of_start_time + epoch + self.log_info(fl_ctx, f"Local epoch {self.client_id}: {epoch + 1}/{self.aggregation_epochs} (lr={self.lr})") + avg_loss = 0.0 + for i, (inputs, labels) in enumerate(train_loader): + if abort_signal.triggered: + return + inputs, labels = inputs.to(self.device), labels.to(self.device) + # zero the parameter gradients + self.optimizer.zero_grad() + # forward + backward + optimize + outputs = self.model(inputs) + loss = self.criterion(outputs, labels) + + # FedProx loss term + if self.fedproxloss_mu > 0: + fed_prox_loss = self.criterion_prox(self.model, model_global) + loss += fed_prox_loss + + loss.backward() + self.optimizer.step() + current_step = epoch_len * self.epoch_global + i + avg_loss += loss.item() + self.writer.add_scalar("train_loss", avg_loss / len(train_loader), current_step) + if val_freq > 0 and epoch % val_freq == 0: + acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_local_model", fl_ctx=fl_ctx) + if acc > self.best_acc: + self.best_acc = acc + self.save_model(is_best=True) + + def save_model(self, is_best=False): + # save model + model_weights = self.model.state_dict() + save_dict = {"model_weights": model_weights, "epoch": self.epoch_global} + if is_best: + save_dict.update({"best_acc": self.best_acc}) + torch.save(save_dict, self.best_local_model_file) + else: + torch.save(save_dict, self.local_model_file) + + def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable: + self._create_datasets(fl_ctx) + + # Check abort signal + if abort_signal.triggered: + return make_reply(ReturnCode.TASK_ABORTED) + + # get round information + current_round = shareable.get_header(AppConstants.CURRENT_ROUND) + total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS) + self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}") + self.log_info(fl_ctx, f"Client identity: 
{fl_ctx.get_identity_name()}") + + # update local model weights with received weights + dxo = from_shareable(shareable) + global_weights = dxo.data + + # Before loading weights, tensors might need to be reshaped to support HE for secure aggregation. + local_var_dict = self.model.state_dict() + model_keys = global_weights.keys() + for var_name in local_var_dict: + if var_name in model_keys: + weights = global_weights[var_name] + try: + # reshape global weights to compute difference later on + global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape) + # update the local dict + local_var_dict[var_name] = torch.as_tensor(global_weights[var_name]) + except Exception as e: + raise ValueError(f"Convert weight from {var_name} failed") from e + self.model.load_state_dict(local_var_dict) + + # local steps + epoch_len = len(self.train_loader) + self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}") + + # make a copy of model_global as reference for potential FedProx loss or SCAFFOLD + model_global = copy.deepcopy(self.model) + for param in model_global.parameters(): + param.requires_grad = False + + # local train + self.local_train( + fl_ctx=fl_ctx, + train_loader=self.train_loader, + model_global=model_global, + abort_signal=abort_signal, + val_freq=1 if self.central else 0, + ) + if abort_signal.triggered: + return make_reply(ReturnCode.TASK_ABORTED) + self.epoch_of_start_time += self.aggregation_epochs + + # perform valid after local train + acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_local_model", fl_ctx=fl_ctx) + if abort_signal.triggered: + return make_reply(ReturnCode.TASK_ABORTED) + self.log_info(fl_ctx, f"val_acc_local_model: {acc:.4f}") + + # save model + self.save_model(is_best=False) + if acc > self.best_acc: + self.best_acc = acc + self.save_model(is_best=True) + + # compute delta model, global model has the primary key set + local_weights = self.model.state_dict() + model_diff = {} + for name in global_weights: + if name not in local_weights: + continue + model_diff[name] = np.subtract(local_weights[name].cpu().numpy(), global_weights[name], dtype=np.float32) + if np.any(np.isnan(model_diff[name])): + self.system_panic(f"{name} weights became NaN...", fl_ctx) + return make_reply(ReturnCode.EXECUTION_EXCEPTION) + + # build the shareable + dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff) + dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len) + + self.log_info(fl_ctx, "Local epochs finished. Returning shareable") + return dxo.to_shareable() + + def get_model_for_validation(self, model_name: str, fl_ctx: FLContext) -> Shareable: + # Retrieve the best local model saved during training. + if model_name == ModelName.BEST_MODEL: + model_data = None + try: + # load model to cpu as server might or might not have a GPU + model_data = torch.load(self.best_local_model_file, map_location="cpu") + except Exception as e: + raise ValueError("Unable to load best model") from e + + # Create DXO and shareable from model data. + if model_data: + # convert weights to numpy to support FOBS + model_weights = model_data["model_weights"] + for k, v in model_weights.items(): + model_weights[k] = v.numpy() + dxo = DXO(data_kind=DataKind.WEIGHTS, data=model_weights) + return dxo.to_shareable() + else: + # Set return code. 
+ self.log_error(fl_ctx, f"best local model not found at {self.best_local_model_file}.") + return make_reply(ReturnCode.EXECUTION_RESULT_ERROR) + else: + raise ValueError(f"Unknown model_type: {model_name}") # Raised errors are caught in LearnerExecutor class. + + def local_valid(self, valid_loader, abort_signal: Signal, tb_id=None, fl_ctx=None): + self.model.eval() + with torch.no_grad(): + correct, total = 0, 0 + for _i, (inputs, labels) in enumerate(valid_loader): + if abort_signal.triggered: + return None + inputs, labels = inputs.to(self.device), labels.to(self.device) + outputs = self.model(inputs) + _, pred_label = torch.max(outputs.data, 1) + + total += inputs.data.size()[0] + correct += (pred_label == labels.data).sum().item() + metric = correct / float(total) + if tb_id: + self.writer.add_scalar(tb_id, metric, self.epoch_global) + return metric + + def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable: + self._create_datasets(fl_ctx) + + # Check abort signal + if abort_signal.triggered: + return make_reply(ReturnCode.TASK_ABORTED) + + # get validation information + self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}") + model_owner = shareable.get(ReservedHeaderKey.HEADERS).get(AppConstants.MODEL_OWNER) + if model_owner: + self.log_info(fl_ctx, f"Evaluating model from {model_owner} on {fl_ctx.get_identity_name()}") + else: + model_owner = "global_model" # evaluating global model during training + + # update local model weights with received weights + dxo = from_shareable(shareable) + global_weights = dxo.data + + # Before loading weights, tensors might need to be reshaped to support HE for secure aggregation. + local_var_dict = self.model.state_dict() + model_keys = global_weights.keys() + n_loaded = 0 + for var_name in local_var_dict: + if var_name in model_keys: + weights = torch.as_tensor(global_weights[var_name], device=self.device) + try: + # update the local dict + local_var_dict[var_name] = torch.as_tensor(torch.reshape(weights, local_var_dict[var_name].shape)) + n_loaded += 1 + except Exception as e: + raise ValueError(f"Convert weight from {var_name} failed") from e + self.model.load_state_dict(local_var_dict) + if n_loaded == 0: + raise ValueError(f"No weights loaded for validation! Received weight dict is {global_weights}") + + validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE) + if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE: + # perform valid before local train + global_acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_global_model", fl_ctx=fl_ctx) + if abort_signal.triggered: + return make_reply(ReturnCode.TASK_ABORTED) + self.log_info(fl_ctx, f"val_acc_global_model ({model_owner}): {global_acc}") + + return DXO(data_kind=DataKind.METRICS, data={MetaKey.INITIAL_METRICS: global_acc}, meta={}).to_shareable() + + elif validate_type == ValidateType.MODEL_VALIDATE: + # perform valid + train_acc = self.local_valid(self.train_loader, abort_signal) + if abort_signal.triggered: + return make_reply(ReturnCode.TASK_ABORTED) + self.log_info(fl_ctx, f"training acc ({model_owner}): {train_acc}") + + val_acc = self.local_valid(self.valid_loader, abort_signal) + if abort_signal.triggered: + return make_reply(ReturnCode.TASK_ABORTED) + self.log_info(fl_ctx, f"validation acc ({model_owner}): {val_acc}") + + self.log_info(fl_ctx, "Evaluation finished. 
Returning shareable") + + val_results = {"train_accuracy": train_acc, "val_accuracy": val_acc} + + metric_dxo = DXO(data_kind=DataKind.METRICS, data=val_results) + return metric_dxo.to_shareable() + + else: + return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN) From 5c2922639da56255e3f4dfaccbb74c1f54edf4cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Fri, 16 Aug 2024 12:55:18 -0700 Subject: [PATCH 21/26] Refactor XGBDataLoader (#2804) --- .../code/vertical_xgb/vertical_data_loader.py | 12 ++--- .../jobs/base/app/custom/higgs_data_loader.py | 10 ++-- .../base_v2/app/custom/higgs_data_loader.py | 10 ++-- .../app/custom/higgs_data_loader.py | 10 ++-- .../app/custom/higgs_data_loader.py | 10 ++-- nvflare/app_opt/xgboost/data_loader.py | 21 ++++++++- .../xgboost/histogram_based/executor.py | 6 ++- .../runners/xgb_client_runner.py | 47 +++++++++++++------ 8 files changed, 84 insertions(+), 42 deletions(-) diff --git a/examples/advanced/vertical_xgboost/code/vertical_xgb/vertical_data_loader.py b/examples/advanced/vertical_xgboost/code/vertical_xgb/vertical_data_loader.py index 246824d819..c3758297c6 100644 --- a/examples/advanced/vertical_xgboost/code/vertical_xgb/vertical_data_loader.py +++ b/examples/advanced/vertical_xgboost/code/vertical_xgb/vertical_data_loader.py @@ -62,9 +62,9 @@ def __init__(self, data_split_path, psi_path, id_col, label_owner, train_proport self.label_owner = label_owner self.train_proportion = train_proportion - def load_data(self, client_id: str, split_mode: int = 1): - client_data_split_path = self.data_split_path.replace("site-x", client_id) - client_psi_path = self.psi_path.replace("site-x", client_id) + def load_data(self): + client_data_split_path = self.data_split_path.replace("site-x", self.client_id) + client_psi_path = self.psi_path.replace("site-x", self.client_id) data_split_dir = os.path.dirname(client_data_split_path) train_path = os.path.join(data_split_dir, "train.csv") @@ -78,13 +78,13 @@ def load_data(self, client_id: str, split_mode: int = 1): train_df.to_csv(path_or_buf=train_path, header=False, index=False) valid_df.to_csv(path_or_buf=valid_path, header=False, index=False) - if client_id == self.label_owner: + if self.client_id == self.label_owner: label = "&label_column=0" else: label = "" # for Vertical XGBoost, read from csv with label_column and set data_split_mode to 1 for column mode - dtrain = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=split_mode) - dvalid = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=split_mode) + dtrain = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=self.data_split_mode) + dvalid = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=self.data_split_mode) return dtrain, dvalid diff --git a/examples/advanced/xgboost/histogram-based/jobs/base/app/custom/higgs_data_loader.py b/examples/advanced/xgboost/histogram-based/jobs/base/app/custom/higgs_data_loader.py index 8c5a8da612..124268cfce 100644 --- a/examples/advanced/xgboost/histogram-based/jobs/base/app/custom/higgs_data_loader.py +++ b/examples/advanced/xgboost/histogram-based/jobs/base/app/custom/higgs_data_loader.py @@ -41,7 +41,7 @@ def __init__(self, data_split_filename): """ self.data_split_filename = data_split_filename - def load_data(self, client_id: str): + def load_data(self): with open(self.data_split_filename, "r") as file: data_split = json.load(file) @@ -49,9 +49,9 @@ def load_data(self, client_id: str): data_index = 
data_split["data_index"] # check if site_id and "valid" in the mapping dict - if client_id not in data_index.keys(): + if self.client_id not in data_index.keys(): raise ValueError( - f"Data does not contain Client {client_id} split", + f"Data does not contain Client {self.client_id} split", ) if "valid" not in data_index.keys(): @@ -59,7 +59,7 @@ def load_data(self, client_id: str): "Data does not contain Validation split", ) - site_index = data_index[client_id] + site_index = data_index[self.client_id] valid_index = data_index["valid"] # training @@ -72,6 +72,6 @@ def load_data(self, client_id: str): x_valid, y_valid, total_valid_data_num = _read_higgs_with_pandas( data_path=data_path, start=valid_index["start"], end=valid_index["end"] ) - dmat_valid = xgb.DMatrix(x_valid, label=y_valid) + dmat_valid = xgb.DMatrix(x_valid, label=y_valid, data_split_mode=self.data_split_mode) return dmat_train, dmat_valid diff --git a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/custom/higgs_data_loader.py b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/custom/higgs_data_loader.py index 3edb2d7408..6623e35fa3 100644 --- a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/custom/higgs_data_loader.py +++ b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/custom/higgs_data_loader.py @@ -41,7 +41,7 @@ def __init__(self, data_split_filename): """ self.data_split_filename = data_split_filename - def load_data(self, client_id: str, split_mode: int): + def load_data(self): with open(self.data_split_filename, "r") as file: data_split = json.load(file) @@ -49,9 +49,9 @@ def load_data(self, client_id: str, split_mode: int): data_index = data_split["data_index"] # check if site_id and "valid" in the mapping dict - if client_id not in data_index.keys(): + if self.client_id not in data_index.keys(): raise ValueError( - f"Data does not contain Client {client_id} split", + f"Data does not contain Client {self.client_id} split", ) if "valid" not in data_index.keys(): @@ -59,7 +59,7 @@ def load_data(self, client_id: str, split_mode: int): "Data does not contain Validation split", ) - site_index = data_index[client_id] + site_index = data_index[self.client_id] valid_index = data_index["valid"] # training @@ -72,6 +72,6 @@ def load_data(self, client_id: str, split_mode: int): x_valid, y_valid, total_valid_data_num = _read_higgs_with_pandas( data_path=data_path, start=valid_index["start"], end=valid_index["end"] ) - dmat_valid = xgb.DMatrix(x_valid, label=y_valid) + dmat_valid = xgb.DMatrix(x_valid, label=y_valid, data_split_mode=self.data_split_mode) return dmat_train, dmat_valid diff --git a/examples/advanced/xgboost/tree-based/jobs/bagging_base/app/custom/higgs_data_loader.py b/examples/advanced/xgboost/tree-based/jobs/bagging_base/app/custom/higgs_data_loader.py index 8c5a8da612..124268cfce 100644 --- a/examples/advanced/xgboost/tree-based/jobs/bagging_base/app/custom/higgs_data_loader.py +++ b/examples/advanced/xgboost/tree-based/jobs/bagging_base/app/custom/higgs_data_loader.py @@ -41,7 +41,7 @@ def __init__(self, data_split_filename): """ self.data_split_filename = data_split_filename - def load_data(self, client_id: str): + def load_data(self): with open(self.data_split_filename, "r") as file: data_split = json.load(file) @@ -49,9 +49,9 @@ def load_data(self, client_id: str): data_index = data_split["data_index"] # check if site_id and "valid" in the mapping dict - if client_id not in data_index.keys(): + if self.client_id not in data_index.keys(): raise ValueError( - 
f"Data does not contain Client {client_id} split", + f"Data does not contain Client {self.client_id} split", ) if "valid" not in data_index.keys(): @@ -59,7 +59,7 @@ def load_data(self, client_id: str): "Data does not contain Validation split", ) - site_index = data_index[client_id] + site_index = data_index[self.client_id] valid_index = data_index["valid"] # training @@ -72,6 +72,6 @@ def load_data(self, client_id: str): x_valid, y_valid, total_valid_data_num = _read_higgs_with_pandas( data_path=data_path, start=valid_index["start"], end=valid_index["end"] ) - dmat_valid = xgb.DMatrix(x_valid, label=y_valid) + dmat_valid = xgb.DMatrix(x_valid, label=y_valid, data_split_mode=self.data_split_mode) return dmat_train, dmat_valid diff --git a/examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/custom/higgs_data_loader.py b/examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/custom/higgs_data_loader.py index 8c5a8da612..124268cfce 100644 --- a/examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/custom/higgs_data_loader.py +++ b/examples/advanced/xgboost/tree-based/jobs/cyclic_base/app/custom/higgs_data_loader.py @@ -41,7 +41,7 @@ def __init__(self, data_split_filename): """ self.data_split_filename = data_split_filename - def load_data(self, client_id: str): + def load_data(self): with open(self.data_split_filename, "r") as file: data_split = json.load(file) @@ -49,9 +49,9 @@ def load_data(self, client_id: str): data_index = data_split["data_index"] # check if site_id and "valid" in the mapping dict - if client_id not in data_index.keys(): + if self.client_id not in data_index.keys(): raise ValueError( - f"Data does not contain Client {client_id} split", + f"Data does not contain Client {self.client_id} split", ) if "valid" not in data_index.keys(): @@ -59,7 +59,7 @@ def load_data(self, client_id: str): "Data does not contain Validation split", ) - site_index = data_index[client_id] + site_index = data_index[self.client_id] valid_index = data_index["valid"] # training @@ -72,6 +72,6 @@ def load_data(self, client_id: str): x_valid, y_valid, total_valid_data_num = _read_higgs_with_pandas( data_path=data_path, start=valid_index["start"], end=valid_index["end"] ) - dmat_valid = xgb.DMatrix(x_valid, label=y_valid) + dmat_valid = xgb.DMatrix(x_valid, label=y_valid, data_split_mode=self.data_split_mode) return dmat_train, dmat_valid diff --git a/nvflare/app_opt/xgboost/data_loader.py b/nvflare/app_opt/xgboost/data_loader.py index f49d6dc796..ba197c9fff 100644 --- a/nvflare/app_opt/xgboost/data_loader.py +++ b/nvflare/app_opt/xgboost/data_loader.py @@ -20,8 +20,27 @@ class XGBDataLoader(ABC): + def initialize( + self, client_id: str, rank: int, data_split_mode: xgb.core.DataSplitMode = xgb.core.DataSplitMode.ROW + ): + self._client_id = client_id + self._rank = rank + self._data_split_mode = data_split_mode + + @property + def client_id(self): + return self._client_id + + @property + def rank(self): + return self._rank + + @property + def data_split_mode(self): + return self._data_split_mode + @abstractmethod - def load_data(self, client_id: str, split_mode: int) -> Tuple[xgb.DMatrix, xgb.DMatrix]: + def load_data(self) -> Tuple[xgb.DMatrix, xgb.DMatrix]: """Loads data for xgboost. 
Returns: diff --git a/nvflare/app_opt/xgboost/histogram_based/executor.py b/nvflare/app_opt/xgboost/histogram_based/executor.py index 8336d31aba..20888c676e 100644 --- a/nvflare/app_opt/xgboost/histogram_based/executor.py +++ b/nvflare/app_opt/xgboost/histogram_based/executor.py @@ -274,10 +274,14 @@ def train(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) - communicator_env["federated_client_cert_path"] = self._client_cert_path try: + self._data_loader.initialize( + client_id=self.client_id, + rank=self.rank, + ) with xgb.collective.CommunicatorContext(**communicator_env): # Load the data. Dmatrix must be created with column split mode in CommunicatorContext for vertical FL if not self.train_data or not self.val_data: - self.train_data, self.val_data = self.data_loader.load_data(self.client_id) + self.train_data, self.val_data = self.data_loader.load_data() bst = self.xgb_train(params) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py index b9490467c5..d66a116902 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py @@ -36,11 +36,26 @@ MODEL_FILE_NAME = "model.json" +def _check_ctx(ctx: dict): + required_ctx_keys = [ + Constant.RUNNER_CTX_CLIENT_NAME, + Constant.RUNNER_CTX_RANK, + Constant.RUNNER_CTX_WORLD_SIZE, + Constant.RUNNER_CTX_NUM_ROUNDS, + Constant.RUNNER_CTX_XGB_PARAMS, + Constant.RUNNER_CTX_SERVER_ADDR, + Constant.RUNNER_CTX_MODEL_DIR, + ] + for k in required_ctx_keys: + if k not in ctx: + raise RuntimeError(f"Missing {k} in context.") + + class XGBClientRunner(AppRunner, FLComponent): def __init__( self, data_loader_id: str, - model_file_name, + model_file_name: str, metrics_writer_id: str = None, ): FLComponent.__init__(self) @@ -53,7 +68,7 @@ def __init__( self._rank = None self._world_size = None self._num_rounds = None - self._split_mode = None + self._data_split_mode = None self._secure_training = None self._xgb_params = None self._xgb_options = None @@ -124,16 +139,17 @@ def _xgb_train(self, num_rounds, xgb_params: dict, xgb_options: dict, train_data return bst def run(self, ctx: dict): - self._client_name = ctx.get(Constant.RUNNER_CTX_CLIENT_NAME) - self._rank = ctx.get(Constant.RUNNER_CTX_RANK) - self._world_size = ctx.get(Constant.RUNNER_CTX_WORLD_SIZE) - self._num_rounds = ctx.get(Constant.RUNNER_CTX_NUM_ROUNDS) - self._split_mode = ctx.get(Constant.RUNNER_CTX_SPLIT_MODE) - self._secure_training = ctx.get(Constant.RUNNER_CTX_SECURE_TRAINING) - self._xgb_params = ctx.get(Constant.RUNNER_CTX_XGB_PARAMS) - self._xgb_options = ctx.get(Constant.RUNNER_CTX_XGB_OPTIONS) - self._server_addr = ctx.get(Constant.RUNNER_CTX_SERVER_ADDR) - self._model_dir = ctx.get(Constant.RUNNER_CTX_MODEL_DIR) + _check_ctx(ctx) + self._client_name = ctx[Constant.RUNNER_CTX_CLIENT_NAME] + self._rank = ctx[Constant.RUNNER_CTX_RANK] + self._world_size = ctx[Constant.RUNNER_CTX_WORLD_SIZE] + self._num_rounds = ctx[Constant.RUNNER_CTX_NUM_ROUNDS] + self._data_split_mode = ctx.get(Constant.RUNNER_CTX_SPLIT_MODE, 0) + self._secure_training = ctx.get(Constant.RUNNER_CTX_SECURE_TRAINING, False) + self._xgb_params = ctx[Constant.RUNNER_CTX_XGB_PARAMS] + self._xgb_options = ctx.get(Constant.RUNNER_CTX_XGB_OPTIONS, {}) + self._server_addr = ctx[Constant.RUNNER_CTX_SERVER_ADDR] + self._model_dir = ctx[Constant.RUNNER_CTX_MODEL_DIR] use_gpus = 
self._xgb_options.get("use_gpus", False) if use_gpus: @@ -142,7 +158,7 @@ def run(self, ctx: dict): self._xgb_params["device"] = f"cuda:{self._rank}" self.logger.info( - f"XGB split_mode: {self._split_mode} secure_training: {self._secure_training} " + f"XGB data_split_mode: {self._data_split_mode} secure_training: {self._secure_training} " f"params: {self._xgb_params} XGB options: {self._xgb_options}" ) @@ -187,9 +203,12 @@ def run(self, ctx: dict): communicator_env[PLUGIN_PARAM_KEY] = xgb_plugin_params + self._data_loader.initialize( + client_id=self._client_name, rank=self._rank, data_split_mode=self._data_split_mode + ) with xgb.collective.CommunicatorContext(**communicator_env): # Load the data. Dmatrix must be created with column split mode in CommunicatorContext for vertical FL - train_data, val_data = self._data_loader.load_data(self._client_name, self._split_mode) + train_data, val_data = self._data_loader.load_data() bst = self._xgb_train(self._num_rounds, self._xgb_params, self._xgb_options, train_data, val_data) From 800eb77fa4ef200aec7b388510caf340275f51c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Fri, 16 Aug 2024 15:56:39 -0700 Subject: [PATCH 22/26] Fix docstring typo (#2802) Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> --- nvflare/app_opt/he/homomorphic_encrypt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nvflare/app_opt/he/homomorphic_encrypt.py b/nvflare/app_opt/he/homomorphic_encrypt.py index 0b1c830d0c..3e1a7255fe 100644 --- a/nvflare/app_opt/he/homomorphic_encrypt.py +++ b/nvflare/app_opt/he/homomorphic_encrypt.py @@ -23,7 +23,7 @@ def load_tenseal_context_from_workspace(ctx_file_name: str, fl_ctx: FLContext): """Loads homomorphic encryption (HE) context from TenSEAL (https://github.com/OpenMined/TenSEAL) containing encryption keys and parameters. 
Args: - ctx_file_name: filepath of TensSEAL context file + ctx_file_name: filepath of TenSEAL context file fl_ctx: FL context Returns: From 7e22c75e6218d15cfd4fa86f3d41b2953662f84f Mon Sep 17 00:00:00 2001 From: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Date: Fri, 16 Aug 2024 19:02:26 -0700 Subject: [PATCH 23/26] re-arrange getting started examples (#2805) * re-arrange getting started examples * re-arrange getting started examples * fix README.md --------- Co-authored-by: Sean Yang --- .../controller_executor_no_filter.png | Bin 0 -> 24290 bytes examples/advanced/job_api/README.md | 29 ++ examples/advanced/job_api/pt/README.md | 53 +++ .../pt/cyclic_cc_script_executor_cifar10.py | 0 .../fedavg_model_learner_xsite_val_cifar10.py | 0 .../pt/fedavg_script_executor_cifar10.py | 3 + ...edavg_script_executor_dp_filter_cifar10.py | 0 ...edavg_script_executor_lightning_cifar10.py | 0 examples/advanced/job_api/pt/figs/tb_loss.png | Bin 0 -> 23302 bytes examples/advanced/job_api/pt/requirements.txt | 5 + .../advanced/job_api/pt/src/cifar10_fl.py | 137 ++++++ .../job_api/pt/src/cifar10_lightning_fl.py | 108 +++++ examples/advanced/job_api/pt/src/lit_net.py | 93 ++++ .../pt/src/net.py} | 5 +- .../job_api/pt/src/train_eval_submit.py | 188 ++++++++ .../pt/swarm_script_executor_cifar10.py | 0 examples/advanced/job_api/sklearn/README.md | 25 + .../sklearn/kmeans_script_executor_higgs.py | 147 ++++++ .../advanced/job_api/sklearn/requirements.txt | 4 + .../job_api/sklearn/src/kmeans_assembler.py | 75 +++ .../advanced/job_api/sklearn/src/kmeans_fl.py | 182 +++++++ .../advanced/job_api/sklearn/src/split_csv.py | 86 ++++ examples/advanced/job_api/tf/README.md | 213 +++++++++ .../tf/figs/fedavg-diff-algos-new.png | Bin .../job_api}/tf/figs/fedavg-diff-algos.png | Bin .../job_api/tf/figs/fedavg-diff-alphas.png | Bin 0 -> 81019 bytes .../job_api/tf/figs/fedavg-vs-centralized.png | Bin 0 -> 158790 bytes .../tf/nvflare_tf_getting_started.ipynb | 446 ++++++++++++++++++ examples/advanced/job_api/tf/requirements.txt | 2 + examples/advanced/job_api/tf/run_jobs.sh | 87 ++++ .../job_api/tf/src/cifar10_data_split.py | 125 +++++ .../advanced/job_api/tf/src/cifar10_tf_fl.py | 79 ++++ .../tf/src/cifar10_tf_fl_alpha_split.py | 219 +++++++++ .../src/cifar10_tf_fl_alpha_split_scaffold.py | 294 ++++++++++++ examples/advanced/job_api/tf/src/tf_net.py | 65 +++ .../tf/tf_fl_script_executor_cifar10.py | 164 +++++++ .../hello-pt/add_shareable_parameter.py | 24 - .../job_config/hello-pt/cifar10trainer.py | 200 -------- .../job_config/hello-pt/cifar10validator.py | 112 ----- .../job_config/hello-pt/hello_pt_job.py | 120 ----- .../hello-pt/print_shareable_parameter.py | 33 -- .../job_config/hello-pt/pt_constants.py | 21 - .../job_config/hello-pt/pt_model_locator.py | 66 --- examples/getting_started/README.md | 7 +- .../pt/fedavg_script_executor_cifar10_all.py | 43 -- .../nvflare_lightning_getting_started.ipynb | 6 +- .../pt/nvflare_pt_getting_started.ipynb | 355 +++++++++++++- examples/getting_started/tf/README.md | 75 +-- .../tf/nvflare_tf_getting_started.ipynb | 6 +- 49 files changed, 3185 insertions(+), 717 deletions(-) create mode 100644 docs/resources/controller_executor_no_filter.png create mode 100644 examples/advanced/job_api/README.md create mode 100644 examples/advanced/job_api/pt/README.md rename examples/{getting_started => advanced/job_api}/pt/cyclic_cc_script_executor_cifar10.py (100%) rename examples/{getting_started => advanced/job_api}/pt/fedavg_model_learner_xsite_val_cifar10.py (100%) rename 
examples/{getting_started => advanced/job_api}/pt/fedavg_script_executor_cifar10.py (93%) rename examples/{getting_started => advanced/job_api}/pt/fedavg_script_executor_dp_filter_cifar10.py (100%) rename examples/{getting_started => advanced/job_api}/pt/fedavg_script_executor_lightning_cifar10.py (100%) create mode 100644 examples/advanced/job_api/pt/figs/tb_loss.png create mode 100644 examples/advanced/job_api/pt/requirements.txt create mode 100644 examples/advanced/job_api/pt/src/cifar10_fl.py create mode 100644 examples/advanced/job_api/pt/src/cifar10_lightning_fl.py create mode 100644 examples/advanced/job_api/pt/src/lit_net.py rename examples/advanced/{job_config/hello-pt/simple_network.py => job_api/pt/src/net.py} (94%) create mode 100644 examples/advanced/job_api/pt/src/train_eval_submit.py rename examples/{getting_started => advanced/job_api}/pt/swarm_script_executor_cifar10.py (100%) create mode 100644 examples/advanced/job_api/sklearn/README.md create mode 100644 examples/advanced/job_api/sklearn/kmeans_script_executor_higgs.py create mode 100644 examples/advanced/job_api/sklearn/requirements.txt create mode 100644 examples/advanced/job_api/sklearn/src/kmeans_assembler.py create mode 100644 examples/advanced/job_api/sklearn/src/kmeans_fl.py create mode 100644 examples/advanced/job_api/sklearn/src/split_csv.py create mode 100644 examples/advanced/job_api/tf/README.md rename examples/{getting_started => advanced/job_api}/tf/figs/fedavg-diff-algos-new.png (100%) rename examples/{getting_started => advanced/job_api}/tf/figs/fedavg-diff-algos.png (100%) create mode 100755 examples/advanced/job_api/tf/figs/fedavg-diff-alphas.png create mode 100755 examples/advanced/job_api/tf/figs/fedavg-vs-centralized.png create mode 100644 examples/advanced/job_api/tf/nvflare_tf_getting_started.ipynb create mode 100644 examples/advanced/job_api/tf/requirements.txt create mode 100755 examples/advanced/job_api/tf/run_jobs.sh create mode 100644 examples/advanced/job_api/tf/src/cifar10_data_split.py create mode 100644 examples/advanced/job_api/tf/src/cifar10_tf_fl.py create mode 100644 examples/advanced/job_api/tf/src/cifar10_tf_fl_alpha_split.py create mode 100644 examples/advanced/job_api/tf/src/cifar10_tf_fl_alpha_split_scaffold.py create mode 100644 examples/advanced/job_api/tf/src/tf_net.py create mode 100644 examples/advanced/job_api/tf/tf_fl_script_executor_cifar10.py delete mode 100644 examples/advanced/job_config/hello-pt/add_shareable_parameter.py delete mode 100644 examples/advanced/job_config/hello-pt/cifar10trainer.py delete mode 100644 examples/advanced/job_config/hello-pt/cifar10validator.py delete mode 100644 examples/advanced/job_config/hello-pt/hello_pt_job.py delete mode 100644 examples/advanced/job_config/hello-pt/print_shareable_parameter.py delete mode 100644 examples/advanced/job_config/hello-pt/pt_constants.py delete mode 100644 examples/advanced/job_config/hello-pt/pt_model_locator.py delete mode 100644 examples/getting_started/pt/fedavg_script_executor_cifar10_all.py diff --git a/docs/resources/controller_executor_no_filter.png b/docs/resources/controller_executor_no_filter.png new file mode 100644 index 0000000000000000000000000000000000000000..ff4f17c15ade81584c2880315551ac72d9dcf6a7 GIT binary patch literal 24290 zcmeEuc{J4T`!}L2g{(!E@{uk2RF)!JM96L|GnTTOv1JVj71~hQO4)Z~>@#C0iXyui z%ve%n8G|rnf9`4de7@&7&pFROzki%Q*mzOL8ny6#6t2HHoM`Iu>F zXpZPy(=?%>InYc)LwACa4m`oA8l3?@44f==ob~l-B*E{DH1xE5Gz{Q3TJTqemj9pM 
zwP?j@4*Yx$zDmdW`*-SFzy4eUKdAru`P1v8NG%PG8jX&oy4h`7^4OtzF5Fn#tr9z%k`aivrYT@5zXlUu#)qZ_Mu+!1cvRj&6{r6coO;^vqt`Wsz11K_YU&A_ZFJw-=pz)#VBl4M{}VU z4a1qOj<}@TU`B_)J0)ME4uutdO2m3GjgS`;(hRLjPx3)obf3ru*`p3@Zs^O*S z1c)6#Y!ol`UVBJczuq_n7GZDHG`eQO0|9;3e zgv{IY<2|-%mmD}Al<%JJKVh*MveaM^;8oCa&Y^8~;&5o_`iM&=YtDJFwVt^}EQRfF zr=g0KmBfSZ-zRX@|saz>631eaJ70mTv!bE(&pmf)~1!+DGm6uoINVr3lgSz*I&z%qxd~9&b;y6iMgJxvb#Bx zzBJL)6lO8Z(FG?>92Rh5F=!Dq2nE|W;}P9+uO=2=nLD2&4h^pQ_%YYX-X{2(Ao zl~+Gq@E5D6;9+FmNBZ?L1~D{rb4P{M6yRs7zSuX1)Am7!z8sMrLb$orY<_zkNc2!Z zXksa7Ac?p47_bv}Lq=#q7s}fk_qIsX;ff|(+ecsC`|-7Qj+7jierM%l-fPd`w&O0Y zg6{(ad_J5ks^8z+F0EUv1@fEGJKIn-ad%tdyI1vORLEjY{=I4l(JeofYgg#bvU*6T z6jg?IBR*j5^e=~P&E?msjcu6Q202M%2duS{Wo%^G8rt{})hwmK2<7GG1O5v@+96{} z6c%NHlI768tGre$K;T zN62f%@a5{<8q3{Z$2Qfdlts|c>GK#FeU;LpeO-;q5DeS-ZW!Fib<>}R!JOQGh*|25 zaXQ0b3=Eghq*v7S@|;F|uVQ3}e-BzH6+0f>SwvYJgNfGOBEt03tK%dkpCFQL6Z0Dq z(T@pytAr>~d5=FB z90^X69LwyB129mbE0$PNUyEjxjWzA^CIA_GL>yGq-Rk|1fIU#13TtSzW(N- zvGi!@#@PCLdAs01@MR*NzS6l7GBLillH6pb&zpfxn_B5SYvGM$T?S{Q8~Y8W=&{l- zVK~2g8*cntqBmk@yjR(`HVeM9hsjCX<3-43C98X2>2OhkS@{;I+%)A7j3DV|^epx< zbTXgJZEmT~Qi{$DyX-d+xPN!{qaeX4F^GCTpFhdIxsF5*=o?lZWG;oO=l8rRm;wtl zsBPrj{ig8!F}ZOldd&bq{xlzcfVJ0V?Q5KBBbc}B^IJnUfrZwF*Zaj`Srf zy9+J^eCZLifnihq&Re#-H|o|Df#D-#C&n9`4$#u~3#)~D4vIfLcJ9m0YEN8maJow9 z8jF&0*xGwuyEB)p2RL+_JKkabE^#c~K=FhO!=%IU$$>+_bQ$tykQE*gNb>VKF&o> z@9yL#+XgQ+Y|Nk077WTU*<(3df5W9}ZK2vPIed7ZAC4w3QeDt zAXYc=i2|2Gjg@|_u+g_i*@8D~sFh9CTEXNhY**gU^nLKbroB8!6^vx8Es#rt3gF66 z-I^6am(rjt(0Ore^A2KRyCge(!$7w&q~4x;68Q^ogiJ)6eKTJFeIjKVSHZmDKcKIW z=y!$f6z7~)iA$w6v_ZJ<-QzO z3WeB8CNz!;*?z~L9a$W2s7n-o8=tA?eftRw-7B76#|L%0a19pu(P?)y&OF5dpZu+i zQV{iq$wmfq#{wuq^|u`48MW__h>XD z<$=-=D?hNX9JJ0EW6IKLG3#U^OvAI4=t+(Y{;N_2m#?~u65B*FM7oc;P_!Xyjz-($ zv@}w2Pqp}=!1=+CDU`-}c4Zxhwb{NDqrT`m^b13*v+~@?o>usJl39R2rBMmfi;10Y z3c*S6_E>>2nWe~|dYhx)JXSm!g^hM%fn?@bBN8ah&Y>TAJfjFmCdOTE%P|8K+6Zet zL<_6T#MTzp<_pe!mDv)_Oj9nhI|dJZnC1;EEXfUr#$OLnGVj+IIt`}Ls~N$=uz5Ny z&2;Eg2AeBFKT=sMJ84VQpDRPo4JCH2W+o)T?6Wl0RaScxo+Q$fsuYMq2p2suC%_*Z^^e2;e}0Q5b-X;&3n3P;JyX}E-C#9f5T-oG6y5d>at&Cn;di$fF@A9)aJdN>gia8pe$1G)3ecW}4xjtUI z*uc+#7}xlkuTT4fP@|I{p;kK#h#~-Vf`-ZEp57qlmHGDHVdRohBYBq8u*aO>?2*|2 z-teuZX8ov!lWST)^2*z+5XJ;r%qhWo=@_0gX2 z2ZwEz)9GbYp8zpe%E750wrlc7s4og^uO(NfRZb>_<=&*3)nK4qoa6Q$j#NuwsGi6R z^6OZS+4ivFVLr;pA>NU`f=T4hnaN1M@0UbU^hp?(PO8eSLPM`Tz%P9A16{c4cYq!U7k^u_IEy&B*P;`}J$W=NPTQZQ)93wsm$-KFVeWxZFNd2s&qrCN zE~^nIsycVYc_!&7cIvg)j}+iFcqfBvwCK-^fwB8)@C@Eu#Hyc95cy2Maw6ju>#f!| z_m7;u>ht$Q>npLK?{9A2HQ+XCuGu`u>Rue^Xgi_7cT&O!$+@Ay;2UmhAK=CK#p0j=GQazjLHdl0l9 z?B(bSV`&K)Ar$fp`CD72ap9;>m0>87TeQPbHDRhgmB-6t)fcf{l$iwy-VYw(n7GPi zi#pT2RbEucCpB%~uj2H4e>Lx7)XUslleRthv?5e;jD*u^343fkTQOXF&A1WKyGTnh z;b;+d>M)0OO#6j5hzW=3$FAk8sT^3#Hif18wJmWDY+m5iD|LV9;PzMx=&~T_n=ey+;S7qQ`zm|t@WOQiv>cn+Cx`D%Zl?SNrRNo9iie>78+N#w1j_kC#2F&?r zr=VAYnu_USpp78Dt`~^OyD+*I82ua2MpfNq`TsyS9$HYx>a4j&`|x(T>k%qgk)a7h z#Kh=zR4s#%Zu4;O)nhd+w%NOZNA)f;Zy#um1R<9_&_2%WZnU|iOj9_5lIRmXKfJcMKr6}bI@7WS)Jo9mGFb2QRv+}>k@R#KB zI??NF-9m--iCdbuN)YZ<76kz(+5)8$`IMPG)6P zd(ZOyJCiMX#hg;TS4(|m((TC&Q8yh9t}jkBEXXkbMsb9F=?s-AgOnjVRk!{e8JpU} zf|q|Z?U9<~$NWqKxl_s;S30DYYG%?D#&0|1rv@mJiYvF)=Dx01Ja+?!qfe`XFQIGouI@O( zV1e&v5Ce>A7v27kc2%mvfY2vcsnG44+dy9E^4mk^!;PUK*gzx-)4%@d)dd@-p94{V z|44|k5yoGVbt&#Gep7V2=-K>O?iGs>C3NwX5K@2Tl}aV^5b4N6HsP@zL->l+v2Qj0 z3Ar;BVm|sKw!9rSmw?knb!fQEKZ8XcLIaObrxa1z5D?8OFdn*LQ`$CKoFOBrq7Q zm<;_dwRj31lW09A$-?U>JtWClH_f@I#*4OL(QV64yVMN10b;nHdyh{tL(F~Df5?XI z@^bU}&NJEVxRIJ}rKS2-RweiJ(Dh5-UOF@a$gD40o8%Rk4$zGfF#!AY0tm@}HZ%Gz z&~{$i+mmo4DgZ1|U4y$LG@NFvVAAxHe`USsjCXvyZa+qE(lV0gxyQF*4`giDsbpD` 
z9$4~w+(UzzEmOnSCqS$MRzS^`p`>wZ9zarF!H+A`riA8qhx+z3ZQ)9*t|^vkUaga%^XnH`bWZTM z7qw`hd~?SJT$$d&XSsy!yC-*ueA0lKSgo9!kMQNmey_AZ#Krol2<-_T>Vkl#0)brCz&JfAH{ny2P(b zkEKdxQPve*b_No(AKPJce%WotCvmTx$g)YH+>X+nl8&$>u*Y?)u#|12pu}gWT$cth z{~$~%3_a9Ru5hZ(#t!1PYlpxqW*_uF74kH7L$k<1Y(?o=aADU?#_t*3z;69xfcNw-A0S@A}FO!)&^_zuqR;r)g10h)d*WLloz}Pi}U+ z<=~*y=Ky}mUq2ogbneQZbTx>Vi*TyI-`x7O{J(VIfAZYA{nX|H<&wgm1L@522a&kH z?zC+gV!y^J;U^fS-vM+W7Pz#(4`N|5of5gqlu$QIvXc=~T&U|f!(H}3L@)cQ-M@@M ziVQ#$w%CDR{O9u`?H?l?4AP(lE+3X@P|d&VVS)j-WtsoD0HX_D_Jfz zM0~_4R*T)7`S62#x~*f8zk!?2BO>^AwI<27&Txm!4O@rC@PC$RLVebTMlBXrjCcH7?l zA|bUS^gd#0;oW1;eAIs_ovcU#7`Iv=9K zvsQD&hWPwjoozxqNBc7!n zw@`rQF}j|(_iL5*7`U=k(^orM6rA^`zUbXcYspm2@xjyHuXO6`?>9lWCS}U)y33(6 z-P}}6Epe!e+%F?!x-ETkf64Cwk7cc6`KgtOv;29*^;d?vGvj&|Xg_JV1kApey-2Ib zNK=XYFMW_ zuD`Y7Gj~;)5?;~(R*|0GN2+WrfG7?8%4BOFon4C`v)OSa)n6XZqz2mV>K8`J;Vm@; zR>M*OJ8+Z`1}F^NAOIv!Oj@$4(?l52F+A;aoHbHC`M&TI-|UyCau(1t5dZ>RoXbzg z%T3%_?E*xEK`L((;DS7wcGq8f*1DyVhbO780PhSPy1%<&n5sy5=@4rLJh8O^z=4!k z8HxbZ#)$SwpGKfiMr0qsbGD|x_s7GdBGw8~Y(gd1$m12pmcaJjj80?escDW;7%z~z zywh-PJ9O@!=D#;sRX;d6<&y?2O8Ibmyoy&d?D1-Sl|R? z!a!wn>hA7V;@~7efO4#x?fp8Gtim?x$^(G*KzfV*_a61PcSu@BVw0;FPt6Z^n_qO$rkpb6&(7b+1hb2@`P)gt$_I| zUwL)Gh4sY6Ufdfw1itKMYn7&BkMhRCU|~GLLdmAquN)pYdaHf5FMEUN)imA^LV`q& zS6YlW0(NTya5BIj&af!8)!hes{T|XTlUNcfFH0$TwXk$&MhVJK{)y1CGfJrivr3u2 z*#frBrMRY_jtLyYHy1~1pf#B%Mw`OILU6Gnmvmyv*j5TDS}? zSg9cPUbAw#68i!ft&cyVRd+HuRwE7xx(=0;P`^Q+hsFG)M}b=T;t3+X+uA?I5J5ICsUUA7fU`~|2KE1$| zY!i;(DbFfOf{TkU)h#tb`EJgccm6Gl2Fw#ahnux(;K^@J<9ngI{SYu?|}Xz($GN_Vv%k>Yd;K1-`O(5gLi zR9wWH=z(=>+W@?bZ)F*Cb?|uPDYuQqG5_!+tv5m6UOBYK;)L)}=T}Yv6QP0nYXM{e zQ&#S?5?UG1 ztT9Pro!{Y(>nDW)4memXT5--J{|XkO-}7u7kZrOEPh@$+p$aM;GbocW zi2TTaD}9!R@QO(Z;=XZEu~ekfpWEy+pl65Z4x&Q zhXS#31wpjq9z!9-^<4f4yOc+!Iy0!pC_Ie;ITbQN9+ez_H#L{kO1?TSn z?cH7hh)u=^97a(AP1}+o8Fx(;ZZChXs1rZ)kcPHa_&}Ev&(&rXzJfao4L78t!S-9p z<(NPH+CA$|;ol8_0!Ewa8cg2Dt-J7GgX`8L-xYCV?D>JZa_nO?RW;6IYZ_91WFpz#{>)UFEI z4vMj|sqT1(y7~ZO&A)D#>IQz{z7eEVpY{q7BYNT$|9QoyYNE8LYj~4Gp|WK;F9g~0 zIi%z8?eyqgE1|i8`)?@am!+fd#VF~Ji-3iuh#8zVOe=1cyMUT$x4FAHz2%t6YL*sI z`nU#P{VgA@IR%KZA9IFbaKq|V^Gd}%4ylbrT)#^LME|M_DhaPyopz++##_4WI9f#J z6>LMkC0M2Xof%cq{kW}nIeYK08`i&F2KHn%y=gm>cm634h;zgFP2IQOR|_(wCl82< zcoR;Rk~E(<8|Z!W3sUX``3LAKjY9kYH8?YsfFQ#>1`h(^Z zqrEfdQ8mcUF9-E5R$PRy9gjFhFZ+Dq+lz)S|Jh!3L)>ywf;%Zl0P2QfIGOSWpMKH! 
z1wQ_3m9XZ82~jzbdYr1e?hH5U;rW0_jh~z@h<@sUpLko@?MMW0#s{LI4k$L^T0LAo z2c@0WLo`at@mD|`Q4jp5RmBQxUb+~L;hWI{YhrbjGS*$Rw{njGXWLd?RE*+r@~`}$ zXDZ-O-)egxc=A~=t+Ww7UcqESU-2POOS7ZB6B?5devqfbHU{A|HU^RBfBWp>;c6s1 zf#P-T8kXi}(I}qJ=K?Z6UKIAA16Y6lQWfR#-X9;>7C<{e0wY=0$yuvcEuN=6IX`MG z_A8j7lE~T##xLbOY)4vyHS#D$UON^_31ARN2t$G&!c-*}*w($x4=A5d}cUFGwgwaUC!7ffh0b1xNr;IX-0z3wC+L#Hpuuep-U9!f(%lL)?FWt0`5 zmz#DTrvyX4SpxfR{Q2vQP-*#T9B{59MnM)?Hd}7Lug<8MB;+s1lgE`9JugUsM`CqD zM{z*00)=4vq*5|4CInX22=5p%$m~!o$}yTLx^oI?*YCRoLrhTK5q13AWO@}pk2!S( zD>+z@rQ@7J@J}Q02GO2liD_t5j3qSh=DwAL*ej{ThV1|hkL>rI%{LZ`elT?R!AmE8$sHuTiGqZ;_Y|+h zhD#77E)#B9ISHjo>HC7-uRv{?hR3f(~PRNg_lxpPB(cCnSgjfqb?@or?4^qway;fr!2WjReRGk;Jol6~!AU zmTbkMDYbp8j-={AlB)4Xvs)vrI9k%3k5jb}yiDg-FnZ9oU#E_TJ1HSq*eOCU7c;r{*~R}f^Au)&1&d5PP>XuLaq1Y?s~1W~rI=1#~&CyA8cT@U?H zmIBGbCM?==!&+{|F$k>z`9s)GLPH6aH8NDtUDWZpUFQd?k)5nB0YGOQ7Ks{4vbGe z)Y}6S)ra{&y5}`=5!q1Wd}tm*l&Rh}GDRM7Bv{X^J>-tjvvJtrsPG^*HxY!WmAU$p)MJJ8T= zvb$Q<`njo;ybIx8FI<*tqb&1Or1D1$um;%-Rk(hu8nr&kdrvI@5Ms`O3-Gy18$@VA zfvFm;il$|=`pk=eaiT0h0cR6${qOcpQ!XQL zSp2u4|H|l}4CVjo(f?0dBtwI?^pzep*O>i7d~>neDwWPF0d^-SbSrRatf>w|WhnFP zvi+~`mUkD#OWyK&J+7<>u)gQOx;&q|$WApb8Jl#vxk+g#sgB+)4B7I0m7E|-g7nmu z>)rh|3K>HqfG`hN zQ=xwu(cOO0hnxEDQ3fpmg%KH8--x;AFx?|$?CathTZReK_QO@oj|qe_rW@E#8ls-& zmXRg*UjL2BHuF3Voi?A4L>h@IWpLSn5Ns7VXi2xfDnW*h$E24pi2)N0p-ao6XctwDqYmQzD&kSS(g z5}Zo|)tj)+qMTjso%B5VkikCJw@#EW)-Qk%>pzZq7kL{JfHe~Mvc)raPGHu)R=`!X zL|v(uB3U5N0nz{@sH8%f!e&lwO~?{uK4$MydCTdIrvp@J$+3vPug&G+X04!K=tsem z&MX@}>Y@$dH?Bo9bd6uZk^^co76^e2l8;&zEs5TTibC+MFML8s24+4NsVN3EpzUv4 z3X=c=N)6piRe}UK*?SO&^*_AUh3fD!X6EZ)he@&qJVnx z<`_`!0;}e+E_Z(fn{dOYxVMBk6<;42g$J>GL2^C=%`~qbp0$uvYtoO~Td31|{ z*v^hjLw|Cl*DRYowQFzfN`(d+l=7#LaZ@GRgBfrrH+INZ^Oe5WCtNUM{~kM}*J`1A zj?Y3P0A5!LJ%hI~i-ewWu|cz1_Kf*n9*uCSy>b+EuM4Zrxg z7D2j;;vg68M0oCAaM-7@G*ujh`bO8ap{BRCI{Tgui+#qJv*IfgPY+$j)!(Jee|=*WJAxq6~M&$NF7 zROt3qnuscVLL6DnMtX8y$O*va|CmRVpxiCG`sIoyEWjn`v zOh5eitgGjoVuui%V)(GjQFCvorJ{P@n`M@#%R|YAXYC-Ur#NGDNtq=N9!dB0*4)nm z9=4-V&AJ|t_g1eR);G$!!=yU|OGb+atfJP_pL6^P%Y-L@vR-~#imJG_qFZ(1+1$)u zZ!huwcC>P|frQh_AC7Z6S&2fWu@A>JUy5)l8NW0z=KbS%UAqR}VfWK8bYPL@fSKM* z*K#>no9UqY^{0yoOa&Cgeemeh*6lB>-)H6+1hXn!9{g#PK^S@h1h&5CUp_*8xv{Hd zaM!;2@b7Uw1hrGKlF1cc-L0;UL_5DDnkq4zQ?!LZHGUFPG!ciHmQTzcK4P9*5(PES z{DEM}lMX;^M!)~U$Ln0sC}3HrvGDs<5Cc&idclMEe(#v?7Kz%x;9hl6(v4iVVTl%K z`O8n*d=L;M)Q%%|nEpPH8hsA3juKTSzN^ftl-wrO9aeV#)ZC$`US$#ATNF0`G9u{n zSH3^|UBusXpVXrJbe(J%M)G_Wg5QV2#d9ewg|p zSpyZu%vzu1Bu-oHcFc{yYaNS|q!f_&H4kYLZrs@<-rzS=MMHNJoHDH0iLtdwR@~m` zFISMyq}r{$k`r$=c-_aKQo1?J-oi>iZq+5VQoPiX4Cars9bpZE)JnzT%&bZkm4arQ z?cAuMVrRtDZ|K1Al;2Alt_BAOWT8OHMWsBp9h{1>hZtsN^!*4E;mR*LQo~L_YMwrv z9pkeyN=@Lg)V51Y2oWs&{eFWD3arbME-Zn6qrtXTl^WesRqHkw9U-Royjl=CR$i{yo&a3u6y#I zOH4mI+-C8rA8+=R-Z4Mmmvk56bzlUUCH-0=YJeXXc~E&37vV%~4vwkNB;FVZcrtYa3iC zFez4$MfRqc8$g1lT@i9*?*{07P6B$Me7qH9oruaK)ykDLpx@aA7H3uZS4^EL8Ob=J z)Pd$Si}CrVv1HI#a5U};sy~M%;c?{_o>*ksqCSJc??e+NI=)#%-gteL6+Qr4teuxG z4uuI6R7MhduIm5ueGj_gh>}3fgwq}`h9``qE4%lyt#w9P6RrIfJ9|s=hO23MW%p^y#m4@C~M=DhMHlX7qurT7q#gb zO^i# zd6gF6Sb;s7J9^__?Ra^aj@|SSRHfNs>0@KCwbsYORjAo(cV+7LIx{0J>0?38lIqSR z8?}(@?lyS?kQQ-5sV{=rFaW*VW6Un$|9!&Oni5XvWm%21lbxE z9|46G0O*O5roF9+?eUE@^MNWr47`!&+sO3|-QS+D*ajSKY=S3X1m_0wsg;E>O4-~3++#hWb;0C`|k+4smM6cp8*)=!x-03zK83)@>wH&osAv#{qUqC?$6Kvo!YFO|ZK z{y>>H7QEfBcUD5Ju6H7161-L)w3Ql8kGF!G{ z{?e%bkdPT~UCq%dJdJ$>=yx~@l~pOfAc=NYySW9R+_GG(Ml>t#Qx)%xIOX+Y^6oFO zQ$W{L<_of`=o|s(-uNvAFOk^ zsvroURw)J}@iL7+>^yiGSPB5y0l=MRH}C%ea|o1CZKOV z7NiMu5nWJg7pD_Ux7NvZpup1g;8=;VwtlK2WTxK{g9rkJoRWYDbg_Qqp?N^U?ZK~% zCJ9hC*#0;B`c5_d>2NaF-Z2lRaUP!p>$oN#oyZQaPQen+5zX6l$<{R?3yI+>hp~X~ 
zmeTiZ-29eocXtz6+E{dDngCRHm`6c%W7M}J$QDV>?gXpkExN@%nW27P_9!4sw;Mr` z=i@R^fruhQ^6u<>Z#TUB{at0ZqMneUwQrXkH9KIs8}MYV>i6h!ogrb%_l3m6kc|-Y zM;J6$NQ@l5H!819u8-IdqMwe2C}_nAHoQH^Kl@a19WcE0AnO&KdDf;y{g6OT8(6$y zfq>yjkdiwDLC#=?8j3DJ6|xvh_OE$IIw`0CiV2iaIVM8PjS)o|Q((Jlr-$ur!dIIQ zu)R6w5GCu!qh$Vl1JtG_&3|tfDoqr7yaX6@WPlr`=FWp}2B6H&t3Xq)#O8k!B%mn{ zAGqmO(H6y}BRvOQz<(-#-}}O;OC43%C@_n<(GIP|*!b{paBTXbKg_}>K#dDu#PdO_?upgvC{D79deAwB+}oKoVv%J+Ai)Km}L8N?yU zEva`>8wd^y(OP;@H~rlGAr+F=sxe;ocRfgO*>kT_rpea^MDyf^=cw)D6#S85R+o#RA?2$hZ`G@Ea zD0J8|q|=Wk-A9__Y}3#PYEbV2cpVJ;YC->u50>{c`B`m+dfs$3l%wIWBo&B$(BP>; zy;{j)aH@8H@(Du|Vd(P*N(F;N*0Er|s4MxH{k42m6i^R{Qjtro#964EvX+Kk0{6p-V3PE-vy_>MSuh@x zq%58as-{;vNEie#>-Z>@$1Yx^AczI}0krd)ZP>0qU@MaqGoB(m;FAx!LB-;4#=j~@ z^Fr4+_qc8WR5r_Rn`F1ISiqmsjIUrPLwJUeJP%HIH`AJ1b-^`~OC=UocA@^L0bH{E zm&s_MyhZ7taAe8Y!;Yui#Mb$f)M5oFiR2d9cvJPYWH&+4EPUSTh;LpQVge zEbvPBX{{ZveI|v;T^O8O^MgcGv~@oPE+t%YC`mtEa!^LE4Z?5c^xABFXt2b%=S0Sx z1;a^9XDr$ZzCW21=U-ivSe3ysbZ=ixH$^<7r`GLb>9*zT;EKgsK(u`seOK05hh7`V z%27dD=uICZiKoFMqj(;;o{Xj|`IZUtjyb)yfkob^avNB?t` zEgh!WrOMg(*p7+u><&+<;~-0Tgqpqqvg`#zeT@AVN_$-WAM=#oZmMFx8j|=GeGw+4 zkbvQN<+_Vavad_h_+Fd+lM`qEPy#@iFT)ZDR-#Y6tw$rqM=Gguw{S2dp^Nj7s6&|k z5baV)5piC_=z3skF@cfWL^Q*2MZz2?AZ3dsti(aG4?~w_ix$5!P_XEg;OYz&y6+2e z)aGE^&_@{ZQs%7s?QzAU7;8swnK!MhANwm_+qPC zo~{5y6fCl7e|HI;@!mihhD!(4x&pb{%<#tqbVe(htYUwR=iN_K1H|zJs;e8U$t5d| zfcnY2Dx@g5P^9u*lse8=%_FBfx{BmNVV%Kf?4D_|w6T+ZG=igciVJTrq9K00GF-(i zUo|u9kgh9vB{N#EsKz98d=X?arG7~EyGE(l6?jCdc9p@qar|g<|N2B&nDkk5 zABeuPGPGy3po~3B>&RCk3LDMV> zZxh;DbDul&kQx^{j97e9`(mB9X$5LEDd1E05) zZo>OD3Z|iC+|E@AVilY}9Ybic{nhuW>RnZv1`{#uuE%cQDIaBSO%^?_0Z5JHL%9#s zI|ahkwiuwYh!=adQRJKLD!cw4xW9g}d*@~hDMzo%*x)c*mu7uK_5xb)7A(-~Sqrd` z9o$+a>f?HvvKhENQz7!=aNZvVxX6gFAuC421w7q^?$?GfhVD@oON|#_6 zIpC2eNa73r;_o|W9mX;4I=U-A$ujBZ1gNtlr%ZO*ByZUrm*%u>o_Sftzsz>a52(yhq3^3Fnm3(h~L6L}+)xpG02 zbm`>Q$`VKFF7DXAjPB3K&j%zc7}#6hPYX6)AJxUiu=XmRy;XBYCGww;6GDquvk)!9 z-s3qD4Zq^1v~V($kWann;uqqfrVP$r@nE^TA9~~j;96FRdTDW!gzsMB9>f&4KfV0y zijR6LO+Gxm$}}7ktv2LSCY@Ap&UeUCe+E5U+(Ren4J(Du9sYTD#}>nB{YR5=*rACd zP-=XdA`0M!-W3@QsfGDE_-(Y|LSY_{vsoI{{B%jKI>oZGTOMgcS;#yacK~!10jFc` z-D@f!6Q`RjsUKBNXa?rH8&jbs7dBWEdzf?*OqzE z^bR?=kkgHb{@TG(T~2zg!EzgCeI7)6v%RS8FPjjavfO;^8r3vBGzPc)9LVD+>pC^m zZ=O1mJU7GPxXWp40t)NcuW706FAZ$W_(J!QdvD8y$mbm2J9c+kQ3aR>2w8_?AmcxkCoeS{QFLYAoiv_$_2xHjq81GS+v{*kly5;9%7EMLBpxO51pP_H`x4-bnxo2p<& z(UgxY9^2CAq%Oc19m}_Rd#TKgWBsVWiEe+aHuVA|wSzweDm;?EC40`=OXGx)4mIZ` z&hNw_P-)*lml9$tyRQs8fLi;s6%(v}2~;Onh=7pNrI-T*lLBbS30*S!@|s8*LN8G2 zGQIHFSB<5WGt1vs?NsmGUwbq2{2KveqXJxN{rQ74MoId&x2%ps#q>X4cydzoMakZ> zbauRoC_u0xU&dr7*Y66;{l1^{TIuoh1{PebJ#iNNOJ1zAy7!Z0P? 
z<^m-LPz(su3!^;AHo>g880_>UdR3Yf9}lcmjVXK&z439YFhV01?pZ zT=;jY@4+2iybf<_bpJqFG!ZYs9bD(s`w#zXInRM7?~l2Z0OIKP6ogNM{c!!6?0wMB zuO5vKgD24QtS;N^_;JI+0p$KpADy`2DV zJb;Zynt-$kG_^v%B)WK(*Ja>i2gE%8Z!>j9qER;79X>w{^J8~=xQV*KmO zvmP3cf%rYz|ZonqW&!it4X zY(Fm{-Mc9^wK88VPJdn!q)lfZu&5!9dP!}UVL#K=9O<1&C!~}Mo=|G~eBe!Gr%S3+ z_rZkDkJMWx?mTt)2=Q)`rdFcZ{=Ca1cw)w_&LMrT_6@kjV;t!0SBS2&P@v856f{0YP8ouW z3cA}8zz68?k&SQPua35_+Q5(p)k;9xOCvmNt6Juw1Ix z7C|&=+)(_hPUKn-%PAndLkGIR$NdkYe+fU^r$HqK-KiCgNY7~l(KO3U(XHLqPxCWv zCkM{cm4l{ZBFi6v+fO`BspNMjsqG!8YWq3~(~{C44Lk@ye84ku*JHPwvi{fD{0_TU z>HY9$nm%6?=<-qLIPe!lyCufE)KcbV?qN}Uq7 zwrjdaXMa7hSMsINU10O440yQ74`-koyBE97pREzlA(`jz)H^v(k|L3@D-FS{ix&lGaL#!XR;u!e1gL2cLO z#@?>xdAaTQ1kugb6+7STeD|y>rAzPZHQU#w)2E+wRSXaS`ZH5514PLFnpV`P#FoK( z(ltERyy!~lt$?)GU1RXfi8R|W1Ae*GOa``$Or^Vig`#nzwv(DM7hOW^V*N1%hH zS)|uSzT1BBjs;hvP-D|M2WwVy(?vqMJ@2k==kB(W@mp+u>)++2Uz6u{r~|Z)?p&rl+6( zamOrX@~p*0^-TvwL7Po}3Ip@Qff+@>q&72HB&LDk?^phYyw_F9*^BqHu&m?ZDbF@6>SY+9uiY&H9Japl_nmmAtkZ)DG`5@{8@u=n<=+s78< z{@yvs;A{YJZ9(g`&=LrFboyH7} z3BclP18+5~!3b^1K1PR* RUj>~w@9FC2vd$@?2>_@bz~2A> literal 0 HcmV?d00001 diff --git a/examples/advanced/job_api/README.md b/examples/advanced/job_api/README.md new file mode 100644 index 0000000000..ef4b70fdc0 --- /dev/null +++ b/examples/advanced/job_api/README.md @@ -0,0 +1,29 @@ +# Additional Examples for NVIDIA FLARE Job API + +you probably already have looked at [getting started](../../getting_started) examples, +and [hello-world](../../hello-world) examples. Here are additional examples for advanced algorithms + +### Basic Concepts +At the heart of NVFlare lies the concept of collaboration through "tasks." An FL controller assigns tasks +(e.g., training on local data) to one or more FL clients, processes returned results (e.g., model weight updates), +and may assign additional tasks based on these results and other factors (e.g., a pre-configured number of training rounds). +The clients run executors which can listen for tasks and perform the necessary computations locally, such as model training. +This task-based interaction repeats until the experiment’s objectives are met. + +We can also add data filters (for example, for [homomorphic encryption](https://www.usenix.org/conference/atc20/presentation/zhang-chengliang) +or [differential privacy filters](https://arxiv.org/abs/1910.00962)) to the task data +or results received or produced by the server or clients. + +![NVIDIA FLARE Overview](../../../docs/resources/nvflare_overview.svg) + +### Examples +We have several examples to illustrate job APIs +Each example folder includes basic job configurations for running different FL algorithms. +such as [FedOpt](https://arxiv.org/abs/2003.00295), or [SCAFFOLD](https://arxiv.org/abs/1910.06378). + +### 1. [PyTorch Examples](./pt/README.md) +### 2. [Tensorflow Examples](./tf/README.md) +### 3. [Scikit-Learn Examples](./sklearn/README.md) + +> [!NOTE] +> More examples can be found at https://nvidia.github.io/NVFlare. diff --git a/examples/advanced/job_api/pt/README.md b/examples/advanced/job_api/pt/README.md new file mode 100644 index 0000000000..180e22d5c4 --- /dev/null +++ b/examples/advanced/job_api/pt/README.md @@ -0,0 +1,53 @@ +# Advanced Job API Examples with PyTorch + +[![PyTorch Logo](https://upload.wikimedia.org/wikipedia/commons/c/c6/PyTorch_logo_black.svg)](https://pytorch.org) + +We provide several advanced examples with NVFlare's Job API. +All examples in this folder are based on using [PyTorch](https://pytorch.org/) as the model training framework. 
+Furthermore, we support [PyTorch Lightning](https://lightning.ai).
+
+## Setup environment
+First, install nvflare and dependencies:
+```commandline
+pip install -r requirements.txt
+```
+
+## Examples
+You can run any of the scripts below directly using
+```commandline
+python "script_name.py"
+```
+For example, to run the PyTorch Lightning version of FedAvg:
+```commandline
+python fedavg_script_executor_lightning_cifar10.py
+```
+### 1. [Federated averaging using the script executor](./fedavg_script_executor_cifar10.py)
+Implementation of [FedAvg](https://arxiv.org/abs/1602.05629) using the [Client API](https://nvflare.readthedocs.io/en/main/programming_guide/execution_api_type/client_api.html).
+
+
+### 2. [Federated averaging using script executor and differential privacy filter](./fedavg_script_executor_dp_filter_cifar10.py)
+Implementation of [FedAvg](https://arxiv.org/abs/1602.05629) using the [Client API](https://nvflare.readthedocs.io/en/main/programming_guide/execution_api_type/client_api.html)
+with additional [differential privacy filters](https://arxiv.org/abs/1910.00962) on the client side.
+```commandline
+python fedavg_script_executor_dp_filter_cifar10.py
+```
+### 3. [Swarm learning using script executor](./swarm_script_executor_cifar10.py)
+Implementation of [swarm learning](https://www.nature.com/articles/s41586-021-03583-3) using the [Client API](https://nvflare.readthedocs.io/en/main/programming_guide/execution_api_type/client_api.html).
+```commandline
+python swarm_script_executor_cifar10.py
+```
+### 4. [Cyclic weight transfer using script executor](./cyclic_cc_script_executor_cifar10.py)
+Implementation of [cyclic weight transfer](https://arxiv.org/abs/1709.05929) using the [Client API](https://nvflare.readthedocs.io/en/main/programming_guide/execution_api_type/client_api.html).
+```commandline
+python cyclic_cc_script_executor_cifar10.py
+```
+### 5. [Federated averaging using the model learner](./fedavg_model_learner_xsite_val_cifar10.py)
+Implementation of [FedAvg](https://arxiv.org/abs/1602.05629) using the [model learner class](https://nvflare.readthedocs.io/en/main/programming_guide/execution_api_type/model_learner.html),
+followed by [cross-site validation](https://nvflare.readthedocs.io/en/main/programming_guide/controllers/cross_site_model_evaluation.html)
+for federated model evaluation.
+```commandline
+python fedavg_model_learner_xsite_val_cifar10.py
+```
+
+> [!NOTE]
+> More examples can be found at https://nvidia.github.io/NVFlare.
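All of the scripts listed in this README follow the same Job API pattern that appears in the diffs later in this patch: a controller and an initial global model are assigned to the server, a `ScriptExecutor` wrapping a training script is assigned to each client, and the job is run in the FL simulator. Below is a condensed sketch of that pattern for orientation only; the `FedAvg` import path, its argument names, and the `src.net` import are assumptions based on how the files in this patch are laid out, not a copy of any one script.

```python
# Condensed sketch of the Job API pattern used by the scripts above (assumed details noted inline).
from nvflare import FedJob, ScriptExecutor  # same top-level imports the sklearn k-means example in this patch uses
from nvflare.app_common.workflows.fedavg import FedAvg  # assumed import path for the FedAvg controller

from src.net import Net  # assumed: the small CNN added as pt/src/net.py in this patch

n_clients = 2
num_rounds = 2
train_script = "src/cifar10_fl.py"

job = FedJob(name="cifar10_fedavg")

# Server side: the workflow controller plus the initial global model.
controller = FedAvg(num_clients=n_clients, num_rounds=num_rounds)  # argument names are an assumption
job.to(controller, "server")  # or: job.to_server(controller)
job.to(Net(), "server")       # or: job.to_server(Net())

# Client side: each site runs the training script through a ScriptExecutor.
for i in range(n_clients):
    executor = ScriptExecutor(task_script_path=train_script, task_script_args="")
    job.to(executor, f"site-{i}", gpu=0)  # or: job.to_clients(executor)

# Export the job configuration, or run it locally in the FL simulator.
# job.export_job("/tmp/nvflare/jobs/job_config")
job.simulator_run("/tmp/nvflare/jobs/workdir")
```

The individual scripts differ mainly in which controller is handed to `job.to(..., "server")` (FedAvg, swarm, cyclic) and in which training script the executor wraps.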
diff --git a/examples/getting_started/pt/cyclic_cc_script_executor_cifar10.py b/examples/advanced/job_api/pt/cyclic_cc_script_executor_cifar10.py similarity index 100% rename from examples/getting_started/pt/cyclic_cc_script_executor_cifar10.py rename to examples/advanced/job_api/pt/cyclic_cc_script_executor_cifar10.py diff --git a/examples/getting_started/pt/fedavg_model_learner_xsite_val_cifar10.py b/examples/advanced/job_api/pt/fedavg_model_learner_xsite_val_cifar10.py similarity index 100% rename from examples/getting_started/pt/fedavg_model_learner_xsite_val_cifar10.py rename to examples/advanced/job_api/pt/fedavg_model_learner_xsite_val_cifar10.py diff --git a/examples/getting_started/pt/fedavg_script_executor_cifar10.py b/examples/advanced/job_api/pt/fedavg_script_executor_cifar10.py similarity index 93% rename from examples/getting_started/pt/fedavg_script_executor_cifar10.py rename to examples/advanced/job_api/pt/fedavg_script_executor_cifar10.py index 9a252770a0..b6241a563d 100644 --- a/examples/getting_started/pt/fedavg_script_executor_cifar10.py +++ b/examples/advanced/job_api/pt/fedavg_script_executor_cifar10.py @@ -29,9 +29,11 @@ num_rounds=num_rounds, ) job.to(controller, "server") + # job.to_server(controller) # Define the initial global model and send to server job.to(Net(), "server") + # job.to_server(Net()) # Add clients for i in range(n_clients): @@ -39,6 +41,7 @@ task_script_path=train_script, task_script_args="" # f"--batch_size 32 --data_path /tmp/data/site-{i}" ) job.to(executor, f"site-{i}", gpu=0) + # job.to_clients(executor) # job.export_job("/tmp/nvflare/jobs/job_config") job.simulator_run("/tmp/nvflare/jobs/workdir") diff --git a/examples/getting_started/pt/fedavg_script_executor_dp_filter_cifar10.py b/examples/advanced/job_api/pt/fedavg_script_executor_dp_filter_cifar10.py similarity index 100% rename from examples/getting_started/pt/fedavg_script_executor_dp_filter_cifar10.py rename to examples/advanced/job_api/pt/fedavg_script_executor_dp_filter_cifar10.py diff --git a/examples/getting_started/pt/fedavg_script_executor_lightning_cifar10.py b/examples/advanced/job_api/pt/fedavg_script_executor_lightning_cifar10.py similarity index 100% rename from examples/getting_started/pt/fedavg_script_executor_lightning_cifar10.py rename to examples/advanced/job_api/pt/fedavg_script_executor_lightning_cifar10.py diff --git a/examples/advanced/job_api/pt/figs/tb_loss.png b/examples/advanced/job_api/pt/figs/tb_loss.png new file mode 100644 index 0000000000000000000000000000000000000000..485d16c1148d0b9b7a39a3d6b0b75bf2b1fab3a0 GIT binary patch literal 23302 zcmbTe1yogSv@T2t7=&~vAgzRSmnb1fDBWGsT@oS<0s_)0h=_DahcwbHDJk6{&7J%E z?)lF-hK!Lmwogh&{-c?v#v^rss^&;NV>O+wmca zU9e9MC2jm*S7`dpO4^%0^7~zhj{T?iWA8qFARSE`Agk`G_9U8i&58y!k5*bSxT}d# zI-0Krj@3m|37<6z1EkTlQRZK^rDz)edt^GJzfrj<{KQGoVmHVYhyJ| zBMWU|iTsYH@u{u?BG(s=eS$l6kE`QoFH;Urh%BEyL&Xt3ysM_`PG>omPeo7vr8aLQ zOZfPg(X?Z~@Qedd&TOj4?~S8O)jYzliEta;0yBoa z-Q9~nnTnR`4*HCKetfjFyo?rci%PxJ_?A)odny=U?RHJr&!0bCA}dTU&yHl%gh=0} zq@+A~(PL8?lbrlQRh1|)F;VDZx8>K?aZ}02h%yn2rntO3_KP^?*9T6u)d>|x&Hd7o_CkofN1yQb=jii(Tl%0`pcVBGhloPjIp zJ|cFH-*zm{NBUgQv;^TWFfau9Tpgs7*L29|#9&Z*elTbWy8i20$1 zJPX3UMdk0dQBc{sI+)cKK`z+(!w;hkF3D`#n>4yzGRDQj6FO7t8usxc!7F8D+7$Vm zyHA6EU;N=UoGgEBI9{l;FlF0FsPsh^_h6-;_~}i9vI%h+nb4~Fz1!r`r z)j>Ra!|mznX45j5lV9bpW1cH05Zl%t#qqgpCaBlBm5>N~lz%NMB2M=@PJAvcjaz2g zi+6f@Y6$T}&B7w*d3CyCJ@zX4^LJ;);#r^TOT^j4I?VdhScsU_ANb!j+*@c{INK;9 z7D-pQN!2=-CDjItVR*abI{^VfFx>rhZufzbo3MbnHp!y zS`$i9Z*9a&_zdzljtkUFo=$rn^gg{wCG+Bign@y<+%E_S37G${d3pG74*W-t9%WWl 
z@!5I87=7QAm=99h&p3~xMewJ}Mjh}E?1CG^h+r6>tqe|x#=zkxej_<=!*X?5-?Vn1bF8M4!GF7J6f zVFnYya@RlL?mzd52=l4@ELv&n4Q0v=UsQ~$z4m*4$Ke#hNs%IFH{WX6R0kju#_J*4 z$Bh>bg?0NfHZ^NsTq2#@uP)Eo6J;?;IcSSnX=qUWel&f076cD~3rVx!`%LNa)>P2l zcX}5K#J*k>kwGUXZb)Z`Yb};V_3e#UN38NGe7Gk`J1({S4of#*y?T}MX1m(gF-I=B zz5Mks$`rifd%bbw(cIjOqSrrV$VT$+J0}MRpZ5hH(yd#!>UNv&c%ClDC-j;MQQ$xz zSy$x4s$ugwJ8b$clf6KBeS$GZNtmIUcMAsx=RPl4I2RXJxt5f4wB~qj)3@QZB;lL; zr=C?aHb{z!iYE(UtS9^3tf%n1YB^3Tf!zrG4iU5Yfx6koT_U1sZf5_R-=xgCr>g9Y z1{%Knt#VkF8Ozsb(Yx9w4#XsFo^J_C`)zn!kX*NjcVOrA{P#6QWJk+n zh2`3C4vuZz9yDx2-8XLZ;Z6$!2;i#as7?9HC19HrO&+dO(pMFi4jJ;jti-R z@DTwl8cd(d4L#ha#MXMqvKFa=avo)4cXPss28ZBW0Ewz zMMhm6FVf3st8qX0)ATI8xVShYZo0`AwM3m6g7?O;_3wsMH9fBiC1>-H;Wf<3tE=-J zQZ{`gBt4%??#7E{R>Vl3)E0C@B$V3zZpiTJ7ddCfz@fP4I{HYPYm$09-*wiVfzs!K z-RJuJH^0j!36pC6TwYOwAxsDo0TI#uYF1>aPm+`ae+GBgnf`_#G1joEaghGV9co&lsg1^>Froh>L5tuAFJ_N#yM6=~)UO*5xZG zP4l^OM-++8iLz=q$)Gt`Pr4}O1GbMAuyilh6nqfL3G+T58=I_ES2UftgoOQKI4_6m zwsv9F!fhC3!|=C`+{d+>9FQ#$+1%}XtI~bDdgZyiJo;SqN?MCW#eSaNW$5*Jf zEG#VIbsc+?xQ)%sTs=0e)H?h6{&Yssgw{83&blpPAtFNJXWZv>KNr*uSXB^x>S`kr zub@U>pQc~WLl!iH{5_+SYGc3oi+!w6XKH5j!{@Tue)?inRn^SlHK@N&v(`@hIn(=d z^b2a@08Wi?Ph#fd)643^kcD=eP<>y=XnnVuCvH4J)6md>2ZoW{i+*B;)SJv33E0A> zc8kZfKaHGkWj(LR^x#jgwdxX-R}(0;n3N(Z@XyrJQjR9k)B_x!6I2AU0C01w$1#X< zXeQO?>U8PPpM^a)P3GFqpZzxqYq4u*`p}5An;^EanxN9ORM{^g?z*Id;tu9^BNXK- zh}LSzcJ6;?T(t@g3Uz7`;RQelf<=_=&d)8EgWVRKnAq61lfA{$IX@hf70;tlHEr+H zWe7_)v%dQlmX@d;JT}vv@Bvb83xXY(#^k?u<_aojsR8KhEp}k+b3f@$A57x5ber?V z_|@nm3M&iaqTmEp6r1OX-Q$|o%nVD-0@dxBjmLpFlzl~zm6}$Vr4=#CvG=s3@xagM)zXWxU}W1!C6v!)SN|?nE9@2%%odctV^SE_Fs3j($^t!pLJg z%e=m^;rEj}xtE%bPU0q&xQS+i?_MgH9aNxd> z+e?^qZOA&I`}+>;FjD)A6Z=92H z10;x15P5_MMEKl89_vY3$p6#;xxVJ+;?hXIzw7Sq-n{G(B6=~85Zd#He7+!heHux^W0ml=puqZo^K7%MzZEbK;lstXoOB{ivpxbC zX=X;o6Ic>qMja6$DtW3Q07gT|1zmz`Y6R?ixyOS?^VBLeD5ayl2Xced=wm|u&c2C! 
z{~psxfE?m_4p1YK=h<4$wAbI6){YKGfIyD(0mSHi-CbQ&92}*i4v@&N0l+km1$~n^Hv9&5iH+q(ii7|0k2+ zh3;5^RtEy^y04^485`3!p3m%@j5l5p@_BDnj1N`X{#LEUYPi^c(I@;@n_eO9{SSS8 zcmN1MhBWPczQwFvbvIlR5&k#t+{r30|5(&;7791ZUh#ZQBMN9FUL;(U)|=G zmg>#YZh(?orp;%gMcxuHlz2UF{%3)h^8oT+!$g(=^6IMj$4tHPHAcB9F-7= z>OTIJl&0^R1z!}>MLx#Hvb*maGcqwH0taxZO%>d(@)=(NVQPAYk#?WqSloah|J-}cx2_QN0@@>IHR$N*#KpzA zx&tPYT3f||)cmS{!&~dR16ffR26Jndbu}J>3CgrBBps4Y*CZe&<@dv9Y|K{rQppP) zwHO&0gTuls5l;!QFvjQ#02E~vm4MG8(T9#>Hu~I&vINA$i+gUcsKidx$PpoI^{bd_ z;9C;cYrO7w)|@Y20&N>F<5;x5-}w0pPl6QbO5#<$; zRo|iAvE#en+TNZyzF}Xjy*X7G2|!2b)hjV^@f(n^Qh#=MK$z|W^R-(YpxSg1gtUb> zuI(rn)KAH4bN|iX?PXZ0IDisZ8!w^>nN*Rk#)@UKn{6t~fZ4(4Mbabf7u)UUnvfx# z0rOn$_qlRRu2!BpXuoHxUpaNU6r=F);X{20Vry?n$t-~ONlj!L0NT;uhB|C2mDJTc z^EJwV6$oyfJ&#@*x6!W$DC!0aebA!1ek%p(~daA+IC`d(iea>p3BNE zL0m6G!8e*~^7Z9N^W2v{-kca-=Z1yy&#?%c3XJt+$LG2`l#mHg{(<|zbT~jF;4t|~ zU<0@mpa7|;w-7L<1xVKP6x@Bfr%+BDTypIGxMZd@2XizL0H1nlpbJ# z=xbSPD=TJ*O5^$F0LZWY2?+@h-cdAC(FqA8UsnJc%R$PiT8e&Awe@{Uy~>Wt4;@S3 zpZOs!)kYRcXMI?9%~Cnti~v`NycxP6xLH`cecO}Hmcx3IE6w|CZJ4pBVpE;ca~JvY z{I6&MN58-2i)g2_r$p+%JrCr*<>ggFzMS}OeGdU_(_!u&4A;|25A{prNTQW511ng8 zVoU?~;O~DUj#-1+?;EYO=nD?(jDOUba^L?8LFRwTI7Fq73e5799KXNwVfku3^_?A5 z1BpUcNW5o%cOvC;?zm52VPV0_VApSY`yn3dDy7(?K25jY4ol#ci2md`3vo zPzfNfPQz#Hpa2>g8cu3UFFKtwf5spms&$-+wjfXA84g-vy-aRNk>1GMMvPlyD`2h(X@|RjrC*yKi?vB`dag; zHt&J7BJT%Z733F{_oa@`72KB=TV`|!rK4+ndk6<2E-o%Itc%9Ox|ay}||F z9=dcJ>h;L@_@a{lOy?5~4bsukQOJb;byn&9Qt%*Up5R*l{HH^4-UVx^-XTr&_KuEgjvXIDd(3#YZl-n%%1OsFs4>DGdq* zs#%J;lQyr`FIT4LnC2pbZVyJRxiJcaOZutiseEqxHfn*o%p{gI_)xb2UZ5>@X{&#= z?s#GUN!`OQ#j1%0LeaEGLL9}661MNW{elT>Y<3BY?GI+R3!`YIbv{dB zRh~hj$EpxFlH1Ob%Y z;FSDMt1p9oI#+AE)z4;lMT%bWt*xyw>((UzN1}MMS@NMSi1NS!YgO{S1dXTc_f7*f zsBwcdD0z3^0icS~C^M~ayI71A{o{*4@qn889l#Zv`XklvRug!94QBlm2$^BFzPHha z6^c1Ry!_t3Lc`7;RbteUshCRwaGofP?$Gx3KvMIk-w(1QO%;g!`t>VOB&&MyTgU!OgJVUASCHqB@--_$>*|Ccdjr-+-54vdJkW$-?@8um zEw<)K#(nBPr&InU%hz$3YWljNVPdq7`?2)UM9$Q?`U@Eu1XE}P0H)sP?NwVOYQJzp z&~@9m{XHoJ9^64Qpb2Dn4i6pw^cVm^@;sRjLb@;D6ui4@7jd8eDdhge_Ha~vfCc&5 z)zP4IqV?HIdwF>ceD7XvUO8~)qY5bcA+h96Q@nE#+7*}TsmBp;HCV8MY;JDeWKpr; zQY)zJZ3fB?6j;I+Ukoaz_B`r)zaR(64A;M6ycIU|S2Ab#k)msQKXDbW3@l%OR}wqn zy{0WI2LW|f@!i?0^(9kVEZaNBO=qBH@3rD9IIUJd(gisB0g3_MTfp3}KzJUJvfxQZ zMv<1~BEu3$#je+-FNE$3ZlKGCf!h2dkftcT0=6dAqjLWuGyh8$my_b z2hX~_;v7!yZ)R;+R5Mc2^768_W}%Ofj+$Gt;{)Wc!hEhQCx?aZhaB0F_54)MS&gw+ zlbJ|@!tmYo%FxCUW3+Jh%E6vIbWZP;^PRicl9 zcA})C(;H3q0_j@KGrd1(#%AUFl+7KF0)Ql-4K80_HeO44dJ4@cq(sHUJhQQ}DJx}= zOS*9~d=*a0NzXRa1W-g%Q^vzXfH5`{D9u2ntsVen5Q(0YIRFnJDA&=|jgZgTunYX! 
z{t$n%L7HIeg!R|v$mmFgd*Rd;JvX;X$)+m0pBVh@QBRRI_*`2%g?gEni;K-}?nzsz zR${LyXl_rTB-Lgh(pk67vXcPexwfT+1>|7BoeEL=0e27AhNa*PZw>!`sSt3r)F%gF z5&93etcvs+DT>CsE5>!FHY*fFPH7Rcl^@CsWJv^cb*UBJMGp%N)x`Xeqgif_%cNF_ zS^l#7=g;TQpWkfLvXHdAjgb=@j7z*Qt1W5TWqR8(Cu;gdVjSd;nWCv$6wNaC15@n# zd`;x8(?4pQH$V)(qgxb@b&m(>E+HXe>38<_nt+<}Ip-e$DK9VoQa@Grjmv;uEW|bN zgFYZy9T{SN4<9`;SRctvkfk@7?d&R2sPVoky5_7ut*d*K?yjbj(Q^bj#zmO1Sl4+} z#rHarTrdt2IZ|Q$@g$K}2@)wODFVeJkW*Gx^0LnRkdW6deM?<2iG23dre%phHY6G} z(O2c%l*y>i>6GxI#6`6p1xSS=K~Y_^8l?%;s`!!}BwbMdSVW{jsmp5Af*X|6pXj48 z{_T3Ky`3+RgR+N1NJatiOe#8Pgc4Hsy-+ z2OymqrO$zvE7WRQw63N}LV1y<&{nzDsxBnV#}}F|7|(|^WYEV7?e>x5KvcjWIfUW6b3<$Fn0@{_dLP`}?huutEkoO+pU3iM<&CNdifmrO0__(-Y z%E}+9fAj-LQ&Y=PpGl6cy8!IC6&xOpHR)2V{dIJmDCg(*oWa);Q>#tc!^!=?gl&F?_KLbTRy8mC)2I3J^;uk=WUQ>guj;3`xVWvYtq+OR>vf9^or?+z z=mZ2bVYSKqmAs`}L|mYnr+x~wNL)&aDQ-rl>&>tx+4G5x$pB9QR3w2&{2>l|NxEoe z=8v1x^pu9uLmJ}fV~;wi1aI=%ZMx{sQp*oCgX5$BO&mA!i% zn>)I5yBqyU=yHOpv_M#A(`1_vqfaH=f+*gDKhaFc{9ja}`{>~q_M$$~&#fEgyE`OX zdQbn{>Zh+3`c9il2XSNgbO$G2pua08q(oX7b8qE!!R0=xjP+0ERt9Os@wMZ5Ramef ziYUE&`NYA2gVVIf+&*I$x@PVmJVAw!P2>n*H)?-tof7e*@`x;oahesBtO%^1S@t1_cx zI9WiO;p^iXfc^*_X8_b_S$cfOl|DLFR#rPt;L|_|1MA=XpU8_I?bl+#&~jD^SB z(N%n3FS!tn_Hply_y8R((0-ogX`x!GtCN6Ga>pvU*VO-RuP!g-TbG?#@?Te=gpmrl zu_5KD7ErUYM&{=;^H}|Ag6#IxL5`05&093FXGJ0xratjw{|dPxs&7*TT}z?Ofgli2 z#*@kWJ3Hr~sk#JomR3k81&I3rh*b-Ae2^et%%woO+kMyhdA`;d3^WJzxmPI~K6 ziO;3u^)2n2%=Gu|*Jf_Btl|1{|G%p3J5}?UGBcdz<>h?cda?r5ZiI*pySV2HjFQ&RxyC5@oU%5XporU+PMMX;1}bZJRVvICyln?0 z81M^1OusOsi{=v~`qcb~7odCb&)`bhF}LP|lK`Nm*UsJt3MwF2=oYClcMky+Ly@+7 z+6#qKPcL%D2FHD3&PN`KMRupB8&CtlQ$cFn+xcf_9@>xNsS~S_pcR;#m)D}t=3lqj z-Yy9}Z-VX~6P{$yBoxXnXUNpITm-YTv!TKOI45B@ya5faYyhIjsOUFzfvxV?gBXmF zHvJn|JoI@R=l?cd3-j~ygSgDY^k#_m`efkZ!V_><%fP2+g3i74phJ5PxPamVog8DJ zc%p0dym9v>n1p^UoZ%as&10ZABVNs{?NYf$kQ}j)KWVv<)R;vH4(T#vdnOQc#z~iV&?Qsfhcxtm5P1;94F3 z7IfQ}Oj$JUih>Su+1wQ@@?LMzGRUJ)Ph#|UemNGu*Z>l4?qnIp4Iv4~JO1|ii|C@u z$jBg6a?nCSws!3bZ18A|go}o5vv1G6)N&hKAk- zLF=7nTA5it_SuVHW8AgTYG&e#rDv4-5*GuE)~kXyE!~8=jxAAA+9@!~x8fc*o$;qn4HG2^VL6Hgkq4|Z)MdKTL!=Vu zV>X||_i^gimMxpM955)~%QeRcpdgiOiI$EsN}Js!Rj-Yi_2-rKlXqjU93cO281@=T z+wxi-yFU6eXE0{|Opnp~8&5XMdENv|XwpOOzkcVJUKn2h+5Z`2|K7vFdZb`{?9GTj z&x(@bY^ukuQa3GQKXdxSp>hR3?){;ZzyERTTh|h)o1sSxGxLb|DKe95oU=U|o%DLX zP5Og@VHH(XbZZbYfYt#cr>YI?&w8q+7xZlONo<>#lM~?e$9CF+){h6T-J%(Ul1a{g zoEjgK^;W;C;gPY@0DFtGvolq}c`jgTgkE8SSE|metft1qas11oLJ|5>&_@Av*4Nhu)|~k6f7)4h0OEb!!$3#JJ;e(AT3b%zN@3qk z=)|+Z^xOw896PcjXx6M{+q`wz0S%2oZRklk=_32qWvKMg@4ZMJRJXb2z7-zm=2p#Y z4~$((Ee8bzJW};G%DSF?h^wyiSS`bojw{w{owDcU-l=EkSUr=s&9n_KV2h~I*gD>V zSSGk=pD-_rw@~6ep>Osu`3Uu_!o^cx!6wIRAx>Tz?~B0)&9_LnzwtX z+R!rplB+g$cuhHfyWo=jgIB(=|J4#s?2i=-5dowJJhyrix{xkr%A!Q;jwHHN%XO_CF6Fc zx9G_QN8>YRf=Fq*RwxIzCJ@(u4~M_k+R^KJ!l#wMJV48W|9DrlGbv}PA3vQU79w(8 ztVS3VHeC?Hr6DIk2g0J(&mVFzq6c4ln4}|3zD=1iYuKKrBzMIPqyHTzRa)Hn82R5L zK|^1JlwIbwX9!xVUtFn8R~TzUC$zK&=J(<0m1WxszUeKbZ62xOx*~mS?Sj!tk49na zwXDNbdxJI?W9@&94~~wYANdI!Rfp?@LTuju6&0BMiwaVXxoiE{PUF*G39f5V3{Nh7 z#L$q_LAfI;JSkBDU3+e*C@sy+U|7=CpR(eq6zH$}^vM@&Ju%RsY3ifR0|K#i?o~QdndIw;g z*aw}O$NY9;%e;Pf9E-1GAU63=L_3_vnvBx>gbFDLo17Z*G_=3&-n-WgtsVi--tDD9 zvVj`wuVm|faUC|M`1VR#3nZ*a7M!~}5EO+we z??zGc+DD&Sa%b**0h_;E!&Tft@q>%i2j?~|g2MhF4{#Jse(2bE7UN*kFcIUK6|^fnI#$<+<8 z0DwJ(nwt8lE{HZ+RaNRyH(2NTv*sMCQL5>mZh@lz%ZlfH`vC6^SjAB9Lkrrqm6V1L zO0_1oTmU1$9Z6*?vKzMEJUiZc0T)3<$Gll*bF@B+=;R}@JbX9@(zFq9%%!EJ56Q_> z`J2))7kW_#2M45_rkJ3oY>jXyU%2TxJ97)9Mu1-@Xj3HR=e(UAm;|5`vKP1Z9FL-h zJ*3sYGODz|$EP^`ZU5N*lNPl)V$taA+6MvxNm4;qFZ-b*R<2oK=&Y7E`u|9f78OH{ zK0XrO(x7qXf`qX-dy{GvKzCSo<+ZX%I!^6Oq54d(X<4SUIyiIylfjrlL_XZr%zZP= 
ztT7}#Gi#{xAN-u?AAWvQKIHF;=rzCj8)#n$9gQeKqcQHWPAEVsRiEW2t>~eq4)@hD!py!(!iCy=O7VQ^Q6!)n&|W&0}7%J9y7?$uov2)5d7f zCo(3Eq^oDM;q5hXajqxeesoN9zlFb&AqdX`VLdM_`;s@qgz1(_e>mtKy4!;9h@g>| z1=b`P1w|i-5eTqP!exFJ+Fu~25rQoHr#G1&5O!IRN)9xh5S_!*gO!-HG|M1um<D-t_9J9&d?q@zPWBE@}aLVvLuDy$)U6b+<7YFf(mX?<5jvb4ztVM`S zHZ@2D8ZU)YEsyzduSBZV6@=-yS@M@Aqu-?e%^?=5N^`HdBp4G|UV>cr6rNn{hCvsYEckN@M~O{+|ufvO0;_*J)}U7x{>&#z}L>FaO^;w z<#BviQegYB-^LOtn+ZCSo=UxVY4L7eIH7}S9PJ}n^f68eWoGko^MD}n*qBlevkt$F zE&{(P6g$bSqzYfv$yN9FI&8yaq7KPfqbJM z+&s5B$giIn8Q)*gz&=Pl(}@lU(Ae3I`Jn>6jfnc!QqP|4;ynrv55Gx8pyk}O>@aV~ z2VRDP2FdKPtkS6_6|wYRj*rSc_*IviNpl$wL%my%kie3SO%C!4(f^66buB6Zy@RsR_j8nT2$1`=shVI4V`@5p=CzbGZ03W93OgHEK~Z~ zyOL$fm!P{;^oE}-FVcFIB(1))J*hRLK`q|RaVFXi>sxL($Wo}?mYXQBvM%u4#ZTe0 ze+M#{M46VelcJ1SY<#>CXtUim&Y;3j)6hU4oRL-$L67y0|G_4WL%1A2OYLe7oaT(CVHbcE#QsQiVY{oB6EarS-# zSr=q|P-p%tnR${kjC=3i3#Y@i>v@at{*{)+y@$t~8vljS6shJRh}ui}$d}MdMl{K` zuM4IWMc41H+SC> zCHc#jCOhhOXg-xqU5|JJR7<0)Nw3qAs`sP6D!J?J(1qiOrX2g>ezrso zcFWe0^W&%yxLJ?7dsuKJyU<~>{s-_hkCo4W(Q z&g%U1Sq^K78Y-SfkbL-z&1ckJLPS7N3{7=qM#AAQax{X1=jGk|9`(TSlis~UiH?a$ zWH$;lL=CP@(L}voGc2y={&kBCSvw*t4KJ7|h&Qg0!E*X+pUU(_@^ zE5l_eB3+m1pWMV)@f+sSicEdML>o{M-7k;E-FJUroX+fccYp!v7s%Z~eb%qrJE18f z4=Q*VF)O{GpkQ(Su?8LEaS$$Qnxvx8=%La?ZiS%WI13UeTI{Y%ie~T4zVN17Hn;_< z@oay%EQ}rr<)kjA5b;bvxZ0ts1IiBADVX%Uj#TDO(uCiDxf%;>1=cRxL)kALJ$!f* z;nT&#$0rqZVFu;>RhFbdiF(bFmlJ;l`q}3vPM!~D=G-%hQio;4B*>RlT{}>%H0jxK z>iszxql^vat5ff|pI7i20}3We!Sy|Y7CtriQPAr+0FP9{!sKtkI208hF9p4fdpuSM ztFm<*ArVn~o%;dGlxZ0$mB65fAihK!xT1)a4u5u@&k%X%Vsb$^HiO z$-heJQFiT4YxCb2+@kBa9h+B6o9f$_0BVt0x>sC*RwV~m;~&=_D&EJ&fa@BZ?k^)@ zk_&v~cl^T!-FT6U-DgN3D2anz2Z%lu4^JF;CXkE4EsF4-fMkTSQn>`MNNfsBH6V-t z1|oEi1f>PawM6Lyeop}lA%j(BRprM%FF5aiWgfSe#g|t$=F`-{P_0QlvGn|MbG`Bp z0+!DXWqAr2&}(?Lu#;dGm|mRbf};C~g++x{?d`wPufy*j=^5?I4v(TU+E)(Hw!`rN z)}bVJe|)K-aVtX|FAj{=U|B%4DSY%|5Sp)O7Rm<5|HV^V2=_5;Ux-Rbcn%)Hmz22^ zR_Hfr*}=iN>LhUJ;We=T3!GlynU4k;20RF_!KpH~fYTQl^9g*r04*jJq8*aTHCX_4 zMCsWVS6`V{G_ z!lbVjOmzDbdQDbx2DIkiWQprL*UtFv#||ie$6JOtlGrF5UV{n~8u$B9kjw%K#UN<7 z)n?-Qz5V@Wogb(mYav`z;9qrL2w^~&|2x4A#>-nd!5>2}cRNMUw1))X`|T60l>dzo z#l^$`xWjzvpG_Vik&$Y1sb|+H?QLz3_Wht82R-QC_1Rzg9hSLNJyXx$@1n>Hk>MwG zlK6;PTQ0B$eso$-OIIk(O1kdy@oi%$WH>F<2z^fiYB{}#h;CHV$ElK5DMD=W9tCqIh$v6B);NR52qHcy_|&o2^M8tHEl|uVN!nB2xLCV2g_kVe zvrze!#NiQAq;f;Y&$j;E{_E}ScS$c4`<*1q)j{u-r|`M3Z#Y@F1KzaQ*jPlnu(+7b z!O<~M)JFt5z(6fN#Kho_?5F(G8EXFDbcU>bjxPDxB*pdL2WPZe(Ui>J5Xd9NDgW(& z7X3#^=CXFM%Ehw_e_L+pvNj=pV<&o1Cp$eG@=9P(P?>QTUcM?5V=UvtJ8vW<(ZPnD z0k){s)zu7d<+os@FC4zUFdTQ{yt`*T?!X1-#MKW!BX4CN%} z_ZP#??2liK5;Tq;rHP7meybFXE>K&0?;wE3(9Smilk_RW)VnL|ESMJ&yOFy1uHBhE`mgyorBYd%*>eX z)pEem0IH0PF>(@BgSQgID@^ofo!vH8R-g0p^GEqFMPnzdL=FJ71@-jwWNd*BicKN( zTv9TSdR!@Q6t=T4#`$O0)uoh|mw))Ua(vrDL3yBi{H?rONI|~1_;Kd=Ti?~g)Xu)x zDCBs(?vhGX{fli*dM=`82J8e^luY>a94Dccq%qtiR5$(?`f(XXtZXyb83@i3tmy?@U!AGr?<^o6l;yvMoMvr7SH7w3NkpNy1 zlW|d{E``ST!H($BWW;BBPv~}PhnCa3oYiVB3ctR20ny-%m#Qhz*sA-4m9YTpM4W~` zHt=C2xy9DV2x-Y`W=I4ZUhcTl!_G9`BaZ|R;rp|_g@3OlJWoYR$s(dFHXgda5&07~ zvM1ZHuXvVC%XE1RV6L-?F+kSM|5nyuFi&%EWg&XSlvs-0-4%zIeN+5iEzk)gM)E2ajq2jWNr)B-*UFa zuOhqXwiCR!%4Bs&$dgZWCm`-7o?)}#sN4t7ZteY`@a)X7cY6muc4^COO6x9741%5K z`J(Avs`(>+FsMgTw(tBYQS!~LXooiF@i@l>?j5P3u6LSyNI2D1B)Si(H$QH;iM{h@ zS85~p;Z@qL@KQpiBW755KLttkRSMDV_;Go{M4LROUur4%Nhqj`a|H$myErzWe>RDB zNpDQnA6H9e^Q6}*QS7He%^Pt(q#sCrDbe;W4>`W=X~L9m*UGyynXg5yW}(!wd8qZs zTns^075G+V?RqyBr)=8jFAd4qNhjc<&LK89`M?VS)gY|?ib_hSM@2p}>rdt(`p1g& z6qJ<-DyJQ~U+WET9F=+;S-=(v*cM?5J&{o6dZr37+o$QPde}KN-_WzBB&+%*)Sr2k z#d|b!xixd0tXmYTs$_Eb<9`{&GdLUUm)0uy_1C3%BTJdbL|1Zu^^OUXs-kkG!fPF9 
z{(^6heIFk2aL7C(A<^(%jy`?vW3ch1FkLmTR>i*Xi00H2n8AV&3@28cENm z_a{GJDU;ws1^O&Y=&O?5QlN;MvFFa_mg4`0EZAubS4B@=T#g+pTViB1!M=97_h%28 z5AKR^$^C7e=6T=6N3S|QjM{w+`(7srhFdI*w5qVso$t;^TCv{rH5F}|YHA2GHf%va zo&$l-6b$#hx+^ZV?qGIx1LQkso}}guisI7BN=#zngw=5ufYIuOP$raaz5j%xNLiBM zhJfWhd0Q$ntc@^6FBlbbi$8Q05 zDB-VTHz5}J<>@+iPVh*vgM;HUq@EY7^&i6?*W5%PS%Ah>-~b2Tk#GLR zUwlh`am{fa=&70kj|VEhA!8;`(*(;t$LtAo)LhT_MB&%5mj2xeW0q{QV(g*=W^1$F}&#^Teq77ZgUad-o9bpO0Xz1jF#1G*heh&C6p8Hs((j z%RlXD45DU!|7a15E&IwmoW8t3*NuQoWcx zT%~0OF43gVW(PK69>G6$dZv z-*dNF;);gevZEIb`DKl=x%wD=4#N8439r=>>TdrGp*3Pb?1%}S25#JHIGT9;{HlISTo4f zW53moT3FlrMBdDYJtHQF?PS_j#chlA9!IdZ1MNE&I;u_Oy#m!2u=f~z(7k<$V9G{< zZKkk)mVThaqH+qD1|5*DSQ*65lhvt8R&YCGlCWiH@Qe1jD1(1WHP3pl;?uyuz!Pw$ zf;^HvXb!Cv%>o#}zin2Sb;JQJzEiJrTg=Mz_p4Wj%N4So;^T(56HsuyXQHP^KUlRo zbKl54@kG5pyG2>5a^<=&f4EX)&|*(-#`ZV#mF)Aa(X|=`9VpY9u;OVG84nJ=3IFB> z-359)AZ&mY^jsJ=Q+w>riOqU!u)vnIQ&l}q*v1QtrQQD)K9WzwNO?III`;ie==g(I z&KD;eoP;yURj^MLNE@m7r?}YIs1F|o{_qbacHAXL9c1UT;;G}g;}Lnsuj7`p{8!YX zzF-0RKGBBn(wg+ruNjmiY|crh&SGVY2|0hL$<9Z~F>AZvmNlPo8jRev>EC|l!0LDZ z9uxQQmalVP3O|BHiC*nR`g)D)xROK4hi!d!9ptWT`b|jSplAaE3+`gxj!4RtM6)z1 zX6E&2geMypdvQ7V3KhKgFp9+d}?SHZTijh zg7>(%cX2_L_|$GeGh`yhB(yq>{gwGg9_J9r*wZ%y=-fz1WKxLl1+c5r=fEYF4Q#2k zFZ2#vI+MBDo>&$APP*T)*8j8kO1+P?ot5GJgV#@dNR8<-Fm8<1)zzs+F2gXcd|-=_ z1MKaE?1gafY=Wgizb%vyT*r6G$!C}kyk%u&%N!UK(t3dKuqXGn0+||PczDDd<$)D- zMlwBX-cv!OY!f6ZT{-3ZLp5Ncpn1!IS7B(T{O9KU%bxGe8HJYJNpW0sxlD4Q2c=$E#tmfq8L>^jSdCTM1>A6ZBJ!4#4 zc&cwuJ~`Z-&u>#vkqmhp=WmW5bw+vV@!90*Bp&7*PU(~R2;1O^12pys3!e`KjDdee z(DdUU#eRNHE}P~{FN?-s>)jw}Zh8HDRVwSp+n>uBMaA)S<1pXR9gUK*p2s-e0G8=GL$ZM=)h7%VwX_N!CU zZE-yhBhmIr0Hy%i?@|r^OIPZFin{{@`okOs`d=`=n~@+~c^!xJt1%0DmrXWQeTxs0 zBpI8j`Q|zFoiB?&IsgC+Nq#=PDsERwnS$ket((t_&&`q6Dcy4NZMw@RY>Qb)RbeRP zQdxaQn_6DvCaO;px7yokYQDKICe2Hv9Gz*%h&ve*Z#z3^9!nnNySKVhkX1>A7pah8 z?UaezJN~Wc6=_LkOL$Un4D$Hb{ZIQIFRSnC=bF~tYEQ?AG&w@|DBE%qZYZ|*)R}CG zqrTI@B2Q*%x#|}lCVp$tGy9O3C>m!cum$Ig)yd*Rxb)`aId#Pgu4iu2{v4T=h7{TNM);8A1u4aNnELs{zELT|`@Gx32VB z*zM=pnnnM3n7tMfd{MB;ENvL(%$HmsS$IIc!1G(zr~CNhgAK9|VkZOOGK$I4Hn#je zh4@Yka$LduBJZj2Yq#oyeU#VU550dpAChFWA!T%61aF$yV|H4%s8S54(&5O1^tYhx3Wm; z_4%Iq{&3h^M^J7(m*!)N0Lsn<=G)_s_ovt94ceUf@jFX3UQ_T#webH$eOB0K?-Fi*myVOD;(}F*;cY|9~IW@!O>N7L8&ZpKT6ET}$wCaq>cBQqN z(D5WfZ?!ic?S&ZN;l6X3eYL#Sm#!N?GQsWR(F4Jw`t>uO!e>8bA91R$Hto!kr-tY?_1{KIlO-T8sO41l)MS6 zc0dWLuHY`Uu=qBg9**m)GR#FF!G2f~wO$c06NMozGf@B{In8)Du25RTwMSGFW9wbJ<5P{d*tTRxHoVoLFP zKgDF9H>jGZLcvB6mXG$D|5U|avqO1rg0p!zNGpGkchV!}YvJR4Zr(>RGPiUNxPrc6 zE4pqgb6+1hCQ0cc|JHsg6Nsw#{`H>MD`|Z~!(fNc>IUMEHQDa?7&%r2Omqz2e#H@J zAYJn5J8w+Zsa^FHiSu&#uAR32MucnPZevkk(A@X?;Y<9h6oNs2ie{$G)O=UvU@yh9 z)V5HYe#}ZmR&g1!xVpiwy*c*gw;yer3KX0g=j48a zyaNA88SHvZl*KP>7aIr5B?ZXO^$8ccI}O_uU%o5Kj#6%ub67E}@}qg)kF6lc`DXow zse%{D+wrSiqOKrO60aH?CjXNw0*4ho)kK<9UAxs=6q*WxaS{ex#KbVpgnOMV!I%1=_;i& zAZHIfth=y7&Nll}Q!UGn?UHWikl!2DWo&3=iYF0BSyEYrG4|v^6YRN#)_XQ=C5KIp z_{qIvmbK9I-|G;)!VPO{M(m4&@6vdDmhkP{KI~&dYz%;Jwtz3(38BJaxWi7sC%Lp> z;rH=v7?&->j||b2<-Ev-$$PMselGp`*3BG=rI~mV3OmKGE>XlzPdgIV7Vcfmu9RFD z6}GyxaXsU3scTM7_$b@wwl5NKgtUPmt*(7I5~);ym_Pef7Eg2(NOp8AD<}Mx)G%)YoUZB z(U4^Dea`PZ=RD8z z@5~%$%=i2Ge6H_xyHwV zTX2T&ZHy3h=TZr;s_FChR~-9t6_zFIPK3+~9c=LE^@O=L%$Wx+# zvy~*1$-$HS6olA;suh$M_wL3yUBH_c*=acp&-r9iX9NqSno}BE&nW39izHTCe&}k! 
zmD|rPiJjs^8HI((9~4=WW z=!v=0`>#Hr9a(TSWG+4b?GC!l!NYo;tMElq&3KxVn@W)Q^??V|J^dLQYxVjs^W<)4 z@5$9qGh7XAI{#eZ%mmY5{e$0jqRsVrp-rTWdxVe!kA_o~kIp;{y3sHLprkjT(LN1s zfITXDj-$XI1&Mn@$e}v@WsrV82Ci=k-n}ZuZ;U1L378OIf_)L#CTLtvf==T!3VcRL zzzL4v;D*Eb*%F3d2!w1Zy7AV}>toLKm@Ll5Tix-t_{Ljjuh(&5vf?McbA0a-JHpO) zy)MT|_U)D@J|<`UtVN*JtCufa+Md01^(D);EL=-c?6#B7U-zx~3~si${hAZN1s z5G&2g^TpZQuUH)n$%+i#V+ZH&=^B$v8E>IOp0p?Poug+stXGS$V3)4vS3Dwkc3K)$ z1QoWSAM)g|%g$I0F@6`Iru@JwRr3 z_sP}3yf<%<$sh`t!rq}F9ayvu{JCz`m z1AwaAYTq=rwmx23Ksfy{`GqZAOc$ChGs#!U&(5*Dkl@q*jK-ckcGBp4 z6py@GBGIk~nQFH9w+k9DrzZN>|8CG{ryFqKRNc> z-|l6vNy2sqi209f^> zx4e9Le^gX0_s|K=TXfLbAeb9GO48k55>Cfm`h5f%Qjf3rW;}V)4Cyxisf5dcZlO&F z6i`5%w7^aw8d3n9IAo?E*A0-Z70^Aq4HalE7J0t93Y}{SVVP?+m?n|`QA)BlS0BvMJIMoS*aHdfR0{S9q2E(fiPm+_p zK-lqyT`t!k1N}obbmgU>R_O?IWsX8TO{@PV^Iv&aoaSZ&TH-C4)0&z@TU{L!V=R+) zelez!gml#fr^95FbItzbW0wh=*q5sPbS3n$>$s?;NN z>+4>IJ+T6p+5rFV$lO^G2T<0~bHR#+^yuWi9yb;-jYgG>ip;YS9ac--Bw?bDBsH9l zYpY?Aav8WU{9Jf#a>l_KwtA8CXyl$Lh%X{wUxy+$E{ zxZEyv?xJyAHRzx2cYN#~8v|OcbDt_j`AE1Ls-EJ}YIc`-Mg+?2`%_`vz8S!8tE+wq z$qH5xBoO!#)tEj6q%GpAg<~EVo4l^B#)EjIrN)(ch63Sw=fXkmz%aFJJ|$d%EpOT& zwZ*;MzVzcxzr;hkSzb*OlLAwhe$H=ZUX@l5ise6OCUuxiEa~&RE^R{|XxAUuc>R)! zb}#?>t_pue3^qyI?_2b{AX0#&o2-ly4=We*UiSQ;(8cR41v9eF{cqH3@8+S=XgEma zU)`R>RU;1;q)SKG6Vub3hoZ}-r?HUhXFqyxba*(1NWC9&0T>`)aErUz@A2uS=k)aU z+L@8Rj*P%obpI)(uTR6kAXd0hhLgFQOm;IPZ^5d?Fmu^M@%I0}{%4~-$Vd8H@Aq#8K@ETb#}s&7_CiPap;5;sFKg5g@F@@tGL)MZxD@zAVa#8&7>tJP zp!{nEe-Ts&LF{;Rz**eV=w}mW944mw2GcN@ADIUvkl18x;)x`J^{4CnOidl04a(+Cq6`0xl7;Q zJD}+7>Ux5c`Dg8q5Se8Q=v}&(E5(-1Q-JDe2RI6r7}Q;t_YDBVoC;%O#GeoriGT5{ zh1W)N&=(o1Kvo_IB0D(yps{;$J79gK?42QnMbm`%4)bSjF5KaG?Pukz_ zRT$@9&72lj-CzOBMO;4}oI{B9CxP1XMwvSWk9;W?7tp<|)YLx({!<6MczzqJ@qJDg+hDCKF;<6escM{BENAL?5to26x7+KdD1!*F5foFFDQVCD?78}cJZ)l z&E|Z*D+t*J1_o|!DPl24SbiO&0bPoE(iYytaQME1k-c01_a?3L9lXt{Dq^58{)|@^#U8BBP4{6DSC#1 z$4`48kI)9H%C_wdTAGrd`!ScJWniL#ppB@m>ixEWp^Cpm*?6S^P>}T)Qx+PX2`M9` z0N@PPkUQ?~u?u}YJ&ho?(g3%X9^7nJ^VT*tUGNLGfqRSMlF@->9GA4&UAys{I{ordf4S|v)5M($t`J^u(3S0%K$)mvy!oGz45$Yj; ztZ%~R|5kOSTO%b8$C#g&*K>ntm6uN#`Eem8SO9Q@I(IUf4(n*vn@-vJtw4v~Z;!9_ zAg|2L0)X7rC;(;MF0l3BQ2rUViwbF`DXIuG}7pRe`R3M%&9s}{b? z&_0f{kcKwloDajTQj1}b@WF|{mm%EW*yCDfhzx|z;WycXG@j#AQa*tf?ob>-tE9BF z$f-{ZmWH6Ow^UD;hKb3ctZU3Dka{t4C^t|G>;F)LH6suwUc5L%B}%a36+93FT|bFr z{X38m!swaM+ISu~e+pQu|4X2Op(O|rt$q3a(AgR^H`?^dObNK+@o>cv+Zr^7AD{ZFqsrE zY@t9Z0x=Fd4hImHBK3v&_ik4|3Q*W3fAFdXVqaN~ZQ z^SKV55nWxDjEsx~H)7>99|W>s_hR!S``*|-9V7somMSvogLnc6VOFZPTrS`@e#M2h z!>#8(o#x1nXLzkj9W4B`hyy}cXQ)_$GMY^ z9V$`OW{tu?#!CZxBbpcbC2)Lb6q{Pk3TU%N#;Cc0TDq=3o1R;l6J zU`PT=GID)6+FX#TCRNc@_=uGGGsbmw|BjC23I|Ep)GA|$X2K$D=h8Y>^XC*X@^6}} z>oeXT7Rx?f;poe0Yh`+&`*KLdKsvj|@hlh4= zwS}RC>xQH&KN~OC1YM(NM37Z_+@3k|uc#a}hR^dCOATmN2AquKoZ%}DmrtOt*hgh9 z-9S|=vQo85oufE4ZnQH{XDs7+6D7y5pY+o6ZXtc%p12c_d0>8zz-fc&wUcl4@X&u+ zy@LtX)g5WrtHWHqkh?z@OWjE@dx^#D6c* d{_{%HRW-`T8I8ZC^nq&{rK@G2NyOZ`_b)8SZ~6cL literal 0 HcmV?d00001 diff --git a/examples/advanced/job_api/pt/requirements.txt b/examples/advanced/job_api/pt/requirements.txt new file mode 100644 index 0000000000..5db2fd4d24 --- /dev/null +++ b/examples/advanced/job_api/pt/requirements.txt @@ -0,0 +1,5 @@ +nvflare~=2.5.0rc +torch +torchvision +pytorch_lightning +tensorboard \ No newline at end of file diff --git a/examples/advanced/job_api/pt/src/cifar10_fl.py b/examples/advanced/job_api/pt/src/cifar10_fl.py new file mode 100644 index 0000000000..cb5b297af5 --- /dev/null +++ b/examples/advanced/job_api/pt/src/cifar10_fl.py @@ -0,0 +1,137 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +import torch.optim as optim +import torchvision +import torchvision.transforms as transforms +from net import Net + +# (1) import nvflare client API +import nvflare.client as flare + +# (optional) metrics +from nvflare.client.tracking import SummaryWriter + +# (optional) set a fix place so we don't need to download everytime +DATASET_PATH = "/tmp/nvflare/data" +# If available, we use GPU to speed things up. +DEVICE = "cuda" if torch.cuda.is_available() else "cpu" + + +def main(): + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + + batch_size = 4 + epochs = 2 + + trainset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=True, download=True, transform=transform) + trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2) + + testset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=False, download=True, transform=transform) + testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2) + + net = Net() + + # (2) initializes NVFlare client API + flare.init() + + summary_writer = SummaryWriter() + while flare.is_running(): + # (3) receives FLModel from NVFlare + input_model = flare.receive() + print(f"current_round={input_model.current_round}") + + # (4) loads model from NVFlare + net.load_state_dict(input_model.params) + + criterion = nn.CrossEntropyLoss() + optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + + # (optional) use GPU to speed things up + net.to(DEVICE) + # (optional) calculate total steps + steps = epochs * len(trainloader) + for epoch in range(epochs): # loop over the dataset multiple times + + running_loss = 0.0 + for i, data in enumerate(trainloader, 0): + # get the inputs; data is a list of [inputs, labels] + # (optional) use GPU to speed things up + inputs, labels = data[0].to(DEVICE), data[1].to(DEVICE) + + # zero the parameter gradients + optimizer.zero_grad() + + # forward + backward + optimize + outputs = net(inputs) + loss = criterion(outputs, labels) + loss.backward() + optimizer.step() + + # print statistics + running_loss += loss.item() + if i % 2000 == 1999: # print every 2000 mini-batches + print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}") + global_step = input_model.current_round * steps + epoch * len(trainloader) + i + + summary_writer.add_scalar(tag="loss_for_each_batch", scalar=running_loss, global_step=global_step) + running_loss = 0.0 + + print("Finished Training") + + PATH = "./cifar_net.pth" + torch.save(net.state_dict(), PATH) + + # (5) wraps evaluation logic into a method to re-use for + # evaluation on both trained and received model + def evaluate(input_weights): + net = Net() + net.load_state_dict(input_weights) + # (optional) use GPU to speed things up + net.to(DEVICE) + + correct = 0 + total = 0 + # since we're not training, we don't need to calculate the gradients for 
our outputs + with torch.no_grad(): + for data in testloader: + # (optional) use GPU to speed things up + images, labels = data[0].to(DEVICE), data[1].to(DEVICE) + # calculate outputs by running images through the network + outputs = net(images) + # the class with the highest energy is what we choose as prediction + _, predicted = torch.max(outputs.data, 1) + total += labels.size(0) + correct += (predicted == labels).sum().item() + + print(f"Accuracy of the network on the 10000 test images: {100 * correct // total} %") + return 100 * correct // total + + # (6) evaluate on received model for model selection + accuracy = evaluate(input_model.params) + summary_writer.add_scalar(tag="global_model_accuracy", scalar=accuracy, global_step=input_model.current_round) + # (7) construct trained FL model + output_model = flare.FLModel( + params=net.cpu().state_dict(), + metrics={"accuracy": accuracy}, + meta={"NUM_STEPS_CURRENT_ROUND": steps}, + ) + # (8) send model back to NVFlare + flare.send(output_model) + + +if __name__ == "__main__": + main() diff --git a/examples/advanced/job_api/pt/src/cifar10_lightning_fl.py b/examples/advanced/job_api/pt/src/cifar10_lightning_fl.py new file mode 100644 index 0000000000..996a49be17 --- /dev/null +++ b/examples/advanced/job_api/pt/src/cifar10_lightning_fl.py @@ -0,0 +1,108 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch +import torchvision +import torchvision.transforms as transforms +from lit_net import LitNet +from pytorch_lightning import LightningDataModule, Trainer, seed_everything +from torch.utils.data import DataLoader, random_split + +# (1) import nvflare lightning client API +import nvflare.client.lightning as flare + +seed_everything(7) + + +DATASET_PATH = "/tmp/nvflare/data" +BATCH_SIZE = 4 + +transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + + +class CIFAR10DataModule(LightningDataModule): + def __init__(self, data_dir: str = DATASET_PATH, batch_size: int = BATCH_SIZE): + super().__init__() + self.data_dir = data_dir + self.batch_size = batch_size + + def prepare_data(self): + torchvision.datasets.CIFAR10(root=self.data_dir, train=True, download=True, transform=transform) + torchvision.datasets.CIFAR10(root=self.data_dir, train=False, download=True, transform=transform) + + def setup(self, stage: str): + # Assign train/val datasets for use in dataloaders + if stage == "fit" or stage == "validate": + cifar_full = torchvision.datasets.CIFAR10( + root=self.data_dir, train=True, download=False, transform=transform + ) + self.cifar_train, self.cifar_val = random_split(cifar_full, [0.8, 0.2]) + + # Assign test dataset for use in dataloader(s) + if stage == "test" or stage == "predict": + self.cifar_test = torchvision.datasets.CIFAR10( + root=self.data_dir, train=False, download=False, transform=transform + ) + + def train_dataloader(self): + return DataLoader(self.cifar_train, batch_size=self.batch_size) + + def val_dataloader(self): + return DataLoader(self.cifar_val, batch_size=self.batch_size) + + def test_dataloader(self): + return DataLoader(self.cifar_test, batch_size=self.batch_size) + + def predict_dataloader(self): + return DataLoader(self.cifar_test, batch_size=self.batch_size) + + +def main(): + model = LitNet() + cifar10_dm = CIFAR10DataModule() + if torch.cuda.is_available(): + trainer = Trainer(max_epochs=1, accelerator="gpu", devices=1 if torch.cuda.is_available() else None) + else: + trainer = Trainer(max_epochs=1, devices=None) + + # (2) patch the lightning trainer + flare.patch(trainer) + + while flare.is_running(): + # (3) receives FLModel from NVFlare + # Note that we don't need to pass this input_model to trainer + # because after flare.patch the trainer.fit/validate will get the + # global model internally + input_model = flare.receive() + print(f"\n[Current Round={input_model.current_round}, Site = {flare.get_site_name()}]\n") + + # (4) evaluate the current global model to allow server-side model selection + print("--- validate global model ---") + trainer.validate(model, datamodule=cifar10_dm) + + # perform local training starting with the received global model + print("--- train new model ---") + trainer.fit(model, datamodule=cifar10_dm) + + # test local model + print("--- test new model ---") + trainer.test(ckpt_path="best", datamodule=cifar10_dm) + + # get predictions + print("--- prediction with new best model ---") + trainer.predict(ckpt_path="best", datamodule=cifar10_dm) + + +if __name__ == "__main__": + main() diff --git a/examples/advanced/job_api/pt/src/lit_net.py b/examples/advanced/job_api/pt/src/lit_net.py new file mode 100644 index 0000000000..d70b85ca5a --- /dev/null +++ b/examples/advanced/job_api/pt/src/lit_net.py @@ -0,0 +1,93 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from pytorch_lightning import LightningModule +from torchmetrics import Accuracy + +NUM_CLASSES = 10 +criterion = nn.CrossEntropyLoss() + + +class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = torch.flatten(x, 1) # flatten all dimensions except batch + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + x = self.fc3(x) + return x + + +class LitNet(LightningModule): + def __init__(self): + super().__init__() + self.save_hyperparameters() + self.model = Net() + self.train_acc = Accuracy(task="multiclass", num_classes=NUM_CLASSES) + self.valid_acc = Accuracy(task="multiclass", num_classes=NUM_CLASSES) + # (optional) pass additional information via self.__fl_meta__ + self.__fl_meta__ = {} + + def forward(self, x): + out = self.model(x) + return out + + def training_step(self, batch, batch_idx): + x, labels = batch + outputs = self(x) + loss = criterion(outputs, labels) + self.train_acc(outputs, labels) + self.log("train_loss", loss) + self.log("train_acc", self.train_acc, on_step=True, on_epoch=False) + return loss + + def evaluate(self, batch, stage=None): + x, labels = batch + outputs = self(x) + loss = criterion(outputs, labels) + self.valid_acc(outputs, labels) + + if stage: + self.log(f"{stage}_loss", loss) + self.log(f"{stage}_acc", self.valid_acc, on_step=True, on_epoch=True) + return outputs + + def validation_step(self, batch, batch_idx): + self.evaluate(batch, "val") + + def test_step(self, batch, batch_idx): + self.evaluate(batch, "test") + + def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any: + return self.evaluate(batch) + + def configure_optimizers(self): + optimizer = optim.SGD(self.parameters(), lr=0.001, momentum=0.9) + return {"optimizer": optimizer} diff --git a/examples/advanced/job_config/hello-pt/simple_network.py b/examples/advanced/job_api/pt/src/net.py similarity index 94% rename from examples/advanced/job_config/hello-pt/simple_network.py rename to examples/advanced/job_api/pt/src/net.py index 0f2d2bbe08..47ac7e9589 100644 --- a/examples/advanced/job_config/hello-pt/simple_network.py +++ b/examples/advanced/job_api/pt/src/net.py @@ -17,10 +17,9 @@ import torch.nn.functional as F -class SimpleNetwork(nn.Module): +class Net(nn.Module): def __init__(self): - super(SimpleNetwork, self).__init__() - + super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) diff --git a/examples/advanced/job_api/pt/src/train_eval_submit.py b/examples/advanced/job_api/pt/src/train_eval_submit.py new file mode 
100644 index 0000000000..0273652746 --- /dev/null +++ b/examples/advanced/job_api/pt/src/train_eval_submit.py @@ -0,0 +1,188 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +import torch +import torch.nn as nn +import torch.optim as optim +import torchvision +import torchvision.transforms as transforms +from net import Net + +# (1) import nvflare client API +import nvflare.client as flare +from nvflare.app_common.app_constant import ModelName + +# (optional) set a fix place so we don't need to download everytime +CIFAR10_ROOT = "/tmp/nvflare/data/cifar10" +# (optional) We change to use GPU to speed things up. +# if you want to use CPU, change DEVICE="cpu" +DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu" + + +def define_parser(): + parser = argparse.ArgumentParser() + parser.add_argument("--dataset_path", type=str, default=CIFAR10_ROOT, nargs="?") + parser.add_argument("--batch_size", type=int, default=4, nargs="?") + parser.add_argument("--num_workers", type=int, default=1, nargs="?") + parser.add_argument("--local_epochs", type=int, default=2, nargs="?") + parser.add_argument("--model_path", type=str, default=f"{CIFAR10_ROOT}/cifar_net.pth", nargs="?") + return parser.parse_args() + + +def main(): + # define local parameters + args = define_parser() + + dataset_path = args.dataset_path + batch_size = args.batch_size + num_workers = args.num_workers + local_epochs = args.local_epochs + model_path = args.model_path + + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + trainset = torchvision.datasets.CIFAR10(root=dataset_path, train=True, download=True, transform=transform) + trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers) + testset = torchvision.datasets.CIFAR10(root=dataset_path, train=False, download=True, transform=transform) + testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers) + + net = Net() + best_accuracy = 0.0 + + # wraps evaluation logic into a method to re-use for + # evaluation on both trained and received model + def evaluate(input_weights): + net = Net() + net.load_state_dict(input_weights) + # (optional) use GPU to speed things up + net.to(DEVICE) + + correct = 0 + total = 0 + # since we're not training, we don't need to calculate the gradients for our outputs + with torch.no_grad(): + for data in testloader: + # (optional) use GPU to speed things up + images, labels = data[0].to(DEVICE), data[1].to(DEVICE) + # calculate outputs by running images through the network + outputs = net(images) + # the class with the highest energy is what we choose as prediction + _, predicted = torch.max(outputs.data, 1) + total += labels.size(0) + correct += (predicted == labels).sum().item() + + return 100 * correct // total + + # (2) initialize NVFlare client API + flare.init() + + # (3) run continously when 
launch_once=true + while flare.is_running(): + + # (4) receive FLModel from NVFlare + input_model = flare.receive() + client_id = flare.get_site_name() + + # Based on different "task" we will do different things + # for "train" task (flare.is_train()) we use the received model to do training and/or evaluation + # and send back updated model and/or evaluation metrics, if the "train_with_evaluation" is specified as True + # in the config_fed_client we will need to do evaluation and include the evaluation metrics + # for "evaluate" task (flare.is_evaluate()) we use the received model to do evaluation + # and send back the evaluation metrics + # for "submit_model" task (flare.is_submit_model()) we just need to send back the local model + # (5) performing train task on received model + if flare.is_train(): + print(f"({client_id}) current_round={input_model.current_round}, total_rounds={input_model.total_rounds}") + + # (5.1) loads model from NVFlare + net.load_state_dict(input_model.params) + + criterion = nn.CrossEntropyLoss() + optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + + # (optional) use GPU to speed things up + net.to(DEVICE) + # (optional) calculate total steps + steps = local_epochs * len(trainloader) + for epoch in range(local_epochs): # loop over the dataset multiple times + + running_loss = 0.0 + for i, data in enumerate(trainloader, 0): + # get the inputs; data is a list of [inputs, labels] + # (optional) use GPU to speed things up + inputs, labels = data[0].to(DEVICE), data[1].to(DEVICE) + + # zero the parameter gradients + optimizer.zero_grad() + + # forward + backward + optimize + outputs = net(inputs) + loss = criterion(outputs, labels) + loss.backward() + optimizer.step() + + # print statistics + running_loss += loss.item() + if i % 2000 == 1999: # print every 2000 mini-batches + print(f"({client_id}) [{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}") + running_loss = 0.0 + + print(f"({client_id}) Finished Training") + + # (5.2) evaluation on local trained model to save best model + local_accuracy = evaluate(net.state_dict()) + print(f"({client_id}) Evaluating local trained model. Accuracy on the 10000 test images: {local_accuracy}") + if local_accuracy > best_accuracy: + best_accuracy = local_accuracy + torch.save(net.state_dict(), model_path) + + # (5.3) evaluate on received model for model selection + accuracy = evaluate(input_model.params) + print( + f"({client_id}) Evaluating received model for model selection. 
Accuracy on the 10000 test images: {accuracy}" + ) + + # (5.4) construct trained FL model + output_model = flare.FLModel( + params=net.cpu().state_dict(), + metrics={"accuracy": accuracy}, + meta={"NUM_STEPS_CURRENT_ROUND": steps}, + ) + + # (5.5) send model back to NVFlare + flare.send(output_model) + + # (6) performing evaluate task on received model + elif flare.is_evaluate(): + accuracy = evaluate(input_model.params) + flare.send(flare.FLModel(metrics={"accuracy": accuracy})) + + # (7) performing submit_model task to obtain best local model + elif flare.is_submit_model(): + model_name = input_model.meta["submit_model_name"] + if model_name == ModelName.BEST_MODEL: + try: + weights = torch.load(model_path) + net = Net() + net.load_state_dict(weights) + flare.send(flare.FLModel(params=net.cpu().state_dict())) + except Exception as e: + raise ValueError("Unable to load best model") from e + else: + raise ValueError(f"Unknown model_type: {model_name}") + + +if __name__ == "__main__": + main() diff --git a/examples/getting_started/pt/swarm_script_executor_cifar10.py b/examples/advanced/job_api/pt/swarm_script_executor_cifar10.py similarity index 100% rename from examples/getting_started/pt/swarm_script_executor_cifar10.py rename to examples/advanced/job_api/pt/swarm_script_executor_cifar10.py diff --git a/examples/advanced/job_api/sklearn/README.md b/examples/advanced/job_api/sklearn/README.md new file mode 100644 index 0000000000..e08d217b7c --- /dev/null +++ b/examples/advanced/job_api/sklearn/README.md @@ -0,0 +1,25 @@ +# Getting Started with NVFlare (scikit-learn) +[![Scikit-Learn Logo](https://upload.wikimedia.org/wikipedia/commons/0/05/Scikit_learn_logo_small.svg)](https://scikit-learn.org/) + +We provide examples to quickly get you started using NVFlare's Job API. +All examples in this folder are based on using [scikit-learn](https://scikit-learn.org/), a popular library for general machine learning with Python. + +## Setup environment +First, install nvflare and dependencies: +```commandline +pip install -r requirements.txt +``` + +## Examples +You can also run any of the below scripts directly using +```commandline +python "script_name.py" +``` +### 1. [Federated K-Means Clustering](./kmeans_script_executor_higgs.py) +Implementation of [K-Means](https://arxiv.org/abs/1602.05629). For more details see this [example](../../../advanced/sklearn-kmeans/README.md) +```commandline +python kmeans_script_executor_higgs.py +``` + +> [!NOTE] +> More examples can be found at https://nvidia.github.io/NVFlare. diff --git a/examples/advanced/job_api/sklearn/kmeans_script_executor_higgs.py b/examples/advanced/job_api/sklearn/kmeans_script_executor_higgs.py new file mode 100644 index 0000000000..af20adc3bb --- /dev/null +++ b/examples/advanced/job_api/sklearn/kmeans_script_executor_higgs.py @@ -0,0 +1,147 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import csv +import os + +from src.kmeans_assembler import KMeansAssembler +from src.split_csv import distribute_header_file, split_csv + +from nvflare import FedJob, ScriptExecutor +from nvflare.app_common.aggregators.collect_and_assemble_aggregator import CollectAndAssembleAggregator +from nvflare.app_common.shareablegenerators.full_model_shareable_generator import FullModelShareableGenerator +from nvflare.app_common.workflows.scatter_and_gather import ScatterAndGather +from nvflare.app_opt.sklearn.joblib_model_param_persistor import JoblibModelParamPersistor +from nvflare.client.config import ExchangeFormat + +preprocess = True # if False, assume data is already preprocessed and split + + +def split_higgs(input_data_path, input_header_path, output_dir, site_num, sample_rate, site_name_prefix="site-"): + input_file = input_data_path + output_directory = output_dir + num_parts = site_num + site_name_prefix = site_name_prefix + sample_rate = sample_rate + split_csv(input_file, output_directory, num_parts, site_name_prefix, sample_rate) + distribute_header_file(input_header_path, output_directory, num_parts, site_name_prefix) + + +if __name__ == "__main__": + n_clients = 3 + num_rounds = 2 + train_script = "src/kmeans_fl.py" + data_input_dir = "/tmp/nvflare/higgs/data" + data_output_dir = "/tmp/nvflare/higgs/split_data" + + # Download data + os.makedirs(data_input_dir, exist_ok=True) + higgs_zip_file = os.path.join(data_input_dir, "higgs.zip") + if not os.path.exists(higgs_zip_file): + os.system( + f"curl -o {higgs_zip_file} https://archive.ics.uci.edu/static/public/280/higgs.zip" + ) # This might take a while. The file is 2.8 GB. + os.system(f"unzip -d {data_input_dir} {higgs_zip_file}") + os.system( + f"gunzip -c {os.path.join(data_input_dir, 'HIGGS.csv.gz')} > {os.path.join(data_input_dir, 'higgs.csv')}" + ) + + if preprocess: # if False, assume data is already preprocessed and split + # Generate the csv header file + # Your list of data + features = [ + "label", + "lepton_pt", + "lepton_eta", + "lepton_phi", + "missing_energy_magnitude", + "missing_energy_phi", + "jet_1_pt", + "jet_1_eta", + "jet_1_phi", + "jet_1_b_tag", + "jet_2_pt", + "jet_2_eta", + "jet_2_phi", + "jet_2_b_tag", + "jet_3_pt", + "jet_3_eta", + "jet_3_phi", + "jet_3_b_tag", + "jet_4_pt", + "jet_4_eta", + "jet_4_phi", + "jet_4_b_tag", + "m_jj", + "m_jjj", + "m_lv", + "m_jlv", + "m_bb", + "m_wbb", + "m_wwbb", + ] + + # Specify the file path + file_path = os.path.join(data_input_dir, "headers.csv") + + with open(file_path, "w", newline="") as file: + csv_writer = csv.writer(file) + csv_writer.writerow(features) + + print(f"features written to {file_path}") + + # Split the data + split_higgs( + input_data_path=os.path.join(data_input_dir, "higgs.csv"), + input_header_path=os.path.join(data_input_dir, "headers.csv"), + output_dir=data_output_dir, + site_num=n_clients, + sample_rate=0.3, + ) + + # Create the federated learning job + job = FedJob(name="kmeans") + + # ScatterAndGather also expects an "aggregator" which we define here. + # The actual aggregation function is defined by an "assembler" to specify how to handle the collected updates. + # We use KMeansAssembler which is the assembler designed for k-Means algorithm. + aggregator = CollectAndAssembleAggregator(assembler_id=job.as_id(KMeansAssembler())) + + # For kmeans with sklean, we need a custom persistor + # JoblibModelParamPersistor is a persistor which save/read the model to/from file with JobLib format. 
+ persistor = JoblibModelParamPersistor(initial_params={"n_clusters": 2}) + + controller = ScatterAndGather( + min_clients=n_clients, + num_rounds=num_rounds, + wait_time_after_min_received=0, + aggregator_id=job.as_id(aggregator), + persistor_id=job.as_id(persistor), + shareable_generator_id=job.as_id(FullModelShareableGenerator()), + train_task_name="train", # Client will start training once received such task. + train_timeout=0, + ) + job.to(controller, "server") + + # Add clients + for i in range(n_clients): + executor = ScriptExecutor( + task_script_path=train_script, + task_script_args=f"--data_root_dir {data_output_dir}", + params_exchange_format=ExchangeFormat.RAW, # kmeans requires raw values only rather than PyTorch Tensors (the default) + ) + job.to(executor, f"site-{i+1}") # HIGGs data splitter assumes site names start from 1 + + # job.export_job("/tmp/nvflare/jobs/job_config") + job.simulator_run("/tmp/nvflare/jobs/workdir") diff --git a/examples/advanced/job_api/sklearn/requirements.txt b/examples/advanced/job_api/sklearn/requirements.txt new file mode 100644 index 0000000000..aa0d89bd1b --- /dev/null +++ b/examples/advanced/job_api/sklearn/requirements.txt @@ -0,0 +1,4 @@ +nvflare~=2.5.0rc +pandas +scikit-learn +joblib diff --git a/examples/advanced/job_api/sklearn/src/kmeans_assembler.py b/examples/advanced/job_api/sklearn/src/kmeans_assembler.py new file mode 100644 index 0000000000..a56a7e74ed --- /dev/null +++ b/examples/advanced/job_api/sklearn/src/kmeans_assembler.py @@ -0,0 +1,75 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
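The `KMeansAssembler` that follows keeps a running, count-weighted mean of each cluster center across rounds: the previous global center is rescaled by its accumulated count, the clients' center-times-count contributions are added, and the sum is divided by the new total count. A small numeric illustration of that update (the values are made up and this is not part of the example code):

```python
# One cluster center, 1-D for brevity: the update reduces to a weighted running mean.
import numpy as np

old_center, old_count = np.array([2.0]), 10                    # global state before the round
client_updates = [(np.array([3.0]), 5), (np.array([1.0]), 5)]  # (local center, local count) per site

numerator = old_center * old_count + sum(c * n for c, n in client_updates)
new_count = old_count + sum(n for _, n in client_updates)
new_center = numerator / new_count

print(new_center, new_count)  # [2.] 20 -> mean over all points seen so far
```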
+ +from typing import Dict + +import numpy as np +from sklearn.cluster import KMeans + +from nvflare.apis.dxo import DXO, DataKind +from nvflare.apis.fl_context import FLContext +from nvflare.app_common.aggregators.assembler import Assembler +from nvflare.app_common.app_constant import AppConstants + + +class KMeansAssembler(Assembler): + def __init__(self): + super().__init__(data_kind=DataKind.WEIGHTS) + # Aggregator needs to keep record of historical + # center and count information for mini-batch kmeans + self.center = None + self.count = None + self.n_cluster = 0 + + def get_model_params(self, dxo: DXO): + data = dxo.data + return {"center": data["center"], "count": data["count"]} + + def assemble(self, data: Dict[str, dict], fl_ctx: FLContext) -> DXO: + current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND) + if current_round == 0: + # First round, collect the information regarding n_feature and n_cluster + # Initialize the aggregated center and count to all zero + client_0 = list(self.collection.keys())[0] + self.n_cluster = self.collection[client_0]["center"].shape[0] + n_feature = self.collection[client_0]["center"].shape[1] + self.center = np.zeros([self.n_cluster, n_feature]) + self.count = np.zeros([self.n_cluster]) + # perform one round of KMeans over the submitted centers + # to be used as the original center points + # no count for this round + center_collect = [] + for _, record in self.collection.items(): + center_collect.append(record["center"]) + centers = np.concatenate(center_collect) + kmeans_center_initial = KMeans(n_clusters=self.n_cluster) + kmeans_center_initial.fit(centers) + self.center = kmeans_center_initial.cluster_centers_ + else: + # Mini-batch k-Means step to assemble the received centers + for center_idx in range(self.n_cluster): + centers_global_rescale = self.center[center_idx] * self.count[center_idx] + # Aggregate center, add new center to previous estimate, weighted by counts + for _, record in self.collection.items(): + centers_global_rescale += record["center"][center_idx] * record["count"][center_idx] + self.count[center_idx] += record["count"][center_idx] + # Rescale to compute mean of all points (old and new combined) + alpha = 1 / self.count[center_idx] + centers_global_rescale *= alpha + # Update the global center + self.center[center_idx] = centers_global_rescale + params = {"center": self.center} + dxo = DXO(data_kind=self.expected_data_kind, data=params) + + return dxo diff --git a/examples/advanced/job_api/sklearn/src/kmeans_fl.py b/examples/advanced/job_api/sklearn/src/kmeans_fl.py new file mode 100644 index 0000000000..d4c91ac9c3 --- /dev/null +++ b/examples/advanced/job_api/sklearn/src/kmeans_fl.py @@ -0,0 +1,182 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
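Before the client script, it may help to see the shape of the payload exchanged each round, as implied by the persistor's `initial_params={"n_clusters": 2}`, the `KMeansAssembler`, and `kmeans_fl.py`: round 0 only carries `n_clusters` down to the clients, later rounds carry the aggregated centers, and clients always reply with their local centers plus per-cluster counts. The values below are placeholders for illustration only.

```python
# Illustrative payload shapes for the federated k-means exchange (HIGGS: 28 features after dropping the label).
import numpy as np

n_clusters, n_features = 2, 28

round0_global = {"n_clusters": n_clusters}                      # initial params from the persistor
round0_local = {"center": np.zeros((n_clusters, n_features)),   # kmeans++ proposal from each site
                "count": None}

later_global = {"center": np.zeros((n_clusters, n_features))}   # centers aggregated by the assembler
later_local = {"center": np.zeros((n_clusters, n_features)),    # one MiniBatchKMeans step
               "count": np.zeros(n_clusters)}                   # per-cluster sample counts
```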
+ +import argparse +import csv +from typing import Dict, List, Tuple + +import pandas as pd +from sklearn.cluster import KMeans, MiniBatchKMeans, kmeans_plusplus +from sklearn.metrics import homogeneity_score +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import StandardScaler + +# (1) import nvflare client API +from nvflare import client as flare + + +def to_dataset_tuple(data: dict): + dataset_tuples = {} + for dataset_name, dataset in data.items(): + dataset_tuples[dataset_name] = _to_data_tuple(dataset) + return dataset_tuples + + +def _to_data_tuple(data): + data_num = data.shape[0] + # split to feature and label + x = data.iloc[:, 1:] + y = data.iloc[:, 0] + return x.to_numpy(), y.to_numpy(), data_num + + +def load_features(feature_data_path: str) -> List: + try: + features = [] + with open(feature_data_path, "r") as file: + # Create a CSV reader object + csv_reader = csv.reader(file) + line_list = next(csv_reader) + features = line_list + return features + except Exception as e: + raise Exception(f"Load header for path'{feature_data_path} failed! {e}") + + +def load_data( + data_path: str, data_features: List, random_state: int, test_size: float, skip_rows=None +) -> Dict[str, pd.DataFrame]: + try: + df: pd.DataFrame = pd.read_csv( + data_path, names=data_features, sep=r"\s*,\s*", engine="python", na_values="?", skiprows=skip_rows + ) + + train, test = train_test_split(df, test_size=test_size, random_state=random_state) + + return {"train": train, "test": test} + + except Exception as e: + raise Exception(f"Load data for path '{data_path}' failed! {e}") + + +def transform_data(data: Dict[str, Tuple]) -> Dict[str, Tuple]: + # Standardize features by removing the mean and scaling to unit variance + scaler = StandardScaler() + scaled_datasets = {} + for dataset_name, (x_data, y_data, data_num) in data.items(): + x_scaled = scaler.fit_transform(x_data) + scaled_datasets[dataset_name] = (x_scaled, y_data, data_num) + return scaled_datasets + + +def main(): + parser = define_args_parser() + args = parser.parse_args() + data_root_dir = args.data_root_dir + random_state = args.random_state + test_size = args.test_size + skip_rows = args.skip_rows + + # (2) initializes NVFlare client API + flare.init() + + site_name = flare.get_site_name() + feature_data_path = f"{data_root_dir}/{site_name}_header.csv" + features = load_features(feature_data_path) + n_features = len(features) - 1 # remove label + + data_path = f"{data_root_dir}/{site_name}.csv" + data = load_data( + data_path=data_path, data_features=features, random_state=random_state, test_size=test_size, skip_rows=skip_rows + ) + + data = to_dataset_tuple(data) + dataset = transform_data(data) + x_train, y_train, train_size = dataset["train"] + x_test, y_test, test_size = dataset["test"] + + model = None + n_clusters = 0 + while flare.is_running(): + # (3) receives FLModel from NVFlare + input_model = flare.receive() + global_params = input_model.params + curr_round = input_model.current_round + + print(f"current_round={curr_round}") + if curr_round == 0: + # (4) first round, initialize centers with kmeans++ + n_clusters = global_params["n_clusters"] + center_local, _ = kmeans_plusplus(x_train, n_clusters=n_clusters, random_state=random_state) + params = {"center": center_local, "count": None} + homo = 0.0 + else: + # (5) following rounds, starting from global centers + center_global = global_params["center"] + model = MiniBatchKMeans( + n_clusters=n_clusters, + batch_size=train_size, + max_iter=1, + 
init=center_global, + n_init=1, + reassignment_ratio=0, + random_state=random_state, + ) + # train model + model.fit(x_train) + center_local = model.cluster_centers_ + count_local = model._counts + params = {"center": center_local, "count": count_local} + + # (6) evaluate global center + model_eval = KMeans(n_clusters=n_clusters, init=center_global, n_init=1) + model_eval.fit(center_global) + homo = evaluate_model(x_test, model_eval, y_test) + # Print the results + print(f"{site_name}: global model homogeneity_score: {homo:.4f}") + + # (7) construct trained FL model + metrics = {"accuracy": homo} + output_model = flare.FLModel(params=params, metrics=metrics) + + # (8) send model back to NVFlare + flare.send(output_model) + + +def evaluate_model(x_test, model, y_test): + # Make predictions on the testing set + y_pred = model.predict(x_test) + + # Evaluate the model + homo = homogeneity_score(y_test, y_pred) + return homo + + +def define_args_parser(): + parser = argparse.ArgumentParser(description="scikit learn linear model with SGD") + parser.add_argument("--data_root_dir", type=str, help="root directory path to csv data file") + parser.add_argument("--random_state", type=int, default=0, help="random state") + parser.add_argument("--test_size", type=float, default=0.2, help="test ratio, default to 20%") + parser.add_argument( + "--skip_rows", + type=str, + default=None, + help="""If skip_rows = N, the first N rows will be skipped, + if skiprows=[0, 1, 4], the rows will be skip by row indices such as row 0,1,4 will be skipped. """, + ) + return parser + + +if __name__ == "__main__": + main() diff --git a/examples/advanced/job_api/sklearn/src/split_csv.py b/examples/advanced/job_api/sklearn/src/split_csv.py new file mode 100644 index 0000000000..c06ac58613 --- /dev/null +++ b/examples/advanced/job_api/sklearn/src/split_csv.py @@ -0,0 +1,86 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
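The `split_csv.py` helper that follows is what the job script drives through its `split_higgs` wrapper. A short usage sketch, assuming the HIGGS csv and the generated `headers.csv` already exist under the paths used by the job script and that the snippet is run from the example's root so that `src.split_csv` is importable:

```python
# Produce site-1.csv ... site-3.csv and matching site-N_header.csv files,
# which is the layout kmeans_fl.py expects under --data_root_dir.
from src.split_csv import distribute_header_file, split_csv

out_dir = "/tmp/nvflare/higgs/split_data"
split_csv("/tmp/nvflare/higgs/data/higgs.csv", out_dir,
          num_parts=3, part_name="site-", sample_rate=0.3)
distribute_header_file("/tmp/nvflare/higgs/data/headers.csv", out_dir, 3, "site-")
```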
+ + +import argparse +import os +import shutil + +import pandas as pd + + +def load_data(input_file_path) -> pd.DataFrame: + # Read the CSV file into a pandas DataFrame + return pd.read_csv(input_file_path, header=None) + + +def split_csv(input_file_path, output_dir, num_parts, part_name, sample_rate): + df = load_data(input_file_path) + + # Calculate the number of rows per part + total_size = int(len(df) * sample_rate) + rows_per_part = total_size // num_parts + + # Create the output directory if it doesn't exist + os.makedirs(output_dir, exist_ok=True) + + # Split the DataFrame into N parts + for i in range(num_parts): + start_index = i * rows_per_part + end_index = (i + 1) * rows_per_part if i < num_parts - 1 else total_size + print(f"{part_name}{i + 1}=", f"{start_index=}", f"{end_index=}") + part_df = df.iloc[start_index:end_index] + + # Save each part to a separate CSV file + output_file = os.path.join(output_dir, f"{part_name}{i + 1}.csv") + part_df.to_csv(output_file, header=False, index=False) + + +def distribute_header_file(input_header_file: str, output_dir: str, num_parts: int, part_name: str): + source_file = input_header_file + + # Split the DataFrame into N parts + for i in range(num_parts): + output_file = os.path.join(output_dir, f"{part_name}{i + 1}_header.csv") + shutil.copy(source_file, output_file) + print(f"File copied to {output_file}") + + +def define_args_parser(): + parser = argparse.ArgumentParser(description="csv data split") + parser.add_argument("--input_data_path", type=str, help="input path to csv data file") + parser.add_argument("--input_header_path", type=str, help="input path to csv header file") + parser.add_argument("--site_num", type=int, help="Total number of sites or clients") + parser.add_argument("--site_name_prefix", type=str, default="site-", help="Site name prefix") + parser.add_argument("--output_dir", type=str, default="/tmp/nvflare/dataset/output", help="Output directory") + parser.add_argument( + "--sample_rate", type=float, default="1.0", help="percent of the data will be used. default 1.0 for 100%" + ) + return parser + + +def main(): + parser = define_args_parser() + args = parser.parse_args() + input_file = args.input_data_path + output_directory = args.output_dir + num_parts = args.site_num + site_name_prefix = args.site_name_prefix + sample_rate = args.sample_rate + split_csv(input_file, output_directory, num_parts, site_name_prefix, sample_rate) + distribute_header_file(args.input_header_path, output_directory, num_parts, site_name_prefix) + + +if __name__ == "__main__": + main() diff --git a/examples/advanced/job_api/tf/README.md b/examples/advanced/job_api/tf/README.md new file mode 100644 index 0000000000..4cc57b4f79 --- /dev/null +++ b/examples/advanced/job_api/tf/README.md @@ -0,0 +1,213 @@ +# Advanced Job API Examples with Tensorflow + +[![TensorFlow Logo](https://upload.wikimedia.org/wikipedia/commons/a/ab/TensorFlow_logo.svg)](https://tensorflow.org/) + +We provide several examples for advanced algorithms using NVFlare's Job API. +All examples in this folder are based on using [TensorFlow](https://tensorflow.org/) as the model training framework. + +## Simulated Federated Learning with CIFAR10 Using Tensorflow + +This example shows `Tensorflow`-based classic Federated Learning +algorithms, namely FedAvg and FedOpt on CIFAR10 +dataset. 
This example is analogous to [the example using the `Pytorch` backend](https://github.com/NVIDIA/NVFlare/tree/main/examples/advanced/cifar10/cifar10-sim) on the same dataset, where the same experiments were conducted and analyzed.
+You should expect the same experimental results when comparing this example with the `Pytorch` one.
+
+In this example, the latest Client APIs were used to implement the client-side training logic
+(details in [`cifar10_tf_fl_alpha_split.py`](src/cifar10_tf_fl_alpha_split.py)), and the new
+[`FedJob`](https://github.com/NVIDIA/NVFlare/blob/main/nvflare/job_config/fed_job.py#L106)
+APIs were used to programmatically set up an `nvflare` job that can be exported or run by the simulator
+(details in [`tf_fl_script_executor_cifar10.py`](tf_fl_script_executor_cifar10.py)),
+removing the need to write job config files and simplifying the development process.
+
+## 1. Install requirements
+
+Install the required packages:
+```
+pip install --upgrade pip
+pip install -r ./requirements.txt
+```
+
+> **_NOTE:_** We recommend using a containerized deployment or a virtual environment;
+> please refer to [getting started](https://nvflare.readthedocs.io/en/latest/getting_started.html).
+
+
+## 2. Run experiments
+
+This example uses the simulator to run all experiments. The script
+[`tf_fl_script_executor_cifar10.py`](tf_fl_script_executor_cifar10.py)
+is the main entry point for launching the different experiments with
+different arguments (see the sections below for details). A script
+[`run_jobs.sh`](run_jobs.sh) is also provided to run all experiments
+described below at once:
+```
+bash ./run_jobs.sh
+```
+The CIFAR10 dataset will be downloaded when running any experiment for
+the first time. `Tensorboard` summary logs are generated during
+every experiment, and you can use `Tensorboard` to visualize the
+training and validation process as the experiment runs. Data split
+files, summary logs and results will be saved in a workspace
+directory, which defaults to `/tmp` and can be configured via the
+`--workspace` argument of the `tf_fl_script_executor_cifar10.py`
+script.
+
+> [!WARNING]
+> If you are using a GPU, make sure to set the following
+> environment variables before running a training job, to prevent
+> `Tensorflow` from allocating the full GPU memory all at once:
+> `export TF_FORCE_GPU_ALLOW_GROWTH=true && export
+> TF_GPU_ALLOCATOR=cuda_malloc_async`
+
+The set-up of all experiments in this example is kept the same as in
+[the example using the `Pytorch` backend](https://github.com/NVIDIA/NVFlare/tree/main/examples/advanced/cifar10/cifar10-sim).
+Refer to the `Pytorch` example for more details. Similar to the Pytorch
+example, here we also use Dirichlet sampling on the CIFAR10 data labels
+to simulate data heterogeneity among the data splits for the different client
+sites, controlled by an alpha value ranging from 0 (exclusive) to 1. A higher
+alpha value indicates less data heterogeneity, i.e., an alpha value of 1.0
+results in a homogeneous data distribution among the different splits.
+
+### 2.1 Centralized training
+
+To simulate a centralized training baseline, we run the FedAvg algorithm
+with 1 client for 25 rounds, where each round consists of a single epoch.
+
+```
+python ./tf_fl_script_executor_cifar10.py \
+       --algo centralized \
+       --n_clients 1 \
+       --num_rounds 25 \
+       --batch_size 64 \
+       --epochs 1 \
+       --alpha 0.0
+```
+Note that `--alpha 0.0` is a placeholder value used to disable the data splits for centralized training.
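The per-client data splits referred to above come from Dirichlet sampling over the CIFAR10 labels, controlled by `alpha`. The example's own split generation lives in the repository's data utilities and is not shown in this patch; purely to illustrate the idea, here is a simplified sketch (the function name and defaults are made up):

```python
# Simplified Dirichlet label split: for every class, draw per-client proportions
# from Dir(alpha) and hand that class's sample indices out accordingly.
import numpy as np


def dirichlet_split(labels, n_clients=8, alpha=0.1, seed=0):
    rng = np.random.default_rng(seed)
    client_indices = [[] for _ in range(n_clients)]
    for cls in np.unique(labels):
        idx = rng.permutation(np.where(labels == cls)[0])
        proportions = rng.dirichlet(alpha * np.ones(n_clients))
        cut_points = (np.cumsum(proportions)[:-1] * len(idx)).astype(int)
        for client, chunk in enumerate(np.split(idx, cut_points)):
            client_indices[client].extend(chunk.tolist())
    return client_indices


# Smaller alpha -> more skewed label distributions per client; alpha near 1.0 -> closer to uniform.
```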
+
+### 2.2 FedAvg with different data heterogeneity (alpha values)
+
+Here we run FedAvg for 50 rounds, each round with 4 local epochs. This
+corresponds roughly to the same number of iterations across clients as
+in the centralized baseline above (50*4 divided by 8 clients is 25):
+```
+for alpha in 1.0 0.5 0.3 0.1; do
+
+    python ./tf_fl_script_executor_cifar10.py \
+       --algo fedavg \
+       --n_clients 8 \
+       --num_rounds 50 \
+       --batch_size 64 \
+       --epochs 4 \
+       --alpha $alpha
+
+done
+```
+
+### 2.3 Advanced FL algorithms (FedOpt, FedProx, SCAFFOLD)
+
+Next, let's try some different FL algorithms on a more heterogeneous split:
+
+[FedOpt](https://arxiv.org/abs/2003.00295) uses optimizers on the server
+side to update the global model from client-side gradients. Here we
+use SGD with momentum and cosine learning rate decay:
+```
+python ./tf_fl_script_executor_cifar10.py \
+       --algo fedopt \
+       --n_clients 8 \
+       --num_rounds 50 \
+       --batch_size 64 \
+       --epochs 4 \
+       --alpha 0.1
+```
+[FedProx](https://arxiv.org/abs/1812.06127) adds a regularizer to the loss:
+```
+python ./tf_fl_script_executor_cifar10.py \
+       --algo fedprox \
+       --n_clients 8 \
+       --num_rounds 50 \
+       --batch_size 64 \
+       --epochs 4 \
+       --fedprox_mu 1e-5 \
+       --alpha 0.1
+```
+[SCAFFOLD](https://arxiv.org/abs/1910.06378) adds a correction term
+during local training, following the
+[implementation](https://github.com/Xtra-Computing/NIID-Bench)
+described in [Li et al.](https://arxiv.org/abs/2102.02079):
+
+```
+python ./tf_fl_script_executor_cifar10.py \
+       --algo scaffold \
+       --n_clients 8 \
+       --num_rounds 50 \
+       --batch_size 64 \
+       --epochs 4 \
+       --fedprox_mu 1e-5 \
+       --alpha 0.1
+```
+## 3. Results
+
+Now let's compare the experimental results.
+
+### 3.1 Centralized training vs. FedAvg for a homogeneous split
+Let's first compare FedAvg with a homogeneous data split
+(i.e. `alpha=1.0`) against centralized training. As can be seen from the
+figure and table below, FedAvg can achieve performance similar to
+centralized training under a homogeneous data split, i.e., when there is
+no difference in the data distributions among the different clients.
+
+| Config | Alpha | Val score |
+|-----------------|-------|-----------|
+| cifar10_central | n.a. | 0.8758 |
+| cifar10_fedavg | 1.0 | 0.8839 |
+
+![Central vs. FedAvg](./figs/fedavg-vs-centralized.png)
+
+### 3.2 Impact of client data heterogeneity
+
+Here we compare the impact of data heterogeneity by varying the
+`alpha` value, where lower values cause higher heterogeneity. As can
+be observed in the table below, the performance of FedAvg decreases
+as the data heterogeneity becomes higher.
+
+| Config | Alpha | Val score |
+| ----------- | ----------- | ----------- |
+| cifar10_fedavg | 1.0 | 0.8838 |
+| cifar10_fedavg | 0.5 | 0.8685 |
+| cifar10_fedavg | 0.3 | 0.8323 |
+| cifar10_fedavg | 0.1 | 0.7903 |
+
+![Impact of client data heterogeneity](./figs/fedavg-diff-alphas.png)
+
+### 3.3 Impact of different FL algorithms
+
+Lastly, we compare the performance of the different FL algorithms, with the
+`alpha` value fixed to 0.1, i.e., high client data heterogeneity. We can
+observe from the figure below that FedOpt and SCAFFOLD achieve better
+performance, with better convergence rates, than FedAvg and FedProx with
+the same alpha setting. SCAFFOLD achieves this by adding a correction term
+when updating the client models, while FedOpt uses SGD with momentum to
+update the global model on the server. Both achieve better performance with
+the same number of training steps as FedAvg/FedProx.
+
+| Config | Alpha | Val score |
+| ----------- | ----------- | ----------- |
+| cifar10_fedavg | 0.1 | 0.7903 |
+| cifar10_fedopt | 0.1 | 0.8145 |
+| cifar10_fedprox | 0.1 | 0.7843 |
+| cifar10_scaffold | 0.1 | 0.8164 |
+
+![Impact of different FL algorithms](./figs/fedavg-diff-algos-new.png)
+> [!NOTE]
+> More examples can be found at https://nvidia.github.io/NVFlare.
+
diff --git a/examples/getting_started/tf/figs/fedavg-diff-algos-new.png b/examples/advanced/job_api/tf/figs/fedavg-diff-algos-new.png
similarity index 100%
rename from examples/getting_started/tf/figs/fedavg-diff-algos-new.png
rename to examples/advanced/job_api/tf/figs/fedavg-diff-algos-new.png
diff --git a/examples/getting_started/tf/figs/fedavg-diff-algos.png b/examples/advanced/job_api/tf/figs/fedavg-diff-algos.png
similarity index 100%
rename from examples/getting_started/tf/figs/fedavg-diff-algos.png
rename to examples/advanced/job_api/tf/figs/fedavg-diff-algos.png
diff --git a/examples/advanced/job_api/tf/figs/fedavg-diff-alphas.png b/examples/advanced/job_api/tf/figs/fedavg-diff-alphas.png
new file mode 100755
index 0000000000000000000000000000000000000000..8c9ff65747d94e45185c2f815645a8014985c8d7
GIT binary patch
literal 81019
[base85-encoded binary image data omitted]

WX>qk!A8FOU&N{OLs9 z+`46zubikeyw>fd?-*6yrHo&FX#q`bE7h!590RL-h7)E#pdJr{RGiRi62W_%DaBM* z0{=jh_T7u}sjOxxPyY?|#rf+Gvc-3E3aLI}+3{T@!?H%|fC7hA?xU3F+3!Li<^nGy z%;RnB#JS0iLx2T*bDC12Z(G&)y5PQSIm#!-R$k4;y%VxMk(wk~i6Rk_j851N*gr2E zBKmI?Qf2#@?mj>Y2w4snzEmFSx~(y^VR0EadnpbLAf=KnYQ$@rd8g& zlb7#uPUvW&@t^+$3v{Dedxcdv*LcpBc`&k2F8+>n243`cGC^lHZSs2L{UqTRObqIh zSjQIi4sQ&h;@1Pd&5IY3(^T-OXYOXy+1^e(BZ+PblHkS$N@y_D*c2pK-k5W+=b30M zbpWs_0?oH+ok#Bq436#{f>uiQonb&s0%<^OjU`cAxQnjg`JF4@_B5X4j#llK9*MR$ z00=l}Gt+O!U+XffM6JzkYc7hiGx}EeOhhE#EiJz{D{;5udTewq51Y1#)p@HgMDhmY zDAPNlxywdBuR*G)j!055^Sk}KsK7(r70xVhc#;+UPgADlyk{2I0QGn1+KyNd9lTgh z>&Q39$H$~q%cM>sD0J|bL9T#s&Sw!5i-zLzrxt~7N4?My(xCnVcY4V6(VE(ZrP^n} z@1vjLz}LdxL)VX=u5z9=>s@b(&Xe+VQCWG3gcZj%D}mIvqVfv96)!OS(mzV2I-7en zwdo*fiO8lTA=(iJ?;-353p^x{k{lwX6pt4zPpL|ZNv}IC{5YOXg|Tr`^vEp(OQ2^wz| z#>OXHEL)6&bI~3M&Sss@=23Kw#uLA;x|2f&H4O_--oaY@@r_=)Z%{>X;@2BS%{5~$ zKQi_(GIC0vR+T#N|8gLbcF?eg8@@ISG!@VGHBLEVz(F{JmfM22v-KUs0g6|&BWxMS znzV0;!{34z3Ya#^E$?BMsR}XoiVEQ=V^;^JF2DZZ_O&97$PbhAzMc6GjD9Hsg++Q3 z-7{;GOHo-}_udX`+?pI~#yO{TGDxu>NIc@D1J-@h;WIeYn5gev_{#jiK36Yin7Y() z9oJ91DlCKVs4hBJB{^#Sc5>HPo2(uStA=HI$@qe9hiWckcq?nAX>BC}Mgb zJEb~Af45Jul-no`yCnVKPW&{**_(i-ve4!UTbCmE{(EJEs2P}WyRwZEW zO3KqHFpI=&_MJ)Mt#rWP_=oOiX*iF+1?At><}RdpV>+k(PEfO) zVUAU3b17Oh+&esAg6#mHWouS1H_Z4!udcAzAg~aY8q8m!Dic_-iENtsP-xRoJf&0- zwVY21=8LW2<)CKh+{{1SDj%DEV15y5nt`cmx+k*8pJ>SmPZJBPQ#$T-#y_Xy&o8z& zyrwT!_9#tfUeb!KEICWLfJW|8(#foEP;KDBvnMDqMd%UI=1hE`Z79b~(8lL&&Q0*U zv@X%m9@g0S-g}e&+=uPlWW>y2&fuT}@d(zOxX@*y4o$dDgV2 zPV&xk!zB9=qdjMvpKVWlc{XQyt9)hRAlK|`dEiOW@W-~`$22s)#)q>aDo&#Zn}|@h zl|nA(mk#3gaxRV8ok}zhR!{gA5gEd`l1{D}+x-{_rsim+kE?>O5hggO`l0j05X2H< z4jd6l%yU)B?vUy&xqub5Omri6yee*O$yT={6cRhuzGlip+aowu1u4!3!^`-~f0D`y zlGLp8M+kYYe2p9KQ>TOqXoqBBCn?G1fuWPwYxdS!Z>Pu_ZiKJQKdOh&h%K7*74sewf2IvzX5@x9k*az7@iJpLteggM4%j(D=6&L z&9)r7ks7HmMs?Z0K?%$umD}i|45^ECCy7yHOfZx$#CD!wOEOtTw9CFl?lwMjxfBl} zn^DSFG0S^928@5x=^saaLmV$C>J)O_as;5^I%OLS>DqxX1+knE4TJ%;sLVm$%fF?= zRFsYA*Qgvp7^OAkS=s+3&Afv3 zAf;e@99ikY^Uv;uxeiw*oeZT|qLL6HK3%30mdQX%H#ijZxBCs6R31fvmUJnYEfg0>m}MPNY+X%-N(P~q_XC4@|Bk_f{5&DrdlnTCGa{xuZ13uc!0ovfN-P8Q z$`Qr@mJ`Q+c_Q?3@x{M&t`{!~(Vn;Hg3w(c&+>349;GNRVJVo#o$W4EAZ5K}9~-EY z^w&r<6Ps!OMm;L1)WhFq{;we&|ECZ5vHNXIj2~WPYvo{)EiBf(|AB6!*C)-CqRI{i zek@q#uWItP5aq5AGs<7>;&qsA;_a`Ma+q!4;}_|5hIczLR8>@5AJ>{%1+@HrHy=+U zTV%3Nu`M?=gfNVY^_)NkE8rQaCtegE{{$WRuFLtjW&UAYe#|~D_yK&*@v&PQ+?BWt z?yhs342Vri{#lgN-QI2M``88CBx{f|$rN%u%4PSl6dE8aTHP-du=6A=|1>LfJ3dI<3q<+?ZZVV(()~Q7;&cs(AFYPo14veub6X;xa zPtf?EX?km1J!HN+9Il;Rs_F$3wI?_oEPMIK#;s&jvE~bag~ikshc2Njg)<4?YBidyji$d=NN*c?4x5dQejR37-PNwo>?uy%S@U<*6{s-c%Kpp(Im>5O z0eX>(?sq8bt4X?E_Kf!tit|&$`!yc7fJTu!@194;b=%_DT)I~p-Y<>Gj8cw5A@|Nq z#kmK?XC?mnr7P=KIxay8J4|`HX}waqSI1S%*gai1JMeazT`W<>jVbA5&=| z6qd>I;dxcSy{;Y`^!v1|_tn$5Lt2wd;+GlwtOqN_{uMvoD_8{bSMQb%^6w#Z5NM=;JqE2t7j z`-#eQlv)*Oi;9)f1!Rg$zKhz84U*8kk(t-H_V$@Bb3OM5FVh`&*9mqAZ>q+#d4s_@ z$zSfix}s()QMiDM1B(HKA$Vh=XAdzf>rugv?B|J(?Vs81^?lK3&5M?Ci{IE`IR5dK zXkq-iD7U9f1TfSm)F*G8f@q2~vRU`nJ`6(Oim%@t3bAqzxiM$UvX4PSW1NeUN13G| zn%!Y8WGZ%X654Ff^qhXyiw=G@cV|TAZAW2AOU73psZ!sEpjK9Ru9_02BzJ!W=FRdm}|d4z0xfMj4xp z5st0Bml=d@6xL~HmObcrZyCgV7VDyHD?t`$F3`)tZ_0=GhwOdHWa8jF=ti>xGV2Wa z{KQ%!6@_1|<$Fr^x5#o!7nX80-1KX@Z~vH8bmE$qq0P<`?S*e~T4!)oG6lO(D!YJV zG?d;NwZ;V~xV7BydaNxE^GZ27Ay4HkRtNQkbjD~QX7L5GsE_JyTZyL8uZ{KRcGo(i z`xFu@p#4I5<^?~b8Dzj}4BpiA9_`klT6orl-iL?IB~=1VjN$>lUeONX@1A z+aqr~0HnGCnS-E^;K5?wUEtK_h)>|P-*lcgW1=1Cwv1N(D%Wo1+@g=i{DR{76SVT@!%)RK_fBDH$q?1#7Xr zOLvC}cs<6UPT0O0a9X?F!il)Ib~iRBU$(LzN71k%7Y5(Y)PE7`{>a;V(ovd3|3+hW zEI5Iv&W`;+*^-|hckNo4*0JNR82=4x5S4n0_Gwxl0_=J}P_@Y0j?o+P0^@q5FKJ3V 
zfc-_`fIWx77Sq6r(d=8E#dmoy1WyxIjuc_g$hiYPqZFlJ>h^?RUe6mZWY<-C~x$5~cowEp8 zcn)P>1H7#0Y2!S_@n#aawev|Mo-2V{@|BwwCI4v;XEg#SFo&;$5YB1r+oPwn6okhZ zyZtym%)`dht0l;WmYzpCP@ac`-MV(Gdi;C;3@3go<{F}7@snV_$HthH>iwl^S1d)6 z3!K_AjL?KaI`^7Kt^A2Lks)kC@LQ@On1bX$!%yd0o7$H5?qXm!cL^J1l?L2sR*!S?{y z#tC#2hQkHsr zuRTf=%i*5+cm;ZtopZ9b_GY3o*M-^lJK%xUTS9_$IsrM=`^NRJd*VkmWyU50mUq@* zuXsYTC9k1$R*zspQ4+SoVH`;(xp*P*I~VF@F1gWuiXGJWnUVG~!OZ2wqb=Gj)nihF zV#0-CFZkOj!(6+O4eRHI^h!c|%+xgfNmQZ6#F_BB(N3$!z5}}lal;#u^3R>39m}8k zzNc?BSW>FGj+rf>-vZZvFZ(U53%=WG6lNf|Iqne3c3F*j%jxiUEPjm1DDX2yJSV~) zjPtHH)Z93=LMwj%mfQt55gTdn1TQcf2fImfA%LDjZ9oAXD8uNK98EM!JyMc}M!{0` z$wLDw-62Fv#BZ+-HQ@*4pSj6f{TO{HGs^Vs+y}7Ca(3PB#Qr{C?`kNOH+{#=2(g@a z#lx|_w}^L(YOj(7Vz1%5$a9GsO`;L!0e4*;xPHmsuKpFNt%yOB0mHaxp(cK;Is*4> zLOb72yY0|XJ(<5Grud@%)%c&Z{|)4krV^=KcKjcd2T#F&r92J||KF5{qU-i-#+8-8 zqV_MFfy&t0pxMYBtnP$N`EjEx&bz_&B+Z5uU-QDNK`T>KN{`!lS`kMtQ(TeR*JBXL z?TkoGUd^Hqf$KuT)P(Zkz}}FJ)FfWQ;*28oC{xcTEkbF}=YcEVhe9ANLcPa+Mh#7m ztOOc*>jMC%UOFwUX^k6M!v+u--&*YsH=qi5=qS!RyN2lh_4c%m# z>ZTiN-}7t(?!?_V8Wh5Prd@?!2>#UbHyB+v$JrIigVap1y8jhRZdc> zp1EYY=c+~KehSN|+S4k1?p$X1j5u)H*7`uhIiQ-U78AuFaWK|f1u4AzhQJx!Plbba zO_GH!yNZ31HjfO0ZMw*R_KKUouF2tNqu^{(?i6lMM9GZ8+S9jzKH#SOfN!VJ-Ugm{Fnw$7eP0c2ByO`N4m!N=yZx0q-0vEGK+R3=lX0n~4 z@C#?5N+Wb?ZIjvwDc(430LRLq9&NyiQn7#+$nCV3UfRtoA<`+c(W66{_t`IWps^;h zS43OdtxOaNfnJooS5ntrjb}iYaQxjN7ZP71e5rg;pM9L%{G|}pFD;x@kOE=jCttqU zFUvF>@8a~-?j;_&+T}l}ZIcn*B$#ya;g|1a8GKH`gMLq&i17`1II_2oe<|Gw&|}+o z?Wm+4#9N2TPuxxmUGh|_v1>Fb3Zqt5d5qT>41Ju|HoVGkZ_**E<7+Q8It-t*|_vB?D?#Qw;qe!A*P zzdHx%HV=!B``LjJ-ug}tMQ?{NrFs*3@;RaX%Ht9a$5cOl>?q`;Q+HeBMw8xaQ5%=o z_D6i!Iyn|*=<1S-g=56RzRu@6AG{>v^{O=PBvWvFNo-s}QZZ3ZBvcX;@w}#^GaCW- zmhejPbN#MvHuQf+M6i2HSTW)}`%xvCxVD0?j&vz(OnmY%b5hxpM!Tr3ttnUKd#s9F z=f60DgB~PMb*`{s3w^??@4?pwPPtUS7mu2dE}Qbx-kOOGQEoBagkv(v=FV_ik+dhW z^di}=TzMx^aTsq+%ucTu*Mp~Q8uD-0*BBJ?6)fCkdrm)b1|c0i^;7XKzB^e(d|U~~ zzQNOz##6Fr{#^itV7AGKF(Rh=N()DgKQcOb&#ib-2VS*z{orY&H4Ws7Ow&nSvoGI6 z`=ys7C}~7e

e%um}`9{n=|CTfXQ6jw<%9P;}#iIb!q|=#x&8?@lmGy5PcPSCrSk zFVIr5VF}kS)%M%Bb5ckRO{`{y^2b|6z+O@#pyqa**Oo2XzTT4)`%We=MP<*7!y_4e zyvF}{^8VdG;$n+@9NhW?henfA3qZ1#0v|naG%F*!HdhWgQxSqAqx53Q8j1HRehEq{5hZxrt8;@R4gS%PB zXBUj6S(n=Mubnz4-}OI}Bxh(C|3Q*S(BQ9zfCO9SX}#0f?^)HEa$7`sNYYeFQC?Yb z7gS91cT(Ea?KR&@A|)(5cv~i_cp#Hen+)fw<0UM{Pf)$KtTu0%h4pG&t-gfZx646A zK!y-6=Ko^dbNhrt#}jZ_$?yi^0S>4 zKHD>UJI`TJzvzFt6*?sx+V>Z}5W^K$*GtDyNipQ?xv|X^BYI%!Q+?&bLC^M= zqZKdY3#ndaEeO(ET-gv)Bw!%SJ6WB$qRbw`}TUE}O#i3c>;oEO4&I#aG-!67kMci&TY3!Wr8m zxU0W64)JTIX79@BH2pHleCo{r*=pLUekSUMg?hV=g3XJ~y;TlyM}p4p4DMr};`sbd zqM!3O}Q10OV zk|N*X>DH7H*)lxg8Br`O(K38>`P5J@_o=FxV`>60`ZsQZ$RYV7XX#!%EO=0K$Ks;h z_gghhDs3O82gYnYWY@j-d{-#tR8%KjoGJt{xr$jHBavcRLj-a zJ1WwYV&e8-^J^&XkQq=obD`Ixa1`V;|Hw1@$Yy9rrnHDE%eP~@eO9#p8_)zjJlIHH zA>Zb=e^emOjY}KjhZzGA-CNZEf0{e*sHV1dU)vA`5er47M5RfWE=sikQl)nS z(xms^ML?uUZ$gmXt8_>xBE3s5kuC%XErgH+?!vv@+kHO2bI&<TqG^|KE z@HkNd#k_|!ged;d+9tO&Qp#aDa7-jNl_=y`b!aq`OJK1t1v_h#=t-3JW`@@i1Sh}T4GC%51=~w*c@MvD4 zu#*_ zszf&g`rW!IpN;N&++gUaq7^WP+VR!-h@!6>b;e7n*namBq4<;}=W?v*2ruN!EzhUj zttcS7aGiaPRzxEyu1>IT5Y)|nJolDncr$S6oMbiU55A-vPyAZy#|{Z*S-8iZjna% z?lN(R4*Pc3bhlE!{nCTktCX631w{|K={p*&%a-xQB(E2OX^+Qu-g$_g?BqRs`y3+3 zvL`q8-ndSQ7tC^GKRpzn0I$I*^~-w7>AlY`YXB7>GsF(EiXZ6)Tpsoir*N!Whr}&T zLb8XJ)kXI=vpqdWjZm!x(e!6McN3Pm^f1z)HaQl%dYv=DDUnr z#%eDv=M-Y3J#^rIf(-$yL@gtC`C3zMI7$Hg9Tlj{wyrI|(l+aMbE2Atm~we{B)|5D zMM+pIt`WrUDngh#Y1jO-c5j_09e$@A`jl*#J+`m(+%be&U+rT3 zH`|67-6@WY9j35+TJ0m&s~MqZM;$H_we6l) z>&oqfj=|`{>Ji>|obpU(Q1!LT;nI;C9gQx2`uD-6h${?KATrRdFL?O6v8D6c!~g^y zboO+1I}NiNXE#uaT5y$YFcU0so@axdN@wUj6yQd8qGanA?7AH^ZXlD&i0ARK%ZsKz?Q=H!WX6PMPun)l3tANAum&*ZbzVkp8M2b zA2PkeVgVhZ?fLD;J^qh~VS1vg#`aGMaR zr+XG4|7?e@iAK8SVgKMXEGjs?`6g^+GlR;$g&XcnS^#^>8?B)ovoMk;lR+^dfy@mSWhrE%bWKRNv#bF!G(Nvhjin>^mq zo-pLm6=!I9HrRM6)-`T7RkhuzQUnO2R0*$G?+^2EgFNahwc`A)@YTtOCBTXOIN|iE zw~KuI_g6DrlUmw%W$oB4%y8GHpzQ21azRxUq!r-{DX)eI96_H$E5EqnE-}FmmM?Vl9i+j;m*Vw+O^UyQMo5NcQ#wm(E5@Bke->$}V z)uLW32eN}zW50DRyc+42SjWUwp-*ePU>*>cKggmg|9JN`^q@ft7V37@wBdfYZs}Yo z{cE+UomUd)7Xl*8v2@~;oSkPf8d|Q00HebBArXZ&(N2-RJbLZ&yOF-`W>YQEana!- zR}K!rl~;H5r^I+GpQtZpU>(Xhv}FxOv!`hh3S{TM`~xTXd>$(eed@_ae*1N;xy$}& z$6Og_SdjlIx36AiicW#UdoEd zODWZvLYFUv_$FQQV2EeeG`mC(P_QApNOe7e7(s|?qI9+G_xigsheJ#(cXaxL%!1>3 zx00aLMR>OTl(@utmuRoiIMo{w=L|k8KCl|U_B7l0`F_HP7ws-#hBUHzt-_!Mx4aRl zqEXbDCOWA$pYRT8oVj(v7f+Q$3oB~z^>GJWWY?^Vl2t)dL(0F{CH)s65?NS4Kvtql zCHzoiCgA1xtJ~4eZ_jtj=6JL?YIHijdW5}?h>`a(M3!gcb@p8Cnkh@Is9Ti)Ux-cTO8%J_ovAnk=|7@Z-spgyjD9$;5P#I8mz{qd$oB z!I(dx@Na$`eKqF?3JGjW-U~J>g2!vuF>JR)2@TK@R60hCn&%IJjkOY`{WL(kML{6r zzCp}QjhoqGg{T)I=3le6qeJX(e*gn)*G?aonpAE{3EbyM)vO1OCZv&LNNPDFJrOmM zmAt0))%Y+DW+^j6D$KC?ag|U>PP;%x&tfq^Th48rq2o0+rx@(m=T}-;*q8i3i*78? 
zh@)yFd2JjqT)pW1Kr252man^?{GnGF!W)i+5A}a} z%_cm6Z!S1)xS56Ws0Zd-xcSwz6#>^u3{8=I@gN!dAun|L zK)8~xvHamraKtv5%Hc^_={I)~+2&#`Z#T6ylv%CwP)`WS!>Y<#`(Q9ckEpKGj7MCR zEaDtEknDep8_@w469U$qmNPz=SCE7=M#e0@JF^5QPgIr*!#3=02Z~RM-OY;HZqvlj z)Qo|iNXb!E*fZ{9i7Rb7_nsU#<^u4@p>CYiqrz@nz@B<|nPaZ|3Sh#}nBAuq;(csR#c3?|4p zs*e=2*@{{$@GHiXWJXEPgMz+RnYV)61F|6_A=gf7aiZ*UCFdFW-L^YdRk|;scNKwJ zSX$3Wa+HA*xIzIZojU#rb#UG6gEdd|dk>_nhO=V@#e)2miSP-0sC&K$m7b6uHE9ei zULx4ER*4#1q$E~Aancq0_NL68ggyOk797cW@VQT4MWSMq>3RmdC)SiE$*1kk4 zk+W{{;o7Gd8`Bs0g)IHKCEq7H-UfuMi%izX8*f-sgZcW*MZ;OIM+9|=1f4pyUb@w& z{eComV+wN2(IfZh!5c)3XTP-tsKM~`dTJ`ZUipZi>EyGq@#vdE5%yfvR|@KK$|4#+ zDg?F%vf~d?T}#hqxCPgi7c_M$8YZ_kcla>N;Lx~RI7IDUrA-$&G`y?Uu*&bXiwwpz zBZ&hR|DzQBEfJ6)_pa2X2+}0HielANEV< zh*JGgzgAUbJ|*S|$y_lV#s?fKTi}ajGYP?7T>y8&Gd%YzcXDB5zmfTgI+eY$%W21o zJL*BRY%_!h*6tf$b3?%&XP022fdxf6x!d;!5N^=5E;E?DfCMuX!F zMGt8((ZR=mzpzm#_NRg46upxp;2{}5e!Nk=2s=iNElqoX$fs+j_f%_~5v`A8KB7&h|HiL)8i4Fs6HYHSW0T_3_Su&m{u{f;$#t?{mxLMqo4@EZ0!~Y9H5gSh|E4Pj) z+c9GK;!Ox>@8}N!3CmyergRE0-uxU?eUJYo56I6G{7M*`lKQqxw}O$|YMqXdKDbz_KT>9>#!V*4!eXewNlsoXZX!H-46te#h!>}P z>i6-cc^@){s;&015hCR1a&p2`6oX^VR$*K>X=+?&JObZhcYu=Y+U$cL(ih*xAZfqG zeE!?f5RpixZqaBPfF{1h${wu18XXkfy&gCz;^vUcQ31koyFjX zgvLdJoTv)CQUwQ$$NVhGbG%kEenjsrREbLVEIZ;n{H;o>O%v2!yhgzLJQlfi1=q&O*b4 z^!_ly6u&a^$k0j=>NCI(0yGy*7H|1jd-Zaj0`)O^${+w(yDt`=MGS|Bn8NLLi6ntdG;M#dMi1fSN!@=_EPwJmqg(&R=PG~KD;ZFfu*RMd-dW4zGGh~YFQ+^Q zusr2De$ejnQd&!Wi&hz5%Y+p(Lq!g>=IHYWCn6E!|96qd-G7Kg2rtHu{xFHuJ-cKB^*3|+3#qbKF2B#HLdxCD@0aH3T3lM(b*U=M9fGQQ3fOVJ4e&qU zm_*01DO7>wN2>l+7;;@Ou_f_uVTeR2%jWdt4qb0UOM|}?rbpZLS$4Q2U$Jm|&g;)o z*nnnCfNy*ojIG+(=uu^1cBz|{^m)r&mkR);lcw}-QDI43<9b>Y5ev>kc}dzY>5|X) z5#BsmxRi>4Dk47enW#Z|Ue!dhCZ`hB(ThEb`GEP6n)&Oy(`;Q!pOKGpQ(WnrbzuFO zv=-t}b->T!!&y+Kmos@skmn(b>E&3TpmmiGu`ck#*}8ifl<+PJAgX8U5_O?R?XB3N z#Ha;>kX2eikxC_Y_F-(*^tRgg`?knjKFh!KAt{AyqH~qEJ0{S;Hpty7Pvn$K16-SQ zWzn;6P~%esNg|ar6kPx)aRwC=Lw%%eI@=RU=^{iz15!i;g%^VSN4pj`m#RyHXxZ`*|{MP<4%Z+2ff-o zqkVjB&n3oqpSb|>M2t?qc68q9UG0*C>RB+j&sdQyn}U71k4a z2Uc*wO#Z$aO)9YI=|~MDtuzxBD@ALrT-VgI?8uT8HDOvx+C{eb+{qjv zf+F+aXQ;f7$ex9JBnXQJyEU4rl61Cjb ze&puibvMb+LKIa;IT*aF(CCo=u++qsT|{qNYdb%sqrKUA=&S`J_Us7$-Q*@%Y$l{^ zHLDclHf*Y^cRE>A!~RoWbY*tWZw(=BhnIBqqE^cQ^V0bZr+qHX!X7T{I;W-26=mTA z+kA-VnDe|rnX?p>3Q^!wB*?m8gJOs$#Y{mWPzJAjj$&xZj~0BY0S5O$M(PDVP43qZ zMQ+pmEi!koNhG zHfQBmV+9kH9isT*_Cjo4&qhUcm}$mJhsIr6&JoJ&CA}&XTRhO?IJOG(qCrMYq4pnQ zy>i_8^;tp}+ZWsuapE1r`1|#(23M*Y(dp2VGVe4ecrQGQ;qs}pH?yqsrWPhf1Kd9P zUEL!wEKj*-_KW05j8?RVpJmGkV<`tFTtQSnOI`M3mct5Oc(Bw>*I_HmG8_@|_ zCu0ayJ>g8e$F`dBJe1Xy96y91+un;Kx?^}A_mBePF@KL3`B!L?{?1l?au{q7RmzDS zm40%*hq#H1cS){Q~opo;UE%3L_2dP}A z5Vh$Nwpl4dAmk<#GK=sRBuIlEdZ|Bq+?1HWaiuWcGKKu+UNXB;%j-En8^R9zy*A{^ zaBQ)Qf^#~&O&Xv95LvyX#t&i->=@yJXj%e8>r?5l)6H+LGu@Wk>+M_OShsS`gMC-lPP(OTmk@0aY>T|a5R%R?Rp&79-o8ZnU? z;D~+ip7pD5C>oQaVUm$^*=&v1Ey$@5^`0k~2qss*6lE*^W!7DDf5g1p29h!20o*mr z?4pECAlMIGw&Z6HtEx9%^a%#~^IAdOVHq+7W<35nO zF?eARL8ukOJ_}8SiH||@{S=}9nkU2(6Z*ZmqTl6y5fDB11!qg+S!J<0%(qU-#V6l9 zfyQqz)KA$7g9Q8KAW1o#vOfWln(1v0jYq*8S<+czFYZ}mL%+Ju9ICt=3h|Hf+F#q! 
zAU$&!*yR`mV~vH740GDnarV$5JEE%ZsH>kT_Rw6?EWXwLs5qLBO+BKb#iIHUkc?x$ zr#LHoQC?ZRjQj-=sXW@{UKu&cB*w@ks5xB2vlN(mtylRpgsVYLoqD4J?Uhlg8S;u9 z3djbdNosj$8)2=yvF`11*+VLf@Nuog;MuZ=DU^G=&@$WR9Rl+t`ThhKKq2^?k9s`O zEA2W;S951`ZcSxCto1j52;t*r1iST`YAT;>;%VKS*zOU#X6_IselgTsR+Nsr*tR1& zo4J#2SQ2RGRdigx;|KZpTvceiUvOFy@v$c*V&_xk5m)VosmVH6`$xy| zIL$;MYTR--S%!=Ie>8^d3aXtLLt1C6wIoiAAy1(xZ=tzcjviB&1^dg?XZNMg2#h!> zjNO(z+sUDOahV2?eK~7mFAG5WvNq?a40oVVAJLm#^i3AtARhOCEddANC#E9*s3ZwX}+sv5@vKO?zg;v!D@ zo_V$k&0lSS4%^cKVISt2b(L%a@>5W?qRx-3pf>0We(-#i5k7-ea7L0k4m3Nm+LVi0 z8(mOP0>8ht0VqUzGu;*fKJ;zf3Zr_@o{`(x6axWj@Tb~<8%ao)MU`@@a+cD!57T=) zU4vtD$)qO64^++8UjBi4gvym-=4=@`%|TOb-hC#ZZHd(L3!pTX6K@EHx@!;&CoV~e zQk?0Z$QuI+Y>_YbG=Wxlq>ck#$&fZqJMQV0B z^K+U(WEc9r)}6y%NA_#Sg%f1!3?feaVV442{o#d!OZ%Nw0yCpsO2w*LcJs* zs;!SZpFT|Cl&epsB%-FQZKiL=3!o>s-R+7k=1-Uo(1QoX!L>(v4|wq7 zj46-e zm~F*YN8RF}JS84u6*SK_wzIiq?RL4h0{PCRNEfsye5}fS>6CA=iHEvcA|xH=-H3>V z66%sBZSrd)?5n+nl2db0-Ixhg81d({EgJYCT9sB zszX9u`Of^F=MZ3(Hl@rO0mE@LjF0w&T4dMIp9hNr9k1NA_ z;JWfW!%Hpar$u^lB#!CJlhoMBm`iUEUo^lkR8<5`u*8o+7uWRZubmEE1j0&oRmIf@;;g4F%iSX0K7wRuK*X_v z)qi$YE0*78n-3?OiF_(|zC~P))|KFmLH7&@e$A#K_wx~}L_@%ezSi19t(hb>h}Hhe z2LQ2IVYs&NP;$4vK&h$5txp?zue=K0dQZFxRl~d+RxpUi)B*fgyKWip`AI1#^;pq0<_$3awaO%6S+V1wG5i!B!`RU8|uq?aqe-f?c3os9rk)wZJH+27s=&m5$_@kmJ@Hq ze&3E@FR4K*qXf~$5v-ilpOQ}ojb@k~lJOXJ&x_I&s)Adp$zm}7fpvuX$XP=va5yi) z6y}(c&Q)}yfcJY7ss4U$6839g(B_uNz-f2Rj@R<>_~9rW?6ArtZmoS59}`h>=mYy; z{giBpIaNp#*s*!SL9=?Xekio=!Fio`LleN0@KYl%boG|NL}f$@p8{D&uY%@o?7pYW z&o36^+>&`@(j3RQ?LVoFnk(!dOMSDA;_0mRpC^D$s|it3jtK@{T(Re5j`p>0)F?#T zzUq@HtpUm+EYE#y2g36mwq%ZQxE;_>d4(y zipgb$)xW?E@c4-U&$Kl?R&g^(oRQf zyWiM7akMX|&KGP|*9S+{-_)r3E)&2X^Hvf!dg~VlNQU3n<-i`B5lC3{*q-kQ@lLCa zaYXNASLS!UK%jfu&0xSzx2QkY>QU@MA#R1SzM-GSbiB&tF|I6w=7$DES_E5tHvKvb$CDeO89&r)t^Ig8jihmpcP)sjjEm5ly8-qaw`?0Juy%$_H=x_yKZ zX}9VEn{vmX(nYLM&8DD;ao7T{i;#5$)#dz@?{dZqGqi%sCVK4=17 z-v>NYnOAboD(#i=#4la%a^OpY zW|U{bYu!rRPT^O?FyLWNq@;4RwBTEZK}8zL9HT2fas*`AOy7OBEwyDVrTDl#^|n%b z!#5X5CUzRUm#T=GW-mHF))iHjcW9ZrWa{r`pdq)O!F+vgOn+(9I2Ry*ZndlDk>%2w zqVzQ0vLi*6tCJ!c5&Cicd(R5@4IFs^FqNLoAOj~rK0+HB6{Y`UtzPzFz+lM`(Ii@KX zcH1?9D?YTbz|0s;&VR3;hjD0^dxNu9zwTg=-TeE0jRp5W89V1FOW;wXC z{m~g-+T$MZ&19%um~{od?S6BF&=6cAR%zH*LRN$C!+hmR+DfjgvQi=XzU5G7Cm?m{ zT(tChUj((%T-Pxdddb`?vOS??X;ic;CQH>7z8W|!x30rp=dFG#Cpy@Q=f<=t%1DBz zfDylvleIe1^qG-UL@>oXc-dP7@Ystp4nh7waYXmn#REAr|4oKN3t_Jw0Bimh;u|+9 zS#IA-9u!%7){L?YJW(j(kB^>@XAw#6Yl4xd&(wAOh$zP8@mD=osoM7s$#`atLrSr&j>IG zsah{|N^$qg{KDpLgvz~~F4hoWm6`GuFbf%l(Um{|x zLi8}XYeE{w5lK|v-kiUShQ~SFXZE2$PM*7FR;28yoG2iXWw*`&$a-_ z_5b1oskX*}EEs%4^kxfgy!dBO$@oc73D|`oQ~PL&=;(*y!jt`H3EsB^NKV;HYjzLy zP3Q2}*>nGm_G7C)C+N31FyAz>m0&C%0$HC09ObCHUSJW7-#D)NS3QX7D)^}TSn{YB z<|Yo^3gFaOK?VXLFb__yBBT*ZY+>*pg(I$NjsZ<2xUOSXP8hBnH|2x1DFUkOz>5wJ zTXZeNeN=yqoY`U%Dj+%?#`(kbaPrw)rRT}dz>W~GzJxR9|0X|DnO2GXe1O`p%@?x! 
z-srtj&$-5Dzd5;Yg?~-Ne*-IY#auF!~U_nmC7GAgQQ%G?9FWEiL+KK_bH@O%*S)A2;E86V7nywKd6bx&fdL*O|R<&Btpt$JMwB7oR!R zbKieX)|~j9$3JJl7Mr7afbS?UWdk`D$NbM9Z>}0(IFYmO&%zF*pitYfe@UGYVpU~1 zw+BBy1Rv)+J*qeUC4*+5Ghgk(cNi++%<-Hkju5f$4Ad%rp2>2!1V35gMu4kp0=ryP z*{eQ!eEX={D!Jb8@2{J46D91R?dfTsK{)uJg)$X*YZ6&M5Agp_lFiR|^gkh3eh%5G he`N6lP%a)5){EU{)-HT`baMFT&*YyLKh}T!e*l7Ju*CoX literal 0 HcmV?d00001 diff --git a/examples/advanced/job_api/tf/figs/fedavg-vs-centralized.png b/examples/advanced/job_api/tf/figs/fedavg-vs-centralized.png new file mode 100755 index 0000000000000000000000000000000000000000..c9a86e7a8200d569e66f4498b8f009947067d180 GIT binary patch literal 158790 zcmeFZcT`i`);^5KBNjj`^eTE3r3eTFk)|M`q97tAgdzf=gn&ws7EozQ3yOpe0s;zz z0D;g!K{_GQLJ37cnv~E3gphBe=UDFbzTY_a{p0(6zi~2#L$b2>+H1`<*PQct=3M*f z4J{3peLVXZ7#LWtT)uddfnjex1H&$!J&Zuhxm?ah;J+PCH#Jlla$5POfG@vWU$}mO zfgumcv}wKz_`dhv#ul5`GNU;D+Z1GR#py9Hjd7w zdV7Fd=z&)*Uby9EIzxLJ!PgZ-K~&MJx%~V%{7|S)Bd}F4|NC~1-~4u1aO5@2Wc?03 z2tE8nZ_j}vVv=`u?|pw70_r()`#k8j0J4M}Xf9AUx`0<2HLngcSKNPRkLta_t;iSX z4zK~#3U1Qnf)E`!ES;_ewKA~MFDk7>w9*};OkDQ@BmCi0uuq8hhp!9_FVpt){?x!A zuHLXMkMAFwQ>viv-+;f)YaYzs@l%UYP&ms^4Igg$+5gn=5OvgY=T9vKZ2t`9|1zn3 zxHR6Hz#FEt+Jk#(kr#H5fhY7Qy?Dve5-Tg3ZCY`ze0@9)K2}Mc6XlO{X<_(q@@I8{ ze+=MtkW|ZeeQALW<7VLT|Cesc*G7V&7m53w?TJ!5&!-;vQ5|mt#z#tIer3^~2d-yk2vz-Y*mF5-hR5Xvo|1=@wmIqv0oU+_D`k zcZa_9hcEMi3H+HEFI$3IFH*pyPqxcj49o(tCOX5hF0|k|SQm51&o@qmNViIkirc47 zG!ky&*aq;(@=Zf=pln2r5?{iPYW>ZRS9 zewWcvKlD&|cUTqZAr+pDkY)PGe(2d{gu};MV$7%7)6%RyF4G|vv5o&jPkQ+Ll&6nO zO<#nwKw1=(jNOL)pLNtI!uY-$sh4O~oM=vQ3ONsQ5Ls*H8S&a+Ks@XINGH|zxauonvnL|RZrNRjZ9ZD=gXRjD?djzamXNe_&b|1g)59I3trMy&5j3)CYlgKZ zfXuLpeRX{X*aP|IOh2ma$?PtnxRJ-~qWaNDiBiXxzx_VF;QuePu?Y|xFlz+#x&<-O z1|n9rFG5$_X~fDlflRQe^UEl9c2$u5g&$SizwnG@oV*j%u+;g@dXJR)9v;|i>a*;6 z4pm&84xgp}iJO{dJ11$$d405%;Z5#xHqjg~w$5wz4v(%TGg#tR-8Fm86g+y;m|f2f z^kM6K7MSGV*Sq`KUXS5h(y2xV^Y#ky;*R{FxDTt1*z@^)HW$+Ba{_RAt6e9FPDecK zVTzgElXssScvDeGovvB$;4auKKt0tfKCwaHZFl9fn4f*m;~%wmpYAKGTF9MHvL$f4 zpM7Y@4>$Wlg)D10ep(zZArk0+b)julIC{eGZmU0M024L-u-2qhU02r3JuTLy?0YNG z4?GHdY}>o@HMP$U$FJSc5&t2Ar^8{~zaw>2Tzb;)H#A>1KjqVFc4e{9-s@B8xzXsv zyVWj8zfeKiDX(YiH|Xrjw1I%T^x9pOiSgUQn68ecD6=H_l(?%i8k%8l#+B&)AMvl=eZ ziM-d5c1l#|R25<+qo~@1X$G-!MFYIg$^+%M;dw^K#;&VI*%>l>_gpQDOne>-o8U2^B-I z^6BO#*6Juo5$QfvvD_tCYX3K>z#n~+N(s4=+G2AVN=Sq|L}-uK^cPNHMN(2HaqEu5 zHNpgOWzn9;_0@rTwMrxhGq3#93OyoBy~_I?%U3g6z`6NA+pT4JSotm_g3Hd!uA0-w zOUl?{25YQsc(Z2dW+{A2%)S)%D39<0T2Uv?)H}$+tFm!tnl9;JB?RFo z%t~mcnl&d5PMlmRMXX;i>~*|`ixgZY^i)qc`a?qQeVeTf>q>yV(|C&%-BaO|N61%0 zVX;eXu_I}#lh4c#2VYih5v2CU)I;eE8V8!i1MPBpchW_%7+wADj2dZAxl zXlH5oY-|$OL6X5RK@iR?k!{R#x_rLcRI{du6BK|9nR*WLSv7KXyTxIpUW7Lbps&$L z<~1GKff+@8^Q+|MCKMhlj}$eFYzj>0StwfsMbKT?}-PrE%Z{j zfl;4RO^x~b9VIPHnrICZugV$gn2nK|F4s>Hl=zjw%8ey*4T%@0WUW2zvd#+Qu9K+V zEt(bWo_)asWzTG-jVE^B#xK@F%zRCQqXK2eov`v=t45ci*})&Eijl{`R+yEM+|6N@ zVWsK9!A-E6xP$;~9M+#1jxR>6sONi)4^>AAuCn&FOc-jut}GJDHZEP69@^lucO#pH zx-A6H4}S($prbvcJKws?Q5Tzu==D@u7PNqHfM^urb==^YB(rY|P~h*OiUXoRPM&VN zbC>wDT+GC_`YZsm{;Vk5MhTlShF`=1!GsepgKyFuHGdefP@)v0ci|396(mYrA3@4D z+qW{w!PhKA&h_#sn^ZM$kwlNZYRi8`iH|!7UWwQgtIB}u@5PHfzPq4p!>enW$kQz8~3(B%CjwghzsUBkYh9z7b3O9NY|vO z2aANkI2-SMj&_+La$TuQJDA_4Ep>(1VtbzmwvHjOC&ST*Ac2UO!iTM?(?*94!QF^! 
z7DfvKAD)N9+cvbRyDj2Wn0TW;y%#^OWZs*8>kO)JIq_h-{g%Cj;d9PYkv-Tf8Yv*; zb}RF15i!rwNLk+=PQmP!{cm%6xi(Zf$LD+Njz{`R9pmZWZ0uX)F3Vnf{c$?~bCK1U z*6vmTS7|il0YRTTE4FeuSAQFLh50qah)Ac<)dMcRu^M zDA&CcluEhfGCgL+qO)%(Qq-z*N&ekA1@$0mp)o4a2>o{DZZqT!1$R|Q;G0Gn<@ANa zhY%DN&8XApvpEUx>oYC7yKmSU1973Z`@1X0Pz?CsPs=?-5CbE|su>m2U8ZhPp6POT5Q*FMvoKU27tU%i+zHev3n$n@9jAG+#XZva`h#sd&3C}_0r`i9$z z>?G`s%;^ga`U-fz7e&h%HjO$klHDh9h{!ocD0Jt!8o-+2rxgh^y;wX#w)t(K{Zua& zF4X>y~VIVP^O_B6R&8r{V!0N0sZH(b25)=Vrb>d#DCn0ZH` z^K5N&!v}pNWz%ye#P~1M9`qEn@N|1ORoRPLPVd$BE??O~7xhav-XB9&3#LraF+OWL z#b+vgj+k1g;!JnW8?Ba}KuF3r2cPbncZBSQSAfa3f6a@*Mj=uaw~d9>gE?X-!tGRh zIii&Ca^V!Lg%_A#e_p)+@9E05k^Z!c7E{r=0)0^{T(;*x{HbAGeXI{x_5TD&&JWda zOKKixUJ*>o+PR{RDW%30hFA^=;Uk4AjI6+Hx}e~8vua+=6l$;$&79tw5b`li*ktgQ zyzea`7M9*lU+`$3c3wFAid(9WmA-C4wDtKfqX*VQlLNkowj=`!P(0j(1)226Ep2He|x22ziIsd z7Dpd0Z+b>mKoug!=r<4<8el(#adcX{>*8?@!T3p5;Drzg2tF5O%}1&91In~!vl>n_ z_<+@;ID{84hay9^_QU8H>8gY(QI?9DZ=U*hXwn$T7>T zkZ_i_=gn=~&Wyd&r4^0)*;{FPr|-;Iw&*H-J~5#!Yz zyV?A0Fg6J(LUAo|auV5wc_|~st`W+n2$l^akx3$xc5GYKz^jZ-J3CWPY-08S^er2!{+>yZ^QMyR0s$yUS8VxUM)L&ePQsY zvo0;?Twx)9jJEIT6#>{2$#5t>mYtXlwlM9G3T-ol3F13P!e^^$(yfTa=epS2?RBK? zUS;jsG&kfQGV7G(Rikf>W9R!KY4tDEweW`Cxxmh+m42Seuamvf>r^biIkB77-^nv1 z?-aY)#Pji_qV0u#96tMM5Ka|-W#*^iP<*OIx^C3~)#qZT|TeEFPL3Kbl6i2BY!6*S6iCc*G;iL5P;*Lx^eD zA%_M5>tts!Jv%Bq8STB7f;{z{pJ6ZuW|`_XSRIA7CsIKMGo-l>H?s1+oT zy27_d2x8?l(aMr#fN_ZCH1ld>*1ae#1}1f0t#?3Nf1PwR#R92B?96<7k~mi%c&EU4 z8DQKrhhrz{1S3_Cm&|t?TyGjusOW(@BBUy9l_xFdWR`(PW7Qx8JaZG2jyqu0n@%4r zr4dQi7=O`@Q66^u7&Y6tMKc&W|n+caO+APyo={_T(pULT( zDU^rJdV6t|11lkox?z9mt&TN-g+3R(8Dl|ztbelTU6$FDw!?9+Bcjq}6kS9ZDy`)z zI_G6*Ak%$V-)FuX0Obx#TfT3kd_k63i`!YY#=8Q?EZm!WYCQA|XK7V=;5ko$} zGtQJ;h3*zjp8{Ohp}a0XS9LYqBR6qfAdscq*Atg0UG()POLbK}_%`&HU3+?Qd5JrP zEP37h>=Pb^vzW5DHQ<_4ScZpib-WM&oMlXN_F7<<<9Saj%Ly-@VzO18byQvUln7h{l<$wK6xFlczR2)@U<529ZcntE=B#@^a^&W z;^5g6=lAq73J5{0k68K|>=ewOy?@h?dL{F0?tJ4y#;#BEj zlfmhUz@zm?f!hn+k@1Jecpos0woHiVa71%VHu4 z^K8{$VKodTu|I>LS=Y*xVOA`on7@!ZKY_vR*K`1I3no%Lsi&E_7uUZHfvW=|H32vC zN1Gvei&KJCVr!02flRDq2d{P}_QIBiwq(mGBa69f%kGf+6~Du+jcexs7laMn1r3pg z-qTFPYM>GiYh6AX@17A@+ib>6rcDnuC5}ocd$c63zz+o4yZ%ui@3k%{=pQPP&8#U= zE@|B3xgYK2q2@-V&<|D?lx2t)4PlyN@c6`-?9A@c_8DKiSLoQQLLnug5gcvWz0Yo{ zX}-E{$lr$sS&isg3&$Hd7!+ZueGnU_J@a?TPT3aI6{dpT<`!!M8FVXSEXjdhVH>l~ z1z~;*>m{Uw8$ENL{_4WqzHbkOf7>1aO>$MJ5$4mxqI2KTU0M&=3TFNoJGYnpM*s#E zY}oCXUTgEzfCs)ynEn zT=khHsY6WF7J{gH3x9_=I)}1Y?k$@R(^2%ylY(~iCXd~v1{8aMe)&qJpcg8K+bQ*; z?G}t1Vi{@APLTUP+AMv4`+7kiuc^GO5KN*|O@!X>&_?RJ{OfN{E;O@NUxm?WGW5b3r}oltWsfUULyCyQ6Gy_~ zS?&grs5vUKUsg~qP(p<+Q(iP)3U3yLHtw_aXl0?;H~7S3wx*M+>pWZ0=T2+3l&x*$ z4ySI8iOTt`yRR)*$PQ;#j_JarB#aBkE9R#r%%Hnf;;NXF=pB&ZXIc9$%K0i=k^qAP ztM+u))bd_2qa{(*c~w0Uv$eaE%y4F!gc8bfCDCV}*{5Zw8Db%0zKqimeJ1Fd%$n?6 z#&$ehH?K{PbegZAZXuB%KhM^IL!qYFJo*dk>ws1|d+}Cl(4Wx?9k&)$sx^4$raT=y z^=Y6&v8kwG-^eLiUo73I=}NMX+`7BlG{+Aq8%AIK0DEccR_11uxU`@`y43fa#v_de zBKMx&4`TZ|6TLTVmkNVzuZiBlt?ubPx};SlDQ2?nJhk_VV!7A7vlD*t+&%3g0Wtu@ zaN-CYW7R+|&2QTT9w@k}-AVPV^}^!(-DQ^-0;BPV?ZQ6gFUQ~2GA!R5h9f&*W9?2N zyM1>4!GB(M1jVfb*V~HU4qBC!fUu9~XnK}bon~&eyeYTgWj78}BrkUs-tfrjo7e;} zlp!|?G<>cvB?dFr^uh6F8DVH&QS|p~TkKnB6fOMYu@6c3Mm=i&8dydWfDDVmAuF56BZO8uD#ym7%9NWom! 
zXkc9St5*)c{`(9P>w?6|U zpASt}La{%FSNON>DQF-|`trB{Gnu}kJ=~#9KLjua64eHfp+~rZN@hWolD9oSNb5R) zYSETTyHy*$&E`TLb|rRCVg{}w{uHIf<#-~G}-RD z-}#cf^_csh2n2cN0J2Hz&JySgEF>$UJ&c*-?NFTjlUC~68F`))qLw7SttH>U>JqV2 z+hsLhJ|Co7&yF%n4_1Rb!m=m!GQ4bDHm_p8Yp(YtI~ku`0$j}|yPe~|J6zpPInzeR(4nFH zKNlW!o)=uh80J+omne2PwSVTS76r1Xz5rDEHU0OC=ADRHkNwNWhJ=HRi~!hr2cZ5RE`GW5ODzN#n=W<-g0qj27hvcE5}Q9< zaX#>Jz5PXd2F9OKxeO2YKY#zF`S6b!UZ6QO8oxni_>cn(?5EE^1wH?iIek%xe=4pR z7`7{})S72Yr*q7U23)68>F`k43+p5Vj^OCM^yBV(7|>J-w%DPajS{}l5P;Gx$JGmX zF1OKfb+T8_S$fh|x@&Z`m3BE7OeA`qE(d~0ntgw98y}PNbLWE1Y9@*3(T7?~tqSDj z1PZ2dBU7GnQU+78YBc1INpi`SI-lV_l@_(p>Inp2q4N7DWft7&O`@1+3*e^lILFll z%BI$C)6Gx9DpLw%pOq}L7~q%Gp)ui!ib)>`&~++h{Zn~2wMv$eDjNtQL8>Y1)N~Uwl zTJp{d%TK=a-Pk&Kq@K?yWP3akRh##R+ z*6QS1?w!wgN}vKoE;W2Rontiw5e4M(i3jKAM;4nU$O`0@Kv5#s%Z|wh>y7Rb8$-mi zFFW4*ynIdZfiX*RSq(TRdFN=ideX|12011#ZYYJdx=&aBF2rl(Kv1GEd(IXb2+|y3 z#;Naxkf#p-!0mhkBW6$UM|L4z#iN$K8h-ZkBFf~wy&rddZvaXW6Fvt69N&~OaKpuh zfD2#DauQ;7>g#Hs{nWM#@T#m|`zimkpCM_$Nuc(Li5wP zD*6^;xe9$R^iZH;DL@o_+yPt;+;8ypaslA-thI7sOJC0K4S)v$2EmBj(<^y$l5=(+ z%OU($tta2&n)f0^#VFSDz8ncHpZ>94k5{uOeo zNH#}jBwT}Y5Af4ARq?f>?GbF>$DTw18i3I1uLhu|6Bxlqpd&{A3d*3uGVuAT%m3v_ z2+CAHd8hIJT_;1{RR3dPXH))H)1gs@RDh5Z^S|n3=l-)V**O0A?-!Rr?QaS4!<)(# zv}263;Fp!%$$YONmB|lSE@0ia73TYLl?M?#fd-)N>d*B9^i3_Jsq|0fX5{)?N)VtG z>C%Dts+^L4b5Ds(r&GVG(CsH`0wzF()cnJ*s{38VKk9i69?pLU1T25*{Ps>gr$@X_ z`Kp+25B#l-MtA8yF>iXz;V2yakpU z0AK|mw71+S3h5;V)ACs}|nzvQ>SHAZD8M z-!uPz`H;V9z_*Po5E1=N6a3G_ehMU$br?y(7$}vucGZ_-T!|XIJ@C$|9>3* zFVV_Bkqi*geF@e#)<%)n|8e-=Ai@6)7-73~z5TxfQvU?sU-9R*9KIvR|C3Qgf*n$6 z^)EWkd(>snxAw zCu$k7ZM}h&aA)g{hEI9MA$NBAerHPm2kP`E*u=iKw>8GY(y*gKax;1Qw6qK?9a)Bo zTVc^G z9FE$>%Kh?yey!OCzNk!8ODFTUCc0V{yENGj$6Seep`<+&>wTlwi9zPK|5@w))4Z22 zQ+CltvZGN8<+h8b%rV@av7{4PNg&~J$pz1G<-P&r36q04FtsD2H?ZqGO~VM(t4ZY`Os;_oJK2e7>87B)RRH?1t%FXev0t<=+8PrA&uv4eUd zi9IaAJ9oWshcEwswMe0>=PB~gBSf*-_1XG}9OfczWu^FYa4lQ#&FSJwWeendf^A?x zRz~~*fvL(P<0WIL}o78;wYRDE=ik(INhf3ZKgCfu*xSXMqIWRq{%e=N5kcT{@*& zs-DjxZ09+a|1@Ezae;%bn+sI)brLzM6#*EJWFTdzloSr1kE@(W9QV=i`8v=axZ{9J z^3EH|fLR!Wx}l11e{M6kqVx_YmCebJF`w42L|~`a)NS|Zq!P`lV~l}yssnw5hf-dbWO^Gt<}DVwXBKV z8+dV;T1Qs0fPUGF^x0B8A8$u{vQf@*ob>7}sq!E|`2WW|pe`!sq`&?B1zs|cj$l#6 zwMvbw>ZMU~rH0B{%LPeamHbEkn`gMP(K_a!(MZ~rs2CEy--?p!*0eB8+pF1(Tc7od zt6_=I-tURGX_e^!rUT^c{`4|Gr`l2vYHt=xSn`$*)9yADRefC`h&7kzTpEIGPC|Pg zYw1C-I`IL=*Z-AKdxcM|G+5huakv^OY{IQPXT(9&E>Ds1>}%}UFY0eSwP3B40O3fk z-J@d5zPU-K-`&)&ONcO7|} zQ>Gth@kmRp0kq~K=wjdN*|UZ{4XVdgFITJrXI4eO?$jS8^y&4~?f}S%B%Z-&H#B!@ zt!aXro}se)g|)eYlHvI&YSbM$nDNLf+rSuPuigJ3qquw45@NyUUs0bp#*9AF0!o#N zuam5+a%s}M&4X0|ig`ja`7kG8nUi-mrzZS`U_Jt+6a3bLLn8mCuH)T%JNC1PmJ%eR zR_tGpgp(YUT^0UJhW?5o9%MGFEEW6hSu(4VW7zI8AprG~03NA_mK zl2~5Ydd~9WJh!6KEY{0b$N>pe4@{<_(Rn+8SD657{v%K8wO$vZRK0Ux!jC@SlEs}U z`yA?k4NKlOq2Gbc&nYoSoaa;wtAB#uM*gi6Psx7FAo`d%gg8i(zEkvpjcr+KktQ5y7_a4yUWHZBOR-Z5Ijw9^^d=jK`?pPIj zfb#O=^~&(*yY0U@k*?O678DoICTr!`!{Rlc^^DdI79G;1pp{Ws6Vy(v?$gr9n*5s7 z>6#Zcu!%bSt4Ak2G29weze7~oD!aK2J( z*(7D2#4TM@4(#9B3M9SrRdCIac5!e2i3Q^O>6THO?P`Z#Sv23qHED@`7KF0Satse} za$4^GR0U1Bc<5f2^mVhG{1|LMv!6LdiT#A&;`OD5gcV1l}I;dK)IypsyJR7 zy8F}lferJky~})}M!*fk?7q>J+w4#lL*Z0!AR5M6O~51^MW`RV>m#*gWkGSkrgD*R z?-tReDC92lv4EGbH*c7S+WxM7YzmOeEBE>s>JCP%ohok1EQ!6;E*@bo$?Z8vsdgpM z+r0!EQr3ulQC96ml@@bq0g5XfA6%NiXbB(gb4(??KB3|RUxa$I zO+C^ibojxhl23o%RS0_LnvzFS+ zDNL!<&|YhOV?MJhDiT@}TT1XA@6kVbDF1p4!lq~HGpX8DqEfe%A1}~Wze_~ibgsj;MGf7+%|>hHNY$s$ zZdOb;;n1kx#&aU7c+=<1eBQ_ZstkhY%@&{}HFZ>45eq=NoLsNP$Md1w5bxvyatMYJ zcS^h&G0>%5(tqVYS%zT1Xb~ww^SRXSXp5kufRy+Js5(`wQyrK37s5+SDsVoloCu`U zT)g^tw5zPwx*9F<*e=VCRl9yMr8B@ekVy)cdg131KKcMt-+lLvDbHEmbD-%;*Nn7d 
z!twW^lgcBIaaz}h{O3VobrNSKLQ0lPB^Sr3)wzP%EF=|dng4PyGRzy3oUP zTB2e|pEp9!qTw*_Rf74(LSf7D2Msf+qBl`cRZ*q7MClq=LWOPEvvTsza}&VwyEv8k z{dOG&(Q$^tBckumo2PM$%W6&zp?$jfA~dAOP8~vzPsIJI>;<F7B5VDL~el0$<=dgzkTYMV`~gV4sfFmiI|+|urivr*7E$J+N&7n}JstfyMnN&z_2Eo|xpYreO)(>? zeZkWq<&vQmkEce~!kP62h*eR4O@%?qt=@${PVhTDE`evAjOX?lS2ynr6%}a>Ie8eV zQroZ1fxndTw!P2b7&F2dF`1}9@R~Wwb|I|7<{lS)U`bONeqDLh$=Zf?m#jlw%_!h;scjDHd-YKk;PWZ=&12y= zjQ8u2h{Ccd-t|053I@Vk>!3ahg5Es6Cnz!!JP3EAhcA zb|Ktm&0EfMAy&}idBOohv?nDdg^N&24;qcN`b8XBimZ7}-Z`vf=+cesqK?9%IHaf% zV_RQNn~YjV@sb2n&Ha(W=)?{v{*Uwt%VoWyXe8? zl+&*^Th>jqKY@_t>N_>-)3^_fj>eV_?HKRzgU*E12Sc5)bg{R6QG^&5F5J+Rxl7 zmgFqX>Ugc3SdMw(c>)`61C-By4`=n`@;Ht4H+B`d2r>q4mt4ZK8#IVEpXNp>zSsvKilj_miSgv7}q=twM3AGToQv1KkcH_!|*c8L+4r zs_0eh4KVe(&D7b@JRE$t5sGkbuBQRwbK3#(0+o(k74@uI^`7ciu^%j|UM1Me)Hj4t z6dmTuOk*F3ie3x}tpMB-)#KFTQ?)SJ(@}508QCfaxbfE67OKF8)D6Qa{y|Cn%Jg6 zuo;f))6dpBJgOBA=g8Gtnaj7`|IudSKA$>&8KoZUyj+Brvhmq2!y}&Us8Y_7>3HfQ zXapQGm31|BcBx`LB?}8tjxMTmeY{s#{I$Gk6GYX%Y+_IhvX%A3m8BQJcSU2G%Dq^`pv2Ow)+XX`xW~8 zPlHy=0XhKfkD7@;qoA_E@6#tgj_bN{GHOO!s0#Y3urBS?Oc?TbdQwa_{7~cKdyh)= zSTJweBms2JbeF5UbeYu~S&Kd-Jxj8;@jY%~?nsY}2%^qzckiQ&T7=qOsp2=#H_f~^A?hGWF}czfQ^5A*gl3K2cBpc)mcuC4d2IkmWV_6v z1AllEHXlXdL(^7JPG(gkiBQD6yi+{0yO&yTB}{7NrCN2a9w8d}%z9EyBAS{N>CKTR zkilUhykZuRXJnLfw}-Z_DF=dVcxu_EV@1?d@crpW?6$F6v?fNJ+l96$x!h*ZvJiRg z>~ZSSlN{aFGez8cHYkZM*VIo<3RG?iIStv?7$`bidKFs;UrS((Be5kj+`x1Jw~Ow2 zG)1%KcGsM34uFaaqYXVLS(D_7p6STtNEQW;;a`9@K9fYko_SOgXS=t`mn)7TjNN6H z5>R*f-8(AMxM#CzMy1GXZ~gQCzJJ1svf`!ii=$5s0s!h5KBpoD_~>C&V@80_Txm!D z?&hb!D>i>{&w3K39vR?f$>jBIJ{R7+`&#G|yXUO^i%>nANZa==e)i&$?yb)dL(*bF zS7Y-EJ^`Un|CzGh4YMc#>*~$%O2QQE#$`4icr3h*Cr%x^^qvH{nvBmWizQjx)`!F2 z=kLa29G!Psd)vKwCO2d)fU#-pp7NXn8$E6-RneanWSB&d`m|BzDu$4A95AyKs*J*O z=)2$|-P=-d%;pf9sQS!!^Q^iJqXx_j%ti)NpXDUkgY|Dc4@Y~>wT<_@?_t86^bE0^ zr22?s4;*(5gF0n)zZ%s738r#v#L9TatelMuq0j9~vuW=mpXVK*A}-Xw8ao)=;+&qu zQrMY{LTLGz*p>#AYHc~3YT#S_c+SZOwUChn!C?T44h=&`RYEHWc8$H|47Wzp4dg9S zs~6gas?yjfM%3(!^g6x=#OikH3zR9^RbH>wZ(p@D%V)!E3o%u!D64y>kB!Ou`7%K% z3TMZeqA6~Mz#Q*3zBtgrn(hWbm(7h`XOBA5MJ-PmJ6J^I_EvMmy| z-jS`(h9Bs8pYHftO8i5*2fdIJyTLClBKkS06lL97jk2P15A9KzQI_!PD@<`zp&ZH^ z`{^8jUWPsNVU%3togFZ-K8V+#Cu?tkaSqCgcjLp-I8A-Z7lT3jJZRo6`Jjlf`(%f; zcg1pYTes=D%YfjABJ5nrQuVT9%Na88ZWfmEHiP~iIyK;e;9I-#6ELdLwb%#7J;Ql| zD?TBdLwh4wbHuA(UzGlAx$9upMUNWtT5<`p;AnGO?e+^*EpH=C_97md%w|jAUvOT% za%L$p46L~bwzzY+QrBz!>aO!d<;6+G{8ibxe$D$e2h{%v4Y|smYKYXmNIVmc%2~IL zrG_8&(GkdhdWRF{m4fPhDARmf39C`dMgk*s&_|owPssQhr>)sdrcP_B=o%ue+S&_` z&7A~^8Wdi|?j2lOL-pdq6iSw*dtO;+on&j7;%TGsPgfdN_`G;&V>5D3m^~<^P93Rt z>T+i7ek59=V)y&xEYwOt-HA&k4>X2)6oV+s(glS`Xa^u%rx zv|Kq)96IukipGZ90w z0s`ir%NcQXeV%nK^Q?y*$eihVqZvSwDo_hU6FjMLZAhj3?3Cy5Tw{yk4d8g*561VL zLbi9_OjJvkSl8p#gGrQGu6(_v3$_(ku^_loQsHu>Z2BI2{sA-IjT>D&a^W(;mp69a zQg_|B+smV3XuDo8n?zGTb-9K5F&KC+L0S~DCDln6``(axN>ym|_3;O>R8%su+;|^T zHXN3Ad@Q^EDjUm=Wfuqu;#y8VY)5fQ{MoAn%!@~-2h0ch%>AibbVQtp=q^V|D< zRPO~rcLpDOd37Ql>J%h+EEsB<3B4q)Bpf+8-L}{i;Ta|=sLNL9STx77B$a87f3BZ( z3d_o*MjV&m6*pomYloq>9dp{)qZOq~c`sE(rq3m9yhmP@b_drVk4!0P3>36-i0YZ@ zvl@5ZY2H#BjJL33Q>r44rcZgG$0qD(Gkpk4mn5uBqro(nNFIujxDE}y-r<=xDBVGl zs}7?m+bXBu;wAhWYYMR#=tiKpUt6_3e~Hm5&16eVke8(jX4xnLM*iFJ3&=fib4*Cd z3vz_7j@O{UzFwXMb30j?)PTo_rzEX)fIhO@ z9ks-DcUz(ejP&kuGhe-#8F9wc&wgiCANO@jAogK4$w;u6lR!dGb-2T4r9q$5J@K^pARLA}{FagFV;?YaugZ08keZg9PDY^a2iY4f9T z{F!6wC=cS2-X9yyXGT_ciwFd&CwIL{t%3{a%ChwsQaSN58CL#i>K%1deseHt(8CAF zLEUROVRWaf??m#EyoQ#CLBwD$@f0xO`R&SfS%4sbGznxGfxF|NuUmpVR%BV-@A#IO z+?jD7k}hJWG0Oq|vSn7xXfBs{ToZdu-d-#Du2?WTty2?gu6NkgTIbd)D>*dq&MV4i zLUpMb{X}`ECYer*zBskg>Yn4=YDAivw(SzLS4;Fpe`d(mS6KC?5vBfE)?i!NJb0~8 
zCN;5A&Zz#mIu-+_K3`yJl`eW}dqM?9y7YnSzY@K-mP~Zz_;*(vFW*6#E-J_vlLP7# zw6B$(^zv!iI~+ZQt=Z0sQ2fV#a>VR%_G2C664A11k7WgB#g)mT{`NtBBI3P0CxbYa zcVxAqfi#6oz06GyZgISgd1%&qBM*m9aP&GNCb1@EK~>oAwSq*x1q1%Zb7jZ9y^Q*W zng+F(`YSU(+%(_!_|o9UPGF0&wHZ=HGN%i$04RdHCy5*-t9`ooEN$XWtbbD@+wfx@t17uNYOe|tPNwAJf^GNS1h|55d1zbLI3o0nxi-{kMl);xD`ZAcQ?~I(hR5i8qJ&@C3ciixo(s~mzS%e={js$oE@G&SD;2(9D z?k!#0Tx}fR;<Yzfk9MFxu84O{ zob9d$uXLqaiF=+V-fI-sid9d+TT*$ZSCkC$e6Czx6r;@Gwq#-2W1QKzo#jEO=?1gUU!JimoEz9R!GDSksWsEQw0lRhm*LPGJm zb)b$(o0!fNSL5|t(WQ<~C}um{ocj={D&0?6GAxScxFxTlyYz8AwPqvOn~Sw*BhcyI zsvHHpfE7GK18Zzsm$+sa{DD$ho~F6<$;9F?ay_i%`f>Frb9i95xI~l4orz{tG!>33 zx7xa7R?^vj;u)n>bYFi8nU%Q+TFfk);S@zIi8+B8q4NO}X zn~#fl1yho^oNQe`~%dM&P98u zCa7hfbsOOpQCYqD0`Q}Zbb2BD4z*B4LY!^{zbvLk+{`XmE?FrKy%o!U&m|9hq{wq& zR&r``6i<_B@1*T#R=5?t(3-DC_N?pN35}y(%K}+OWUClHZzqgGqM9)UY;UYSiBqCb zv7v&FO*3kqRhS`o7$L>*%wYZbwli4b!ZpS5-<`a$wpVUWyR|PbJ%hGlSD&TdHk2Mo zr?kCJOJBNnZthcleJDGP0-U*2{Q2aQY+a1*MC=a8owi#?-?#$r5BScvh(gzb>RZgj zgc(PClN4i3gym5>nq(jk_*(hm;t6FkRuzP}@%-Y^jSp=+9Va)!0XlF@bIb-O4)dzG zDlKN&0y;Cd+1vtOWUiv>IGF{>WiNz+N~|69G*{DXQOt;N%h@skux6C(W9Y-S#hb+L z2vm7GV7IM-NF@g2Z^Mx;gfyUE@Xj8bEi|^2ee~pUM9Kv}b-^tNl^xOG2-j_FjpB^M zzXsleQOz>FbI~recr@^OXk177Y+;c*-(G(%qHA6`<@MF(^l@=P&ZM$q4(QpTY*PqkXU0deHbT3WUXQJ3=RF;+z#z)6A9cJ!@YPaGBbRfET1J#pGT#?FajcLo z=}gaeJ}07{6{*}#RSsf}-)QS*uIp;k-zwbZi zI)9w2=W1T-@w|`Q~-IZafrmYBwcktnFem+@w)lHin#xs zoMbS?z#9jW9y(lin#hNmK7;?9?UfTuXgCay`yZKyI<0vntMyi*hp_;X_UP5<$1HQh zf#30ZazhTEk>%jDhnl%Hkf`QQ4XrwW#M=3aJw^Z`13G zSgJ?@Y)MD@fZB^sHw`YvI`_5IPO1-WP$$pG!oS{VB>~N9AWn9ksb%`1rXH`9ss$xZ}|q zHpvAxT64yOJCI17%y6FcYPc#<^8|>m^yNO z@!2ydu%m~=9~-kDkZnA$LgZDv%g1i+dqSrH%sXhM!p?dwOX`ZWr(ZLhWb_-m)$QI>tf5P`7QiTncMyxkw2r_p%|!WF~6pMDb7 z!^uL7*sWosDkD#){y*;@(^2ORpD$J2lZW#IE9K()c_UA4+iHdo*|ZpgddtRTIogF4 zqabJA>n9!mGaSwgMQc-(Q$PsT)$z7~8^*(qNzU}atTbxrRq zPJV|JtFLBirc)=Lm0*A`vPnv@_O(gtkH4DL-W3`@`qp1rrM_P7L-j^-#>1Y2vw`b! 
zWPug48bL9_8;22(xzxNi6B1Hd2qJe8C*JftxEm62(KVqo4e>H2iE#2x^A?K6wYUNv zPDNywIm)CIbg=XGA3|2n8L{X^Gnv9TIPFxL1}O!So!UERHKMH1IbtBOtU&B1LWU9{ zG6-@9n8#W>)F~D#DI>m?3!=?@fE>C#sA58Y`Dd(;DW?Y}bw9cj^~pC9ZCZ8rj5TpB zyLvjE8I4D6TgxZ*3q-}HtQACs_O&mcTf^RN(ZIOf8R|Qm_SLY|wSna6j=s(CgMy#K zX)fK_xfP65Jn&rwMt3kL2Ei8Fo>z38BDmzgzXVG2^p?fUwfuR5ygjuK$;ptq3JkSM zfB{xg+f#&gHbX0%385+Y$Ggvtdm5^js)gME}P>eOg>%YOyiZ_$=>GH$=Rh-aKj7I!R1!SE}1M3tq7$w_flrJ z?jHT}+@LIJRW5vfjvaUji&XhC4gN^X(_=*1ZBG?fWY+=EhX2t7n3A+(Lu^4H9!ihz zKfXH{aObT{EcbQCx8*gm_cRcR6WDJ)xoUc#R^oroi^OB~`uBEKsTUV)ZS_7#$YEX- z{~2AS5)pidsRT~b%fPz4O84wOp3h!*VUFJi>%0I>|5|P&F+RhciO51gaT_-)lS^lA z;DvHsFZ#j;KFpW+WTrcJ8Jr^&`Cfx&(N&1j@=akwL3&gACe}h*+ESErCEB8 zOE-mvV6^82eV$3+qWgwF!$VsS-K<G{eOs*$sbs6AghP0nU^%a|utqVt*E%vEhIW@OwQ2b&!+L z`LH^|q|pG|d9MUzqu{6hv7(nIctt>_dV??rG=u0z1=30>qYY=}?u&1?&2jSDs)l;i zJB4pscF7F?D;I;YtVivB_|tUPwu~{&%$nL4Wfw$Fcq{zHZKC%7j;g_jsev@tk^61a zDQyxRldi_VBLT$0CJ>8b^-1T7C64l#o2#(#uwt z5*cugrF*w5r~!Vy)3s=nP0Ac<1$r{lO7^e2+7sAe2Gu#!Ee9rZjU$!>c6RC^_5<3^vE z1I(0xr0A3DiZQ1um#$~_p55z`kkMXGf8o6KmZpOizO!??LhWK1k*`0TEqkgLUb8VIu-+SsE^)2PtPor&OIEr(VzLq<5$%<= z)UGv~fe4NPRU;FAJwmgJ@cHpHaAYUD-%$5NP1E@?4SNaHXF|EIqc@ z=98ROy#mOu+!cgO&{)@*j^`BgDbQq!pY`>~93KN=OyFvFhuT zoRqKptpNd5XuACwe~&GJB$bv)7;V^jaW9o*3kR17C!Yd+b889qvTvq7|C8|iSUo^} z&44ia_$PuX*G`4Jrsak!*&HAyDq@R%iG{eFXs1(%cz1OW^bZQLER8K*M*UXdJt$NT zqmcj};~B+kSDpbmOWQ+1jKiD#Ho$%s)kd}(ce6Rh!`RENkP>*>1{^q9$Xa_toa-BH z*`w!7^R6FiT^ple5#Vn z*86qpEp_|LU7@Fovz;Q!kZdQceH^tP13hqp)kdix938`&L=`kNJ zV|{4^g!4L5;iayzJ6@|A^-xt9>roxS+rZJfQi5fvwOd<6%T z<%grQ@9))v{<7vbr=(hcDq_AO)2o7GgntI-oC)AQ@NbTXhgy1=(Y@>mqCmp!W1=)E z)i4xDb0PMJs?Jn@bE+_Tw=}=6rB$X@pke(IfJm;L2AykaJ0Z5+Azw!5c#ggs$EJFP z^fl$3LyAE|D8W|0xqGKT=W+@8CNF1d83(Cv=BLt~<`wACv0aBWg$CeLL#ybZsvv7S zD1JM$Y>aVwPw_EH+Xcbj^W^jGtRim>mSuW54oTz)ZS6t7bJ_{&Q`Tl4hiQ2KUrLO< zLERoi&mm>Aojj3T$XK5&YEoxk?WzB~x~%}aG%LW>#*^9(1Wp9OffM92lLFhn>!5ZD zWYq6z=9nmuRo@8Kt|9cUDQeMDrz4J5N@HqSdGW;6qKn@eXD}HqO{XR=h?S2FjqL)L zIgI!uROlY7roP;2fo$B+(yt;m;idpPG6zjxj26F`MXSR^S{SGXIjUXmye|OD8rm}O*ZSbS5@$HqDzZ^w2jrP+= zO0qDUwf`FDdw67Cq3cBm!~Xd7H}WsV0;UeF`6&*(d^&N(niupIt^LW%m7*tlt+Nn2 zp|vmKt+)3%j5K7wTwrhE2uX`*!a`MzP{-WR&C(~=+@GC8M#e%aS7P93uL}lPy8)`B z@T4v0DiGK`?%xE67M6_Fbc4Ljr5%yg*g+hM!h@1jY-vJiY+kO>SO}iF;1)DvKBErbuAgTg>y1% zAD{$3L>oW#8f3b)tuuWxbsvn_k!yZ0?CHu(^X@H*JkY80J@^urHN+VR{3|B)@5aMt zNf!=<_2j+&!wDZ_H*}b6z6g)VIJriYO3bBeM#>L%|Ci3VbP~7FR$AdOuJ9O83e?D- zz7y74)srr-q8YR;9D^L89*YRA5!ZMvxWeh^)vK+I&xKql`hE@-uM+}Jn8$VwP2u(W zM;<&_sM!PP&FjDRCu^=`Kpm3_lS(1@C+F3|&>hyX5FA>K^zxLT99@hUP6)|AZ<-3L z&8Z@opUiEgpkfCHb10}26KkH?l^0ZiIPNU!L?O13I_~qn+bv@Xlyz89apU8Sg>iY@ zr!1$pE@+a@_V>td#I1LU%UHFxe00;9u`n)j_W0Sz7W^Ob&&k;d7kxz>&?tZG-tGeUE*<-NRh7v4N!gPe*(i#~uc8EZlqBhepA z9MI`7iM?>5^UeQ!Sy-2{+#O$SR_+XFf>>#5j;(YDSmrmj@ z*S-&%VA8q1=C?N|rv|5Nq0B94o(4!{RJXnnW9J+jF7%TPr?)`lRYipKDjlmpT^eS$ zmEDO<-qN)MXTfRGwK?U4T=zOHWcE`87_S#x^$hW_b|bexJ~ykoeXQ9_)Dda9Qi=uUBFMuqcnXl?iCkD2{BN<5Zrd)V$tNaVd^ac)?zTU|v= zl?7(P_GD;lZHLBV-o!C|d#_J?W##-T;`Q)iDMfu=DM5hB@{RRGlQC7+o#Djr zuPT%BVgN=g>A!(tx#GB7E$>9&KAF&@@<{9}jmj50ZFvohUZ7IB*m?mP_Mhw}`i9PH zX^iMa5>#8^I4%uM2wRoKIOKR*KRYOU;Xu=;mCL2j%fHk2N7bwp`(_f#MAYy`^ysK- zZ)<^Ab0*sqk4_|I=Nc%GQws!n6-#gCU%F803Ix?ESL})rktMdI!JH7*WoBzc4*99nfTxfk==DlZPt3;%i8K5bidg;mG(UgORS%3AnJ}F545$%K7G(5 zbo{WF9_92v{KXX%lr#l%Ui*Yo@{|J5?RzZ}vV@IaXRV(;2*PIcoV*?TNIZmk=FYWb z=~uIRCmNwVS^`^eh2G+YrD<}cI|1OVVV-U zvZF`^2inIhm6&1o(f;<4t*kukpYO(sQ=KR|GuH`=4TRj@Js=n`1yt)jtcrRZg4{jW z9!v>LyOp)#-5Dl|KOamNcy$h~>&^?3S3PHsk8fN)vX=3N<@>bK^!^+uS`&zYeB7{2 zkiv@HF^0o#oZICn!qvTp>3Jz<^|`F25vwS_Y(%ktNm*23rH-uHW%XF0m{ zu&aK(c_iC8F(tREVFJ}-f5V 
zhwf1X4SB|&0*$zO-#iS}Ipl>y7n-|w|H;CkyEP+c85`;`{Gnl2H(3LpgH|$=>Q@RO z4SYl5>yigwB7_UBMLARIV!HSHhFPo{s3?k0__+pG%>mr%Z`alvhYmRr?SIEkV`(^n#1vryDOuR`@@r=xHxdp#+7gSDvy zLJJfukXOg5BeHjzUzo2F@5B|yD~ zL%qMluT_q0K4s>3F8lr_m)iSl2!v7AG1GD(Ve?$H37+vS?1m08Su)$5@@^@gYNO>) z!4WeDzlS>%Pq#Bd@IPDXH3R?TAKYG_0IGXQgI-l>ztjPM(*JR+9zN&_+^9WA{;ag} z;Z59odvGlGy(6zS@4MfKs=xj!`-IOCa1eE3{Nxg_E}0)M4vvD9a@WmU zNbj3i6R@6qB7qlX0M5Fd1+C#cU(764uv=@h9^UXbIgY#5cauPt8IjA%?9Lj#@-7xa zgdehGp>lfAx604E_r1&+3YjZKAk3KccJ8INXA7$)n%S$N(-FJR6Lhh`_e)-Nj9oEf zLSvhSRnPzMqaQ}VI42@X&M427OZQd!#lW${Iw8!Ff>&kkY8Cy}H`KHrc<^o{4wr3k z>Yc_A`No5+2J=?01U6u~)g9p{H1(lrXmzdc_xyKL z!e(btN6(4@l9;IkdSRWge2P4Ooax~B6k_ppMpR6a*=E(9b(ma$9UhBz-%ta--e_Tb zT}s8sW#j38>E9ImNWWnI72syE*mgpw73TlK#N}Es52y8ZmCC;$Z`BQoQe4$}ev=kLW6gg;8 zlz6L}7+Ezpbe;(|MEcqrb=K=eq=hM@SmKUaSPDe%^+;(JQubuD*LH5=R||$p)-}A4 zXUKKN=J8OwO7A#P;@6s3rU!IEl*XevWF(!7^-?=g9FXr$y#=rh3?B8*m-}2G+j~w- z9>{XM$mB{N+wYVn*Fo5j4DFSTK+tR9)wlrCmIA#91l5d?Apv5We2V#7zyf$HU61f2LwlacJzzbyBtkvq4L}_C0c0A)^p2T|1qP=5Fn(qKq}H+t=@-iTER2kQN6~ug5wIxh zHCFEuIqb+o2MGS~QaE%YUTUiEBkx?ENYm0uBKjD_Gw>)f+sK}!#$)-Dy$5A-f*Ttm zz!-bVoPKh`OD)d-tI>w4< z7+f7V6sqQ5%)8e)7LBf}t23SFYWN=PgnN|wGFEWz@-U?S9`D7dI~kpKVjycSx*fR= zyq0T|=|fZd!n%t7xS@ZuxYc@qeHxh!#OENLrgVYEd0>J0-=!bPlwSx1^^v>mNMD3N!887XEgAGmo$uV>sXuesAy zxkh8u6-ZAVtH4ISlDoUeH{!?rjoPW0mx4?+BoM#{f?@mH`(Ot6%QjQ4{LV1#mZ8Mw z+i>DV`{ml_%~WR~@7AmkDqHQj4~koG&IsNK_pfkA zqg}dhbpQJPjZherQ)NV2f^kZt(7)7)8?GYWi2_2~WQ6$y^Y=B>z=ObUqOo0mOGTN% zh|68^y^kpyF~J_65!4mBEV;AyrH&9iri3|yK*8#RFGLk+|pe{$% z_sm`WD)K3Iko^Kc#E-v#3^{-#vk{48ZMq2yTtb*M9*jpq5eJ7jW?J{Gk0KKi0yO-t z-EE1sN%HWR4X``Ry=%SHmkCAtV4bKc# zIo1)$evImaN%^8Lk{>vE-%RbT`O$o00!eKoXKw&y)d&6y;1z_7+)Jh=;hg@s^tHRt z8DHmnl6=znEYJ&gQ0XMS#h*fV+=3tX1DZN%OZ5lH;ciP<(b({*&%H0#?<&d+F5VBZ zvO6O)8x;vf#|#Om1LG@8DVVJ4y}Rev{7A#R(R;+m9|CB&fnz>>Z55H9a;)B3!#PbF z$kV9&icj}6n-}d5b7;21dvxDr*c{iwmM0h@UyoRuTEjbENVg~vg0$T}loIqTyMZD6 z>KY=nLfRz-?)F)MyGZ>!a|wSdYh7g}h%tFs+s;3X8KP6J_Qa2}8W-cIiLX_E55#pc zld{$Mp~H>Jg8YM(Q#}RxXu$7N2Avaal~@qeA9U7|bfL++Z6O&+#vRi})JE&Use#PD zvgp9L`9$8Ha?>;z?rdb5)KMSmc4f#}y1to88GJoaWS-sQM~MCBaUU|M(?u+vUKbATH8dkq+x8@HsFend%6qtzV8bllXl^wJF(s7J|%w3E`N~<8NfWCXLAy&G(L3 zf5kY0py{leuZmp@WpcuekH67SF1m!OR1&%OSK3*s1VY+eV`jB z1bcvcDJAiBmW(Q%QwMBPR=i2b&lv@8gT3IPb(tB64*qO1k0Hn~tEUL>bMPbn z^XHVa+0trZfZXOe11#K`El?zP-fFJqByzs;r57qO?pDS zM~oX1Q2f2?%TRUh9|T%pjdQ(aX~06P2D=uC@8of^{YSZK~>kquci{ka=rNX5q4Q}}O|`sLy% zuMso7cLJl+f_6bsd)!hZgZJhCrCjcdlaKC|=Q=g==MFAZDD#n#=>a$`v`BCdYux>y zC4*<&9T!z*HN7V|>M~UCa|#^&r-Fgxoz>H^-zroRQjbsr?{b}5v0!h$VD4R9a*v;| z$$;O6BW_3_;51Lt6!8svKGU-${)NL+DWJ0J628c80wIKm(IMMrSy`_468eamcC%B)~K}sCd zJw?;!G&Xo2o49foxm;gjQj-G3e_49C?u~J$jk&?;(H$XHHF@bQ;wrjhNlQMg<7SJ4 zE$uw~*sa@ex_V}^GAWfBodxVDzxvur;!hyN?J~Yaqaz{C-UFC>+1`T#oowiJ4Cc6P ztxGZ8|K(iq%kKR&ymly%3sochj8~j1m31Ly+X0?y91`y_tIP;4yKVX0q4Jn8+A_U# z|5+~|nn?(FwSyHcN!dtvlfNI2LM=aiVIKM0J>7aIu9zAU9l&rCdU(M={lDncawqmX zh2U$XYguj&%4OwDl@;M04gC-AyCsy~{%4gyt`Lq4IES0T?^S^ZnbX*(R{zBE@0T+a z_aXBaVFpj?neKNls&W(PoUbk96&_XFG;n8PoIUQoAdNITr{*bhfqY(l=JSq->MA0^ zWT?V}qW8@);#Dy$qoM}bW8qc2Td?a~EN^nn7b~~`Shb`x4haQdA2|g=U0-1)co3W* zFG|~r6*B?5z-V1BAo`5@1Zie)8SK6L95n68m{AKDY_1EOAX=T4Y*(H`tXX5lowln8 zl|jZm@08Ky1`YDkd0+X4477JoHs;)G;P{|dB=UQOx1v?EBk&G~>g%Kf5oOR`;bGL| z;AS&S1TVLIjfs4}1MaCQxTvIS5AaEFntHD@ddI4$CL|Mt5>smx@I|}_0~1H7Po#sfeodX!BtG5y81GkNrN0#=f%{ioLK=yUp7 z$5`D!tx!E62TKBhSds68ukU%F^__DB99C>vYv#TFXY;`I*~wk*WIYk`Y;f>3CBugB zGaSuARRDuVFn_5D=%3XM^KV`(6?GS9{G7c*M23t8h4PNeeg_t80fU%h^=V?bZN{sa z*b0@9+Vl21rpqRQch}84o?S#b?v$}Wbi*|7qr>CIg z6F{Koksit5JP(jqsl1U<4l$WH<7V+pp+j4g;7^MHs1gDVAgf 
z0Sljw2(FsBE!<Q*{eyb;Eq@!3t~>cVP2H4@CVuirwR_4nGzJe5g1l4@~~YCQ2m7wj9} zJKG{th^^R7I(RV{P=5Xcr8dmmv{pM3=S$e<3fRY#40!wBdujURC24dp;@K`1aD<&w zW>UmBrcMH=zn|52@xZDf4P?BODDrNepBreAZMrNK7`%AJyP=2t`=Ee{YhN6IZ=r7L zALt)^w9siHr;zhtG-vR509?u%yyI?M+Dv^K*!X(KZFB*@afnZPIFk=FDf`lw%1>%X zVpjz$V=F7X8JHK|ZB4BQ&uNS%W%GOX<8;-CzD!It;4WHdE`A@ZQGAdI6l5{?uH%h7 z)S(gmV-dkC+zx)BXzkJo?W%AEv&>xmJ^4`Z19^o{@iG*yM~La9br!*QTr-Z}_z=e> zlhP7+sEy+;cEuYGbt=pTYk@OKWbopML!Bx;K-=XB$Mf zTPF^Di#SgwzVxihH>nWx3~FY_Tm%wWJu8HM;vf)6We!nL=a2LbeTDrR(Be~nA(Bcl zAqfgTRVigN3QX%{25EC5H{1FkX-9Do3mtGJbaGGSQuUJHwb-8Uu>-)RD7dJ{743wJ zP~oh+vg_ko)}+nHj-iD!T!D(W)UhHvfz~z0wYMjRHk5hb8{mf~T}Ankm2-Dk%ovxu z^uFOro0n%Q4ZHq(J&NmBTVQlY@6>kOD##rFYK!3lVvYx+Ya6Xb_-AtpaMiO7Ad2@+ z?DaV`QL*$H08;`5v9^0Jco!YOm*stcX1oY$R0S#=@3)V#FpPUhHWvOdG0>IXActuL{#gI9MjPIZ{}!x z-B`pZF<`3KNvd#8kE+X3Ut8^-BdYA%<+|t@Wl0;-YQi2UiPbKlfHWprKfMrw&u#}> zNY@JZ4qSC#gvOETeE$WiLF(pxz$4AKDM39dPGp}9SzolWB04A7Y{S_ts$%#msu#A^ z>ub{b6QN^T1Qj%MG}Jvsud7PqtD+P$LfL#Sh9Ecq@$f!(sK2;xj23^x8UE}R-WSgC z-P`^o3)J6;eO@zn*9nyy<0z{~%uIIWO#}7r?Y3u^ zf_rkd*fS1Mc4a%x8UjB5^Reu=EH_k-Li)Wb$^bS^D$D;s6x@KMnyS z?$kljdp^jC_Dt286t-MD^F zSMcD(WXY(BDHxt4nRTptOh#pPaOblRa4<+mq~Cmwm{8unZX}|6u{H-o==1bwuw!d6v#6A_=(c;iGQ4|5-&w;-4qkYZMsZJsshhV8B{CBnrYZS9i=^p(Y*>2R~Q2}AWEKO!T&QPgK1JdW?{AJNwqDRikeUyV*R?5H6-?WV8I+Qia(d!s1Lu7MdO6AV<-v>M$}`GL=}sN}i<3cp!M*m! zEiD)=6bPa(9iPPJiPG5}t*eO**kt|f0P1Kig(h+JV;Q6`H34hA1Li4Wo1GF%T~-yT zBQNO6mEQZ}hFh@M^#IJ-^{;(BOU`aOcQma@xJ_z0IpRp*uOlrtHV8+!15ID*9_zP# z`bO56T-0XnA7VZnqfWKRU?(@|8wqr_Ir0pAPF8>G+GNOXxdZp(nVX+t>G=~un=4@e zfHPEiyq~j{11ZV{xNTjhP$!>NMQV%>OtZ5K#Fx05o;FO5!(^BvvqqP~VO~UfX=jaa z!eu%yw1u^zWyd~mt~c#WH=hDV)$x1Pzs@r;diMH-Go{&!4{=nh$hL zT6S%n|9Iz4&VtBH6z4Ni*6j6}#r1o&l2J*8{&dff<4?U3YYn?(7BGgDwqS&l=^I1! zpO%;@{`rtS>Rb9@yg$eH{q$2UwrP@=uodUI)vHX`sJ|xG@Q#k^v|o9PpX@?a81?nV z>LR?jz<8tW9jqMB{~K;5Fr%C3D1EPO^8VS;!0#k>{Tb}~y6uJV!tJ^9fehOX)J*DQ z?22+|QpkJD#fV#~+Ur@waZV^~>i9_O@NNxWXN@uW^q9z;J>*E`mz=l>N*EC!z~tD@ zG3LlUcb4X?P+L4SW1B@^qcrLd42MJcKzv)U;tUMv-(AWOl-gXk){YY2e7-B0# zN)X0BadzMwhENlRuCW}^DIUGSHe&`RKd4Lw{QPhMkYwfnAy{`6CS_m3r( zvsZy#%D^=U7}}*QA{Vj|J~vs2rDE}?UVnaGZ57|i6seZhYh3D_4FvY3M&xcca^t=k zVFca8$5Xka?+qQ%vLns6w+uHix5Mw3PGxTotwsoNfXh%q@4Vx}vKE%OD1UWeKktyM z1+U%reFuBdZ~g68fQ?Z(J@CsTL$vdQ$LXB1&1!w-lO!d>Fo)>FOf*xG_X1>o>;~qj zmCAU=A;(;&cj=)HeNuhwnt<@@>Y8(xHhc=NyH{U2H>bO>zlEt0B2gXlwc}KRe-%|` z=e)K>3;QgmHID99!M$DZd1x$u{3z5kWMJ}kt5^Qo+MO2Wkn_!v!6Pm99`BG;f%;ja zsRDL6J7GyFZ1zJQnnbT<4_&i{>c`#vxPR#hUAkqi!k(ltrgYR`N}g56)AnRmm!lMt=nM|sHIK0GmB>3hEfE7O7JcMK&^c(Jeg!Ca$vegQ z^5>4W0%+WJ!hnj0dfjwUSLJ|;?HOFlE|2qNCf(r(u*(tDKd}6ED)IXm61!d~-(6X= zG|lt(6^P7v`yTkxNd&f##*bVb4__l=*xOHueKx?}&$T!QAerAha*GVC)lBQq%Ws{Y z`=TEVz-j#c{N_epnx3tvD}ze2>9cQ|P)DB;b6SLEj$n5bl$y6FxTV39$_?~* z)nI{P4A=?X2v) zZHj(+tc7EyZy{Z1zv=o`N^8S&aTC|_uh&XMR7`_8`W!Rz6M*|lsNcZ5FMwt@jVOT+ z77{d~29&3j6*U*r%)gA&&}?u2drYKPLfqL;+A^pdsl*1Rq`yN9v}G=Xa27{y^~Y@8 z{TN@2Pd7Bl8E>VM1Q~yK zFzjpxr{Xs7t}VM=-lGByUwb6WIqN{@{CTgCsk<_QuG?*60EZZVYhB0Xwg;aLfx6qg zQOXEW6rrbgzxB>|wWA^M8r@l=RcU@=W-+RMjn>($4UvE|2ChgfX)`B786bZFIF{wV zx*gP(*0O7jTlRI^X4|d$rjtS=XX`rzYF$a_^;~uVuvAt|sc3`Oq`_S?DBxs{^z2?B z@N9*iq|@hswTqS?g?Ce!VL-oDL6*Di!2v(z0`m4y8 zXiJa#8C)srQzl;GA=|h8X&+t`<0Gk+)SkSM zSrPxH7}*?8pOn*qyQ2P0qu-mvRq87NTtTEroo%|rcg$fx=Ew-`mz?#x<-FLCp2bmi znqCsj;Pebde5mh<=;pIm>Pubt6dEq(a2Q}PbN;HZr0{7M832}*m9Uf-_V9Grz*qXn zmz8Qei+{;CwsJ~V^UbdS>{d-Z^^M0QQXMHJh?mqdtL`s0J;d2I|j+RwVU1sX3a<(M`cgJwtVFjMZZald6Vy6&5w23H~D&Nt) zS1p2?YvbBK8RcUUe4H~MjGpnA@Hpp7>>DTE1muU#9VWRz@L|dtaU!yKkJ?Az`J@NY z8G`y_I)f zZM#8FlexY|ga8Y3|Gn5Y`&?`{|6pm0`3Xb#0pYXc#|L;l$aG5>+I*%cv(i}yy4nYM$U;pvUv 
zHw1!Ic}=G>IOHGsLT(qIkX2u5^m^chtU_P>ZFzJqfaYwl%KG~5SUpxDDw@KD$)M(L zt*yMdx|2~KVJ3lt(c;kc<7=;|vwM z<+@1w4|}}s-DSt%O8vR|J={d6aX?jO6AS*lYJkHT(4HD5Pr<5z<+Y@Vw4xYm?^B6bH7{!E6uAx!Y+AkKbqb6 zoll%2AiLDJz89LbzF7QtAoX(_z`;}INE2KElB8O_g#Q9zHn?Gj{85s|%5)Gk%-e}+ zh09dd~>&6-T)g%==%#*hw;cEL{tLG8ZA8MX>UXGoKaoaGWp{AA3F!kIRh1b zl#K@iVyy>(P0V#7GuLyQej zChQvv7`aiM!jld_^Q{b8=Z_NT$0Yj11NvN)SAq0zl}jfuze{ibZvW^Bq)OP>A;47L zJ+?JZho-+?)*vBrHsaC`kZ#6@{%HI8`bj0AAl)}^ua=!eFo&pV=Ma3!6!nNcr(^9b zSXeYrYa@+Y0>6r_*X%*^4Isi?=vcnmM^icF;OZk=sNO6AjD-82dIj`X1XKKx1Mv|F zHXHTFI+Pm6bUKvy(kuJsEp{_-en9V3nJOZXc(V+GvE1^PWWRAQIvm(*<(qW$wc^lb?K>U3Wa@ zGH4&?nZ6+w42b7~t}7=mxF@Km2DXaU2btja!1+1hk-9$E>}_1$)WT#&TGRMKTI;x~ zn53lblG@D=liYdTwXKAOFdM?0EjU0zuTQ5}32r|!2Hex(t#!PSS>jrJrGLzH?KIc^ z0ChD%AHVyO9x8Fe#wUS1++-eF`b`pvQopA^1UPJZ{NSC105x!|F*VF8MQ~Y#{iN6U zN96a%uj>x$o8H)(%g=Up@-r)%xm>9N{a_k*^5Fb=`ZEml(ROWEmpY&BHg}@cw*wht z**ST(H$ODBLH7k;t0y>j72T6L@~>L8Oi0y0ZBHi9v##~;;iy!1*r&Ff&s~@?=c1Rz zfDX;ZTl;8TTotof8A!m<7kvsc6YChd`xwLz)%)|7vL(&I5%Q-;fs_KEW^i2_e^G^6 zWmWczrtpay(Ez;wQ0t`COf!KDwMaW_b0Mr(KwGDdekMO7crnJ6*8T&`WFaFS^s>R6 zptd#!;2^)zm?dI&GNxALd?o<8FEC!n0#HL8jjJcf0%*DqA5h}#x?@Qf%tozU zUKp2kDYwK(-@^=Cww@@{Cfc^KJk=h+tgg|SQ7=IG=og9UrzQYSbB`Z% z@!=AV$enK_a9WvBcHJezYFcsm;wXX%F3{oC%mrYxLQ z-#iZl=r|xK{GklQwArj=qcp;H>Nxzp8c`Uwk5KYYeql`0h34H+<;eY@2z(hgT1tnN zBYpvzb>)Eq)Q)_VPpgC3k&so{*|)}G05KTTDOR(1c?~5{0i=PLLE=Ixz=W|~H@T9x zxYXz17daW?pyo2<5W+~G2C9^!Ut$hBj@gH0bF7;+mlvZP0kU@*;{MvkDs*Z~x=~Aw@hu& z=t`aYv>V&o+pCX1GA741@JA$VBa9vV3b)S48jthi{pAUFL$M*v9@=)cwtr-*Z-2zh z^6&yba((MpdqX5$%DsYF+?%8}xN~g3*V71&CcuwAttR-NYvU34ZQAX*39HkIqSwC# zBpUy-PUjt9jhB@y0*a}eSYf|X-Z7A}-q40b``c@gQ^Jcdw(x8AQy}^44uBFVrMbYVG_)W|DeG8YXwQdgxt#&kddg{$nvFSt z`*9REe8qGF-iLQT`DY+k@1qne!?Fq~Zma;5@nJfkzsuBEj-rG&dc zEy2z=QJ8FzEx(TdUHwXY`N=%P&SNn)xD#36MZUA-dZtq zXvLNM-8$*t?-Sr}`m#VG2n3!Yd=PBv1J~~b*zHg50EI6P^>d8$6j}?*B13PDF{_?% z5r9u`>(aY^h99@6d4jkuiqdEee?9}E7Q-H^t)&Iz&o`V2VF>p8f1F(hSd;hmuJvDS zRrFd13L;XisE8C<0s;yJsxm}CWLH6uEd+=VhOLT=RNOv&-u7N*SNQD$(xRa!F<$XXM|)Y?-b(f z29#!rr>mgkT6rQX@z_eo+2i0GRBgF}0j9oPFI@`!?#i9)RQy^9rh8nmr#Rh*`yC;C zzM9=&*G+yhDEQ==IF63)S}A7ELq;Diph$d1*QiY$_He=4iEOo|y`k;ig(qg9{kF5Q zYqGVo%nBLFdotuUVe2a9{=7HZ($+MwxUc1gnB@g_wbw!)A<~n-D<>WfeX^R4rV5e^ zraN}rus3UwhukD@y+gZXOq!(AK$GuZ1|&Sc6lgK#hF zA8Dl0w1V~aDq?Z#En{7Tyv-cNOj8*9wA%HF7uIG)d{i435d zu?@1`xj`7u=5OpC?rhK{w9=QQZiGUhV^M_5)vMO~0~L3hH8*u8TlLhN+NyRvU8g5k zNzqthbn?EOv+?v{<3ji@h~Fl!7E+)8$PaFtl~mBriq{rxGOO(G#JeR(^)mfZVz}Hw5Dv z%(vDFCA)k_71GajoH(d}`EFS2gpLSqyF;jzm3hr3^YBRdv+UD->}hC01vl~LJM{)B z;K7IS=H)Ss({7e1^WsDQa8G|VPd3605Vx$UyQW(vZLQ!fXakMkMO$qM^2E&M{n&JZ6^!w6s6BFa}5Z6I8%1% z4w#W9>5;zv%&GN;=V;K{X{Lv&l3_b3Axa`zL3O>!tA;Y$7~kiQ`pPEG&323FlN?!6 z(z6k6efxMy19#Td`rE`!ov4b^fl}E~+S)w1uBn{Bq^T`yFz2y8fyy=9UysXJo6%Yw zs~^GFiq;ucxw_E4MQml6)LYNe5s^g!HbXAZRVU;W`)-Md-!{N3)$h-UKUWknGM_eC zdr{XazST3Pxy4tg_}Gzk^@+tq)4@4W_=cZTB*)Wh>7%IlFac zLo?*->1mt%eJ1V0vaj5>qn`F+hg_Ae%d~^R*esvf8f(Xs50PA^G>3X@&N~>94ItU+ z?3QHUa4vL1Cv4q4j>474?fxUqKaX;j48=Q~JdWo<45&O7Qc~cfcIhe7no|1uLses` zks5fcprs3+e&<|YL-;xPc38g=V1eAX2=1>hMI9zyacV3gJIh!uOP> zkK8^Xf&^@72O6`sU(UZlg&QhKu)v<{BiRzQt~lqxe1^HLHm#wB%YR*Zw)XKT2V=F@ z&`#75hPMB1=tPd%m6^-F>Jhm{t=eyO3xT<%tw=!39KJ=Cw7_ltRhnOWqZVa{%-yJ@ zNJzxPpnd1C63gxXy>|J!b{8J~4t!$bYn1ZLp}%s5zeWV#g?n!*!(J6QOrV8y@Eg5M z?kL)zQOb8WE+${mC^QM2QwQ*2=Wo?9j3#19^Ll;$R=gO$)Ni z%wt=IH&1|$bcA!jETK_|wyS0JfnCF6GvQp7zRQu5OMfrzWd?QUyV#{Yh2hYvXygde zK`Un{5_9oRw(zNnhM3b;J;IN#DR13B>ZWaG=H0QhA-18V>H#!C8aW{{NN$4+qSW($ zbbG@OKQ^D{q z_GmeHjo0bv-=FV=leXO7dkRD_a*-Qj;rJIrDSXl8Ab+8u3m0f&+>7bvbZ^3SOET)xa;n>D-yq55qxUbx7Rk^*pu#D9rk@j{TQ9% zlg;!v=%qrj1v+aXaqk 
zwCj;{MY~tn@tkM@je_H|+Z&2|Xc=*GL>Ik%8uEq;7}htlM#*b4wuY_HOSsN+AH3d) z3($G&rQDH?nq0c`yQ**03G0PYWishS28B4Ov9#Oux>lEa;2(~zVS`d}17SxptV(Nn@vL1l&m}lAI@6DZ z>57cg`nMzwjE8a+@5RpEpwGJJd?)F*fI|D3X)lT2ZOw76xS)=A2B1%n@K^}ny78E= zn)rx-Ik!Mrlxd>SGm&|r_3H+959CT#(}{8W*}C=%3zp~yNz@45LZF-TzAM~>2!TDY z9^f94LxnGEA%BCP&R-S_7x@n?yec|kB$rU``qXZ9h*@v_5??s~^a(NB{0qrEHD_GI z!%Jj$%HUp%%-oOY(r0ak)Ot>5`%6lA$s{3t zqEDGVtMa;ZA4X2tP>2AYJ@cY)fO}B;?u=lEJNt*~L zp4dW_a@tFOPq|N3ov7#y=Im%a9q#N|*>~dIdkOEJ{?~%O(%v-Du>_Kt-~GO$>Y1qZ zX*u_`2Pv|)+!cpEvqw}Nzt-{s@7L76K^D#sGZ<227a!}hlJ3>K_wjgN4A@pNs@4Zr z?XA&(t3HW8O(c3(-54gEd1dKD_fp>PJYu-nX_YyzSh>EV+%l6XO7pe%{vSvn$hdO!B0!VetMO=RK051H?#lAt8n+Ni%LStC$UAdMH_@uk z>m@OE_U)(C@nt4?{y6TWO+DwSil;j^&!-FLTSsYEwR41$Ew%_p zoZL&MVK33)T~fT;qq3NP`teXKo`vvC-&GfTvY%z;%?sI|x44m-bOA6SSLfo#*rikJ z;b=wFW(_Rm?KjQyUB2$am7V6|e+W^wL=7M8uU|u5H|=QO z;Doi&H=eB6gvqngHi+l^TzJ5%#r|xqB(e3TDvN%9>6X(~Hp`)te#M-!E#@BkBwSKury1?# zQ(M*Le{pmS-lN7h98%jA4!m2Em%iIn<@9ZwD*5>Z9iw&+{KyfrtQ^+C@j^A0_;hq# zt(CGCI5mb`h^D?*bc6e7%x`W*wpsSLkx)4{p((p`u2ubOH<1=>N}ktLVR>%Kglw3| zP`3TvGs5!|v)l2%L3eoQWb-6pBDkedQ#6p$OR2nFI(3pZ4|_(i0lyQ@^V=w*#9*Uj z=kE!y4!qhbCj7FsZkU*2mGWfdUO2nXNmKWIe;Hxq9?gJc;N2K^m#l#&8@YIFm(SlO z$G|Rq1z(k3PcNuN576Db^m9F{i^lRUb&N7b;L zMP$IEV7gEBZ_HMhe+2JhNN_Nz=$K3++Y*R+W+n+bc3PRf9hOYLVPjmEZ?bKr)Xh`z zFI7gh)1DdT_XXud4rSJt#mX6x`jhdqi0?$!X98%ze7h!BG0%E=sdN>b(W@JelW;|R zrRR(xfp=p!tR?jaTa6Rtmfp}Q2yyN2Xuw}Z%|2G=xm4cfw>X96RU6=-_XP0HnU6)b zGfYl-SHev^>mSfrg=x17$tvknLWBJDRM=LToi?T7a;MM8K2LIE$eH01=zm^) z*;8;jgR+okXJ}|N)m37W@0%{TkAK!#Jj$htVMq-|i+WQOMOUtBR@>+hYE$X8=S+Ue zy+T~v?brjm_Z*zRt}GSCv$){o)t`bB#Qb34zI-hN!UYNIqO|GjA?(RshBk70tyftt z_~8M^`Wsn@C7~+cm9MtpA z6DGr_1fuoa{Ik-FlG7|a;*@hpLC*~7v&sEq;#mf!!%mig`(Ku6L)NrKCfRUpoQmA2 z-{I5RIo{JDvE(W^Y%MzPxa>T?l8yEcHLDCMcEe)2*Kgtea8&nBt5SV{GfDd1i!}!m#%DzqF{#i9 z)o`M87JJ$nvly&El8ZRb4J=?RQX#X13yI*!J?ip1jNzlr|LsC^B3zC<*#IuIEOjto z)mNTC%>2<>S4lOlrQtnLV$P#xu@-!g@nYJHZ;Sl zWib0rPkjv}NysHhf8WXxMEFl5SewVNQlmhEHJTxcVt!Qu1;PzaMCs49G~D;Nl4>kd z#M5(#O}xE^1g{)raia^b4AI%U{aJ;n0dtul;}DMEw%llYoO^Gg5?_o`I)Q+zHu`Wj z#aM%)HX0DL`+(2RbaF@F87r!cGF>rwuy5F;qO&1a0l!^Cfj!Sz<|UJJo+1yKs5|o& ze98M?Zp)kO;vM(MOBy&f|It4F1k_t1Hq{iBxWWLlx_M1jeAufW#*RAO@o+a9d!fK$ zZd%!1>LbG2lCo3BgSpCg=)Q87Az^j{I+V4k%$LO7e70d2(AO8dbJ=QYg+f>;q8 zXSg}l*rxK(bpX7x*X9l6oqEP)?-25#2q_3D;cj7BaXT9# zV9Y3{V_Ui+^bShrWRzaZ-oHG@e&pNF-)ea6_Et+K*me6WUI9r5~ zx*oimw@+fzQ3>VVK#W;=Foq#VOn23qAKGz)Q#x8dzN%~52_2ff$@6ZxrLyeQvn-ZH z!^0vqZ%Y4e3@?r2QxUM`aW3MGDq|o!h>*sU7Sp@e-ecORuJjvIoUxi$tg^hxYhRcC zzIT5MEe|}7s9cS#YwB#3{5^MzDYf(j6u6RK8D6Y|cNPAVH{ek0>1%ZUWnAj>nP>x> zmQ84#TSk&36^V^U^RJCOaa;I)L!f5`$!&$|%pS+$`^=n8BJLaw)|_3(U?oE_Aw5os z^MAQvs{)_T;}jfU2(T=|uiWesQZe?LT)~-aPTwfyBy8ULsD)Y23GP6G9W&-p&eq@) zJ+Ta*lH+Ck5`2SD@xo>0orp_7a?HubMEK8`g%l5M*|b~^|IwCn;So@E zYNDlqA3;$DS79{Mid?IHsQf(%2W*0%|GcG<+Q|)LVbN%J%Ng1aFb`I zlURh24P=j9PORrD+KEP!7tns$_jl@q1b3-$UX>u$Z{q>uevsUM&-fuX3VymZeI70^TPyM%E)uN17VaI|ER_-U0M|dxYfldsL zzUr*4(ZtI*)}4Zz5++1k(`HB%>2kU+Zt61>v8QvA_XbQ`q8SG)eL@EsL#0ZbE;|SM zqEO`+p4~afyAu)igi|}l6-7~8a(K*+AuY-MlNEhtK#&y#B=f9C1NOa1o&V5O1zzKP)B!SaItL%yq%3af0~@W0j!OCyE$tZm}J(3A}coGmJ2hXR-r#dw5Ijx5!Riq6L<%c zKZLru>A#Kyhc~?T1Mt)HZ#A|mZ-cPcC@4mI*}7NZ*JGIUY!2?} z*9F>Dx4ZW~KC)bqJ$D$B7p;&j6OXLPwHxhf`hU=C>K zSkjpSW&MFYRQHFL4MvO$8OkZjdt6O7ZxB)9#8w{=04R>?IrYmxmd%PVeiJs*2?-@? 
z8hh8b!rN+{jxQnwlH;jmonL3lv`I^yb=yxI1YY{Ev?W?p$I74IpYNA%lwHB(WjJxS znr3zN-R+0n3-w7tN9sgbeW_5z=3|$9J;h*#gcNDepVf6E#4M)c>BjB+DR8p^DJ9UX zF8baVU-_%({paq8m#7rBa01fg=WEn$nVN6gFUKTpQ6=I#0;NHfeh@B$9_Z$cMwnfi zSE!AtHy3Gp*ln(4Dd(_)sv|KF>Wpc@_y_18L8+5mIq4l=WPFjRs#pa}%Y!_=r=6Xj z7~I1$Vq`7=bMQ=zm~%I1Ldn5ooF&X{-8jzUTE%lqj~YyuzXW+af@*B%!OT9YlU4)B z4Bg0MHhjD>pBw#=fpt+M#xLC_rgzkx3esE9j$rUG>V`|$hKdgux@XWqIh$bg0kzRJ zFrWhtayQsc&eav&Mi|wF4s36il~Dq^(}1xR<-j>!&)V^Ra)FJ{jCb2@_nEp_o?^SY zAA`V0f2_!3i!3_Egh!UdOc(d*!j@owuE#cI3INwHw^Jq!%C{5VkES9~Y@^a*(Jk@q zN>;!zjeQ8Cu;GxyE40VFoHGi^`jBYs8x6v@foI`CmCW?A+ zPD6-^vgUGR^{w$}ZGx;%TJW;729WhW)f=+77M*gX4aM(nJ{SCv%2rz~cst~dClsjU zu|E3ijnJvA(Wo4Ykf_hBl+rch!D&`sm}jP}W9Rc%HWID7BSwTgFT>lUXKLKbpSb%l z>a?Y1f_YOwS31RrzyP1`O0_kQk_r5DDi2a;C=#>lItt-TL22X7GKnAm#awa%id%G_ z>f?D6qq8KG#Lp=sX;OGxV8#v~@ZvU7c9Y&0EcCRWc5~Tl_T&D__ScX6Ri6eU*(e@+ zWPN!Ym1C;PridxICUWz*PV{mB@K?jA)tlg|EzNSTyD0-FGQY5bRgV1=9HxS&VM>M% z0PR9hfuJAI3t7PyT3)$nQC^YWhNYaUvZ)o%3HF`tR@Tt3uNUo355#G8E}3mhz!1orIeb>9-@*5EXCjTVOqygZvu2RUqeFks~-KbWM z);s);2Io?i+G1X<@~JS}NP?)Z=Vvr+~JMf*kN9=oXka3^D=c<$kE&p%p+0oq>xN%QsH&DFVLMBRyK zQx02RYilIv8}(g?|Go`%coBjJo#Z0r;K~FcCR@T($atj<)s8}>!EOfp-GeDrK?W(b z&mMtbvA@%2HmW6~3r{p{%n@VL8y1&!-9?WTk_&fM8^$oZ9Z`w^yT%H5UY-(Ima(lo zus?AB59fc{DQ%sPJYgn+DMv(iHV;-HZK3IPo#moAKQ+sVfp9We@;;bP#lf_v+u4 z?O%N+@Ga>CAQHEI-#hf*?_cS^PXjrb z-#z);p7jm@;5TLt{`m1ETrp!DyJS(j&+ep+<5vsQzaBA0=u!bn$iMx%&<8w}ab`C{ z`&g3Y4=imHus-8bOsP;czT12i^-a0$Z$vWw?E&EBI{}<9J&8#wV233D!;RGs-)v8V zZ4J5rw`zyLzj3K<1E4ZE&HRZryABs0EdoEtWr*L0GHl5Jjn9P+$^B1wE>P-LpZ3p0 zV9*k~M&^m2Vb*Z;znc70aUO59#m)i)m$mM z^Q85lJ4HqI%irmAiq1~hrdPkik)opjWF2oMB?7?2{cAD*nbS5y+yY6l!P z>eqUT0ruF3|DGxb2SQXa<7;@Jn~AdQPIogM*7Obl95eV&q&?efD>=wdB}BXJ7+PR( zd}M*n{hzi~U~hnHDu650oP~d6{~Z;m;s=B+ zJ&C!%**@IG18U8Ihg_mwXJWzz_ydhbMiG>yG2RkR{)W|ir`-)A4{0pfc`TBsS#ELT~-s8Q#9BwBBu+Rr;tn*Cb} zEmCIvN9Ixeo9omSu{oh>Y|{{6&w3TtHfyft4MdC>tBlax<2VC_JWr-flqr-6(6T_^ z%5VEdH@7ekfaE%pX*5-8(}Y0lOg;7K?(z=~Aidu2-lY`k*Hg&+Q&szzMw4*$+? zq^kzNLccBT|2trsAw>tbuCwX~*$l;l_J4qQ4vPMMe2V5d)wJPruyk" + ] + }, + { + "cell_type": "markdown", + "id": "907933a8-20fd-4aa7-a3bf-3f5b5829a544", + "metadata": {}, + "source": [ + "## Setup environment" + ] + }, + { + "cell_type": "markdown", + "id": "7cc7e4d2", + "metadata": {}, + "source": [ + "Install nvflare and dependencies:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33bba668-72ac-4e69-aaed-8d4254f547c0", + "metadata": {}, + "outputs": [], + "source": [ + "! pip install --ignore-installed blinker\n", + "! pip install -r ./requirements.txt" + ] + }, + { + "cell_type": "markdown", + "id": "57945dcd", + "metadata": {}, + "source": [ + "If running in Google Colab, download the source code for this example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "993b7696", + "metadata": {}, + "outputs": [], + "source": [ + "! npx degit NVIDIA/NVFlare/examples/getting_started/tf/src src" + ] + }, + { + "cell_type": "markdown", + "id": "b68cb248-dc6a-48d1-880d-33c4324d9723", + "metadata": {}, + "source": [ + "## Federated Averaging with NVFlare\n", + "Given the flexible controller and executor concepts, it is easy to implement different computing & communication patterns with NVFlare, such as [FedAvg](https://proceedings.mlr.press/v54/mcmahan17a?ref=https://githubhelp.com) and [cyclic weight transfer](https://academic.oup.com/jamia/article/25/8/945/4956468). \n", + "\n", + "The controller's `run()` routine is responsible for assigning tasks and processing task results from the Executors. 
" + ] + }, + { + "cell_type": "markdown", + "id": "b2f84fb1-9dd3-4c72-a727-c4614260f02f", + "metadata": {}, + "source": [ + "### Server Code\n", + "First, we provide a simple implementation of the [FedAvg](https://proceedings.mlr.press/v54/mcmahan17a?ref=https://githubhelp.com) algorithm with NVFlare. \n", + "The `run()` routine implements the main algorithmic logic. \n", + "Subroutines, like `sample_clients()` and `scatter_and_gather_model()` utilize the communicator object, native to each Controller to get the list of available clients,\n", + "distribute the current global model to the clients, and collect their results.\n", + "\n", + "The FedAvg controller implements these main steps:\n", + "1. FL server initializes an initial model using `self.load_model()`.\n", + "2. For each round (global iteration):\n", + " - FL server samples available clients using `self.sample_clients()`.\n", + " - FL server sends the global model to clients and waits for their updates using `self.send_model_and_wait()`.\n", + " - FL server aggregates all the `results` and produces a new global model using `self.update_model()`." + ] + }, + { + "cell_type": "markdown", + "id": "d62a13d5-1130-44e6-8818-70e30de401e6", + "metadata": {}, + "source": [ + "```python\n", + "class FedAvg(BaseFedAvg):\n", + " def run(self) -> None:\n", + " self.info(\"Start FedAvg.\")\n", + "\n", + " model = self.load_model()\n", + " model.start_round = self.start_round\n", + " model.total_rounds = self.num_rounds\n", + "\n", + " for self.current_round in range(self.start_round, self.start_round + self.num_rounds):\n", + " self.info(f\"Round {self.current_round} started.\")\n", + " model.current_round = self.current_round\n", + "\n", + " clients = self.sample_clients(self.num_clients)\n", + "\n", + " results = self.send_model_and_wait(targets=clients, data=model)\n", + "\n", + " aggregate_results = self.aggregate(results)\n", + "\n", + " model = self.update_model(model, aggregate_results)\n", + "\n", + " self.save_model(model)\n", + "\n", + " self.info(\"Finished FedAvg.\")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "d24b6476-089a-4e9d-825b-07107bd5d84a", + "metadata": {}, + "source": [ + "### Client Code \n", + "Given a CIFAR10 [TensorFlow](https://www.tensorflow.org/) code example with a network defined at [src/tf_net.py](src/tf_net.py), we wish to adapt this centralized training code to something that can run in a federated setting.\n" + ] + }, + { + "cell_type": "markdown", + "id": "1c551053-5460-4d83-8578-796074170342", + "metadata": {}, + "source": [ + "On the client side, the training workflow is as follows:\n", + "1. Receive the model from the FL server.\n", + "2. Perform local training on the received global model\n", + "and/or evaluate the received global model for model\n", + "selection.\n", + "3. Send the new model back to the FL server." + ] + }, + { + "cell_type": "markdown", + "id": "c02bfc2a-783c-494f-9427-c38f40a2e870", + "metadata": {}, + "source": [ + "Using NVFlare's client API, we can easily adapt machine learning code that was written for centralized training and apply it in a federated scenario.\n", + "For a general use case, there are three essential methods to achieve this using the Client API :\n", + "- `init()`: Initializes NVFlare Client API environment.\n", + "- `receive()`: Receives model from the FL server.\n", + "- `send()`: Sends the model to the FL server." 
+ ] + }, + { + "cell_type": "markdown", + "id": "9115ee07-d848-4a7c-99ad-64e20ab7093c", + "metadata": {}, + "source": [ + "With these simple methods, the developers can use the Client API\n", + "to change their centralized training code to an FL scenario with\n", + "five lines of code changes as shown below.\n", + "```python\n", + " import nvflare.client as flare\n", + " \n", + " flare.init() # 1. Initializes NVFlare Client API environment.\n", + " input_model = flare.receive() # 2. Receives model from the FL server.\n", + " for k, v in input_model.params.items():\n", + " model.get_layer(k).set_weights(v) # 3. Loads model from NVFlare\n", + " \n", + " # original local training code\n", + " model.fit(...)\n", + " \n", + " output_model = flare.FLModel(params={layer.name: layer.get_weights() for layer in model.layers}) # 4. Put the results in a new `FLModel`\n", + " flare.send(output_model) # 5. Sends the model to the FL server. \n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "67432f44-4144-4347-8d74-e7f57e065a14", + "metadata": {}, + "source": [ + "The full client training script is saved in a separate file, e.g. [./src/cifar10_tf_fl.py](./src/cifar10_tf_fl.py) doing CNN training on the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset." + ] + }, + { + "cell_type": "markdown", + "id": "5da34414-bac4-4352-8077-ab7ade998eec", + "metadata": {}, + "source": [ + "## Run an NVFlare Job\n", + "Now that we have defined the FedAvg controller to run our federated compute workflow on the FL server, and our client training script to receive the global models, run local training, and send the results back to the FL server, we can put everything together using NVFlare's Job API." + ] + }, + { + "cell_type": "markdown", + "id": "0cedaf75-3a4a-4843-8017-7716b53149a2", + "metadata": {}, + "source": [ + "#### 1. Define the initial model\n", + "First, we define the global model used to initialize the model on the FL server. See [src/tf_net.py](src/tf_net.py)." + ] + }, + { + "cell_type": "markdown", + "id": "93889e62-b725-427c-8839-2771ca81d24c", + "metadata": {}, + "source": [ + "```python\n", + "from tensorflow.keras import layers, models\n", + "\n", + "class TFNet(models.Sequential):\n", + " def __init__(self, input_shape=(None, 32, 32, 3)):\n", + " super().__init__()\n", + " self._input_shape = input_shape\n", + " # Do not specify input as we will use delayed built only during runtime of the model\n", + " # self.add(layers.Input(shape=(32, 32, 3)))\n", + " self.add(layers.Conv2D(32, (3, 3), activation=\"relu\"))\n", + " self.add(layers.MaxPooling2D((2, 2)))\n", + " self.add(layers.Conv2D(64, (3, 3), activation=\"relu\"))\n", + " self.add(layers.MaxPooling2D((2, 2)))\n", + " self.add(layers.Conv2D(64, (3, 3), activation=\"relu\"))\n", + " self.add(layers.Flatten())\n", + " self.add(layers.Dense(64, activation=\"relu\"))\n", + " self.add(layers.Dense(10))\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "1b70da5d-ba8b-4e65-b47f-44bb9bddae4d", + "metadata": {}, + "source": [ + "#### 2. Define a FedJob\n", + "The `FedJob` is used to define how controllers and executors are placed within a federated job using the `to(object, target)` routine." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13771bfb-901f-485a-9a23-84db1ccd5fe4", + "metadata": {}, + "outputs": [], + "source": [ + "from nvflare import FedAvg, FedJob, ScriptExecutor\n", + "\n", + "job = FedJob(name=\"cifar10_tf_fedavg\")" + ] + }, + { + "cell_type": "markdown", + "id": "9361d9f8-54f3-4363-b3ba-706a7ae3a8e9", + "metadata": {}, + "source": [ + "#### 3. Define the Controller Workflow\n", + "Define the controller workflow and send to server." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6962e6cc-995e-4356-8156-3ceba2c7a249", + "metadata": {}, + "outputs": [], + "source": [ + "n_clients = 2\n", + "\n", + "controller = FedAvg(\n", + " num_clients=n_clients,\n", + " num_rounds=2,\n", + ")\n", + "job.to(controller, \"server\")" + ] + }, + { + "cell_type": "markdown", + "id": "7a63ce0c-ad3e-4434-b2a8-c8f2a4c2e7a5", + "metadata": {}, + "source": [ + "#### 4. Create Global Model\n", + "Now, we create the initial global model and send to server." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0e2c514c-7758-4d30-bb5c-ae3c63be50aa", + "metadata": {}, + "outputs": [], + "source": [ + "from src.tf_net import TFNet\n", + "\n", + "job.to(TFNet(), \"server\")" + ] + }, + { + "cell_type": "markdown", + "id": "77f5bc7f-4fb4-46e9-8f02-5e7245d95070", + "metadata": {}, + "source": [ + "That completes the components that need to be defined on the server." + ] + }, + { + "cell_type": "markdown", + "id": "548966c2-90bf-47ad-91d2-5c6c22c3c4f0", + "metadata": {}, + "source": [ + "#### 5. Add clients\n", + "Next, we can use the `ScriptExecutor` and send it to each of the clients to run our training script.\n", + "\n", + "Note that our script could have additional input arguments, such as batch size or data path, but we don't use them here for simplicity.\n", + "We can also specify, which GPU should be used to run this client, which is helpful for simulated environments." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ad5d36fe-9ae5-43c3-80bc-2cdc66bf7a7e", + "metadata": {}, + "outputs": [], + "source": [ + "from nvflare.client.config import ExchangeFormat\n", + "\n", + "for i in range(n_clients):\n", + " executor = ScriptExecutor(\n", + " task_script_path=\"src/cifar10_tf_fl.py\", task_script_args=\"\" # f\"--batch_size 32 --data_path /tmp/data/site-{i}\"\n", + " params_exchange_format=ExchangeFormat.NUMPY,\n", + " )\n", + " job.to(executor, f\"site-{i+1}\", gpu=0)" + ] + }, + { + "cell_type": "markdown", + "id": "113fd6af-85be-4f75-8a8e-4666771252b3", + "metadata": {}, + "source": [ + "That's it!\n", + "\n", + "#### 6. Optionally export the job\n", + "Now, we could export the job and submit it to a real NVFlare deployment using the [Admin client](https://nvflare.readthedocs.io/en/main/real_world_fl/operation.html) or [FLARE API](https://nvflare.readthedocs.io/en/main/real_world_fl/flare_api.html). " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99a270bf-c906-425b-b999-2306cb76eb62", + "metadata": {}, + "outputs": [], + "source": [ + "job.export_job(\"/tmp/nvflare/jobs/job_config\")" + ] + }, + { + "cell_type": "markdown", + "id": "9ac3f0a8-06bb-4bea-89d3-4a5fc5b76c63", + "metadata": {}, + "source": [ + "#### 7. Run FL Simulation\n", + "Finally, we can run our FedJob in simulation using NVFlare's [simulator](https://nvflare.readthedocs.io/en/main/user_guide/nvflare_cli/fl_simulator.html) under the hood. The results will be saved in the specified `workdir`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13068ab7-35cf-49e7-91ed-10993049ef0d", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "job.simulator_run(\"/tmp/nvflare/jobs/workdir\")" + ] + }, + { + "cell_type": "markdown", + "id": "44ee8632", + "metadata": {}, + "source": [ + "If using Google Colab and the output is not showing correctly, export the job and run it with the simulator command instead:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7ec915ec", + "metadata": {}, + "outputs": [], + "source": [ + "! nvflare simulator -w /tmp/nvflare/jobs/workdir -n 2 -t 2 -gpu 0 /tmp/nvflare/jobs/job_config/cifar10_tf_fedavg" + ] + }, + { + "cell_type": "markdown", + "id": "387662f4-7d05-4840-bcc7-a2523e03c2c2", + "metadata": {}, + "source": [ + "#### 8. Next steps\n", + "\n", + "Continue with the steps described in the [README.md](README.md) to run more experiments with a more complex model and more advanced FL algorithms. " + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/advanced/job_api/tf/requirements.txt b/examples/advanced/job_api/tf/requirements.txt new file mode 100644 index 0000000000..565129248a --- /dev/null +++ b/examples/advanced/job_api/tf/requirements.txt @@ -0,0 +1,2 @@ +nvflare~=2.5.0rc +tensorflow[and-cuda] diff --git a/examples/advanced/job_api/tf/run_jobs.sh b/examples/advanced/job_api/tf/run_jobs.sh new file mode 100755 index 0000000000..aa41e1424c --- /dev/null +++ b/examples/advanced/job_api/tf/run_jobs.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +export TF_FORCE_GPU_ALLOW_GROWTH=true +export TF_GPU_ALLOCATOR=cuda_malloc_asyncp + + +# You can change GPU index if multiple GPUs are available +GPU_INDX=0 + +# You can change workspace - where results and artefact will be saved. 
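+# Results and the CIFAR-10 alpha splits are written under this directory by the launcher.
+#
+# As a purely illustrative smoke test you could first try a reduced FedAvg run, e.g.
+#   python ./tf_fl_script_executor_cifar10.py --algo fedavg --n_clients 2 \
+#       --num_rounds 2 --batch_size 64 --epochs 1 --alpha 1.0 --gpu 0 --workspace /tmp
+# before launching the full experiments below.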
+WORKSPACE=/tmp + +# Run centralized training job +python ./tf_fl_script_executor_cifar10.py \ + --algo centralized \ + --n_clients 1 \ + --num_rounds 25 \ + --batch_size 64 \ + --epochs 1 \ + --alpha 0.0 \ + --gpu $GPU_INDX \ + --workspace $WORKSPACE + + +# Run FedAvg with different alpha values +for alpha in 1.0 0.5 0.3 0.1; do + + python ./tf_fl_script_executor_cifar10.py \ + --algo fedavg \ + --n_clients 8 \ + --num_rounds 50 \ + --batch_size 64 \ + --epochs 4 \ + --alpha $alpha \ + --gpu $GPU_INDX \ + --workspace $WORKSPACE + +done + + +# Run FedOpt job +python ./tf_fl_script_executor_cifar10.py \ + --algo fedopt \ + --n_clients 8 \ + --num_rounds 50 \ + --batch_size 64 \ + --epochs 4 \ + --alpha 0.1 \ + --gpu $GPU_INDX \ + --workspace $WORKSPACE + + +# Run FedProx job. +python ./tf_fl_script_executor_cifar10.py \ + --algo fedprox \ + --n_clients 8 \ + --num_rounds 50 \ + --batch_size 64 \ + --epochs 4 \ + --fedprox_mu 1e-5 \ + --alpha 0.1 \ + --gpu $GPU_INDX + + +# Run scaffold job +python ./tf_fl_script_executor_cifar10.py \ + --algo scaffold \ + --n_clients 8 \ + --num_rounds 50 \ + --batch_size 64 \ + --epochs 4 \ + --alpha 0.1 \ + --gpu $GPU_INDX \ No newline at end of file diff --git a/examples/advanced/job_api/tf/src/cifar10_data_split.py b/examples/advanced/job_api/tf/src/cifar10_data_split.py new file mode 100644 index 0000000000..1dd05f6385 --- /dev/null +++ b/examples/advanced/job_api/tf/src/cifar10_data_split.py @@ -0,0 +1,125 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This Dirichlet sampling strategy for creating a heterogeneous partition is adopted +# from FedMA (https://github.com/IBM/FedMA). + +# MIT License + +# Copyright (c) 2020 International Business Machines + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
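+# Illustrative standalone usage (the launcher tf_fl_script_executor_cifar10.py normally
+# calls cifar10_split() for you when --alpha > 0):
+#
+#   from cifar10_data_split import cifar10_split
+#   idx_paths = cifar10_split(split_dir="/tmp/cifar10_splits/clients8_alpha0.5",
+#                             num_sites=8, alpha=0.5, seed=0)
+#
+# Each returned path is a site-<i>.npy file holding that site's training indices;
+# smaller alpha values give more heterogeneous (non-IID) label distributions, and
+# split_dir must be an absolute path.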
+ + +import json +import os + +import numpy as np +from tensorflow.keras import datasets + + +def cifar10_split(split_dir: str = None, num_sites: int = 8, alpha: float = 0.5, seed: int = 0): + if split_dir is None: + raise ValueError("You need to define a valid `split_dir` for splitting the data.") + if not os.path.isabs(split_dir): + raise ValueError("`split_dir` needs to be absolute path.") + if alpha < 0.0: + raise ValueError(f"Alpha should be larger or equal 0.0 but was" f" {alpha}!") + + np.random.seed(seed) + + train_idx_paths = [] + + print(f"Partition CIFAR-10 dataset into {num_sites} sites with Dirichlet sampling under alpha {alpha}") + site_idx, class_sum = _partition_data(num_sites, alpha) + + # write to files + if not os.path.isdir(split_dir): + os.makedirs(split_dir) + sum_file_name = os.path.join(split_dir, "summary.txt") + with open(sum_file_name, "w") as sum_file: + sum_file.write(f"Number of clients: {num_sites} \n") + sum_file.write(f"Dirichlet sampling parameter: {alpha} \n") + sum_file.write("Class counts for each client: \n") + sum_file.write(json.dumps(class_sum)) + + site_file_path = os.path.join(split_dir, "site-") + for site in range(num_sites): + site_file_name = site_file_path + str(site + 1) + ".npy" + print(f"Save split index {site+1} of {num_sites} to {site_file_name}") + np.save(site_file_name, np.array(site_idx[site])) + train_idx_paths.append(site_file_name) + + return train_idx_paths + + +def _get_site_class_summary(train_label, site_idx): + class_sum = {} + + for site, data_idx in site_idx.items(): + unq, unq_cnt = np.unique(train_label[data_idx], return_counts=True) + tmp = {int(unq[i]): int(unq_cnt[i]) for i in range(len(unq))} + class_sum[site] = tmp + return class_sum + + +def _partition_data(num_sites, alpha): + # only training label is needed for doing split + (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data() + + min_size = 0 + K = 10 + N = train_labels.shape[0] + site_idx = {} + + # split + while min_size < 10: + idx_batch = [[] for _ in range(num_sites)] + # for each class in the dataset + for k in range(K): + idx_k = np.where(train_labels == k)[0] + np.random.shuffle(idx_k) + proportions = np.random.dirichlet(np.repeat(alpha, num_sites)) + # Balance + proportions = np.array([p * (len(idx_j) < N / num_sites) for p, idx_j in zip(proportions, idx_batch)]) + proportions = proportions / proportions.sum() + proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1] + idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))] + min_size = min([len(idx_j) for idx_j in idx_batch]) + + # shuffle + for j in range(num_sites): + np.random.shuffle(idx_batch[j]) + site_idx[j] = idx_batch[j] + + # collect class summary + class_sum = _get_site_class_summary(train_labels, site_idx) + + return site_idx, class_sum diff --git a/examples/advanced/job_api/tf/src/cifar10_tf_fl.py b/examples/advanced/job_api/tf/src/cifar10_tf_fl.py new file mode 100644 index 0000000000..5058a4025b --- /dev/null +++ b/examples/advanced/job_api/tf/src/cifar10_tf_fl.py @@ -0,0 +1,79 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import tensorflow as tf +from tensorflow.keras import datasets +from tf_net import TFNet + +# (1) import nvflare client API +import nvflare.client as flare + +PATH = "./tf_model.weights.h5" + + +def main(): + # (2) initializes NVFlare client API + flare.init() + + (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data() + + # Normalize pixel values to be between 0 and 1 + train_images, test_images = train_images / 255.0, test_images / 255.0 + + model = TFNet() + model.build(input_shape=(None, 32, 32, 3)) + model.compile( + optimizer="adam", loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"] + ) + model.summary() + + # (3) gets FLModel from NVFlare + while flare.is_running(): + input_model = flare.receive() + print(f"current_round={input_model.current_round}") + + # (optional) print system info + system_info = flare.system_info() + print(f"NVFlare system info: {system_info}") + + # (4) loads model from NVFlare + for k, v in input_model.params.items(): + model.get_layer(k).set_weights(v) + + # (5) evaluate aggregated/received model + _, test_global_acc = model.evaluate(test_images, test_labels, verbose=2) + print( + f"Accuracy of the received model on round {input_model.current_round} on the {len(test_images)} test images: {test_global_acc * 100} %" + ) + + model.fit(train_images, train_labels, epochs=1, validation_data=(test_images, test_labels)) + + print("Finished Training") + + model.save_weights(PATH) + + _, test_acc = model.evaluate(test_images, test_labels, verbose=2) + print(f"Accuracy of the model on the {len(test_images)} test images: {test_acc * 100} %") + + # (6) construct trained FL model (A dict of {layer name: layer weights} from the keras model) + output_model = flare.FLModel( + params={layer.name: layer.get_weights() for layer in model.layers}, metrics={"accuracy": test_global_acc} + ) + # (7) send model back to NVFlare + flare.send(output_model) + + +if __name__ == "__main__": + main() diff --git a/examples/advanced/job_api/tf/src/cifar10_tf_fl_alpha_split.py b/examples/advanced/job_api/tf/src/cifar10_tf_fl_alpha_split.py new file mode 100644 index 0000000000..e25362f469 --- /dev/null +++ b/examples/advanced/job_api/tf/src/cifar10_tf_fl_alpha_split.py @@ -0,0 +1,219 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
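+# Client training script used by the alpha-split CIFAR-10 experiments. Relative to the
+# simpler cifar10_tf_fl.py, this script additionally:
+#   - restricts each site's training data to the indices in --train_idx_path
+#     (the Dirichlet/alpha split written by cifar10_data_split.py),
+#   - applies padding / random-crop / horizontal-flip / normalization preprocessing,
+#   - trains the larger ModerateTFNet with SGD + momentum and logs to TensorBoard,
+#   - optionally switches the loss to TFFedProxLoss when --fedprox_mu > 0.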
+ + +import argparse +import copy + +import numpy as np +import tensorflow as tf +from tensorflow.keras import datasets, losses +from tf_net import ModerateTFNet + +# (1) import nvflare client API +import nvflare.client as flare +from nvflare.app_opt.tf.fedprox_loss import TFFedProxLoss + +PATH = "./tf_model.weights.h5" + + +def preprocess_dataset(dataset, is_training, batch_size=1): + """ + Apply pre-processing transformations to CIFAR10 dataset. + + Same pre-processings are used as in the Pytorch tutorial + on CIFAR10: https://github.com/NVIDIA/NVFlare/tree/main/examples/advanced/cifar10/cifar10-sim + + Training time pre-processings are (in-order): + - Image padding with 4 pixels in "reflect" mode on each side + - RandomCrop of 32 x 32 images + - RandomHorizontalFlip + - Normalize to [0, 1]: dividing pixels values by given CIFAR10 data mean & std + - Random shuffle + + Testing/Validation time pre-processings are: + - Normalize: dividing pixels values by 255 + + Args + ---------- + dataset: tf.data.Datset + Tensorflow Dataset + + is_training: bool + Boolean flag indicating if current phase is training phase. + + batch_size: int + Batch size + + Returns + ---------- + tf.data.Dataset + Tensorflow Dataset with pre-processings applied. + + """ + # Values from: https://github.com/NVIDIA/NVFlare/blob/main/examples/advanced/cifar10/pt/learners/cifar10_model_learner.py#L147 + mean_cifar10 = tf.constant([125.3, 123.0, 113.9], dtype=tf.float32) + std_cifar10 = tf.constant([63.0, 62.1, 66.7], dtype=tf.float32) + + if is_training: + + # Padding each dimension by 4 pixels each side + dataset = dataset.map( + lambda image, label: ( + tf.stack( + [ + tf.pad(tf.squeeze(t, [2]), [[4, 4], [4, 4]], mode="REFLECT") + for t in tf.split(image, num_or_size_splits=3, axis=2) + ], + axis=2, + ), + label, + ) + ) + # Random crop of 32 x 32 x 3 + dataset = dataset.map(lambda image, label: (tf.image.random_crop(image, size=(32, 32, 3)), label)) + # Random horizontal flip + dataset = dataset.map(lambda image, label: (tf.image.random_flip_left_right(image), label)) + # Normalize by dividing by given mean & std + dataset = dataset.map(lambda image, label: ((tf.cast(image, tf.float32) - mean_cifar10) / std_cifar10, label)) + # Random shuffle + dataset = dataset.shuffle(len(dataset), reshuffle_each_iteration=True) + # Convert to batches. + return dataset.batch(batch_size) + + else: + + # For validation / test only do normalization. + return dataset.map( + lambda image, label: ((tf.cast(image, tf.float32) - mean_cifar10) / std_cifar10, label) + ).batch(batch_size) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--batch_size", type=int, required=True) + parser.add_argument("--epochs", type=int, required=True) + parser.add_argument("--train_idx_path", type=str, required=True) + parser.add_argument("--fedprox_mu", type=float, default=0.0) + args = parser.parse_args() + + (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data() + + # Use alpha-split per-site data to simulate data heteogeniety, + # only if if train_idx_path is not None. 
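+    # (the launcher stringifies Python None, so sites without a split receive the
+    #  literal string "None" here and keep the full CIFAR-10 training set)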
+ # + if args.train_idx_path != "None": + + print(f"Loading train indices from {args.train_idx_path}") + train_idx = np.load(args.train_idx_path) + train_images = train_images[train_idx] + train_labels = train_labels[train_idx] + + unq, unq_cnt = np.unique(train_labels, return_counts=True) + print( + ( + f"Loaded {len(train_idx)} training indices from {args.train_idx_path} " + "with label distribution:\nUnique labels: {unq}\nUnique Counts: {unq_cnt}" + ) + ) + + # Convert training & testing data to datasets + train_ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels)) + test_ds = tf.data.Dataset.from_tensor_slices((test_images, test_labels)) + + # Preprocessing + train_ds = preprocess_dataset(train_ds, is_training=True, batch_size=args.batch_size) + test_ds = preprocess_dataset(test_ds, is_training=False, batch_size=args.batch_size) + + model = ModerateTFNet() + model.build(input_shape=(None, 32, 32, 3)) + + # Tensorboard logs for each local training epoch + callbacks = [tf.keras.callbacks.TensorBoard(log_dir="./logs/epochs", write_graph=False)] + # Tensorboard logs for each aggregation run + tf_summary_writer = tf.summary.create_file_writer(logdir="./logs/rounds") + + # Define loss function. + loss = losses.SparseCategoricalCrossentropy(from_logits=True) + + model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9), loss=loss, metrics=["accuracy"]) + model.summary() + + # (2) initializes NVFlare client API + flare.init() + + while flare.is_running(): + # (3) receives FLModel from NVFlare + input_model = flare.receive() + print(f"current_round={input_model.current_round}") + + # (optional) print system info + system_info = flare.system_info() + print(f"NVFlare system info: {system_info}") + + # (4) loads model from NVFlare + for k, v in input_model.params.items(): + model.get_layer(k).set_weights(v) + + if args.fedprox_mu > 0: + + local_model_weights = model.trainable_variables + global_model_weights = copy.deepcopy(model.trainable_variables) + model.loss = TFFedProxLoss(local_model_weights, global_model_weights, args.fedprox_mu, loss) + elif args.fedprox_mu < 0.0: + + raise ValueError("mu should be no less than 0.0") + + # (5) evaluate aggregated/received model + _, test_global_acc = model.evaluate(x=test_ds, verbose=2) + + with tf_summary_writer.as_default(): + tf.summary.scalar("global_model_accuracy", test_global_acc, input_model.current_round) + print( + f"Accuracy of the received model on round {input_model.current_round} on the {len(test_images)} test images: {test_global_acc * 100} %" + ) + + start_epoch = args.epochs * input_model.current_round + end_epoch = start_epoch + args.epochs + + print(f"Train from epoch {start_epoch} to {end_epoch}") + model.fit( + x=train_ds, + epochs=end_epoch, + validation_data=test_ds, + callbacks=callbacks, + initial_epoch=start_epoch, + validation_freq=1, + ) + + print("Finished Training") + + model.save_weights(PATH) + + _, test_acc = model.evaluate(x=test_ds, verbose=2) + + with tf_summary_writer.as_default(): + tf.summary.scalar("local_model_accuracy", test_acc, input_model.current_round) + print(f"Accuracy of the model on the {len(test_images)} test images: {test_acc * 100} %") + + # (6) construct trained FL model (A dict of {layer name: layer weights} from the keras model) + output_model = flare.FLModel( + params={layer.name: layer.get_weights() for layer in model.layers}, metrics={"accuracy": test_global_acc} + ) + # (7) send model back to NVFlare + flare.send(output_model) + + +if __name__ == 
"__main__": + main() diff --git a/examples/advanced/job_api/tf/src/cifar10_tf_fl_alpha_split_scaffold.py b/examples/advanced/job_api/tf/src/cifar10_tf_fl_alpha_split_scaffold.py new file mode 100644 index 0000000000..ab5cbd1252 --- /dev/null +++ b/examples/advanced/job_api/tf/src/cifar10_tf_fl_alpha_split_scaffold.py @@ -0,0 +1,294 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse +import copy + +import numpy as np +import tensorflow as tf +from tensorflow.keras import datasets, losses +from tf_net import ModerateTFNet + +# (1) import nvflare client API +import nvflare.client as flare +from nvflare.app_common.app_constant import AlgorithmConstants +from nvflare.app_opt.tf.fedprox_loss import TFFedProxLoss +from nvflare.app_opt.tf.scaffold import ScaffoldCallback, TFScaffoldHelper, get_lr_values +from nvflare.client.tracking import SummaryWriter + +PATH = "./tf_model.weights.h5" + + +def preprocess_dataset(dataset, is_training, batch_size=1): + """ + Apply pre-processing transformations to CIFAR10 dataset. + + Same pre-processings are used as in the Pytorch tutorial + on CIFAR10: https://github.com/NVIDIA/NVFlare/tree/main/examples/advanced/cifar10/cifar10-sim + + Training time pre-processings are (in-order): + - Image padding with 4 pixels in "reflect" mode on each side + - RandomCrop of 32 x 32 images + - RandomHorizontalFlip + - Normalize to [0, 1]: dividing pixels values by given CIFAR10 data mean & std + - Random shuffle + + Testing/Validation time pre-processings are: + - Normalize: dividing pixels values by 255 + + Args + ---------- + dataset: tf.data.Datset + Tensorflow Dataset + + is_training: bool + Boolean flag indicating if current phase is training phase. + + batch_size: int + Batch size + + Returns + ---------- + tf.data.Dataset + Tensorflow Dataset with pre-processings applied. 
+ + """ + # Values from: https://github.com/NVIDIA/NVFlare/blob/fc2bc47889b980c8de37de5528e3d07e6b1a942e/examples/advanced/cifar10/pt/learners/cifar10_model_learner.py#L147 + mean_cifar10 = tf.constant([125.3, 123.0, 113.9], dtype=tf.float32) + std_cifar10 = tf.constant([63.0, 62.1, 66.7], dtype=tf.float32) + + if is_training: + + # Padding each dimension by 4 pixels each side + dataset = dataset.map( + lambda image, label: ( + tf.stack( + [ + tf.pad( + tf.squeeze(t, [2]), + [[4, 4], [4, 4]], + mode="REFLECT", + ) + for t in tf.split(image, num_or_size_splits=3, axis=2) + ], + axis=2, + ), + label, + ) + ) + # Random crop of 32 x 32 x 3 + dataset = dataset.map( + lambda image, label: ( + tf.image.random_crop(image, size=(32, 32, 3)), + label, + ) + ) + # Random horizontal flip + dataset = dataset.map( + lambda image, label: ( + tf.image.random_flip_left_right(image), + label, + ) + ) + # Normalize by dividing by given mean & std + dataset = dataset.map( + lambda image, label: ( + (tf.cast(image, tf.float32) - mean_cifar10) / std_cifar10, + label, + ) + ) + # Random shuffle + dataset = dataset.shuffle(len(dataset), reshuffle_each_iteration=True) + # Convert to batches. + return dataset.batch(batch_size) + + else: + + # For validation / test only do normalization. + return dataset.map( + lambda image, label: ( + (tf.cast(image, tf.float32) - mean_cifar10) / std_cifar10, + label, + ) + ).batch(batch_size) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--batch_size", type=int, required=True) + parser.add_argument("--epochs", type=int, required=True) + parser.add_argument("--train_idx_path", type=str, required=True) + parser.add_argument("--clip_norm", type=float, default=1.55, required=False) + parser.add_argument("--fedprox_mu", type=float, default=0.0) + + args = parser.parse_args() + + # (2) initializes NVFlare client API + flare.init() + + (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data() + + # Use alpha-split per-site data to simulate data heteogeniety, + # only if if train_idx_path is not None. 
+ + if args.train_idx_path != "None": + + print(f"Loading train indices from {args.train_idx_path}") + train_idx = np.load(args.train_idx_path) + train_images = train_images[train_idx] + train_labels = train_labels[train_idx] + + unq, unq_cnt = np.unique(train_labels, return_counts=True) + print( + ( + f"Loaded {len(train_idx)} training indices from {args.train_idx_path} " + "with label distribution:\nUnique labels: {unq}\nUnique Counts: {unq_cnt}" + ) + ) + + # Convert training & testing data to datasets + train_ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels)) + test_ds = tf.data.Dataset.from_tensor_slices((test_images, test_labels)) + + # Preprocessing + train_ds = preprocess_dataset(train_ds, is_training=True, batch_size=args.batch_size) + test_ds = preprocess_dataset(test_ds, is_training=False, batch_size=args.batch_size) + + model = ModerateTFNet() + model.build(input_shape=(None, 32, 32, 3)) + + callbacks = [tf.keras.callbacks.TensorBoard(log_dir="./logs_keras", write_graph=False)] + + loss = losses.SparseCategoricalCrossentropy(from_logits=True) + optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, clipnorm=args.clip_norm) + + model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) + model.summary() + + scaffold_helper = TFScaffoldHelper() + scaffold_helper.init(model=model) + + # (2) initializes NVFlare client API + flare.init() + + summary_writer = SummaryWriter() + tf_summary_writer = tf.summary.create_file_writer(logdir="./logs/validation") + while flare.is_running(): + # (3) receives FLModel from NVFlare + input_model = flare.receive() + print(f"current_round={input_model.current_round}") + + # (optional) print system info + system_info = flare.system_info() + print(f"NVFlare system info: {system_info}") + + # (4) loads model from NVFlare + for k, v in input_model.params.items(): + model.get_layer(k).set_weights(v) + + if args.fedprox_mu > 0: + + local_model_weights = model.trainable_variables + global_model_weights = copy.deepcopy(model.trainable_variables) + model.loss = TFFedProxLoss(local_model_weights, global_model_weights, args.fedprox_mu, loss) + elif args.fedprox_mu < 0.0: + + raise ValueError("mu should be no less than 0.0") + + # (step 4) load regularization parameters from scaffold + global_ctrl_weights = input_model.meta.get(AlgorithmConstants.SCAFFOLD_CTRL_GLOBAL) + + scaffold_helper.load_global_controls(weights=global_ctrl_weights) + + c_global_para, c_local_para = scaffold_helper.get_params() + + model_global = tf.keras.models.clone_model(model) + model_global.set_weights(model.get_weights()) + + # (5) evaluate aggregated/received model + _, test_global_acc = model.evaluate(x=test_ds, verbose=2) + summary_writer.add_scalar( + tag="global_model_accuracy", + scalar=test_global_acc, + global_step=input_model.current_round, + ) + + with tf_summary_writer.as_default(): + tf.summary.scalar( + "global_model_accuracy", + test_global_acc, + input_model.current_round, + ) + print( + f"Accuracy of the received model on round {input_model.current_round} on the {len(test_images)} test images: {test_global_acc * 100} %" + ) + + start_epoch = args.epochs * input_model.current_round + end_epoch = start_epoch + args.epochs + + print(f"Train from epoch {start_epoch} to {end_epoch}") + model.fit( + x=train_ds, + epochs=end_epoch, + validation_data=test_ds, + callbacks=[callbacks, ScaffoldCallback(scaffold_helper)], + initial_epoch=start_epoch, + validation_freq=1, # args.epochs + ) + + curr_lr = get_lr_values(optimizer=optimizer) + 
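+        # The terms_update() call below applies the SCAFFOLD control-variate update using
+        # the locally trained model, the round-start global copy (model_global), the current
+        # learning rate and the global/local control terms; the resulting control delta is
+        # what is returned to the server via SCAFFOLD_CTRL_DIFF in the output FLModel meta.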
+ print("Finished Training") + + scaffold_helper.terms_update( + model=model, + curr_lr=curr_lr, + c_global_para=c_global_para, + c_local_para=c_local_para, + model_global=model_global, + ) + + model.save_weights(PATH) + + _, test_acc = model.evaluate(x=test_ds, verbose=2) + + summary_writer.add_scalar( + tag="local_model_accuracy", + scalar=test_acc, + global_step=input_model.current_round, + ) + + with tf_summary_writer.as_default(): + tf.summary.scalar( + "local_model_accuracy", + test_acc, + input_model.current_round, + ) + print(f"Accuracy of the model on the {len(test_images)} test images: {test_acc * 100} %") + + # (6) construct trained FL model (A dict of {layer name: layer weights} from the keras model) + output_model = flare.FLModel( + params={layer.name: layer.get_weights() for layer in model.layers}, + metrics={"accuracy": test_global_acc}, + meta={ + AlgorithmConstants.SCAFFOLD_CTRL_DIFF: scaffold_helper.get_delta_controls(), + }, + ) + # (7) send model back to NVFlare + flare.send(output_model) + + +if __name__ == "__main__": + main() diff --git a/examples/advanced/job_api/tf/src/tf_net.py b/examples/advanced/job_api/tf/src/tf_net.py new file mode 100644 index 0000000000..bdd8d29147 --- /dev/null +++ b/examples/advanced/job_api/tf/src/tf_net.py @@ -0,0 +1,65 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
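+# Model definitions shared by the examples in this folder:
+#   - TFNet: the small CNN used by the getting-started style notebook,
+#   - ModerateTFNet: a deeper CNN (following the ModerateCNN architecture) used by
+#     tf_fl_script_executor_cifar10.py.
+# Both subclass models.Sequential and intentionally omit an Input layer, so they are
+# built lazily via model.build(input_shape=(None, 32, 32, 3)) at runtime.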
+ +from tensorflow.keras import layers, models + + +class TFNet(models.Sequential): + def __init__(self, input_shape=(None, 32, 32, 3)): + super().__init__() + self._input_shape = input_shape + # Do not specify input as we will use delayed built only during runtime of the model + # self.add(layers.Input(shape=(32, 32, 3))) + self.add(layers.Conv2D(32, (3, 3), activation="relu")) + self.add(layers.MaxPooling2D((2, 2))) + self.add(layers.Conv2D(64, (3, 3), activation="relu")) + self.add(layers.MaxPooling2D((2, 2))) + self.add(layers.Conv2D(64, (3, 3), activation="relu")) + self.add(layers.Flatten()) + self.add(layers.Dense(64, activation="relu")) + self.add(layers.Dense(10)) + + +class ModerateTFNet(models.Sequential): + # Follow ModerateCNN architecture from cifar10_nets.py + def __init__(self, input_shape=(None, 32, 32, 3)): + super().__init__() + self._input_shape = input_shape + + # Do not specify input as we will use delayed built only during runtime of the model + # self.add(layers.Input(shape=(32, 32, 3))) + + # Conv Layer block 1 + self.add(layers.Conv2D(32, (3, 3), activation="relu", padding="same")) + self.add(layers.Conv2D(64, (3, 3), activation="relu", padding="same")) + self.add(layers.MaxPooling2D((2, 2))) + + # Conv Layer block 2 + self.add(layers.Conv2D(128, (3, 3), activation="relu", padding="same")) + self.add(layers.Conv2D(128, (3, 3), activation="relu", padding="same")) + self.add(layers.MaxPooling2D((2, 2))) + self.add(layers.Dropout(rate=0.05)) + + # Conv Layer block 3 + self.add(layers.Conv2D(256, (3, 3), activation="relu", padding="same")) + self.add(layers.Conv2D(256, (3, 3), activation="relu", padding="same")) + self.add(layers.MaxPooling2D((2, 2))) + self.add(layers.Flatten()) + + # FC Layer + self.add(layers.Dropout(rate=0.1)) + self.add(layers.Dense(512, activation="relu")) + self.add(layers.Dense(512, activation="relu")) + self.add(layers.Dropout(rate=0.1)) + self.add(layers.Dense(10)) diff --git a/examples/advanced/job_api/tf/tf_fl_script_executor_cifar10.py b/examples/advanced/job_api/tf/tf_fl_script_executor_cifar10.py new file mode 100644 index 0000000000..4f2d294f03 --- /dev/null +++ b/examples/advanced/job_api/tf/tf_fl_script_executor_cifar10.py @@ -0,0 +1,164 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
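+# Launcher that builds a FedJob for the selected algorithm (centralized, fedavg, fedopt,
+# fedprox or scaffold), prepares the Dirichlet/alpha data split when alpha > 0, and runs
+# the job with the NVFlare simulator. Example invocation (matching run_jobs.sh):
+#
+#   python ./tf_fl_script_executor_cifar10.py --algo fedavg --n_clients 8 \
+#       --num_rounds 50 --batch_size 64 --epochs 4 --alpha 0.1 --gpu 0 --workspace /tmp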
+ + +import argparse +import multiprocessing + +import tensorflow as tf +from src.cifar10_data_split import cifar10_split +from src.tf_net import ModerateTFNet + +from nvflare import FedJob, ScriptExecutor + +gpu_devices = tf.config.experimental.list_physical_devices("GPU") +for device in gpu_devices: + tf.config.experimental.set_memory_growth(device, True) + + +CENTRALIZED_ALGO = "centralized" +FEDAVG_ALGO = "fedavg" +FEDOPT_ALGO = "fedopt" +SCAFFOLD_ALGO = "scaffold" +FEDPROX_ALGO = "fedprox" + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--algo", + type=str, + required=True, + ) + parser.add_argument( + "--fedprox_mu", + type=float, + default=0.0, + ) + parser.add_argument( + "--n_clients", + type=int, + default=8, + ) + parser.add_argument( + "--num_rounds", + type=int, + default=50, + ) + parser.add_argument( + "--batch_size", + type=int, + default=64, + ) + parser.add_argument( + "--epochs", + type=int, + default=4, + ) + parser.add_argument( + "--alpha", + type=float, + default=1.0, + ) + parser.add_argument( + "--workspace", + type=str, + default="/tmp", + ) + parser.add_argument( + "--gpu", + type=int, + default=0, + ) + + args = parser.parse_args() + multiprocessing.set_start_method("spawn") + + supported_algos = (CENTRALIZED_ALGO, FEDAVG_ALGO, FEDOPT_ALGO, SCAFFOLD_ALGO, FEDPROX_ALGO) + + if args.algo not in supported_algos: + raise ValueError(f"--algo should be one of: {supported_algos}, got: {args.algo}") + + train_script = "src/cifar10_tf_fl_alpha_split.py" + train_split_root = ( + f"{args.workspace}/cifar10_splits/clients{args.n_clients}_alpha{args.alpha}" # avoid overwriting results + ) + + # Prepare data splits + if args.alpha > 0.0: + + # Do alpha splitting if alpha value > 0.0 + print(f"preparing CIFAR10 and doing alpha split with alpha = {args.alpha}") + train_idx_paths = cifar10_split(num_sites=args.n_clients, alpha=args.alpha, split_dir=train_split_root) + + print(train_idx_paths) + else: + train_idx_paths = [None for __ in range(args.n_clients)] + + # Define job + job = FedJob(name=f"cifar10_tf_{args.algo}_alpha{args.alpha}") + + # Define the controller workflow and send to server + controller = None + task_script_args = f"--batch_size {args.batch_size} --epochs {args.epochs}" + + if args.algo == FEDAVG_ALGO or args.algo == CENTRALIZED_ALGO: + from nvflare import FedAvg + + controller = FedAvg( + num_clients=args.n_clients, + num_rounds=args.num_rounds, + ) + + elif args.algo == FEDOPT_ALGO: + from nvflare.app_opt.tf.fedopt_ctl import FedOpt + + controller = FedOpt( + num_clients=args.n_clients, + num_rounds=args.num_rounds, + ) + elif args.algo == FEDPROX_ALGO: + from nvflare import FedAvg + + controller = FedAvg( + num_clients=args.n_clients, + num_rounds=args.num_rounds, + ) + task_script_args += f" --fedprox_mu {args.fedprox_mu}" + + elif args.algo == SCAFFOLD_ALGO: + train_script = "src/cifar10_tf_fl_alpha_split_scaffold.py" + from nvflare.app_common.workflows.scaffold import Scaffold + + controller = Scaffold( + num_clients=args.n_clients, + num_rounds=args.num_rounds, + ) + + job.to(controller, "server") + + # Define the initial global model and send to server + job.to(ModerateTFNet(input_shape=(None, 32, 32, 3)), "server") + + # Add clients + for i, train_idx_path in enumerate(train_idx_paths): + curr_task_script_args = task_script_args + f" --train_idx_path {train_idx_path}" + executor = ScriptExecutor(task_script_path=train_script, task_script_args=curr_task_script_args) + job.to(executor, f"site-{i+1}", 
gpu=args.gpu) + + # Can export current job to folder. + # job.export_job(f"{args.workspace}/nvflare/jobs/job_config") + + # Here we launch the job using simulator. + job.simulator_run(f"{args.workspace}/nvflare/jobs/{job.name}") diff --git a/examples/advanced/job_config/hello-pt/add_shareable_parameter.py b/examples/advanced/job_config/hello-pt/add_shareable_parameter.py deleted file mode 100644 index f7c819c1c7..0000000000 --- a/examples/advanced/job_config/hello-pt/add_shareable_parameter.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from nvflare.apis.filter import Filter -from nvflare.apis.fl_context import FLContext -from nvflare.apis.shareable import Shareable - - -class AddShareable(Filter): - def process(self, shareable: Shareable, fl_ctx: FLContext) -> Shareable: - print(f"{fl_ctx.get_identity_name()} ---- AddShareable Filter ----") - - return shareable diff --git a/examples/advanced/job_config/hello-pt/cifar10trainer.py b/examples/advanced/job_config/hello-pt/cifar10trainer.py deleted file mode 100644 index 181e84ad13..0000000000 --- a/examples/advanced/job_config/hello-pt/cifar10trainer.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os.path - -import torch -from pt_constants import PTConstants -from simple_network import SimpleNetwork -from torch import nn -from torch.optim import SGD -from torch.utils.data.dataloader import DataLoader -from torchvision.datasets import CIFAR10 -from torchvision.transforms import Compose, Normalize, ToTensor - -from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable -from nvflare.apis.executor import Executor -from nvflare.apis.fl_constant import ReservedKey, ReturnCode -from nvflare.apis.fl_context import FLContext -from nvflare.apis.shareable import Shareable, make_reply -from nvflare.apis.signal import Signal -from nvflare.app_common.abstract.model import make_model_learnable, model_learnable_to_dxo -from nvflare.app_common.app_constant import AppConstants -from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager - - -class Cifar10Trainer(Executor): - def __init__( - self, - data_path="~/data", - lr=0.01, - epochs=5, - train_task_name=AppConstants.TASK_TRAIN, - submit_model_task_name=AppConstants.TASK_SUBMIT_MODEL, - exclude_vars=None, - pre_train_task_name=AppConstants.TASK_GET_WEIGHTS, - ): - """Cifar10 Trainer handles train and submit_model tasks. During train_task, it trains a - simple network on CIFAR10 dataset. For submit_model task, it sends the locally trained model - (if present) to the server. - - Args: - lr (float, optional): Learning rate. Defaults to 0.01 - epochs (int, optional): Epochs. Defaults to 5 - train_task_name (str, optional): Task name for train task. Defaults to "train". - submit_model_task_name (str, optional): Task name for submit model. Defaults to "submit_model". - exclude_vars (list): List of variables to exclude during model loading. - pre_train_task_name: Task name for pre train task, i.e., sending initial model weights. - """ - super().__init__() - - self._lr = lr - self._epochs = epochs - self._train_task_name = train_task_name - self._pre_train_task_name = pre_train_task_name - self._submit_model_task_name = submit_model_task_name - self._exclude_vars = exclude_vars - - # Training setup - self.model = SimpleNetwork() - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.model.to(self.device) - self.loss = nn.CrossEntropyLoss() - self.optimizer = SGD(self.model.parameters(), lr=lr, momentum=0.9) - - # Create Cifar10 dataset for training. - transforms = Compose( - [ - ToTensor(), - Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), - ] - ) - self._train_dataset = CIFAR10(root=data_path, transform=transforms, download=True, train=True) - self._train_loader = DataLoader(self._train_dataset, batch_size=4, shuffle=True) - self._n_iterations = len(self._train_loader) - - # Setup the persistence manager to save PT model. - # The default training configuration is used by persistence manager - # in case no initial model is found. 
- self._default_train_conf = {"train": {"model": type(self.model).__name__}} - self.persistence_manager = PTModelPersistenceFormatManager( - data=self.model.state_dict(), default_train_conf=self._default_train_conf - ) - - def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable: - try: - if task_name == self._pre_train_task_name: - # Get the new state dict and send as weights - return self._get_model_weights() - elif task_name == self._train_task_name: - # Get model weights - try: - dxo = from_shareable(shareable) - except: - self.log_error(fl_ctx, "Unable to extract dxo from shareable.") - return make_reply(ReturnCode.BAD_TASK_DATA) - - # Ensure data kind is weights. - if not dxo.data_kind == DataKind.WEIGHTS: - self.log_error(fl_ctx, f"data_kind expected WEIGHTS but got {dxo.data_kind} instead.") - return make_reply(ReturnCode.BAD_TASK_DATA) - - # Convert weights to tensor. Run training - torch_weights = {k: torch.as_tensor(v) for k, v in dxo.data.items()} - self._local_train(fl_ctx, torch_weights, abort_signal) - - # Check the abort_signal after training. - # local_train returns early if abort_signal is triggered. - if abort_signal.triggered: - return make_reply(ReturnCode.TASK_ABORTED) - - # Save the local model after training. - self._save_local_model(fl_ctx) - - # Get the new state dict and send as weights - return self._get_model_weights() - elif task_name == self._submit_model_task_name: - # Load local model - ml = self._load_local_model(fl_ctx) - - # Get the model parameters and create dxo from it - dxo = model_learnable_to_dxo(ml) - return dxo.to_shareable() - else: - return make_reply(ReturnCode.TASK_UNKNOWN) - except Exception as e: - self.log_exception(fl_ctx, f"Exception in simple trainer: {e}.") - return make_reply(ReturnCode.EXECUTION_EXCEPTION) - - def _get_model_weights(self) -> Shareable: - # Get the new state dict and send as weights - weights = {k: v.cpu().numpy() for k, v in self.model.state_dict().items()} - - outgoing_dxo = DXO( - data_kind=DataKind.WEIGHTS, data=weights, meta={MetaKey.NUM_STEPS_CURRENT_ROUND: self._n_iterations} - ) - return outgoing_dxo.to_shareable() - - def _local_train(self, fl_ctx, weights, abort_signal): - # Set the model weights - self.model.load_state_dict(state_dict=weights) - - # Basic training - self.model.train() - for epoch in range(self._epochs): - running_loss = 0.0 - for i, batch in enumerate(self._train_loader): - if abort_signal.triggered: - # If abort_signal is triggered, we simply return. - # The outside function will check it again and decide steps to take. 
- return - - images, labels = batch[0].to(self.device), batch[1].to(self.device) - self.optimizer.zero_grad() - - predictions = self.model(images) - cost = self.loss(predictions, labels) - cost.backward() - self.optimizer.step() - - running_loss += cost.cpu().detach().numpy() / images.size()[0] - if i % 3000 == 0: - self.log_info( - fl_ctx, f"Epoch: {epoch}/{self._epochs}, Iteration: {i}, " f"Loss: {running_loss/3000}" - ) - running_loss = 0.0 - - def _save_local_model(self, fl_ctx: FLContext): - run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM)) - models_dir = os.path.join(run_dir, PTConstants.PTModelsDir) - if not os.path.exists(models_dir): - os.makedirs(models_dir) - model_path = os.path.join(models_dir, PTConstants.PTLocalModelName) - - ml = make_model_learnable(self.model.state_dict(), {}) - self.persistence_manager.update(ml) - torch.save(self.persistence_manager.to_persistence_dict(), model_path) - - def _load_local_model(self, fl_ctx: FLContext): - run_dir = fl_ctx.get_engine().get_workspace().get_run_dir(fl_ctx.get_prop(ReservedKey.RUN_NUM)) - models_dir = os.path.join(run_dir, PTConstants.PTModelsDir) - if not os.path.exists(models_dir): - return None - model_path = os.path.join(models_dir, PTConstants.PTLocalModelName) - - self.persistence_manager = PTModelPersistenceFormatManager( - data=torch.load(model_path), default_train_conf=self._default_train_conf - ) - ml = self.persistence_manager.to_model_learnable(exclude_vars=self._exclude_vars) - return ml diff --git a/examples/advanced/job_config/hello-pt/cifar10validator.py b/examples/advanced/job_config/hello-pt/cifar10validator.py deleted file mode 100644 index 80a0c7b714..0000000000 --- a/examples/advanced/job_config/hello-pt/cifar10validator.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import torch -from simple_network import SimpleNetwork -from torch.utils.data import DataLoader -from torchvision.datasets import CIFAR10 -from torchvision.transforms import Compose, Normalize, ToTensor - -from nvflare.apis.dxo import DXO, DataKind, from_shareable -from nvflare.apis.executor import Executor -from nvflare.apis.fl_constant import ReturnCode -from nvflare.apis.fl_context import FLContext -from nvflare.apis.shareable import Shareable, make_reply -from nvflare.apis.signal import Signal -from nvflare.app_common.app_constant import AppConstants - - -class Cifar10Validator(Executor): - def __init__(self, data_path="~/data", validate_task_name=AppConstants.TASK_VALIDATION): - super().__init__() - - self._validate_task_name = validate_task_name - - # Setup the model - self.model = SimpleNetwork() - self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - self.model.to(self.device) - - # Preparing the dataset for testing. 
- transforms = Compose( - [ - ToTensor(), - Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), - ] - ) - test_data = CIFAR10(root=data_path, train=False, transform=transforms) - self._test_loader = DataLoader(test_data, batch_size=4, shuffle=False) - - def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable: - if task_name == self._validate_task_name: - model_owner = "?" - try: - try: - dxo = from_shareable(shareable) - except: - self.log_error(fl_ctx, "Error in extracting dxo from shareable.") - return make_reply(ReturnCode.BAD_TASK_DATA) - - # Ensure data_kind is weights. - if not dxo.data_kind == DataKind.WEIGHTS: - self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.") - return make_reply(ReturnCode.BAD_TASK_DATA) - - # Extract weights and ensure they are tensor. - model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?") - weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()} - - # Get validation accuracy - val_accuracy = self._validate(weights, abort_signal) - if abort_signal.triggered: - return make_reply(ReturnCode.TASK_ABORTED) - - self.log_info( - fl_ctx, - f"Accuracy when validating {model_owner}'s model on" - f" {fl_ctx.get_identity_name()}" - f"s data: {val_accuracy}", - ) - - dxo = DXO(data_kind=DataKind.METRICS, data={"val_acc": val_accuracy}) - return dxo.to_shareable() - except: - self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}") - return make_reply(ReturnCode.EXECUTION_EXCEPTION) - else: - return make_reply(ReturnCode.TASK_UNKNOWN) - - def _validate(self, weights, abort_signal): - self.model.load_state_dict(weights) - - self.model.eval() - - correct = 0 - total = 0 - with torch.no_grad(): - for i, (images, labels) in enumerate(self._test_loader): - if abort_signal.triggered: - return 0 - - images, labels = images.to(self.device), labels.to(self.device) - output = self.model(images) - - _, pred_label = torch.max(output, 1) - - correct += (pred_label == labels).sum().item() - total += images.size()[0] - - metric = correct / float(total) - - return metric diff --git a/examples/advanced/job_config/hello-pt/hello_pt_job.py b/examples/advanced/job_config/hello-pt/hello_pt_job.py deleted file mode 100644 index 9f40041dac..0000000000 --- a/examples/advanced/job_config/hello-pt/hello_pt_job.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from add_shareable_parameter import AddShareable -from cifar10trainer import Cifar10Trainer -from cifar10validator import Cifar10Validator -from print_shareable_parameter import PrintShareable -from pt_model_locator import PTModelLocator - -from nvflare.apis.dxo import DataKind -from nvflare.app_common.aggregators import InTimeAccumulateWeightedAggregator -from nvflare.app_common.shareablegenerators import FullModelShareableGenerator -from nvflare.app_common.widgets.validation_json_generator import ValidationJsonGenerator -from nvflare.app_common.workflows.cross_site_model_eval import CrossSiteModelEval -from nvflare.app_common.workflows.initialize_global_weights import InitializeGlobalWeights -from nvflare.app_common.workflows.scatter_and_gather import ScatterAndGather -from nvflare.app_opt.pt import PTFileModelPersistor -from nvflare.job_config.fed_app_config import ClientAppConfig, FedAppConfig, ServerAppConfig -from nvflare.job_config.fed_job_config import FedJobConfig - - -class HelloPTJob: - def __init__(self) -> None: - super().__init__() - self.job = self.define_job() - - def define_job(self) -> FedJobConfig: - # job = FedJobConfig(job_name="hello-pt", min_clients=2, mandatory_clients="site-1") - job: FedJobConfig = FedJobConfig(job_name="hello-pt", min_clients=2) - - server_app = self._create_server_app() - client_app = self._create_client_app() - - app = FedAppConfig(server_app=server_app, client_app=client_app) - job.add_fed_app("app", app) - - # app = FedAppConfig(client_app=client_app) - # job.add_fed_app("client_app", app) - # job.set_site_app("server", "app") - # job.set_site_app("site-1", "app") - # job.set_site_app("site-2", "client_app") - # job.add_resource_spec("site-1", {"memory": "8GB"}) - - job.set_site_app("@ALL", "app") - - return job - - def _create_client_app(self): - client_app = ClientAppConfig() - executor = Cifar10Trainer(lr=0.01, epochs=1) - client_app.add_executor(["train", "submit_model", "get_weights"], executor) - validator = Cifar10Validator() - client_app.add_executor(["validate"], validator) - - task_filter = AddShareable() - client_app.add_task_result_filter(["train"], task_filter) - task_filter = PrintShareable() - client_app.add_task_data_filter(["validate", "train"], task_filter) - return client_app - - def _create_server_app(self): - server_app = ServerAppConfig() - controller = InitializeGlobalWeights(task_name="get_weights") - server_app.add_workflow("pre_train", controller) - controller = ScatterAndGather( - min_clients=2, - num_rounds=2, - start_round=0, - wait_time_after_min_received=10, - aggregator_id="aggregator", - persistor_id="persistor", - shareable_generator_id="shareable_generator", - train_task_name="train", - train_timeout=0, - ) - server_app.add_workflow("scatter_and_gather", controller) - controller = CrossSiteModelEval(model_locator_id="model_locator") - server_app.add_workflow("cross_site_validate", controller) - - component = PTFileModelPersistor() - server_app.add_component("persistor", component) - component = FullModelShareableGenerator() - server_app.add_component("shareable_generator", component) - component = InTimeAccumulateWeightedAggregator( - expected_data_kind=DataKind.WEIGHTS, aggregation_weights={"site-1": 1.0, "site-2": 1.0} - ) - server_app.add_component("aggregator", component) - component = PTModelLocator() - server_app.add_component("model_locator", component) - component = ValidationJsonGenerator() - server_app.add_component("json_generator", component) - - task_filter = AddShareable() - 
server_app.add_task_data_filter(["train"], task_filter) - task_filter = PrintShareable() - server_app.add_task_result_filter(["validate", "train"], task_filter) - return server_app - - def export_job(self, job_root): - self.job.generate_job_config(job_root) - - def simulator_run(self, workspace): - self.job.simulator_run(workspace, threads=2) - - -if __name__ == "__main__": - job = HelloPTJob() - - # job.export_job("/tmp/nvflare/jobs") - job.simulator_run("/tmp/nvflare/simulator_workspace") diff --git a/examples/advanced/job_config/hello-pt/print_shareable_parameter.py b/examples/advanced/job_config/hello-pt/print_shareable_parameter.py deleted file mode 100644 index 35d68317ba..0000000000 --- a/examples/advanced/job_config/hello-pt/print_shareable_parameter.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from nvflare.apis.dxo import from_shareable -from nvflare.apis.filter import Filter -from nvflare.apis.fl_context import FLContext -from nvflare.apis.shareable import Shareable - - -class PrintShareable(Filter): - def process(self, shareable: Shareable, fl_ctx: FLContext) -> Shareable: - dxo = from_shareable(shareable) - model_weights = dxo.data - - count = 0 - keys = "" - for item in model_weights.keys(): - keys += item + "; " - count += 1 - print(f"{fl_ctx.get_identity_name()} ----- Total parameters in the Shareable: {count}") - - return shareable diff --git a/examples/advanced/job_config/hello-pt/pt_constants.py b/examples/advanced/job_config/hello-pt/pt_constants.py deleted file mode 100644 index d8deca3517..0000000000 --- a/examples/advanced/job_config/hello-pt/pt_constants.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class PTConstants: - PTServerName = "server" - PTFileModelName = "FL_global_model.pt" - PTLocalModelName = "local_model.pt" - - PTModelsDir = "models" diff --git a/examples/advanced/job_config/hello-pt/pt_model_locator.py b/examples/advanced/job_config/hello-pt/pt_model_locator.py deleted file mode 100644 index 8681b2b529..0000000000 --- a/examples/advanced/job_config/hello-pt/pt_model_locator.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-from typing import List, Union
-
-import torch.cuda
-from pt_constants import PTConstants
-from simple_network import SimpleNetwork
-
-from nvflare.apis.dxo import DXO
-from nvflare.apis.fl_context import FLContext
-from nvflare.app_common.abstract.model import model_learnable_to_dxo
-from nvflare.app_common.abstract.model_locator import ModelLocator
-from nvflare.app_opt.pt.model_persistence_format_manager import PTModelPersistenceFormatManager
-
-
-class PTModelLocator(ModelLocator):
-    def __init__(self):
-        super().__init__()
-        self.model = SimpleNetwork()
-
-    def get_model_names(self, fl_ctx: FLContext) -> List[str]:
-        return [PTConstants.PTServerName]
-
-    def locate_model(self, model_name, fl_ctx: FLContext) -> Union[DXO, None]:
-        if model_name == PTConstants.PTServerName:
-            try:
-                server_run_dir = fl_ctx.get_engine().get_workspace().get_app_dir(fl_ctx.get_job_id())
-                model_path = os.path.join(server_run_dir, PTConstants.PTFileModelName)
-                if not os.path.exists(model_path):
-                    return None
-
-                # Load the torch model
-                device = "cuda" if torch.cuda.is_available() else "cpu"
-                data = torch.load(model_path, map_location=device)
-
-                # Set up the persistence manager.
-                if self.model:
-                    default_train_conf = {"train": {"model": type(self.model).__name__}}
-                else:
-                    default_train_conf = None
-
-                # Use persistence manager to get learnable
-                persistence_manager = PTModelPersistenceFormatManager(data, default_train_conf=default_train_conf)
-                ml = persistence_manager.to_model_learnable(exclude_vars=None)
-
-                # Create dxo and return
-                return model_learnable_to_dxo(ml)
-            except Exception as e:
-                self.log_error(fl_ctx, f"Error in retrieving {model_name}: {e}.", fire_event=False)
-                return None
-        else:
-            self.log_error(fl_ctx, f"PTModelLocator doesn't recognize name: {model_name}", fire_event=False)
-            return None
diff --git a/examples/getting_started/README.md b/examples/getting_started/README.md
index bb49a50c88..f87e553594 100644
--- a/examples/getting_started/README.md
+++ b/examples/getting_started/README.md
@@ -13,7 +13,8 @@ We can also add data filters (for example, for [homomorphic encryption](https://
 or [differential privacy filters](https://arxiv.org/abs/1910.00962)) to the task data or results received or produced by the server or clients.
-![NVIDIA FLARE Overview](../../docs/resources/nvflare_overview.svg)
+![NVIDIA FLARE Overview](../../docs/resources/controller_executor_no_filter.png)
+
 ### Examples
 We provide several examples to quickly get you started using NVFlare's Job API.
@@ -25,5 +26,5 @@ such as [FedOpt](https://arxiv.org/abs/2003.00295), or [SCAFFOLD](https://arxiv.
 ### 2. [Tensorflow Examples](./tf/README.md)
 ### 3. [Scikit-Learn Examples](./sklearn/README.md)
-> [!NOTE]
-> More examples can be found at https://nvidia.github.io/NVFlare.
+Once you have finished the above examples,
+you can look at the ["hello-world"](../hello-world) examples or explore more examples in the tutorial catalog at https://nvidia.github.io/NVFlare/.
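As a quick reference for reviewers, the Job API pattern that the updated README points to looks roughly like the sketch below. This is hypothetical code, not part of this change: it mirrors the getting-started scripts touched by this patch and assumes a local `src/net.py` defining `Net` plus a `src/cifar10_fl.py` training script.

from src.net import Net

from nvflare import FedAvg, FedJob, ScriptExecutor

if __name__ == "__main__":
    n_clients = 2

    job = FedJob(name="cifar10_fedavg")

    # Server side: the FedAvg controller and the initial global model.
    job.to_server(FedAvg(num_clients=n_clients, num_rounds=2))
    job.to_server(Net())

    # Client side: every client runs the same training script.
    job.to_clients(ScriptExecutor(task_script_path="src/cifar10_fl.py", task_script_args=""))

    # Execute the job locally with the FL simulator.
    job.simulator_run("/tmp/nvflare/jobs/workdir", n_clients=n_clients)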
diff --git a/examples/getting_started/pt/fedavg_script_executor_cifar10_all.py b/examples/getting_started/pt/fedavg_script_executor_cifar10_all.py deleted file mode 100644 index af15043785..0000000000 --- a/examples/getting_started/pt/fedavg_script_executor_cifar10_all.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from src.net import Net - -from nvflare import FedAvg, FedJob, ScriptExecutor - -if __name__ == "__main__": - n_clients = 2 - num_rounds = 2 - train_script = "src/cifar10_fl.py" - - job = FedJob(name="cifar10_fedavg") - - # Define the controller workflow and send to server - controller = FedAvg( - num_clients=n_clients, - num_rounds=num_rounds, - ) - job.to_server(controller) - - # Define the initial global model and send to server - job.to_server(Net()) - - # Send executor to all clients - executor = ScriptExecutor( - task_script_path=train_script, task_script_args="" # f"--batch_size 32 --data_path /tmp/data/site-{i}" - ) - job.to_clients(executor) - - # job.export_job("/tmp/nvflare/jobs/job_config") - job.simulator_run("/tmp/nvflare/jobs/workdir", n_clients=n_clients) diff --git a/examples/getting_started/pt/nvflare_lightning_getting_started.ipynb b/examples/getting_started/pt/nvflare_lightning_getting_started.ipynb index de68424250..ac9d91ab35 100644 --- a/examples/getting_started/pt/nvflare_lightning_getting_started.ipynb +++ b/examples/getting_started/pt/nvflare_lightning_getting_started.ipynb @@ -26,9 +26,7 @@ "number of training rounds). The clients run executors which can listen for tasks and perform the necessary computations locally, such as model training. This task-based interaction repeats\n", "until the experiment’s objectives are met. \n", "\n", - "We can also add data filters (for example, for [homomorphic encryption](https://www.usenix.org/conference/atc20/presentation/zhang-chengliang) or [differential privacy filters](https://arxiv.org/abs/1910.00962)) to the task data or results received or produced by the server or clients.\n", - "\n", - "\"NVIDIA" + "\"NVIDIA" ] }, { @@ -493,7 +491,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.8.19" } }, "nbformat": 4, diff --git a/examples/getting_started/pt/nvflare_pt_getting_started.ipynb b/examples/getting_started/pt/nvflare_pt_getting_started.ipynb index 8cdf81a097..35192010e4 100644 --- a/examples/getting_started/pt/nvflare_pt_getting_started.ipynb +++ b/examples/getting_started/pt/nvflare_pt_getting_started.ipynb @@ -16,7 +16,10 @@ { "cell_type": "markdown", "id": "fcf2b4a8-ed42-421d-8898-c0c93f9d8a09", - "metadata": {}, + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, "source": [ "## Basic Concepts\n", "At the heart of NVFlare lies the concept of collaboration through\n", @@ -26,9 +29,7 @@ "number of training rounds). 
The clients run executors which can listen for tasks and perform the necessary computations locally, such as model training. This task-based interaction repeats\n", "until the experiment’s objectives are met. \n", "\n", - "We can also add data filters (for example, for [homomorphic encryption](https://www.usenix.org/conference/atc20/presentation/zhang-chengliang) or [differential privacy filters](https://arxiv.org/abs/1910.00962)) to the task data or results received or produced by the server or clients.\n", - "\n", - "\"NVIDIA" + "\"NVIDIA" ] }, { @@ -265,7 +266,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "id": "13771bfb-901f-485a-9a23-84db1ccd5fe4", "metadata": {}, "outputs": [], @@ -286,7 +287,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "id": "6962e6cc-995e-4356-8156-3ceba2c7a249", "metadata": {}, "outputs": [], @@ -311,7 +312,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "id": "0e2c514c-7758-4d30-bb5c-ae3c63be50aa", "metadata": {}, "outputs": [], @@ -343,7 +344,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "id": "ad5d36fe-9ae5-43c3-80bc-2cdc66bf7a7e", "metadata": {}, "outputs": [], @@ -368,7 +369,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "id": "99a270bf-c906-425b-b999-2306cb76eb62", "metadata": {}, "outputs": [], @@ -387,12 +388,339 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 17, "id": "13068ab7-35cf-49e7-91ed-10993049ef0d", "metadata": { "scrolled": true }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2024-08-16 12:29:21,325 - SimulatorRunner - INFO - When running with multi GPU, each GPU group will run with only 1 thread. Set the Threads to 1.\n", + "2024-08-16 12:29:21,327 - SimulatorRunner - INFO - Create the Simulator Server.\n", + "2024-08-16 12:29:21,329 - CoreCell - INFO - server: creating listener on tcp://0:34219\n", + "2024-08-16 12:29:21,338 - CoreCell - INFO - server: created backbone external listener for tcp://0:34219\n", + "2024-08-16 12:29:21,339 - ConnectorManager - INFO - 27586: Try start_listener Listener resources: {'secure': False, 'host': 'localhost'}\n", + "2024-08-16 12:29:21,340 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connector [CH00002 PASSIVE tcp://0:26151] is starting\n", + "2024-08-16 12:29:21,841 - CoreCell - INFO - server: created backbone internal listener for tcp://localhost:26151\n", + "2024-08-16 12:29:21,842 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connector [CH00001 PASSIVE tcp://0:34219] is starting\n", + "2024-08-16 12:29:21,908 - nvflare.fuel.hci.server.hci - INFO - Starting Admin Server localhost on Port 50775\n", + "2024-08-16 12:29:21,908 - SimulatorRunner - INFO - Deploy the Apps.\n", + "2024-08-16 12:29:21,914 - SimulatorRunner - INFO - Create the simulate clients.\n", + "2024-08-16 12:29:21,920 - ClientManager - INFO - Client: New client site-1@192.168.86.31 joined. Sent token: 530696e3-3924-4486-8808-fce1b242f3db. Total clients: 1\n", + "2024-08-16 12:29:21,921 - FederatedClient - INFO - Successfully registered client:site-1 for project simulator_server. Token:530696e3-3924-4486-8808-fce1b242f3db SSID:\n", + "2024-08-16 12:29:21,923 - ClientManager - INFO - Client: New client site-2@192.168.86.31 joined. Sent token: 11c59cad-e85b-4807-a3dc-b0b37b56520f. 
Total clients: 2\n", + "2024-08-16 12:29:21,923 - FederatedClient - INFO - Successfully registered client:site-2 for project simulator_server. Token:11c59cad-e85b-4807-a3dc-b0b37b56520f SSID:\n", + "2024-08-16 12:29:21,924 - SimulatorRunner - INFO - Set the client status ready.\n", + "2024-08-16 12:29:21,924 - SimulatorRunner - INFO - Deploy and start the Server App.\n", + "2024-08-16 12:29:21,925 - Cell - INFO - Register blob CB for channel='server_command', topic='*'\n", + "2024-08-16 12:29:21,926 - Cell - INFO - Register blob CB for channel='aux_communication', topic='*'\n", + "2024-08-16 12:29:21,926 - ServerCommandAgent - INFO - ServerCommandAgent cell register_request_cb: server.simulate_job\n", + "2024-08-16 12:29:23,514 - IntimeModelSelector - INFO - model selection weights control: {}\n", + "2024-08-16 12:29:23,517 - AuxRunner - INFO - registered aux handler for topic __sync_runner__\n", + "2024-08-16 12:29:23,518 - AuxRunner - INFO - registered aux handler for topic __job_heartbeat__\n", + "2024-08-16 12:29:23,519 - AuxRunner - INFO - registered aux handler for topic __task_check__\n", + "2024-08-16 12:29:23,520 - AuxRunner - INFO - registered aux handler for topic RM.RELIABLE_REQUEST\n", + "2024-08-16 12:29:23,521 - AuxRunner - INFO - registered aux handler for topic RM.RELIABLE_REPLY\n", + "2024-08-16 12:29:23,521 - ReliableMessage - INFO - enabled reliable message: max_request_workers=20 query_interval=2.0\n", + "2024-08-16 12:29:23,522 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job]: Server runner starting ...\n", + "2024-08-16 12:29:23,523 - AuxRunner - INFO - registered aux handler for topic fed.event\n", + "2024-08-16 12:29:23,523 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job]: starting workflow controller () ...\n", + "2024-08-16 12:29:23,524 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Initializing BaseModelController workflow.\n", + "2024-08-16 12:29:23,524 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Workflow controller () started\n", + "2024-08-16 12:29:23,525 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Beginning model controller run.\n", + "2024-08-16 12:29:23,525 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Start FedAvg.\n", + "2024-08-16 12:29:23,526 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: loading initial model from persistor\n", + "2024-08-16 12:29:23,526 - PTFileModelPersistor - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Both source_ckpt_file_full_name and ckpt_preload_path are not provided. 
Using the default model weights initialized on the persistor side.\n", + "2024-08-16 12:29:23,527 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Round 0 started.\n", + "2024-08-16 12:29:23,528 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Sampled clients: ['site-1', 'site-2']\n", + "2024-08-16 12:29:23,528 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Sending task train to ['site-1', 'site-2']\n", + "2024-08-16 12:29:23,529 - WFCommServer - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: scheduled task train\n", + "2024-08-16 12:29:23,927 - SimulatorClientRunner - INFO - Start the clients run simulation.\n", + "2024-08-16 12:29:23,928 - SimulatorClientRunner - INFO - Start the clients run simulation.\n", + "2024-08-16 12:29:24,931 - SimulatorClientRunner - INFO - Simulate Run client: site-1 on GPU group: 0\n", + "2024-08-16 12:29:24,932 - SimulatorClientRunner - INFO - Simulate Run client: site-2 on GPU group: 0\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2024-08-16 12:29:26.813808: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", + "2024-08-16 12:29:26.847750: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2024-08-16 12:29:28,017 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connection [CN00005 127.0.0.1:34219 <= 127.0.0.1:50372] is created: PID: 27586\n", + "2024-08-16 12:29:28,030 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connection [CN00006 127.0.0.1:34219 <= 127.0.0.1:50382] is created: PID: 27586\n", + "2024-08-16 12:29:27,967 - ClientTaskWorker - INFO - ClientTaskWorker started to run\n", + "2024-08-16 12:29:27,981 - ClientTaskWorker - INFO - ClientTaskWorker started to run\n", + "2024-08-16 12:29:28,016 - CoreCell - INFO - site-1.simulate_job: created backbone external connector to tcp://localhost:34219\n", + "2024-08-16 12:29:28,016 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connector [CH00001 ACTIVE tcp://localhost:34219] is starting\n", + "2024-08-16 12:29:28,017 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connection [CN00002 127.0.0.1:50372 => 127.0.0.1:34219] is created: PID: 27698\n", + "2024-08-16 12:29:28,029 - CoreCell - INFO - site-2.simulate_job: created backbone external connector to tcp://localhost:34219\n", + "2024-08-16 12:29:28,029 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connector [CH00001 ACTIVE tcp://localhost:34219] is starting\n", + "2024-08-16 12:29:28,030 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connection [CN00002 127.0.0.1:50382 => 127.0.0.1:34219] is created: PID: 27699\n", + "2024-08-16 12:29:29,828 - AuxRunner - INFO - registered aux handler for topic __end_run__\n", + "2024-08-16 12:29:29,828 - AuxRunner - INFO - registered aux handler for topic __do_task__\n", + "2024-08-16 12:29:29,828 - Cell - INFO - Register blob CB for channel='aux_communication', topic='*'\n", + "2024-08-16 12:29:29,848 - AuxRunner - INFO - registered aux handler for topic __end_run__\n", + "2024-08-16 12:29:29,848 - AuxRunner - INFO - registered aux handler for topic __do_task__\n", + "2024-08-16 12:29:29,849 - Cell - INFO - Register blob CB for channel='aux_communication', topic='*'\n", + "2024-08-16 12:29:30,346 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, 
task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: assigned task to client site-1: name=train, id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20\n", + "2024-08-16 12:29:30,347 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: sent task assignment to client. client_name:site-1 task_id:c2ea9c72-12a7-4c56-bdac-7dfc83524f20\n", + "2024-08-16 12:29:30,348 - GetTaskCommand - INFO - return task to client. client_name: site-1 task_name: train task_id: c2ea9c72-12a7-4c56-bdac-7dfc83524f20 sharable_header_task_id: c2ea9c72-12a7-4c56-bdac-7dfc83524f20\n", + "2024-08-16 12:29:30,365 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-2, peer_run=simulate_job, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: assigned task to client site-2: name=train, id=8e246740-698d-406e-8cab-9f91a762db52\n", + "2024-08-16 12:29:30,366 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-2, peer_run=simulate_job, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: sent task assignment to client. client_name:site-2 task_id:8e246740-698d-406e-8cab-9f91a762db52\n", + "2024-08-16 12:29:30,366 - GetTaskCommand - INFO - return task to client. client_name: site-2 task_name: train task_id: 8e246740-698d-406e-8cab-9f91a762db52 sharable_header_task_id: 8e246740-698d-406e-8cab-9f91a762db52\n", + "2024-08-16 12:29:30,338 - Cell - INFO - broadcast: channel='aux_communication', topic='__sync_runner__', targets=['server.simulate_job'], timeout=2.0\n", + "2024-08-16 12:29:30,343 - ClientRunner - INFO - [identity=site-1, run=simulate_job]: synced to Server Runner in 0.5059611797332764 seconds\n", + "2024-08-16 12:29:30,343 - AuxRunner - INFO - registered aux handler for topic RM.RELIABLE_REQUEST\n", + "2024-08-16 12:29:30,343 - AuxRunner - INFO - registered aux handler for topic RM.RELIABLE_REPLY\n", + "2024-08-16 12:29:30,343 - ReliableMessage - INFO - enabled reliable message: max_request_workers=20 query_interval=2.0\n", + "2024-08-16 12:29:30,344 - TaskScriptRunner - INFO - start task run() with full path: /tmp/nvflare/jobs/workdir/site-1/simulate_job/app_site-1/custom/src/cifar10_fl.py\n", + "2024-08-16 12:29:30,344 - AuxRunner - INFO - registered aux handler for topic fed.event\n", + "2024-08-16 12:29:30,344 - ClientRunner - INFO - [identity=site-1, run=simulate_job]: client runner started\n", + "2024-08-16 12:29:30,344 - ClientTaskWorker - INFO - Initialize ClientRunner for client: site-1\n", + "2024-08-16 12:29:30,358 - Cell - INFO - broadcast: channel='aux_communication', topic='__sync_runner__', targets=['server.simulate_job'], timeout=2.0\n", + "2024-08-16 12:29:30,361 - ClientRunner - INFO - [identity=site-2, run=simulate_job]: synced to Server Runner in 0.5038559436798096 seconds\n", + "2024-08-16 12:29:30,362 - AuxRunner - INFO - registered aux handler for topic RM.RELIABLE_REQUEST\n", + "2024-08-16 12:29:30,362 - AuxRunner - INFO - registered aux handler for topic RM.RELIABLE_REPLY\n", + "2024-08-16 12:29:30,362 - ReliableMessage - INFO - enabled reliable message: max_request_workers=20 query_interval=2.0\n", + "2024-08-16 12:29:30,362 - TaskScriptRunner - INFO - start task run() with full path: /tmp/nvflare/jobs/workdir/site-2/simulate_job/app_site-2/custom/src/cifar10_fl.py\n", + "2024-08-16 12:29:30,362 - AuxRunner - INFO - registered aux handler for topic fed.event\n", 
+ "2024-08-16 12:29:30,362 - ClientRunner - INFO - [identity=site-2, run=simulate_job]: client runner started\n", + "2024-08-16 12:29:30,362 - ClientTaskWorker - INFO - Initialize ClientRunner for client: site-2\n", + "2024-08-16 12:29:30,393 - Communicator - INFO - Received from simulator_server server. getTask: train size: 251.5KB (251471 Bytes) time: 0.049074 seconds\n", + "2024-08-16 12:29:30,394 - FederatedClient - INFO - pull_task completed. Task name:train Status:True \n", + "2024-08-16 12:29:30,394 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job]: got task assignment: name=train, id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20\n", + "2024-08-16 12:29:30,394 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: invoking task executor ScriptExecutor\n", + "2024-08-16 12:29:30,394 - ScriptExecutor - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: execute for task (train)\n", + "2024-08-16 12:29:30,394 - ScriptExecutor - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: send data to peer\n", + "2024-08-16 12:29:30,394 - ScriptExecutor - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: sending payload to peer\n", + "2024-08-16 12:29:30,394 - ScriptExecutor - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: Waiting for result from peer\n", + "2024-08-16 12:29:30,412 - Communicator - INFO - Received from simulator_server server. getTask: train size: 251.5KB (251471 Bytes) time: 0.049131 seconds\n", + "2024-08-16 12:29:30,412 - FederatedClient - INFO - pull_task completed. 
Task name:train Status:True \n", + "2024-08-16 12:29:30,412 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job]: got task assignment: name=train, id=8e246740-698d-406e-8cab-9f91a762db52\n", + "2024-08-16 12:29:30,412 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: invoking task executor ScriptExecutor\n", + "2024-08-16 12:29:30,413 - ScriptExecutor - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: execute for task (train)\n", + "2024-08-16 12:29:30,413 - ScriptExecutor - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: send data to peer\n", + "2024-08-16 12:29:30,413 - ScriptExecutor - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: sending payload to peer\n", + "2024-08-16 12:29:30,413 - ScriptExecutor - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: Waiting for result from peer\n", + "2024-08-16 12:29:30,570 - nvflare.app_common.executors.task_script_runner - INFO - Files already downloaded and verified\n", + "2024-08-16 12:29:30,591 - nvflare.app_common.executors.task_script_runner - INFO - Files already downloaded and verified\n", + "2024-08-16 12:29:31,221 - nvflare.app_common.executors.task_script_runner - INFO - Files already downloaded and verified\n", + "2024-08-16 12:29:31,243 - nvflare.app_common.executors.task_script_runner - INFO - Files already downloaded and verified\n", + "2024-08-16 12:29:31,446 - nvflare.app_common.executors.task_script_runner - INFO - current_round=0\n", + "2024-08-16 12:29:31,471 - nvflare.app_common.executors.task_script_runner - INFO - current_round=0\n", + "2024-08-16 12:29:35,393 - nvflare.app_common.executors.task_script_runner - INFO - [1, 2000] loss: 2.169\n", + "2024-08-16 12:29:35,476 - nvflare.app_common.executors.task_script_runner - INFO - [1, 2000] loss: 2.182\n", + "2024-08-16 12:29:39,001 - nvflare.app_common.executors.task_script_runner - INFO - [1, 4000] loss: 1.868\n", + "2024-08-16 12:29:39,231 - nvflare.app_common.executors.task_script_runner - INFO - [1, 4000] loss: 1.889\n", + "2024-08-16 12:29:42,652 - nvflare.app_common.executors.task_script_runner - INFO - [1, 6000] loss: 1.678\n", + "2024-08-16 12:29:43,070 - nvflare.app_common.executors.task_script_runner - INFO - [1, 6000] loss: 1.678\n", + "2024-08-16 12:29:46,068 - nvflare.app_common.executors.task_script_runner - INFO - [1, 8000] loss: 1.575\n", + "2024-08-16 12:29:46,496 - nvflare.app_common.executors.task_script_runner - INFO - [1, 8000] loss: 1.582\n", + "2024-08-16 12:29:49,514 - nvflare.app_common.executors.task_script_runner - INFO - [1, 10000] loss: 1.513\n", + "2024-08-16 12:29:50,091 - nvflare.app_common.executors.task_script_runner - INFO - [1, 10000] loss: 1.511\n", + "2024-08-16 12:29:52,948 - nvflare.app_common.executors.task_script_runner - INFO - [1, 12000] loss: 1.455\n", + "2024-08-16 12:29:53,525 - nvflare.app_common.executors.task_script_runner - INFO - [1, 12000] loss: 1.459\n", + "2024-08-16 12:29:57,355 - nvflare.app_common.executors.task_script_runner - INFO - [2, 
2000] loss: 1.366\n", + "2024-08-16 12:29:58,065 - nvflare.app_common.executors.task_script_runner - INFO - [2, 2000] loss: 1.388\n", + "2024-08-16 12:30:00,912 - nvflare.app_common.executors.task_script_runner - INFO - [2, 4000] loss: 1.347\n", + "2024-08-16 12:30:01,693 - nvflare.app_common.executors.task_script_runner - INFO - [2, 4000] loss: 1.353\n", + "2024-08-16 12:30:04,517 - nvflare.app_common.executors.task_script_runner - INFO - [2, 6000] loss: 1.310\n", + "2024-08-16 12:30:05,375 - nvflare.app_common.executors.task_script_runner - INFO - [2, 6000] loss: 1.344\n", + "2024-08-16 12:30:08,208 - nvflare.app_common.executors.task_script_runner - INFO - [2, 8000] loss: 1.288\n", + "2024-08-16 12:30:09,223 - nvflare.app_common.executors.task_script_runner - INFO - [2, 8000] loss: 1.330\n", + "2024-08-16 12:30:11,910 - nvflare.app_common.executors.task_script_runner - INFO - [2, 10000] loss: 1.277\n", + "2024-08-16 12:30:12,984 - nvflare.app_common.executors.task_script_runner - INFO - [2, 10000] loss: 1.295\n", + "2024-08-16 12:30:16,006 - nvflare.app_common.executors.task_script_runner - INFO - [2, 12000] loss: 1.265\n", + "2024-08-16 12:30:17,097 - nvflare.app_common.executors.task_script_runner - INFO - Finished Training\n", + "2024-08-16 12:30:17,228 - nvflare.app_common.executors.task_script_runner - INFO - [2, 12000] loss: 1.258\n", + "2024-08-16 12:30:18,154 - nvflare.app_common.executors.task_script_runner - INFO - Finished Training\n", + "2024-08-16 12:30:19,991 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-2, peer_run=simulate_job]: got result from client site-2 for task: name=train, id=8e246740-698d-406e-8cab-9f91a762db52\n", + "2024-08-16 12:30:19,912 - nvflare.app_common.executors.task_script_runner - INFO - Accuracy of the network on the 10000 test images: 10 %\n", + "2024-08-16 12:30:19,915 - InProcessClientAPI - INFO - send local model back to peer \n", + "2024-08-16 12:30:19,980 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: finished processing task\n", + "2024-08-16 12:30:19,981 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: try #1: sending task result to server\n", + "2024-08-16 12:30:19,981 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: checking task ...\n", + "2024-08-16 12:30:19,981 - Cell - INFO - broadcast: channel='aux_communication', topic='__task_check__', targets=['server.simulate_job'], timeout=5.0\n", + "2024-08-16 12:30:19,984 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: start to send task result to server\n", + "2024-08-16 12:30:19,984 - FederatedClient - INFO - Starting to push execute result.\n", + "2024-08-16 12:30:20,228 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-2, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: finished processing client result by controller\n", + "2024-08-16 12:30:20,229 - SubmitUpdateCommand - INFO - submit_update process. 
client_name:site-2 task_id:8e246740-698d-406e-8cab-9f91a762db52\n", + "2024-08-16 12:30:20,231 - Communicator - INFO - SubmitUpdate size: 251.5KB (251509 Bytes). time: 0.247214 seconds\n", + "2024-08-16 12:30:20,232 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=8e246740-698d-406e-8cab-9f91a762db52]: task result sent to server\n", + "2024-08-16 12:30:20,232 - ClientTaskWorker - INFO - Finished one task run for client: site-2 interval: 2 task_processed: True\n", + "2024-08-16 12:30:20,661 - nvflare.app_common.executors.task_script_runner - INFO - Accuracy of the network on the 10000 test images: 10 %\n", + "2024-08-16 12:30:20,664 - InProcessClientAPI - INFO - send local model back to peer \n", + "2024-08-16 12:30:20,970 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job]: got result from client site-1 for task: name=train, id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20\n", + "2024-08-16 12:30:20,960 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: finished processing task\n", + "2024-08-16 12:30:20,961 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: try #1: sending task result to server\n", + "2024-08-16 12:30:20,961 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: checking task ...\n", + "2024-08-16 12:30:20,961 - Cell - INFO - broadcast: channel='aux_communication', topic='__task_check__', targets=['server.simulate_job'], timeout=5.0\n", + "2024-08-16 12:30:20,965 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: start to send task result to server\n", + "2024-08-16 12:30:20,965 - FederatedClient - INFO - Starting to push execute result.\n", + "2024-08-16 12:30:21,172 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: finished processing client result by controller\n", + "2024-08-16 12:30:21,173 - WFCommServer - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: task train exit with status TaskCompletionStatus.OK\n", + "2024-08-16 12:30:21,174 - SubmitUpdateCommand - INFO - submit_update process. client_name:site-1 task_id:c2ea9c72-12a7-4c56-bdac-7dfc83524f20\n", + "2024-08-16 12:30:21,372 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: aggregating 2 update(s) at round 0\n", + "2024-08-16 12:30:21,374 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: Start persist model on server.\n", + "2024-08-16 12:30:21,176 - Communicator - INFO - SubmitUpdate size: 251.5KB (251509 Bytes). 
time: 0.210118 seconds\n", + "2024-08-16 12:30:21,176 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: task result sent to server\n", + "2024-08-16 12:30:21,176 - ClientTaskWorker - INFO - Finished one task run for client: site-1 interval: 2 task_processed: True\n", + "2024-08-16 12:30:21,376 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: End persist model on server.\n", + "2024-08-16 12:30:21,377 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: Round 1 started.\n", + "2024-08-16 12:30:21,377 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: Sampled clients: ['site-1', 'site-2']\n", + "2024-08-16 12:30:21,378 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: Sending task train to ['site-1', 'site-2']\n", + "2024-08-16 12:30:21,379 - WFCommServer - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=c2ea9c72-12a7-4c56-bdac-7dfc83524f20]: scheduled task train\n", + "2024-08-16 12:30:22,237 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-2, peer_run=simulate_job, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: assigned task to client site-2: name=train, id=913b8035-b820-4e38-9efb-ba761ae5b0d8\n", + "2024-08-16 12:30:22,239 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-2, peer_run=simulate_job, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: sent task assignment to client. client_name:site-2 task_id:913b8035-b820-4e38-9efb-ba761ae5b0d8\n", + "2024-08-16 12:30:22,239 - GetTaskCommand - INFO - return task to client. client_name: site-2 task_name: train task_id: 913b8035-b820-4e38-9efb-ba761ae5b0d8 sharable_header_task_id: 913b8035-b820-4e38-9efb-ba761ae5b0d8\n", + "2024-08-16 12:30:22,244 - Communicator - INFO - Received from simulator_server server. getTask: train size: 251.5KB (251536 Bytes) time: 0.009714 seconds\n", + "2024-08-16 12:30:22,245 - FederatedClient - INFO - pull_task completed. 
Task name:train Status:True \n", + "2024-08-16 12:30:22,245 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job]: got task assignment: name=train, id=913b8035-b820-4e38-9efb-ba761ae5b0d8\n", + "2024-08-16 12:30:22,245 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: invoking task executor ScriptExecutor\n", + "2024-08-16 12:30:22,245 - ScriptExecutor - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: execute for task (train)\n", + "2024-08-16 12:30:22,245 - ScriptExecutor - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: send data to peer\n", + "2024-08-16 12:30:22,245 - ScriptExecutor - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: sending payload to peer\n", + "2024-08-16 12:30:22,246 - ScriptExecutor - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: Waiting for result from peer\n", + "2024-08-16 12:30:22,419 - nvflare.app_common.executors.task_script_runner - INFO - current_round=1\n", + "2024-08-16 12:30:23,179 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: assigned task to client site-1: name=train, id=410b5bfc-db8e-4149-b890-64ee730c4fa4\n", + "2024-08-16 12:30:23,180 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: sent task assignment to client. client_name:site-1 task_id:410b5bfc-db8e-4149-b890-64ee730c4fa4\n", + "2024-08-16 12:30:23,181 - GetTaskCommand - INFO - return task to client. client_name: site-1 task_name: train task_id: 410b5bfc-db8e-4149-b890-64ee730c4fa4 sharable_header_task_id: 410b5bfc-db8e-4149-b890-64ee730c4fa4\n", + "2024-08-16 12:30:23,185 - Communicator - INFO - Received from simulator_server server. getTask: train size: 251.5KB (251536 Bytes) time: 0.006905 seconds\n", + "2024-08-16 12:30:23,185 - FederatedClient - INFO - pull_task completed. 
Task name:train Status:True \n", + "2024-08-16 12:30:23,185 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job]: got task assignment: name=train, id=410b5bfc-db8e-4149-b890-64ee730c4fa4\n", + "2024-08-16 12:30:23,185 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: invoking task executor ScriptExecutor\n", + "2024-08-16 12:30:23,185 - ScriptExecutor - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: execute for task (train)\n", + "2024-08-16 12:30:23,185 - ScriptExecutor - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: send data to peer\n", + "2024-08-16 12:30:23,185 - ScriptExecutor - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: sending payload to peer\n", + "2024-08-16 12:30:23,186 - ScriptExecutor - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: Waiting for result from peer\n", + "2024-08-16 12:30:23,668 - nvflare.app_common.executors.task_script_runner - INFO - current_round=1\n", + "2024-08-16 12:30:25,721 - nvflare.app_common.executors.task_script_runner - INFO - [1, 2000] loss: 1.231\n", + "2024-08-16 12:30:27,161 - nvflare.app_common.executors.task_script_runner - INFO - [1, 2000] loss: 1.226\n", + "2024-08-16 12:30:29,237 - nvflare.app_common.executors.task_script_runner - INFO - [1, 4000] loss: 1.232\n", + "2024-08-16 12:30:30,768 - nvflare.app_common.executors.task_script_runner - INFO - [1, 4000] loss: 1.222\n", + "2024-08-16 12:30:33,029 - nvflare.app_common.executors.task_script_runner - INFO - [1, 6000] loss: 1.219\n", + "2024-08-16 12:30:34,387 - nvflare.app_common.executors.task_script_runner - INFO - [1, 6000] loss: 1.220\n", + "2024-08-16 12:30:36,732 - nvflare.app_common.executors.task_script_runner - INFO - [1, 8000] loss: 1.206\n", + "2024-08-16 12:30:38,238 - nvflare.app_common.executors.task_script_runner - INFO - [1, 8000] loss: 1.187\n", + "2024-08-16 12:30:41,041 - nvflare.app_common.executors.task_script_runner - INFO - [1, 10000] loss: 1.169\n", + "2024-08-16 12:30:42,687 - nvflare.app_common.executors.task_script_runner - INFO - [1, 10000] loss: 1.192\n", + "2024-08-16 12:30:45,298 - nvflare.app_common.executors.task_script_runner - INFO - [1, 12000] loss: 1.173\n", + "2024-08-16 12:30:46,980 - nvflare.app_common.executors.task_script_runner - INFO - [1, 12000] loss: 1.166\n", + "2024-08-16 12:30:49,936 - nvflare.app_common.executors.task_script_runner - INFO - [2, 2000] loss: 1.111\n", + "2024-08-16 12:30:51,449 - nvflare.app_common.executors.task_script_runner - INFO - [2, 2000] loss: 1.117\n", + "2024-08-16 12:30:53,517 - nvflare.app_common.executors.task_script_runner - INFO - [2, 4000] loss: 1.107\n", + "2024-08-16 12:30:54,919 - nvflare.app_common.executors.task_script_runner - INFO - [2, 4000] loss: 1.102\n", + "2024-08-16 12:30:57,182 - nvflare.app_common.executors.task_script_runner - INFO - [2, 6000] loss: 1.101\n", + "2024-08-16 12:30:58,493 - nvflare.app_common.executors.task_script_runner - INFO - [2, 6000] loss: 1.097\n", + "2024-08-16 12:31:00,841 - 
nvflare.app_common.executors.task_script_runner - INFO - [2, 8000] loss: 1.111\n", + "2024-08-16 12:31:01,974 - nvflare.app_common.executors.task_script_runner - INFO - [2, 8000] loss: 1.098\n", + "2024-08-16 12:31:04,379 - nvflare.app_common.executors.task_script_runner - INFO - [2, 10000] loss: 1.115\n", + "2024-08-16 12:31:05,393 - nvflare.app_common.executors.task_script_runner - INFO - [2, 10000] loss: 1.105\n", + "2024-08-16 12:31:07,895 - nvflare.app_common.executors.task_script_runner - INFO - [2, 12000] loss: 1.101\n", + "2024-08-16 12:31:08,798 - nvflare.app_common.executors.task_script_runner - INFO - Finished Training\n", + "2024-08-16 12:31:08,851 - nvflare.app_common.executors.task_script_runner - INFO - [2, 12000] loss: 1.095\n", + "2024-08-16 12:31:09,709 - nvflare.app_common.executors.task_script_runner - INFO - Finished Training\n", + "2024-08-16 12:31:11,804 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-2, peer_run=simulate_job]: got result from client site-2 for task: name=train, id=913b8035-b820-4e38-9efb-ba761ae5b0d8\n", + "2024-08-16 12:31:11,805 - IntimeModelSelector - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-2, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: validation metric 54 from client site-2\n", + "2024-08-16 12:31:11,644 - nvflare.app_common.executors.task_script_runner - INFO - Accuracy of the network on the 10000 test images: 54 %\n", + "2024-08-16 12:31:11,647 - InProcessClientAPI - INFO - send local model back to peer \n", + "2024-08-16 12:31:11,798 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: finished processing task\n", + "2024-08-16 12:31:11,798 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: try #1: sending task result to server\n", + "2024-08-16 12:31:11,798 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: checking task ...\n", + "2024-08-16 12:31:11,798 - Cell - INFO - broadcast: channel='aux_communication', topic='__task_check__', targets=['server.simulate_job'], timeout=5.0\n", + "2024-08-16 12:31:11,801 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: start to send task result to server\n", + "2024-08-16 12:31:11,801 - FederatedClient - INFO - Starting to push execute result.\n", + "2024-08-16 12:31:12,011 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-2, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: finished processing client result by controller\n", + "2024-08-16 12:31:12,012 - SubmitUpdateCommand - INFO - submit_update process. client_name:site-2 task_id:913b8035-b820-4e38-9efb-ba761ae5b0d8\n", + "2024-08-16 12:31:12,014 - Communicator - INFO - SubmitUpdate size: 251.5KB (251509 Bytes). 
time: 0.212835 seconds\n", + "2024-08-16 12:31:12,014 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=913b8035-b820-4e38-9efb-ba761ae5b0d8]: task result sent to server\n", + "2024-08-16 12:31:12,014 - ClientTaskWorker - INFO - Finished one task run for client: site-2 interval: 2 task_processed: True\n", + "2024-08-16 12:31:12,366 - nvflare.app_common.executors.task_script_runner - INFO - Accuracy of the network on the 10000 test images: 54 %\n", + "2024-08-16 12:31:12,370 - InProcessClientAPI - INFO - send local model back to peer \n", + "2024-08-16 12:31:12,753 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job]: got result from client site-1 for task: name=train, id=410b5bfc-db8e-4149-b890-64ee730c4fa4\n", + "2024-08-16 12:31:12,755 - IntimeModelSelector - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: validation metric 54 from client site-1\n", + "2024-08-16 12:31:12,744 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: finished processing task\n", + "2024-08-16 12:31:12,745 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: try #1: sending task result to server\n", + "2024-08-16 12:31:12,745 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: checking task ...\n", + "2024-08-16 12:31:12,745 - Cell - INFO - broadcast: channel='aux_communication', topic='__task_check__', targets=['server.simulate_job'], timeout=5.0\n", + "2024-08-16 12:31:12,749 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: start to send task result to server\n", + "2024-08-16 12:31:12,749 - FederatedClient - INFO - Starting to push execute result.\n", + "2024-08-16 12:31:12,962 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: finished processing client result by controller\n", + "2024-08-16 12:31:12,962 - WFCommServer - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: task train exit with status TaskCompletionStatus.OK\n", + "2024-08-16 12:31:12,963 - SubmitUpdateCommand - INFO - submit_update process. client_name:site-1 task_id:410b5bfc-db8e-4149-b890-64ee730c4fa4\n", + "2024-08-16 12:31:13,162 - IntimeModelSelector - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: new best validation metric at round 1: 54.0\n", + "2024-08-16 12:31:13,164 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: aggregating 2 update(s) at round 1\n", + "2024-08-16 12:31:12,965 - Communicator - INFO - SubmitUpdate size: 251.5KB (251509 Bytes). 
time: 0.215980 seconds\n", + "2024-08-16 12:31:12,965 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: task result sent to server\n", + "2024-08-16 12:31:12,965 - ClientTaskWorker - INFO - Finished one task run for client: site-1 interval: 2 task_processed: True\n", + "2024-08-16 12:31:13,166 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: Start persist model on server.\n", + "2024-08-16 12:31:13,167 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: End persist model on server.\n", + "2024-08-16 12:31:13,168 - FedAvg - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job, peer_rc=OK, task_name=train, task_id=410b5bfc-db8e-4149-b890-64ee730c4fa4]: Finished FedAvg.\n", + "2024-08-16 12:31:13,169 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Workflow: controller finalizing ...\n", + "2024-08-16 12:31:13,169 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: ABOUT_TO_END_RUN fired\n", + "2024-08-16 12:31:13,171 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Firing CHECK_END_RUN_READINESS ...\n", + "2024-08-16 12:31:13,172 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job]: received request from Server to end current RUN\n", + "2024-08-16 12:31:13,173 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job]: received request from Server to end current RUN\n", + "2024-08-16 12:31:13,673 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Firing CHECK_END_RUN_READINESS ...\n", + "2024-08-16 12:31:14,018 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-2, peer_run=simulate_job]: server runner is finalizing - asked client to end the run\n", + "2024-08-16 12:31:14,020 - GetTaskCommand - INFO - return task to client. client_name: site-2 task_name: __end_run__ task_id: sharable_header_task_id: \n", + "2024-08-16 12:31:14,175 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Firing CHECK_END_RUN_READINESS ...\n", + "2024-08-16 12:31:14,193 - FederatedClient - INFO - Shutting down client run: site-2\n", + "2024-08-16 12:31:14,195 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connection [CN00006 Not Connected] is closed PID: 27586\n", + "2024-08-16 12:31:14,021 - FederatedClient - INFO - pull_task completed. 
Task name:__end_run__ Status:True \n", + "2024-08-16 12:31:14,022 - ClientRunner - INFO - [identity=site-2, run=simulate_job, peer=simulator_server, peer_run=simulate_job]: server asked to end the run\n", + "2024-08-16 12:31:14,022 - ClientRunner - INFO - [identity=site-2, run=simulate_job]: started end-run events sequence\n", + "2024-08-16 12:31:14,022 - ClientRunner - INFO - [identity=site-2, run=simulate_job]: ABOUT_TO_END_RUN fired\n", + "2024-08-16 12:31:14,022 - ClientRunner - INFO - [identity=site-2, run=simulate_job]: Firing CHECK_END_RUN_READINESS ...\n", + "2024-08-16 12:31:14,022 - InProcessClientAPI - WARNING - ask to stop job: reason: END_RUN received\n", + "2024-08-16 12:31:14,149 - InProcessClientAPI - WARNING - request to stop the job for reason END_RUN received\n", + "2024-08-16 12:31:14,152 - ClientRunner - INFO - [identity=site-2, run=simulate_job]: END_RUN fired\n", + "2024-08-16 12:31:14,152 - ClientTaskWorker - INFO - End the Simulator run.\n", + "2024-08-16 12:31:14,193 - ClientTaskWorker - INFO - Clean up ClientRunner for : site-2 \n", + "2024-08-16 12:31:14,195 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connection [CN00002 Not Connected] is closed PID: 27699\n", + "2024-08-16 12:31:14,676 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Firing CHECK_END_RUN_READINESS ...\n", + "2024-08-16 12:31:14,968 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller, peer=site-1, peer_run=simulate_job]: server runner is finalizing - asked client to end the run\n", + "2024-08-16 12:31:14,969 - GetTaskCommand - INFO - return task to client. client_name: site-1 task_name: __end_run__ task_id: sharable_header_task_id: \n", + "2024-08-16 12:31:14,971 - FederatedClient - INFO - pull_task completed. 
Task name:__end_run__ Status:True \n", + "2024-08-16 12:31:14,971 - ClientRunner - INFO - [identity=site-1, run=simulate_job, peer=simulator_server, peer_run=simulate_job]: server asked to end the run\n", + "2024-08-16 12:31:14,971 - ClientRunner - INFO - [identity=site-1, run=simulate_job]: started end-run events sequence\n", + "2024-08-16 12:31:14,971 - ClientRunner - INFO - [identity=site-1, run=simulate_job]: ABOUT_TO_END_RUN fired\n", + "2024-08-16 12:31:14,971 - ClientRunner - INFO - [identity=site-1, run=simulate_job]: Firing CHECK_END_RUN_READINESS ...\n", + "2024-08-16 12:31:14,972 - InProcessClientAPI - WARNING - ask to stop job: reason: END_RUN received\n", + "2024-08-16 12:31:15,418 - FederatedClient - INFO - Shutting down client run: site-1\n", + "2024-08-16 12:31:15,419 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: asked to abort - triggered abort_signal to stop the RUN\n", + "2024-08-16 12:31:15,420 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connection [CN00005 Not Connected] is closed PID: 27586\n", + "2024-08-16 12:31:15,374 - InProcessClientAPI - WARNING - request to stop the job for reason END_RUN received\n", + "2024-08-16 12:31:15,376 - ClientRunner - INFO - [identity=site-1, run=simulate_job]: END_RUN fired\n", + "2024-08-16 12:31:15,377 - ClientTaskWorker - INFO - End the Simulator run.\n", + "2024-08-16 12:31:15,418 - ClientTaskWorker - INFO - Clean up ClientRunner for : site-1 \n", + "2024-08-16 12:31:15,420 - nvflare.fuel.f3.sfm.conn_manager - INFO - Connection [CN00002 Not Connected] is closed PID: 27698\n", + "2024-08-16 12:31:16,688 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: END_RUN fired\n", + "2024-08-16 12:31:16,690 - ReliableMessage - INFO - ReliableMessage is shutdown\n", + "2024-08-16 12:31:16,690 - ServerRunner - INFO - [identity=simulator_server, run=simulate_job, wf=controller]: Server runner finished.\n", + "2024-08-16 12:31:17,628 - ReliableMessage - INFO - shutdown reliable message monitor\n", + "2024-08-16 12:31:17,638 - SimulatorServer - INFO - Server app stopped.\n", + "\n", + "\n", + "2024-08-16 12:31:17,730 - nvflare.fuel.hci.server.hci - INFO - Admin Server localhost on Port 50775 shutdown!\n", + "2024-08-16 12:31:17,732 - SimulatorServer - INFO - shutting down server\n", + "2024-08-16 12:31:17,733 - SimulatorServer - INFO - canceling sync locks\n", + "2024-08-16 12:31:17,733 - SimulatorServer - INFO - server off\n", + "2024-08-16 12:31:21,035 - MPM - WARNING - #### MPM: still running thread Thread-14\n", + "2024-08-16 12:31:21,036 - MPM - INFO - MPM: Good Bye!\n" + ] + } + ], "source": [ "job.simulator_run(\"/tmp/nvflare/jobs/workdir\")" ] @@ -420,7 +748,8 @@ "id": "f474ddfa-0d2e-4d7f-b033-8ccfbbd57a75", "metadata": {}, "source": [ - "### Visualize the Training Results\n", + "tensorboard --logdir=/tmp/nvflare/jobs/workdir/server/simulate_job/tb_events\n", + "tensorboard --logdir=/tmp/nvflare/jobs/workdir/server/simulate_job/tb_events### Visualize the Training Results\n", "By default, we enable TensorBoard metric [streaming](https://nvflare.readthedocs.io/en/main/examples/tensorboard_streaming.html) using NVFlare's `SummaryWriter` in [src/cifar10_fl.py](src/cifar10_fl.py). 
\n", "\n", "The TensorBoard metrics will be received at the server, and you can visualize the training progress by running \n", @@ -449,7 +778,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.8.19" } }, "nbformat": 4, diff --git a/examples/getting_started/tf/README.md b/examples/getting_started/tf/README.md index 07122348a8..0d6c8be4dc 100644 --- a/examples/getting_started/tf/README.md +++ b/examples/getting_started/tf/README.md @@ -25,12 +25,6 @@ APIs were used to programmatically set up an alleviating the need of writing job config files, simplifying development process. -Before continuing with the following sections, you can first refer to -the [getting started notebook](nvflare_tf_getting_started.ipynb) -included under this folder, to learn more about the implementation -details, with an example walkthrough of FedAvg using a small -Tensorflow model. - ## 1. Install requirements Install required packages @@ -117,53 +111,11 @@ for alpha in 1.0 0.5 0.3 0.1; do done ``` -### 2.3 Advanced FL algorithms (FedOpt) - -Next, let's try some different FL algorithms on a more heterogeneous split: - -[FedOpt](https://arxiv.org/abs/2003.00295) uses optimizers on server -side to update the global model from client-side gradients. Here we -use SGD with momentum and cosine learning rate decay: -``` -python ./tf_fl_script_executor_cifar10.py \ - --algo fedopt \ - --n_clients 8 \ - --num_rounds 50 \ - --batch_size 64 \ - --epochs 4 \ - --alpha 0.1 -``` -[FedProx](https://arxiv.org/abs/1812.06127) adds a regularizer to the loss: -``` -python ./tf_fl_script_executor_cifar10.py \ - --algo fedprox \ - --n_clients 8 \ - --num_rounds 50 \ - --batch_size 64 \ - --epochs 4 \ - --fedprox_mu 1e-5 \ - --alpha 0.1 -``` -[SCAFFOLD](https://arxiv.org/abs/1910.06378) adds a correction term -during local training following the -[implementation](https://github.com/Xtra-Computing/NIID-Bench) as -described in [Li et al.](https://arxiv.org/abs/2102.02079) - -``` -python ./tf_fl_script_executor_cifar10.py \ - --algo scaffold \ - --n_clients 8 \ - --num_rounds 50 \ - --batch_size 64 \ - --epochs 4 \ - --fedprox_mu 1e-5 \ - --alpha 0.1 -``` -## 3. Results +## 2. Results Now let's compare experimental results. -### 3.1 Centralized training vs. FedAvg for homogeneous split +### 2.1 Centralized training vs. FedAvg for homogeneous split Let's first compare FedAvg with homogeneous data split (i.e. `alpha=1.0`) and centralized training. As can be seen from the figure and table below, FedAvg can achieve similar performance to @@ -177,7 +129,7 @@ no difference in data distributions among different clients. ![Central vs. FedAvg](./figs/fedavg-vs-centralized.png) -### 3.2 Impact of client data heterogeneity +### 2.2 Impact of client data heterogeneity Here we compare the impact of data heterogeneity by varying the `alpha` value, where lower values cause higher heterogeneity. As can @@ -193,26 +145,7 @@ as data heterogeneity becomes higher. ![Impact of client data heterogeneity](./figs/fedavg-diff-alphas.png) - -### 3.3 Impact of different FL algorithms - -Lastly, we compare the performance of different FL algorithms, with -`alpha` value fixed to 0.1, i.e., a high client data heterogeneity. We can observe from the figure below that, FedProx and -SCAFFOLD achieve better performance, with better convergence rates -compared to FedAvg and FedProx with the same alpha setting. 
SCAFFOLD -achieves that by adding a correction term when updating the client -models, while FedOpt utilizes SGD with momentum to update the global -model on the server. Both achieve better performance with the same -number of training steps as FedAvg/FedProx. - -| Config | Alpha | Val score | -| ----------- | ----------- | ----------- | -| cifar10_fedavg | 0.1 | 0.7903 | -| cifar10_fedopt | 0.1 | 0.8145 | -| cifar10_fedprox | 0.1 | 0.7843 | -| cifar10_scaffold | 0.1 | 0.8164 | - -![Impact of different FL algorithms](./figs/fedavg-diff-algos-new.png) + > [!NOTE] > More examples can be found at https://nvidia.github.io/NVFlare. diff --git a/examples/getting_started/tf/nvflare_tf_getting_started.ipynb b/examples/getting_started/tf/nvflare_tf_getting_started.ipynb index 17374b9356..3e0e134c73 100644 --- a/examples/getting_started/tf/nvflare_tf_getting_started.ipynb +++ b/examples/getting_started/tf/nvflare_tf_getting_started.ipynb @@ -26,9 +26,7 @@ "number of training rounds). The clients run executors which can listen for tasks and perform the necessary computations locally, such as model training. This task-based interaction repeats\n", "until the experiment’s objectives are met. \n", "\n", - "We can also add data filters (for example, for [homomorphic encryption](https://www.usenix.org/conference/atc20/presentation/zhang-chengliang) or [differential privacy filters](https://arxiv.org/abs/1910.00962)) to the task data or results received or produced by the server or clients.\n", - "\n", - "\"NVIDIA" + "\"NVIDIA" ] }, { @@ -438,7 +436,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.8.19" } }, "nbformat": 4, From 876e6d35ae71ccf13c0ab66951a6217e6ea46d06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Mon, 19 Aug 2024 08:14:25 -0700 Subject: [PATCH 24/26] Update secure xgboost examples (#2803) * Update secure xgboost examples * Update readme --------- Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> --- docs/user_guide.rst | 1 + examples/advanced/xgboost_secure/README.md | 68 ++++++++++++++--- .../app_server/config/config_fed_server.json | 35 --------- .../app_site-1/config/config_fed_client.json | 48 ------------ .../app_site-2/config/config_fed_client.json | 48 ------------ .../app_site-3/config/config_fed_client.json | 48 ------------ .../xgboost_secure/jobs/xgb_hori/meta.json | 19 ----- .../app_server/config/config_fed_server.json | 35 --------- .../app_site-1/config/config_fed_client.json | 48 ------------ .../app_site-2/config/config_fed_client.json | 48 ------------ .../app_site-3/config/config_fed_client.json | 48 ------------ .../jobs/xgb_hori_secure/meta.json | 19 ----- .../app_server/config/config_fed_server.json | 35 --------- .../app_site-1/config/config_fed_client.json | 48 ------------ .../app_site-2/config/config_fed_client.json | 48 ------------ .../app_site-3/config/config_fed_client.json | 48 ------------ .../xgboost_secure/jobs/xgb_vert/meta.json | 19 ----- .../app_server/config/config_fed_server.json | 35 --------- .../app_site-1/config/config_fed_client.json | 48 ------------ .../app_site-2/config/config_fed_client.json | 48 ------------ .../app_site-3/config/config_fed_client.json | 48 ------------ .../jobs/xgb_vert_secure/meta.json | 19 ----- .../xgboost_secure/prepare_flare_job.sh | 25 +++++++ examples/advanced/xgboost_secure/project.yml | 75 +++++++++++++++++++ .../advanced/xgboost_secure/requirements.txt | 5 +- 
.../xgboost_secure/run_training_flare.sh | 9 ++- job_templates/xgboost/config_fed_client.conf | 46 ++++++++++++ job_templates/xgboost/config_fed_server.conf | 33 ++++++++ job_templates/xgboost/meta.conf | 8 ++ ...cure_data_loader.py => csv_data_loader.py} | 23 +++--- 30 files changed, 264 insertions(+), 821 deletions(-) delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_hori/app_server/config/config_fed_server.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_hori/app_site-1/config/config_fed_client.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_hori/app_site-2/config/config_fed_client.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_hori/app_site-3/config/config_fed_client.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_hori/meta.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_server/config/config_fed_server.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_site-1/config/config_fed_client.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_site-2/config/config_fed_client.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_site-3/config/config_fed_client.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_hori_secure/meta.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_vert/app_server/config/config_fed_server.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_vert/app_site-1/config/config_fed_client.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_vert/app_site-2/config/config_fed_client.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_vert/app_site-3/config/config_fed_client.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_vert/meta.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_server/config/config_fed_server.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_site-1/config/config_fed_client.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_site-2/config/config_fed_client.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_site-3/config/config_fed_client.json delete mode 100644 examples/advanced/xgboost_secure/jobs/xgb_vert_secure/meta.json create mode 100755 examples/advanced/xgboost_secure/prepare_flare_job.sh create mode 100644 examples/advanced/xgboost_secure/project.yml create mode 100644 job_templates/xgboost/config_fed_client.conf create mode 100644 job_templates/xgboost/config_fed_server.conf create mode 100644 job_templates/xgboost/meta.conf rename nvflare/app_opt/xgboost/histogram_based_v2/{secure_data_loader.py => csv_data_loader.py} (63%) diff --git a/docs/user_guide.rst b/docs/user_guide.rst index 0670440c9b..4af0aad348 100644 --- a/docs/user_guide.rst +++ b/docs/user_guide.rst @@ -23,3 +23,4 @@ please refer to the :ref:`programming_guide`. 
user_guide/helm_chart user_guide/confidential_computing user_guide/hierarchy_unification_bridge + user_guide/federated_xgboost diff --git a/examples/advanced/xgboost_secure/README.md b/examples/advanced/xgboost_secure/README.md index be939d358c..258127c638 100644 --- a/examples/advanced/xgboost_secure/README.md +++ b/examples/advanced/xgboost_secure/README.md @@ -27,37 +27,81 @@ By default, we assume the dataset is downloaded, uncompressed, and stored in `${ To prepare data for further experiments, we perform the following steps: 1. Split the dataset into training/validation and testing sets. 2. Split the training/validation set: - * Into train and valid for baseline centralized training. - * Into train and valid for each client under horizontal setting. - * Into train and valid for each client under vertical setting. + * Into "train" and "valid" for baseline centralized training. + * Into "train" and "valid" for each client under horizontal setting. + * Into "train" and "valid" for each client under vertical setting. Data splits used in this example can be generated with ``` bash prepare_data.sh ``` -This will generate data splits for 3 clients under all experimental settings. In this example, we assume the Private Set Intersection (PSI) step has already been performed for vertical collaboration. -See [vertical xgboost](https://github.com/NVIDIA/NVFlare/tree/main/examples/advanced/vertical_xgboost) for more details. With this assumption, the overlapping ratio between clients for vertical setting is 1.0, such that the training data amount is the same as baseline and horizontal experiments. +This will generate data splits for 3 clients under all experimental settings. + +> **_NOTE:_** In this example, we have divided the dataset into separate columns for each site, +> assuming that the datasets from different sites have already been joined using Private Set +> Intersection (PSI). However, in practice, each site initially has its own separate dataset. To +> combine these datasets accurately, you need to use PSI to match records with the same ID across +> different sites. For more information on how to perform PSI, please refer to the +> [vertical xgboost example](https://github.com/NVIDIA/NVFlare/tree/main/examples/advanced/vertical_xgboost). + > **_NOTE:_** The generated data files will be stored in the folder `/tmp/nvflare/xgb_dataset/`, -> and will be used by jobs by specifying the path within `config_fed_client.json` +> and will be used by jobs by specifying the path within `config_fed_client` ## Run Baseline and Standalone Experiments First, we run the baseline centralized training and standalone federated XGBoost training for comparison. In this case, we utilized the `mock` plugin to simulate the homomorphic encryption process. -For more details regarding federated XGBoost and the interface-plugin design, please refer to our [documentation](). +For more details regarding federated XGBoost and the interface-plugin design, +please refer to our [documentation](https://nvflare.readthedocs.io/en/main/user_guide/federated_xgboost/secure_xgboost_user_guide.html). To run all experiments, we provide a script for all settings. ``` bash run_training_standalone.sh ``` -This will cover baseline centralized training, local FL with and without secure feature. +This will cover baseline centralized training, federated xgboost run in the same machine +(server and clients are running in different processes) with and without secure feature. 
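[Editor's note] To make the data layout concrete, here is a minimal sketch of how one client's CSV split can be loaded into XGBoost `DMatrix` objects, mirroring the CSV data loader that the NVFlare jobs in this example use (see `csv_data_loader.py` later in this patch series). The folder path and the `site-1` subdirectory are assumptions based on the default output of `prepare_data.sh`; inside a real job the rank and split mode come from the job configuration rather than being hard-coded.

```python
# Minimal sketch, not part of the example scripts.
# Assumptions: default prepare_data.sh output folder and a client subfolder "site-1".
import xgboost as xgb

folder = "/tmp/nvflare/xgb_dataset/horizontal_xgb_data"
client_id = "site-1"
split_mode = xgb.core.DataSplitMode.ROW  # vertical jobs use DataSplitMode.COL

# In row (horizontal) split mode every client holds the label column;
# in column (vertical) split mode only rank 0 does.
label = "&label_column=0" if split_mode == xgb.core.DataSplitMode.ROW else ""

train_data = xgb.DMatrix(f"{folder}/{client_id}/train.csv?format=csv{label}",
                         data_split_mode=split_mode)
valid_data = xgb.DMatrix(f"{folder}/{client_id}/valid.csv?format=csv{label}",
                         data_split_mode=split_mode)
```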
+
+
+## Generate the FLARE Jobs
+We can use our job template and `nvflare job` command to generate different jobs for
+different scenarios:
+
+```
+# config the job template directory
+nvflare config -jt ../../../job_templates/
+
+# create horizontal job
+nvflare job create -force -w xgboost -j ./jobs/xgb_hori \
+  -f config_fed_server.conf secure_training=false split_mode=0 \
+  -f config_fed_client.conf folder="/tmp/nvflare/xgb_dataset/horizontal_xgb_data"
+
+# create horizontal secure job
+nvflare job create -force -w xgboost -j ./jobs/xgb_hori_secure \
+  -f config_fed_server.conf secure_training=true split_mode=0 \
+  -f config_fed_client.conf folder="/tmp/nvflare/xgb_dataset/horizontal_xgb_data"
+
+# create vertical job
+nvflare job create -force -w xgboost -j ./jobs/xgb_vert \
+  -f config_fed_server.conf secure_training=false split_mode=1 \
+  -f config_fed_client.conf folder="/tmp/nvflare/xgb_dataset/vertical_xgb_data"
+
+# create vertical secure job
+nvflare job create -force -w xgboost -j ./jobs/xgb_vert_secure \
+  -f config_fed_server.conf secure_training=true split_mode=1 \
+  -f config_fed_client.conf folder="/tmp/nvflare/xgb_dataset/vertical_xgb_data"
+
+```
+
+Or you can just run the script:
+```
+bash prepare_flare_job.sh
+```
 
 ## Run Federated Experiments with NVFlare
 Next, we run the federated XGBoost training without and with homomorphic encryption using NVFlare.
 We run the NVFlare jobs using simulator with:
 ```
-bash run_training_fl.sh
+bash run_training_flare.sh
 ```
 
 The running time of each job depends mainly on the encryption workload.
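[Editor's note] For reference, the `secure_training` and `split_mode` values overridden above become arguments of the server-side `XGBFedController` workflow defined in the job template (`job_templates/xgboost/config_fed_server.conf`, added later in this patch). The sketch below shows the equivalent Python instantiation for the vertical secure job. It is illustrative only; it uses `data_split_mode`, the argument name introduced by a later patch in this series that renames `split_mode`.

```python
# Illustrative sketch of the server workflow configured by the template above,
# shown here for the vertical secure job. Argument values mirror
# job_templates/xgboost/config_fed_server.conf; data_split_mode is the renamed
# form of split_mode from a later patch in this series.
from nvflare.app_opt.xgboost.histogram_based_v2.fed_controller import XGBFedController

controller = XGBFedController(
    num_rounds=3,
    data_split_mode=1,      # 0 = horizontal (row split), 1 = vertical (column split)
    secure_training=True,   # enable the secure (encrypted) training path
    xgb_params={
        "max_depth": 3,
        "eta": 0.1,
        "objective": "binary:logistic",
        "eval_metric": "auc",
        "tree_method": "hist",
        "nthread": 1,
    },
    xgb_options={"early_stopping_rounds": 3},
    client_ranks={"site-1": 0, "site-2": 1, "site-3": 2},
    in_process=True,
)
```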
\ No newline at end of file +For more information on the secure xgboost user guide please refer to +https://nvflare.readthedocs.io/en/main/user_guide/federated_xgboost/secure_xgboost_user_guide.html diff --git a/examples/advanced/xgboost_secure/jobs/xgb_hori/app_server/config/config_fed_server.json b/examples/advanced/xgboost_secure/jobs/xgb_hori/app_server/config/config_fed_server.json deleted file mode 100644 index cad43329d5..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_hori/app_server/config/config_fed_server.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "format_version": 2, - "num_rounds": 3, - "task_data_filters": [], - "task_result_filters": [], - "components": [ - ], - "workflows": [ - { - "id": "xgb_controller", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_controller.XGBFedController", - "args": { - "num_rounds": "{num_rounds}", - "training_mode": "horizontal", - "xgb_options": { - "early_stopping_rounds": 3 - }, - "xgb_params": { - "max_depth": 3, - "eta": 0.1, - "objective": "binary:logistic", - "eval_metric": "auc", - "tree_method": "hist", - "nthread": 1 - }, - "client_ranks": { - "site-1": 0, - "site-2": 1, - "site-3": 2 - }, - "in_process": true - } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_hori/app_site-1/config/config_fed_client.json b/examples/advanced/xgboost_secure/jobs/xgb_hori/app_site-1/config/config_fed_client.json deleted file mode 100644 index 12a365ff69..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_hori/app_site-1/config/config_fed_client.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "format_version": 2, - "executors": [ - { - "tasks": [ - "config", - "start" - ], - "executor": { - "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", - "args": { - "data_loader_id": "dataloader", - "in_process": true - } - } - } - ], - "task_result_filters": [], - "task_data_filters": [], - "components": [ - { - "id": "dataloader", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.secure_data_loader.SecureDataLoader", - "args": { - "rank": 0, - "folder": "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" - } - }, - { - "id": "metrics_writer", - "path": "nvflare.app_opt.tracking.tb.tb_writer.TBWriter", - "args": { - "event_type": "analytix_log_stats" - } - }, - { - "id": "event_to_fed", - "name": "ConvertToFedEvent", - "args": { - "events_to_convert": [ - "analytix_log_stats" - ], - "fed_event_prefix": "fed." 
- } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_hori/app_site-2/config/config_fed_client.json b/examples/advanced/xgboost_secure/jobs/xgb_hori/app_site-2/config/config_fed_client.json deleted file mode 100644 index 0ac8786064..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_hori/app_site-2/config/config_fed_client.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "format_version": 2, - "executors": [ - { - "tasks": [ - "config", - "start" - ], - "executor": { - "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", - "args": { - "data_loader_id": "dataloader", - "in_process": true - } - } - } - ], - "task_result_filters": [], - "task_data_filters": [], - "components": [ - { - "id": "dataloader", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.secure_data_loader.SecureDataLoader", - "args": { - "rank": 1, - "folder": "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" - } - }, - { - "id": "metrics_writer", - "path": "nvflare.app_opt.tracking.tb.tb_writer.TBWriter", - "args": { - "event_type": "analytix_log_stats" - } - }, - { - "id": "event_to_fed", - "name": "ConvertToFedEvent", - "args": { - "events_to_convert": [ - "analytix_log_stats" - ], - "fed_event_prefix": "fed." - } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_hori/app_site-3/config/config_fed_client.json b/examples/advanced/xgboost_secure/jobs/xgb_hori/app_site-3/config/config_fed_client.json deleted file mode 100644 index 43567d51a7..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_hori/app_site-3/config/config_fed_client.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "format_version": 2, - "executors": [ - { - "tasks": [ - "config", - "start" - ], - "executor": { - "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", - "args": { - "data_loader_id": "dataloader", - "in_process": true - } - } - } - ], - "task_result_filters": [], - "task_data_filters": [], - "components": [ - { - "id": "dataloader", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.secure_data_loader.SecureDataLoader", - "args": { - "rank": 2, - "folder": "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" - } - }, - { - "id": "metrics_writer", - "path": "nvflare.app_opt.tracking.tb.tb_writer.TBWriter", - "args": { - "event_type": "analytix_log_stats" - } - }, - { - "id": "event_to_fed", - "name": "ConvertToFedEvent", - "args": { - "events_to_convert": [ - "analytix_log_stats" - ], - "fed_event_prefix": "fed." 
- } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_hori/meta.json b/examples/advanced/xgboost_secure/jobs/xgb_hori/meta.json deleted file mode 100644 index a98de75b18..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_hori/meta.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "name": "xgb_horizontal", - "resource_spec": {}, - "deploy_map": { - "app_server": [ - "server" - ], - "app_site-1": [ - "site-1" - ], - "app_site-2": [ - "site-2" - ], - "app_site-3": [ - "site-3" - ] - }, - "min_clients": 3 -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_server/config/config_fed_server.json b/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_server/config/config_fed_server.json deleted file mode 100644 index 091c8d4191..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_server/config/config_fed_server.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "format_version": 2, - "num_rounds": 3, - "task_data_filters": [], - "task_result_filters": [], - "components": [ - ], - "workflows": [ - { - "id": "xgb_controller", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_controller.XGBFedController", - "args": { - "num_rounds": "{num_rounds}", - "training_mode": "horizontal_secure", - "xgb_options": { - "early_stopping_rounds": 3 - }, - "xgb_params": { - "max_depth": 3, - "eta": 0.1, - "objective": "binary:logistic", - "eval_metric": "auc", - "tree_method": "hist", - "nthread": 1 - }, - "client_ranks": { - "site-1": 0, - "site-2": 1, - "site-3": 2 - }, - "in_process": true - } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_site-1/config/config_fed_client.json b/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_site-1/config/config_fed_client.json deleted file mode 100644 index 12a365ff69..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_site-1/config/config_fed_client.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "format_version": 2, - "executors": [ - { - "tasks": [ - "config", - "start" - ], - "executor": { - "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", - "args": { - "data_loader_id": "dataloader", - "in_process": true - } - } - } - ], - "task_result_filters": [], - "task_data_filters": [], - "components": [ - { - "id": "dataloader", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.secure_data_loader.SecureDataLoader", - "args": { - "rank": 0, - "folder": "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" - } - }, - { - "id": "metrics_writer", - "path": "nvflare.app_opt.tracking.tb.tb_writer.TBWriter", - "args": { - "event_type": "analytix_log_stats" - } - }, - { - "id": "event_to_fed", - "name": "ConvertToFedEvent", - "args": { - "events_to_convert": [ - "analytix_log_stats" - ], - "fed_event_prefix": "fed." 
- } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_site-2/config/config_fed_client.json b/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_site-2/config/config_fed_client.json deleted file mode 100644 index 0ac8786064..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_site-2/config/config_fed_client.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "format_version": 2, - "executors": [ - { - "tasks": [ - "config", - "start" - ], - "executor": { - "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", - "args": { - "data_loader_id": "dataloader", - "in_process": true - } - } - } - ], - "task_result_filters": [], - "task_data_filters": [], - "components": [ - { - "id": "dataloader", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.secure_data_loader.SecureDataLoader", - "args": { - "rank": 1, - "folder": "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" - } - }, - { - "id": "metrics_writer", - "path": "nvflare.app_opt.tracking.tb.tb_writer.TBWriter", - "args": { - "event_type": "analytix_log_stats" - } - }, - { - "id": "event_to_fed", - "name": "ConvertToFedEvent", - "args": { - "events_to_convert": [ - "analytix_log_stats" - ], - "fed_event_prefix": "fed." - } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_site-3/config/config_fed_client.json b/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_site-3/config/config_fed_client.json deleted file mode 100644 index 43567d51a7..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/app_site-3/config/config_fed_client.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "format_version": 2, - "executors": [ - { - "tasks": [ - "config", - "start" - ], - "executor": { - "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", - "args": { - "data_loader_id": "dataloader", - "in_process": true - } - } - } - ], - "task_result_filters": [], - "task_data_filters": [], - "components": [ - { - "id": "dataloader", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.secure_data_loader.SecureDataLoader", - "args": { - "rank": 2, - "folder": "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" - } - }, - { - "id": "metrics_writer", - "path": "nvflare.app_opt.tracking.tb.tb_writer.TBWriter", - "args": { - "event_type": "analytix_log_stats" - } - }, - { - "id": "event_to_fed", - "name": "ConvertToFedEvent", - "args": { - "events_to_convert": [ - "analytix_log_stats" - ], - "fed_event_prefix": "fed." 
- } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/meta.json b/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/meta.json deleted file mode 100644 index c01b56a3ea..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_hori_secure/meta.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "name": "xgb_horizontal_secure", - "resource_spec": {}, - "deploy_map": { - "app_server": [ - "server" - ], - "app_site-1": [ - "site-1" - ], - "app_site-2": [ - "site-2" - ], - "app_site-3": [ - "site-3" - ] - }, - "min_clients": 3 -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_vert/app_server/config/config_fed_server.json b/examples/advanced/xgboost_secure/jobs/xgb_vert/app_server/config/config_fed_server.json deleted file mode 100644 index 1e5d8d2663..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_vert/app_server/config/config_fed_server.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "format_version": 2, - "num_rounds": 3, - "task_data_filters": [], - "task_result_filters": [], - "components": [ - ], - "workflows": [ - { - "id": "xgb_controller", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_controller.XGBFedController", - "args": { - "num_rounds": "{num_rounds}", - "training_mode": "vertical", - "xgb_options": { - "early_stopping_rounds": 3 - }, - "xgb_params": { - "max_depth": 3, - "eta": 0.1, - "objective": "binary:logistic", - "eval_metric": "auc", - "tree_method": "hist", - "nthread": 1 - }, - "client_ranks": { - "site-1": 0, - "site-2": 1, - "site-3": 2 - }, - "in_process": true - } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_vert/app_site-1/config/config_fed_client.json b/examples/advanced/xgboost_secure/jobs/xgb_vert/app_site-1/config/config_fed_client.json deleted file mode 100644 index fa6fd76a23..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_vert/app_site-1/config/config_fed_client.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "format_version": 2, - "executors": [ - { - "tasks": [ - "config", - "start" - ], - "executor": { - "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", - "args": { - "data_loader_id": "dataloader", - "in_process": true - } - } - } - ], - "task_result_filters": [], - "task_data_filters": [], - "components": [ - { - "id": "dataloader", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.secure_data_loader.SecureDataLoader", - "args": { - "rank": 0, - "folder": "/tmp/nvflare/xgb_dataset/vertical_xgb_data" - } - }, - { - "id": "metrics_writer", - "path": "nvflare.app_opt.tracking.tb.tb_writer.TBWriter", - "args": { - "event_type": "analytix_log_stats" - } - }, - { - "id": "event_to_fed", - "name": "ConvertToFedEvent", - "args": { - "events_to_convert": [ - "analytix_log_stats" - ], - "fed_event_prefix": "fed." 
- } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_vert/app_site-2/config/config_fed_client.json b/examples/advanced/xgboost_secure/jobs/xgb_vert/app_site-2/config/config_fed_client.json deleted file mode 100644 index e38f1fdf32..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_vert/app_site-2/config/config_fed_client.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "format_version": 2, - "executors": [ - { - "tasks": [ - "config", - "start" - ], - "executor": { - "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", - "args": { - "data_loader_id": "dataloader", - "in_process": true - } - } - } - ], - "task_result_filters": [], - "task_data_filters": [], - "components": [ - { - "id": "dataloader", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.secure_data_loader.SecureDataLoader", - "args": { - "rank": 1, - "folder": "/tmp/nvflare/xgb_dataset/vertical_xgb_data" - } - }, - { - "id": "metrics_writer", - "path": "nvflare.app_opt.tracking.tb.tb_writer.TBWriter", - "args": { - "event_type": "analytix_log_stats" - } - }, - { - "id": "event_to_fed", - "name": "ConvertToFedEvent", - "args": { - "events_to_convert": [ - "analytix_log_stats" - ], - "fed_event_prefix": "fed." - } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_vert/app_site-3/config/config_fed_client.json b/examples/advanced/xgboost_secure/jobs/xgb_vert/app_site-3/config/config_fed_client.json deleted file mode 100644 index 2df2e4a405..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_vert/app_site-3/config/config_fed_client.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "format_version": 2, - "executors": [ - { - "tasks": [ - "config", - "start" - ], - "executor": { - "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", - "args": { - "data_loader_id": "dataloader", - "in_process": true - } - } - } - ], - "task_result_filters": [], - "task_data_filters": [], - "components": [ - { - "id": "dataloader", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.secure_data_loader.SecureDataLoader", - "args": { - "rank": 2, - "folder": "/tmp/nvflare/xgb_dataset/vertical_xgb_data" - } - }, - { - "id": "metrics_writer", - "path": "nvflare.app_opt.tracking.tb.tb_writer.TBWriter", - "args": { - "event_type": "analytix_log_stats" - } - }, - { - "id": "event_to_fed", - "name": "ConvertToFedEvent", - "args": { - "events_to_convert": [ - "analytix_log_stats" - ], - "fed_event_prefix": "fed." 
- } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_vert/meta.json b/examples/advanced/xgboost_secure/jobs/xgb_vert/meta.json deleted file mode 100644 index ca5f164f7c..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_vert/meta.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "name": "xgb_vertical", - "resource_spec": {}, - "deploy_map": { - "app_server": [ - "server" - ], - "app_site-1": [ - "site-1" - ], - "app_site-2": [ - "site-2" - ], - "app_site-3": [ - "site-3" - ] - }, - "min_clients": 3 -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_server/config/config_fed_server.json b/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_server/config/config_fed_server.json deleted file mode 100644 index dd24fddfe2..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_server/config/config_fed_server.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "format_version": 2, - "num_rounds": 3, - "task_data_filters": [], - "task_result_filters": [], - "components": [ - ], - "workflows": [ - { - "id": "xgb_controller", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_controller.XGBFedController", - "args": { - "num_rounds": "{num_rounds}", - "training_mode": "vertical_secure", - "xgb_options": { - "early_stopping_rounds": 3 - }, - "xgb_params": { - "max_depth": 3, - "eta": 0.1, - "objective": "binary:logistic", - "eval_metric": "auc", - "tree_method": "hist", - "nthread": 1 - }, - "client_ranks": { - "site-1": 0, - "site-2": 1, - "site-3": 2 - }, - "in_process": true - } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_site-1/config/config_fed_client.json b/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_site-1/config/config_fed_client.json deleted file mode 100644 index fa6fd76a23..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_site-1/config/config_fed_client.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "format_version": 2, - "executors": [ - { - "tasks": [ - "config", - "start" - ], - "executor": { - "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", - "args": { - "data_loader_id": "dataloader", - "in_process": true - } - } - } - ], - "task_result_filters": [], - "task_data_filters": [], - "components": [ - { - "id": "dataloader", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.secure_data_loader.SecureDataLoader", - "args": { - "rank": 0, - "folder": "/tmp/nvflare/xgb_dataset/vertical_xgb_data" - } - }, - { - "id": "metrics_writer", - "path": "nvflare.app_opt.tracking.tb.tb_writer.TBWriter", - "args": { - "event_type": "analytix_log_stats" - } - }, - { - "id": "event_to_fed", - "name": "ConvertToFedEvent", - "args": { - "events_to_convert": [ - "analytix_log_stats" - ], - "fed_event_prefix": "fed." 
- } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_site-2/config/config_fed_client.json b/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_site-2/config/config_fed_client.json deleted file mode 100644 index e38f1fdf32..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_site-2/config/config_fed_client.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "format_version": 2, - "executors": [ - { - "tasks": [ - "config", - "start" - ], - "executor": { - "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", - "args": { - "data_loader_id": "dataloader", - "in_process": true - } - } - } - ], - "task_result_filters": [], - "task_data_filters": [], - "components": [ - { - "id": "dataloader", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.secure_data_loader.SecureDataLoader", - "args": { - "rank": 1, - "folder": "/tmp/nvflare/xgb_dataset/vertical_xgb_data" - } - }, - { - "id": "metrics_writer", - "path": "nvflare.app_opt.tracking.tb.tb_writer.TBWriter", - "args": { - "event_type": "analytix_log_stats" - } - }, - { - "id": "event_to_fed", - "name": "ConvertToFedEvent", - "args": { - "events_to_convert": [ - "analytix_log_stats" - ], - "fed_event_prefix": "fed." - } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_site-3/config/config_fed_client.json b/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_site-3/config/config_fed_client.json deleted file mode 100644 index 2df2e4a405..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/app_site-3/config/config_fed_client.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "format_version": 2, - "executors": [ - { - "tasks": [ - "config", - "start" - ], - "executor": { - "id": "Executor", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor", - "args": { - "data_loader_id": "dataloader", - "in_process": true - } - } - } - ], - "task_result_filters": [], - "task_data_filters": [], - "components": [ - { - "id": "dataloader", - "path": "nvflare.app_opt.xgboost.histogram_based_v2.secure_data_loader.SecureDataLoader", - "args": { - "rank": 2, - "folder": "/tmp/nvflare/xgb_dataset/vertical_xgb_data" - } - }, - { - "id": "metrics_writer", - "path": "nvflare.app_opt.tracking.tb.tb_writer.TBWriter", - "args": { - "event_type": "analytix_log_stats" - } - }, - { - "id": "event_to_fed", - "name": "ConvertToFedEvent", - "args": { - "events_to_convert": [ - "analytix_log_stats" - ], - "fed_event_prefix": "fed." 
- } - } - ] -} diff --git a/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/meta.json b/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/meta.json deleted file mode 100644 index 13a643e73a..0000000000 --- a/examples/advanced/xgboost_secure/jobs/xgb_vert_secure/meta.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "name": "xgb_vertical_secure", - "resource_spec": {}, - "deploy_map": { - "app_server": [ - "server" - ], - "app_site-1": [ - "site-1" - ], - "app_site-2": [ - "site-2" - ], - "app_site-3": [ - "site-3" - ] - }, - "min_clients": 3 -} diff --git a/examples/advanced/xgboost_secure/prepare_flare_job.sh b/examples/advanced/xgboost_secure/prepare_flare_job.sh new file mode 100755 index 0000000000..accc723845 --- /dev/null +++ b/examples/advanced/xgboost_secure/prepare_flare_job.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + + +# config the job template directory +nvflare config -jt ../../../job_templates/ + +# create horizontal job +nvflare job create -force -w xgboost -j ./jobs/xgb_hori \ + -f config_fed_server.conf secure_training=false split_mode=0 \ + -f config_fed_client.conf folder="/tmp/nvflare/xgb_dataset/horizontal_xgb_data" + +# create horizontal secure job +nvflare job create -force -w xgboost -j ./jobs/xgb_hori_secure \ + -f config_fed_server.conf secure_training=true split_mode=0 \ + -f config_fed_client.conf folder="/tmp/nvflare/xgb_dataset/horizontal_xgb_data" + +# create vertical job +nvflare job create -force -w xgboost -j ./jobs/xgb_vert \ + -f config_fed_server.conf secure_training=false split_mode=1 \ + -f config_fed_client.conf folder="/tmp/nvflare/xgb_dataset/vertical_xgb_data" + +# create vertical secure job +nvflare job create -force -w xgboost -j ./jobs/xgb_vert_secure \ + -f config_fed_server.conf secure_training=true split_mode=1 \ + -f config_fed_client.conf folder="/tmp/nvflare/xgb_dataset/vertical_xgb_data" diff --git a/examples/advanced/xgboost_secure/project.yml b/examples/advanced/xgboost_secure/project.yml new file mode 100644 index 0000000000..8d7072ec46 --- /dev/null +++ b/examples/advanced/xgboost_secure/project.yml @@ -0,0 +1,75 @@ +api_version: 3 +name: example_project +description: NVIDIA FLARE sample project yaml file + +participants: + # change overseer.example.com to the FQDN of the overseer + - name: overseer + type: overseer + org: nvidia + protocol: https + api_root: /api/v1 + port: 8443 + # change example.com to the FQDN of the server + - name: server1 + type: server + org: nvidia + fed_learn_port: 8002 + admin_port: 8003 + - name: site-1 + type: client + org: nvidia + # listening_host will enable creating one pair of cert/private key for this client + # so it can behave like a server for client api. The value must be a hostname that + # client api can reach via network. + # listening_host: site-1-lh + - name: site-2 + type: client + org: nvidia + - name: admin@nvidia.com + type: admin + org: nvidia + role: project_admin + +# The same methods in all builders are called in their order defined in builders section +builders: + - path: nvflare.lighter.impl.workspace.WorkspaceBuilder + args: + template_file: master_template.yml + - path: nvflare.lighter.impl.template.TemplateBuilder + - path: nvflare.lighter.impl.static_file.StaticFileBuilder + args: + # config_folder can be set to inform NVIDIA FLARE where to get configuration + config_folder: config + + # scheme for communication driver (currently supporting the default, grpc, only). 
+ # scheme: grpc + + # app_validator is used to verify if uploaded app has proper structures + # if not set, no app_validator is included in fed_server.json + # app_validator: PATH_TO_YOUR_OWN_APP_VALIDATOR + + # when docker_image is set to a docker image name, docker.sh will be generated on server/client/admin + # docker_image: + + # download_job_url is set to http://download.server.com/ as default in fed_server.json. You can override this + # to different url. + # download_job_url: http://download.server.com/ + + overseer_agent: + path: nvflare.ha.overseer_agent.HttpOverseerAgent + # if overseer_exists is true, args here are ignored. Provisioning + # tool will fill role, name and other local parameters automatically. + # if overseer_exists is false, args in this section will be used. + overseer_exists: true + # args: + # sp_end_point: example1.com.8002:8003 + + - path: nvflare.lighter.impl.cert.CertBuilder + - path: nvflare.lighter.impl.he.HEBuilder + args: + poly_modulus_degree: 8192 + coeff_mod_bit_sizes: [60, 40, 40] + scale_bits: 40 + scheme: CKKS + - path: nvflare.lighter.impl.signature.SignatureBuilder diff --git a/examples/advanced/xgboost_secure/requirements.txt b/examples/advanced/xgboost_secure/requirements.txt index c9f1320544..36d3fe78f2 100644 --- a/examples/advanced/xgboost_secure/requirements.txt +++ b/examples/advanced/xgboost_secure/requirements.txt @@ -1,9 +1,10 @@ nvflare~=2.5.0rc ipcl_python @ git+https://github.com/intel/pailliercryptolib_python.git@development -xgboost @ https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/vertical-federated-learning/xgboost-2.1.0.dev0%2Bde4013fc733648dfe5c2c803a13e2782056e00a2-py3-none-manylinux_2_28_x86_64.whl +--extra-index-url https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/list.html?prefix=federated-secure/ +xgboost pandas scikit-learn shap matplotlib tensorboard -tenseal \ No newline at end of file +tenseal diff --git a/examples/advanced/xgboost_secure/run_training_flare.sh b/examples/advanced/xgboost_secure/run_training_flare.sh index 12070115cc..dcc5821e32 100755 --- a/examples/advanced/xgboost_secure/run_training_flare.sh +++ b/examples/advanced/xgboost_secure/run_training_flare.sh @@ -1,12 +1,15 @@ #!/usr/bin/env bash WORKSPACE_ROOT="/tmp/nvflare/xgb_workspaces" -n=2 +n=3 echo "Training horizontal" nvflare simulator jobs/xgb_hori -w ${WORKSPACE_ROOT}/workspace_hori -n ${n} -t ${n} +echo "Prepare secure horizontal tenseal context" +nvflare provision -p project.yml -w ${WORKSPACE_ROOT}/workspace_hori_secure echo "Training secure horizontal" -nvflare simulator jobs/xgb_hori_secure -w ${WORKSPACE_ROOT}/workspace_hori_secure -n ${n} -t ${n} +nvflare simulator jobs/xgb_hori_secure \ + -w ${WORKSPACE_ROOT}/workspace_hori_secure/example_project/prod_00/site-1 -n ${n} -t ${n} echo "Training vertical" nvflare simulator jobs/xgb_vert -w ${WORKSPACE_ROOT}/workspace_vert -n ${n} -t ${n} echo "Training secure vertical" -nvflare simulator jobs/xgb_vert_secure -w ${WORKSPACE_ROOT}/workspace_vert_secure -n ${n} -t ${n} \ No newline at end of file +nvflare simulator jobs/xgb_vert_secure -w ${WORKSPACE_ROOT}/workspace_vert_secure -n ${n} -t ${n} diff --git a/job_templates/xgboost/config_fed_client.conf b/job_templates/xgboost/config_fed_client.conf new file mode 100644 index 0000000000..1651d0a079 --- /dev/null +++ b/job_templates/xgboost/config_fed_client.conf @@ -0,0 +1,46 @@ +format_version = 2 +executors = [ + { + tasks = [ + "config", + "start" + ] + executor { + id = "Executor" + path = 
"nvflare.app_opt.xgboost.histogram_based_v2.fed_executor.FedXGBHistogramExecutor" + args { + data_loader_id = dataloader + in_process = true + } + } + } +] +task_result_filters = [] +task_data_filters = [] +components = [ + { + id = "dataloader" + path = "nvflare.app_opt.xgboost.histogram_based_v2.csv_data_loader.CSVDataLoader" + args { + # please change to your own directory + folder = "/tmp/nvflare/xgb_dataset/horizontal_xgb_data" + } + }, + { + id = "metrics_writer" + path = "nvflare.app_opt.tracking.tb.tb_writer.TBWriter" + args { + event_type = "analytix_log_stats" + } + }, + { + id = "event_to_fed" + path = "nvflare.app_common.widgets.convert_to_fed_event.ConvertToFedEvent" + args { + events_to_convert = [ + "analytix_log_stats" + ] + fed_event_prefix = "fed." + } + } +] diff --git a/job_templates/xgboost/config_fed_server.conf b/job_templates/xgboost/config_fed_server.conf new file mode 100644 index 0000000000..ff81c74fcf --- /dev/null +++ b/job_templates/xgboost/config_fed_server.conf @@ -0,0 +1,33 @@ +format_version = 2 +num_rounds = 3 +task_result_filters = [] +task_data_filters = [] +components = [] +workflows = [ + { + id = "xgb_controller" + path = "nvflare.app_opt.xgboost.histogram_based_v2.fed_controller.XGBFedController" + args { + num_rounds = "{num_rounds}" + split_mode = 0 + secure_training = false + xgb_options { + early_stopping_rounds = 3 + } + xgb_params { + max_depth = 3 + eta = 0.1 + objective = "binary:logistic" + eval_metric = "auc" + tree_method = "hist" + nthread = 1 + } + client_ranks { + site-1 = 0 + site-2 = 1 + site-3 = 2 + } + in_process = true + } + } +] diff --git a/job_templates/xgboost/meta.conf b/job_templates/xgboost/meta.conf new file mode 100644 index 0000000000..9b824a14dc --- /dev/null +++ b/job_templates/xgboost/meta.conf @@ -0,0 +1,8 @@ +name = "xgboost" +resource_spec = {} +min_clients = 2 +deploy_map { + app = [ + "@ALL" + ] +} diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py b/nvflare/app_opt/xgboost/histogram_based_v2/csv_data_loader.py similarity index 63% rename from nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py rename to nvflare/app_opt/xgboost/histogram_based_v2/csv_data_loader.py index f5514e950f..cf10d68645 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/csv_data_loader.py @@ -15,31 +15,30 @@ import xgboost as xgb from nvflare.app_opt.xgboost.data_loader import XGBDataLoader -from nvflare.app_opt.xgboost.histogram_based_v2.defs import SplitMode -class SecureDataLoader(XGBDataLoader): - def __init__(self, rank: int, folder: str): - """Reads CSV dataset and return XGB data matrix in vertical secure mode. +class CSVDataLoader(XGBDataLoader): + def __init__(self, folder: str): + """Reads CSV dataset and return XGB data matrix. + + Note: if split mode is vertical, we assume the label owner is rank 0. 
Args: - rank: Rank of the site folder: Folder to find the CSV files """ - self.rank = rank self.folder = folder - def load_data(self, client_id: str, split_mode: int): + def load_data(self): - train_path = f"{self.folder}/{client_id}/train.csv" - valid_path = f"{self.folder}/{client_id}/valid.csv" + train_path = f"{self.folder}/{self.client_id}/train.csv" + valid_path = f"{self.folder}/{self.client_id}/valid.csv" - if self.rank == 0 or split_mode == SplitMode.ROW: + if self.rank == 0 or self.data_split_mode == xgb.core.DataSplitMode.ROW: label = "&label_column=0" else: label = "" - train_data = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=split_mode) - valid_data = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=split_mode) + train_data = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=self.data_split_mode) + valid_data = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=self.data_split_mode) return train_data, valid_data From 0a1b036abc23c5e6943a38a29bb730b09e43c77c Mon Sep 17 00:00:00 2001 From: Zhihong Zhang <100308595+nvidianz@users.noreply.github.com> Date: Mon, 19 Aug 2024 17:03:51 -0400 Subject: [PATCH 25/26] XGBoost user interface change and XGBoost version check (#2808) * Updated FOBS readme to add DatumManager, added agrpcs as secure scheme * Changed split_mode to data_split_mode and added version check * Fixed format errors --- .../base_v2/app/config/config_fed_server.json | 2 +- .../xgboost/utils/prepare_job_config.py | 4 +- .../adaptors/grpc_client_adaptor.py | 2 +- .../adaptors/xgb_adaptor.py | 18 +++++--- .../xgboost/histogram_based_v2/controller.py | 18 +++++--- .../xgboost/histogram_based_v2/defs.py | 14 +++---- .../histogram_based_v2/fed_controller.py | 6 ++- .../mock/mock_controller.py | 3 +- .../runners/xgb_client_runner.py | 7 +++- .../histogram_based_v2/sec/client_handler.py | 42 +++++++++++++++++-- .../histogram_based_v2/sec/server_handler.py | 4 +- .../adaptors/xgb_adaptor_test.py | 3 +- 12 files changed, 89 insertions(+), 34 deletions(-) diff --git a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_server.json b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_server.json index d8c19cddfa..d0dd1e3908 100755 --- a/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_server.json +++ b/examples/advanced/xgboost/histogram-based/jobs/base_v2/app/config/config_fed_server.json @@ -18,7 +18,7 @@ "path": "nvflare.app_opt.xgboost.histogram_based_v2.fed_controller.XGBFedController", "args": { "num_rounds": "{num_rounds}", - "split_mode": 0, + "data_split_mode": 0, "secure_training": false, "xgb_params": { "max_depth": 8, diff --git a/examples/advanced/xgboost/utils/prepare_job_config.py b/examples/advanced/xgboost/utils/prepare_job_config.py index 1f267a8754..c7339391ab 100644 --- a/examples/advanced/xgboost/utils/prepare_job_config.py +++ b/examples/advanced/xgboost/utils/prepare_job_config.py @@ -52,7 +52,7 @@ def job_config_args_parser(): parser.add_argument( "--tree_method", type=str, default="hist", help="tree_method for xgboost - use hist for best perf" ) - parser.add_argument("--split_mode", type=int, default=0, help="dataset split mode, 0 or 1") + parser.add_argument("--data_split_mode", type=int, default=0, help="dataset split mode, 0 or 1") parser.add_argument("--secure_training", type=bool, default=False, help="histogram_v2 secure training or not") return parser @@ -153,7 +153,7 @@ def _update_server_config(config: dict, 
args): config["num_rounds"] = args.round_num config["workflows"][0]["args"]["xgb_params"]["nthread"] = args.nthread config["workflows"][0]["args"]["xgb_params"]["tree_method"] = args.tree_method - config["workflows"][0]["args"]["split_mode"] = args.split_mode + config["workflows"][0]["args"]["data_split_mode"] = args.data_split_mode config["workflows"][0]["args"]["secure_training"] = args.secure_training diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py index c4819fea1b..c5fdc94ee2 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py @@ -98,7 +98,7 @@ class since the self object contains a sender that contains a Core Cell which ca Constant.RUNNER_CTX_SERVER_ADDR: server_addr, Constant.RUNNER_CTX_RANK: self.rank, Constant.RUNNER_CTX_NUM_ROUNDS: self.num_rounds, - Constant.RUNNER_CTX_SPLIT_MODE: self.split_mode, + Constant.RUNNER_CTX_DATA_SPLIT_MODE: self.data_split_mode, Constant.RUNNER_CTX_SECURE_TRAINING: self.secure_training, Constant.RUNNER_CTX_XGB_PARAMS: self.xgb_params, Constant.RUNNER_CTX_XGB_OPTIONS: self.xgb_options, diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py index c77827c472..ba9a8c0899 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py @@ -150,10 +150,11 @@ def __init__(self, in_process: bool, per_msg_timeout: float, tx_timeout: float): self.stopped = False self.rank = None self.num_rounds = None - self.split_mode = None + self.data_split_mode = None self.secure_training = None self.xgb_params = None self.xgb_options = None + self.disable_version_check = None self.world_size = None self.per_msg_timeout = per_msg_timeout self.tx_timeout = tx_timeout @@ -197,10 +198,10 @@ def configure(self, config: dict, fl_ctx: FLContext): check_positive_int(Constant.CONF_KEY_NUM_ROUNDS, num_rounds) self.num_rounds = num_rounds - self.split_mode = config.get(Constant.CONF_KEY_SPLIT_MODE) - if self.split_mode is None: - raise RuntimeError("split_mode is not configured") - fl_ctx.set_prop(key=Constant.PARAM_KEY_SPLIT_MODE, value=self.split_mode, private=True, sticky=True) + self.data_split_mode = config.get(Constant.CONF_KEY_DATA_SPLIT_MODE) + if self.data_split_mode is None: + raise RuntimeError("data_split_mode is not configured") + fl_ctx.set_prop(key=Constant.PARAM_KEY_DATA_SPLIT_MODE, value=self.data_split_mode, private=True, sticky=True) self.secure_training = config.get(Constant.CONF_KEY_SECURE_TRAINING) if self.secure_training is None: @@ -213,6 +214,13 @@ def configure(self, config: dict, fl_ctx: FLContext): self.xgb_options = config.get(Constant.CONF_KEY_XGB_OPTIONS, {}) + self.disable_version_check = config.get(Constant.CONF_KEY_DISABLE_VERSION_CHECK) + if self.disable_version_check is None: + raise RuntimeError("disable_version_check is not configured") + fl_ctx.set_prop( + key=Constant.PARAM_KEY_DISABLE_VERSION_CHECK, value=self.disable_version_check, private=True, sticky=True + ) + def _send_request(self, op: str, req: Shareable) -> Tuple[bytes, Shareable]: """Send XGB operation request to the FL server via FLARE message. 
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py index 048c1573cf..61207b5654 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py @@ -15,6 +15,8 @@ import time from typing import Optional +import xgboost + from nvflare.apis.client import Client from nvflare.apis.controller_spec import ClientTask, Task from nvflare.apis.fl_constant import FLContextKey @@ -59,10 +61,11 @@ def __init__( self, adaptor_component_id: str, num_rounds: int, - split_mode: int, + data_split_mode: int, secure_training: bool, xgb_params: dict, xgb_options: Optional[dict] = None, + disable_version_check=False, configure_task_name=Constant.CONFIG_TASK_NAME, configure_task_timeout=Constant.CONFIG_TASK_TIMEOUT, start_task_name=Constant.START_TASK_NAME, @@ -81,10 +84,11 @@ def __init__( Args: adaptor_component_id - the component ID of server target adaptor num_rounds - number of rounds - split_mode - 0 for horizontal/row-split, 1 for vertical/column-split + data_split_mode - 0 for horizontal/row-split, 1 for vertical/column-split secure_training - If true, secure training is enabled xgb_params - The params argument for train method xgb_options - All other arguments for train method are passed through this dictionary + disable_version_check - If true, XGBoost version check for secure training is skipped configure_task_name - name of the config task configure_task_timeout - time to wait for clients’ responses to the config task before timeout. start_task_name - name of the start task @@ -102,10 +106,11 @@ def __init__( Controller.__init__(self) self.adaptor_component_id = adaptor_component_id self.num_rounds = num_rounds - self.split_mode = split_mode + self.data_split_mode = data_split_mode self.secure_training = secure_training self.xgb_params = xgb_params self.xgb_options = xgb_options + self.disable_version_check = disable_version_check self.configure_task_name = configure_task_name self.start_task_name = start_task_name self.start_task_timeout = start_task_timeout @@ -121,8 +126,8 @@ def __init__( self.client_statuses = {} # client name => ClientStatus self.abort_signal = None - if split_mode not in {0, 1}: - raise ValueError("split_mode must be either 0 or 1") + if data_split_mode not in {0, 1}: + raise ValueError(f"Invalid data_split_mode: {data_split_mode}. 
It must be either 0 or 1") if not self.xgb_params: raise ValueError("xgb_params can't be empty") @@ -463,10 +468,11 @@ def _configure_clients(self, abort_signal: Signal, fl_ctx: FLContext): shareable[Constant.CONF_KEY_CLIENT_RANKS] = self.client_ranks shareable[Constant.CONF_KEY_NUM_ROUNDS] = self.num_rounds - shareable[Constant.CONF_KEY_SPLIT_MODE] = self.split_mode + shareable[Constant.CONF_KEY_DATA_SPLIT_MODE] = xgboost.core.DataSplitMode(self.data_split_mode) shareable[Constant.CONF_KEY_SECURE_TRAINING] = self.secure_training shareable[Constant.CONF_KEY_XGB_PARAMS] = self.xgb_params shareable[Constant.CONF_KEY_XGB_OPTIONS] = self.xgb_options + shareable[Constant.CONF_KEY_DISABLE_VERSION_CHECK] = self.disable_version_check task = Task( name=self.configure_task_name, diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py index a08b40e28a..5fbf2b30e0 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py @@ -25,10 +25,11 @@ class Constant: CONF_KEY_CLIENT_RANKS = "client_ranks" CONF_KEY_WORLD_SIZE = "world_size" CONF_KEY_NUM_ROUNDS = "num_rounds" - CONF_KEY_SPLIT_MODE = "split_mode" + CONF_KEY_DATA_SPLIT_MODE = "data_split_mode" CONF_KEY_SECURE_TRAINING = "secure_training" CONF_KEY_XGB_PARAMS = "xgb_params" CONF_KEY_XGB_OPTIONS = "xgb_options" + CONF_KEY_DISABLE_VERSION_CHECK = "xgb_disable_version_check" # default component config values CONFIG_TASK_TIMEOUT = 60 @@ -87,18 +88,20 @@ class Constant: PARAM_KEY_REPLY = "xgb.reply" PARAM_KEY_REQUEST = "xgb.request" PARAM_KEY_EVENT = "xgb.event" - PARAM_KEY_SPLIT_MODE = "xgb.split_mode" + PARAM_KEY_DATA_SPLIT_MODE = "xgb.data_split_mode" PARAM_KEY_SECURE_TRAINING = "xgb.secure_training" PARAM_KEY_CONFIG_ERROR = "xgb.config_error" + PARAM_KEY_DISABLE_VERSION_CHECK = "xgb.disable_version_check" RUNNER_CTX_SERVER_ADDR = "server_addr" RUNNER_CTX_PORT = "port" RUNNER_CTX_CLIENT_NAME = "client_name" RUNNER_CTX_NUM_ROUNDS = "num_rounds" - RUNNER_CTX_SPLIT_MODE = "split_mode" + RUNNER_CTX_DATA_SPLIT_MODE = "data_split_mode" RUNNER_CTX_SECURE_TRAINING = "secure_training" RUNNER_CTX_XGB_PARAMS = "xgb_params" RUNNER_CTX_XGB_OPTIONS = "xgb_options" + RUNNER_CTX_XGB_DISABLE_VERSION_CHECK = "xgb_disable_version_check" RUNNER_CTX_WORLD_SIZE = "world_size" RUNNER_CTX_RANK = "rank" RUNNER_CTX_MODEL_DIR = "model_dir" @@ -124,8 +127,3 @@ class Constant: ("grpc.max_send_message_length", MAX_FRAME_SIZE), ("grpc.max_receive_message_length", MAX_FRAME_SIZE), ] - - -class SplitMode: - ROW = 0 - COL = 1 diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py index 2d6a8cf875..103bb3841a 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py @@ -27,10 +27,11 @@ class XGBFedController(XGBController): def __init__( self, num_rounds: int, - split_mode: int, + data_split_mode: int, secure_training: bool, xgb_params: dict, xgb_options: Optional[dict] = None, + disable_version_check=False, configure_task_name=Constant.CONFIG_TASK_NAME, configure_task_timeout=Constant.CONFIG_TASK_TIMEOUT, start_task_name=Constant.START_TASK_NAME, @@ -45,10 +46,11 @@ def __init__( self, adaptor_component_id="", num_rounds=num_rounds, - split_mode=split_mode, + data_split_mode=data_split_mode, secure_training=secure_training, xgb_params=xgb_params, xgb_options=xgb_options, + 
disable_version_check=disable_version_check, configure_task_name=configure_task_name, configure_task_timeout=configure_task_timeout, start_task_name=start_task_name, diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/mock/mock_controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/mock/mock_controller.py index ea81a4a1ee..807f020f71 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/mock/mock_controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/mock/mock_controller.py @@ -37,9 +37,10 @@ def __init__( ): XGBController.__init__( self, - split_mode=0, + data_split_mode=0, secure_training=False, xgb_params={"max_depth": 3}, + disable_version_check=False, adaptor_component_id="", num_rounds=num_rounds, configure_task_name=configure_task_name, diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py index d66a116902..9053bf144c 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py @@ -144,7 +144,7 @@ def run(self, ctx: dict): self._rank = ctx[Constant.RUNNER_CTX_RANK] self._world_size = ctx[Constant.RUNNER_CTX_WORLD_SIZE] self._num_rounds = ctx[Constant.RUNNER_CTX_NUM_ROUNDS] - self._data_split_mode = ctx.get(Constant.RUNNER_CTX_SPLIT_MODE, 0) + self._data_split_mode = ctx.get(Constant.RUNNER_CTX_DATA_SPLIT_MODE, 0) self._secure_training = ctx.get(Constant.RUNNER_CTX_SECURE_TRAINING, False) self._xgb_params = ctx[Constant.RUNNER_CTX_XGB_PARAMS] self._xgb_options = ctx.get(Constant.RUNNER_CTX_XGB_OPTIONS, {}) @@ -203,6 +203,11 @@ def run(self, ctx: dict): communicator_env[PLUGIN_PARAM_KEY] = xgb_plugin_params + self.logger.info( + f"XGBoost secure training with plugin name: {xgb_plugin_params.get(PLUGIN_KEY_NAME)} " + f"path: {xgb_plugin_params.get(PLUGIN_KEY_PATH)}" + ) + self._data_loader.initialize( client_id=self._client_name, rank=self._rank, data_split_mode=self._data_split_mode ) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py index 56396f434a..4384a29dc7 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py @@ -14,13 +14,16 @@ import os import time +import xgboost +from packaging import version + from nvflare.apis.event_type import EventType from nvflare.apis.fl_component import FLComponent from nvflare.apis.fl_constant import FLContextKey from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable from nvflare.app_opt.xgboost.histogram_based_v2.aggr import Aggregator -from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant, SplitMode +from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant from nvflare.app_opt.xgboost.histogram_based_v2.sec.dam import DamDecoder from nvflare.app_opt.xgboost.histogram_based_v2.sec.data_converter import FeatureAggregationResult from nvflare.app_opt.xgboost.histogram_based_v2.sec.partial_he.adder import Adder @@ -55,6 +58,8 @@ tenseal_imported = False tenseal_error = f"Import error: {ex}" +XGBOOST_MIN_VERSION = "2.2.0-dev" + class ClientSecurityHandler(SecurityHandler): def __init__(self, key_length=1024, num_workers=10, tenseal_context_file="client_context.tenseal"): @@ -404,18 +409,47 @@ def _process_after_all_gather_v_horizontal(self, fl_ctx: FLContext): result += zero_buf 
fl_ctx.set_prop(key=Constant.PARAM_KEY_RCV_BUF, value=result, private=True, sticky=False) + def _check_xgboost_version(self, disable_version_check: bool) -> bool: + """Check XGBoost version. Returns true if it supports secure training""" + if disable_version_check: + self.logger.info("XGBoost version check is disabled") + return True + + try: + min_version = version.parse(XGBOOST_MIN_VERSION) + current_version = version.parse(xgboost.__version__) + if current_version < min_version: + self.logger.error(f"XGBoost version {xgboost.__version__} doesn't support secure training") + return False + else: + return True + except Exception as error: + self.logger.error(f"Unknown XGBoost version {xgboost.__version__}. Error: {error}") + return False + def handle_event(self, event_type: str, fl_ctx: FLContext): global tenseal_error if event_type == Constant.EVENT_XGB_JOB_CONFIGURED: task_data = fl_ctx.get_prop(FLContextKey.TASK_DATA) - split_mode = task_data.get(Constant.CONF_KEY_SPLIT_MODE) + data_split_mode = task_data.get(Constant.CONF_KEY_DATA_SPLIT_MODE) secure_training = task_data.get(Constant.CONF_KEY_SECURE_TRAINING) - if secure_training and split_mode == SplitMode.COL and ipcl_imported: + disable_version_check = task_data.get(Constant.CONF_KEY_DISABLE_VERSION_CHECK) + + if secure_training and not self._check_xgboost_version(disable_version_check): + fl_ctx.set_prop( + Constant.PARAM_KEY_CONFIG_ERROR, + f"XGBoost version {xgboost.__version__} doesn't support secure training", + private=True, + sticky=False, + ) + return + + if secure_training and data_split_mode == xgboost.core.DataSplitMode.COL and ipcl_imported: self.public_key, self.private_key = generate_keys(self.key_length) self.encryptor = Encryptor(self.public_key, self.num_workers) self.decrypter = Decrypter(self.private_key, self.num_workers) self.adder = Adder(self.num_workers) - elif secure_training and split_mode == SplitMode.ROW: + elif secure_training and data_split_mode == xgboost.core.DataSplitMode.ROW: if not tenseal_imported: fl_ctx.set_prop(Constant.PARAM_KEY_CONFIG_ERROR, tenseal_error, private=True, sticky=False) return diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py index 47e44d17d6..edf0687d4e 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py @@ -103,8 +103,8 @@ def _process_before_all_gather_v(self, fl_ctx: FLContext): return horizontal = request.get_header(Constant.HEADER_KEY_HORIZONTAL) - split_mode = "horizontal" if horizontal else "vertical" - self.info(fl_ctx, f"start - {split_mode}") + training_mode = "horizontal" if horizontal else "vertical" + self.info(fl_ctx, f"start - {training_mode}") fl_ctx.set_prop(key=Constant.HEADER_KEY_IN_AGGR, value=True, private=True, sticky=False) fl_ctx.set_prop(key=Constant.HEADER_KEY_HORIZONTAL, value=horizontal, private=True, sticky=False) diff --git a/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/xgb_adaptor_test.py b/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/xgb_adaptor_test.py index 6de75c5052..b21bb6be12 100644 --- a/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/xgb_adaptor_test.py +++ b/tests/unit_test/app_opt/xgboost/histrogram_based_v2/adaptors/xgb_adaptor_test.py @@ -37,9 +37,10 @@ def test_configure(self): config = { Constant.CONF_KEY_CLIENT_RANKS: {"site-test": 1}, Constant.CONF_KEY_NUM_ROUNDS: 100, - 
Constant.CONF_KEY_SPLIT_MODE: 0, + Constant.CONF_KEY_DATA_SPLIT_MODE: 0, Constant.CONF_KEY_SECURE_TRAINING: False, Constant.CONF_KEY_XGB_PARAMS: {"depth": 1}, + Constant.CONF_KEY_DISABLE_VERSION_CHECK: False, } ctx = FLContext() ctx.set_prop(ReservedKey.IDENTITY_NAME, "site-test") From 1ddff916952cecf6c919c7c127e79f2f787c4580 Mon Sep 17 00:00:00 2001 From: Zhihong Zhang <100308595+nvidianz@users.noreply.github.com> Date: Tue, 20 Aug 2024 12:33:02 -0400 Subject: [PATCH 26/26] Added lock in ReliableMessage (#2811) --- nvflare/apis/utils/reliable_message.py | 102 +++++++++--------- .../adaptors/grpc_server_adaptor.py | 1 + 2 files changed, 54 insertions(+), 49 deletions(-) diff --git a/nvflare/apis/utils/reliable_message.py b/nvflare/apis/utils/reliable_message.py index 4e3d40d570..2551a28cb9 100644 --- a/nvflare/apis/utils/reliable_message.py +++ b/nvflare/apis/utils/reliable_message.py @@ -96,63 +96,67 @@ def __init__(self, topic, request_handler_f, executor, per_msg_timeout, tx_timeo self.tx_id = None self.reply_time = None self.replying = False + self.lock = threading.Lock() def process(self, request: Shareable, fl_ctx: FLContext) -> Shareable: if not ReliableMessage.is_available(): return make_reply(ReturnCode.SERVICE_UNAVAILABLE) - self.tx_id = request.get_header(HEADER_TX_ID) - op = request.get_header(HEADER_OP) - peer_ctx = fl_ctx.get_peer_context() - assert isinstance(peer_ctx, FLContext) - self.source = peer_ctx.get_identity_name() - if op == OP_REQUEST: - # it is possible that a new request for the same tx is received while we are processing the previous one - if not self.rcv_time: - self.rcv_time = time.time() - self.per_msg_timeout = request.get_header(HEADER_PER_MSG_TIMEOUT) - self.tx_timeout = request.get_header(HEADER_TX_TIMEOUT) - - # start processing - ReliableMessage.info(fl_ctx, f"started processing request of topic {self.topic}") - try: - self.executor.submit(self._do_request, request, fl_ctx) - return _status_reply(STATUS_IN_PROCESS) # ack - except Exception as ex: - # it is possible that the RM is already closed (self.executor is shut down) - ReliableMessage.error(fl_ctx, f"failed to submit request: {secure_format_exception(ex)}") - return make_reply(ReturnCode.SERVICE_UNAVAILABLE) - elif self.result: - # we already finished processing - send the result back - ReliableMessage.info(fl_ctx, "resend result back to requester") - return self.result - else: - # we are still processing - ReliableMessage.info(fl_ctx, "got request - the request is being processed") - return _status_reply(STATUS_IN_PROCESS) - elif op == OP_QUERY: - if self.result: - if self.reply_time: - # result already sent back successfully - ReliableMessage.info(fl_ctx, "got query: we already replied successfully") - return _status_reply(STATUS_REPLIED) - elif self.replying: - # result is being sent - ReliableMessage.info(fl_ctx, "got query: reply is being sent") - return _status_reply(STATUS_IN_REPLY) - else: - # try to send the result again - ReliableMessage.info(fl_ctx, "got query: sending reply again") + with self.lock: + self.tx_id = request.get_header(HEADER_TX_ID) + op = request.get_header(HEADER_OP) + peer_ctx = fl_ctx.get_peer_context() + assert isinstance(peer_ctx, FLContext) + self.source = peer_ctx.get_identity_name() + if op == OP_REQUEST: + # it is possible that a new request for the same tx is received while we are processing the previous one + if not self.rcv_time: + self.rcv_time = time.time() + self.per_msg_timeout = request.get_header(HEADER_PER_MSG_TIMEOUT) + self.tx_timeout = 
request.get_header(HEADER_TX_TIMEOUT) + + # start processing + ReliableMessage.info(fl_ctx, f"started processing request of topic {self.topic}") + try: + self.executor.submit(self._do_request, request, fl_ctx) + return _status_reply(STATUS_IN_PROCESS) # ack + except Exception as ex: + # it is possible that the RM is already closed (self.executor is shut down) + ReliableMessage.error(fl_ctx, f"failed to submit request: {secure_format_exception(ex)}") + return make_reply(ReturnCode.SERVICE_UNAVAILABLE) + elif self.result: + # we already finished processing - send the result back + ReliableMessage.info(fl_ctx, "resend result back to requester") return self.result - else: - # still in process - if time.time() - self.rcv_time > self.tx_timeout: - # the process is taking too much time - ReliableMessage.error(fl_ctx, f"aborting processing since exceeded max tx time {self.tx_timeout}") - return _status_reply(STATUS_ABORTED) else: - ReliableMessage.debug(fl_ctx, "got query: request is in-process") + # we are still processing + ReliableMessage.info(fl_ctx, "got request - the request is being processed") return _status_reply(STATUS_IN_PROCESS) + elif op == OP_QUERY: + if self.result: + if self.reply_time: + # result already sent back successfully + ReliableMessage.info(fl_ctx, "got query: we already replied successfully") + return _status_reply(STATUS_REPLIED) + elif self.replying: + # result is being sent + ReliableMessage.info(fl_ctx, "got query: reply is being sent") + return _status_reply(STATUS_IN_REPLY) + else: + # try to send the result again + ReliableMessage.info(fl_ctx, "got query: sending reply again") + return self.result + else: + # still in process + if time.time() - self.rcv_time > self.tx_timeout: + # the process is taking too much time + ReliableMessage.error( + fl_ctx, f"aborting processing since exceeded max tx time {self.tx_timeout}" + ) + return _status_reply(STATUS_ABORTED) + else: + ReliableMessage.debug(fl_ctx, "got query: request is in-process") + return _status_reply(STATUS_IN_PROCESS) def _try_reply(self, fl_ctx: FLContext): engine = fl_ctx.get_engine() diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_server_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_server_adaptor.py index 2fb4b6229e..036a22f242 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_server_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_server_adaptor.py @@ -146,6 +146,7 @@ def all_reduce( raise RuntimeError(f"bad result from XGB server: expect AllreduceReply but got {type(result)}") def broadcast(self, rank: int, seq: int, root: int, send_buf: bytes, fl_ctx: FLContext) -> bytes: + self.logger.debug(f"Sending broadcast: {rank=} {seq=} {root=} {len(send_buf)=}") result = self.internal_xgb_client.send_broadcast(seq_num=seq, rank=rank, data=send_buf, root=root) if isinstance(result, pb2.BroadcastReply): return result.receive_buffer
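
As a usage note for the XGBoost interface change in the previous commit (#2808), here is a minimal sketch of constructing the server workflow with the renamed data_split_mode argument and the new disable_version_check flag. It is illustrative only and not part of the patch; the parameter values are placeholders that mirror the base_v2 example job config.

    from nvflare.app_opt.xgboost.histogram_based_v2.fed_controller import XGBFedController

    controller = XGBFedController(
        num_rounds=100,
        data_split_mode=0,            # 0 = horizontal/row split, 1 = vertical/column split
        secure_training=False,
        xgb_params={"max_depth": 8, "eta": 0.1, "tree_method": "hist"},
        xgb_options={"early_stopping_rounds": 2},
        disable_version_check=False,  # True skips the secure-training check against XGBOOST_MIN_VERSION ("2.2.0-dev")
    )

With secure_training enabled and the version check left on, a client whose installed xgboost is older than 2.2.0-dev sets PARAM_KEY_CONFIG_ERROR during EVENT_XGB_JOB_CONFIGURED instead of setting up encryption.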