fix eval strategy compat #1143

Merged: 3 commits merged on Jun 16, 2024
Changes from 1 commit
update
huangjintao committed Jun 15, 2024
commit ce1760ffc8ba1c82ddcd570c6f3e8b131986fb93
10 changes: 5 additions & 5 deletions swift/llm/dpo.py
@@ -10,8 +10,8 @@
 from transformers.utils import is_torch_npu_available
 
 from swift.trainers.dpo_trainers import DPOTrainer
-from swift.utils import (check_json_format, get_dist_setting, get_logger, get_main, get_model_info, is_ddp_plus_mp,
-                         is_dist, is_master, plot_images, seed_everything, show_layers)
+from swift.utils import (append_to_jsonl, check_json_format, get_dist_setting, get_logger, get_main, get_model_info,
+                         is_ddp_plus_mp, is_dist, is_local_master, is_master, plot_images, seed_everything, show_layers)
 from .tuner import prepare_model
 from .utils import (DPOArguments, Template, get_dataset, get_model_tokenizer, get_template, get_time_info,
                     set_generation_config)
@@ -228,9 +228,9 @@ def llm_dpo(args: DPOArguments) -> str:
         'model_info': model_info,
         'dataset_info': trainer.dataset_info,
     }
-    jsonl_path = os.path.join(args.output_dir, 'logging.jsonl')
-    with open(jsonl_path, 'a', encoding='utf-8') as f:
-        f.write(json.dumps(run_info) + '\n')
+    if is_local_master():
+        jsonl_path = os.path.join(args.output_dir, 'logging.jsonl')
+        append_to_jsonl(jsonl_path, run_info)
     return run_info


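The dpo.py change (mirrored in orpo.py and simpo.py below) swaps the inline open/json.dumps append for the shared append_to_jsonl helper imported from swift.utils, and wraps the write in is_local_master() so only the local rank-0 process appends run_info to logging.jsonl. The helper's implementation is not part of this diff; the following is a minimal, hypothetical sketch of what it presumably does (one JSON object per line), and the real swift.utils helper may differ in details such as error handling or file locking:

# Hypothetical sketch of an append_to_jsonl helper like the one imported
# from swift.utils in this diff; not the project's actual implementation.
import json
import os
from typing import Any


def append_to_jsonl(jsonl_path: str, obj: Any) -> None:
    # Ensure the output directory exists, then append one JSON line.
    os.makedirs(os.path.dirname(jsonl_path) or '.', exist_ok=True)
    with open(jsonl_path, 'a', encoding='utf-8') as f:
        f.write(json.dumps(obj, ensure_ascii=False) + '\n')

Guarding the call with is_local_master() rather than writing from every rank avoids duplicate or interleaved lines when several processes share one node; the diff itself only shows the guard, not the rationale.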
10 changes: 5 additions & 5 deletions swift/llm/orpo.py
@@ -10,8 +10,8 @@
 from transformers.utils import is_torch_npu_available
 
 from swift.trainers.orpo_trainers import ORPOTrainer
-from swift.utils import (check_json_format, get_dist_setting, get_logger, get_main, get_model_info, is_ddp_plus_mp,
-                         is_dist, is_master, plot_images, seed_everything, show_layers)
+from swift.utils import (append_to_jsonl, check_json_format, get_dist_setting, get_logger, get_main, get_model_info,
+                         is_ddp_plus_mp, is_dist, is_local_master, is_master, plot_images, seed_everything, show_layers)
 from .tuner import prepare_model
 from .utils import (ORPOArguments, Template, get_dataset, get_model_tokenizer, get_template, get_time_info,
                     set_generation_config)
@@ -231,9 +231,9 @@ def llm_orpo(args: ORPOArguments) -> str:
         'model_info': model_info,
         'dataset_info': trainer.dataset_info,
     }
-    jsonl_path = os.path.join(args.output_dir, 'logging.jsonl')
-    with open(jsonl_path, 'a', encoding='utf-8') as f:
-        f.write(json.dumps(run_info) + '\n')
+    if is_local_master():
+        jsonl_path = os.path.join(args.output_dir, 'logging.jsonl')
+        append_to_jsonl(jsonl_path, run_info)
     return run_info


10 changes: 5 additions & 5 deletions swift/llm/simpo.py
@@ -10,8 +10,8 @@
 from transformers.utils import is_torch_npu_available
 
 from swift.trainers.simpo_trainers import SimPOTrainer
-from swift.utils import (check_json_format, get_dist_setting, get_logger, get_main, get_model_info, is_ddp_plus_mp,
-                         is_dist, is_master, plot_images, seed_everything, show_layers)
+from swift.utils import (append_to_jsonl, check_json_format, get_dist_setting, get_logger, get_main, get_model_info,
+                         is_ddp_plus_mp, is_dist, is_local_master, is_master, plot_images, seed_everything, show_layers)
 from .tuner import prepare_model
 from .utils import (SimPOArguments, Template, get_dataset, get_model_tokenizer, get_template, get_time_info,
                     set_generation_config)
@@ -216,9 +216,9 @@ def llm_simpo(args: SimPOArguments) -> str:
         'model_info': model_info,
         'dataset_info': trainer.dataset_info,
     }
-    jsonl_path = os.path.join(args.output_dir, 'logging.jsonl')
-    with open(jsonl_path, 'a', encoding='utf-8') as f:
-        f.write(json.dumps(run_info) + '\n')
+    if is_local_master():
+        jsonl_path = os.path.join(args.output_dir, 'logging.jsonl')
+        append_to_jsonl(jsonl_path, run_info)
     return run_info


7 changes: 3 additions & 4 deletions swift/trainers/callback.py
@@ -7,6 +7,7 @@
 from transformers.trainer_callback import (DefaultFlowCallback, ProgressCallback, TrainerCallback, TrainerControl,
                                            TrainerState)
 from transformers.trainer_utils import IntervalStrategy, has_length, speed_metrics
+from transformers.utils import append_to_jsonl
 
 from swift.utils import is_pai_training_job, use_torchacc
 from .arguments import TrainingArguments
@@ -55,8 +56,7 @@ def on_log(self, args: TrainingArguments, state: TrainerState, control, logs=None
                 logs[k] = round(logs[k], 8)
         if not is_pai_training_job() and state.is_local_process_zero:
             jsonl_path = os.path.join(args.output_dir, 'logging.jsonl')
-            with open(jsonl_path, 'a', encoding='utf-8') as f:
-                f.write(json.dumps(logs) + '\n')
+            append_to_jsonl(jsonl_path, logs)
         super().on_log(args, state, control, logs, **kwargs)
         if state.is_local_process_zero and self.training_bar is not None:
             self.training_bar.refresh()
@@ -85,8 +85,7 @@ def on_log(self, args, state, control, logs=None, **kwargs):
                 logs[k] = round(logs[k], 8)
         if not is_pai_training_job() and state.is_local_process_zero:
             jsonl_path = os.path.join(args.output_dir, 'logging.jsonl')
-            with open(jsonl_path, 'a', encoding='utf-8') as f:
-                f.write(json.dumps(logs) + '\n')
+            append_to_jsonl(jsonl_path, logs)
 
         _ = logs.pop('total_flos', None)
         if state.is_local_process_zero:
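In callback.py the same consolidation is applied inside the two on_log overrides, where the write was already gated on state.is_local_process_zero. Below is a minimal, self-contained sketch of that logging pattern as a standalone TrainerCallback; it reuses the hypothetical append_to_jsonl sketched above and is not the exact class from swift.trainers:

# Illustrative only: mirrors the guard-then-append pattern in this PR,
# assuming the append_to_jsonl sketch from earlier is importable.
import os

from transformers import TrainerCallback


class JsonlLoggingCallback(TrainerCallback):

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Round float metrics, then append them to logging.jsonl only on the
        # local main process so multi-GPU runs on one node do not write
        # duplicate or interleaved lines.
        logs = dict(logs or {})
        for k, v in logs.items():
            if isinstance(v, float):
                logs[k] = round(v, 8)
        if state.is_local_process_zero:
            jsonl_path = os.path.join(args.output_dir, 'logging.jsonl')
            append_to_jsonl(jsonl_path, logs)

Such a callback could be passed to a Trainer via callbacks=[JsonlLoggingCallback()]; here it only illustrates the flow the PR standardizes on.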