forked from hiyouga/ChatGLM-Efficient-Tuning
-
Notifications
You must be signed in to change notification settings - Fork 0
/
export_model.py
26 lines (16 loc) · 905 Bytes
/
export_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
# coding=utf-8
# Exports the fine-tuned ChatGLM-6B model.
# Usage: python export_model.py --checkpoint_dir path_to_checkpoint --output_dir path_to_save_model
from transformers import HfArgumentParser, TrainingArguments
from transformers.utils.versions import require_version
from utils import ModelArguments, load_pretrained
def main():
    """Load a fine-tuned ChatGLM-6B model and save it (with its tokenizer) to output_dir."""
    # Pinned version: the comment in the original notes newer releases may break loading.
    require_version("transformers==4.27.4", "To fix: pip install transformers==4.27.4") # higher version may cause problems

    # Parse CLI flags into the two dataclasses (e.g. --checkpoint_dir, --output_dir).
    arg_parser = HfArgumentParser((ModelArguments, TrainingArguments))
    model_args, training_args = arg_parser.parse_args_into_dataclasses()

    # load_pretrained (project helper) returns the merged model plus its tokenizer.
    model, tokenizer = load_pretrained(model_args)

    save_dir = training_args.output_dir
    model.save_pretrained(save_dir, max_shard_size="1GB")
    tokenizer.save_pretrained(save_dir)
    print("model and tokenizer have been saved at:", save_dir)
# Script entry point: run the export only when executed directly, not on import.
if __name__ == "__main__":
    main()